gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "gimple.h"
51 #include "dwarf2.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54 #include "params.h"
55 #include "cselib.h"
57 static rtx legitimize_dllimport_symbol (rtx, bool);
59 #ifndef CHECK_STACK_LIMIT
60 #define CHECK_STACK_LIMIT (-1)
61 #endif
63 /* Return index of given mode in mult and division cost tables. */
64 #define MODE_INDEX(mode) \
65 ((mode) == QImode ? 0 \
66 : (mode) == HImode ? 1 \
67 : (mode) == SImode ? 2 \
68 : (mode) == DImode ? 3 \
69 : 4)
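/* Illustrative sketch only: MODE_INDEX selects among the five per-mode
   entries (QI, HI, SI, DI, other) of the multiply and divide cost tables
   below.  The array parameter and function name here are hypothetical.  */
static inline int
example_mode_cost (const int cost_table[5], enum machine_mode mode)
{
  /* For SImode this returns cost_table[2], for DImode cost_table[3], etc.  */
  return cost_table[MODE_INDEX (mode)];
}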
71 /* Processor costs (relative to an add) */
72 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
73 #define COSTS_N_BYTES(N) ((N) * 2)
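/* Worked example of the two cost scales, assuming COSTS_N_INSNS (N) == (N) * 4
   as noted above: when tuning for speed an add costs COSTS_N_INSNS (1) == 4
   units, and when tuning for size the same 2-byte add costs
   COSTS_N_BYTES (2) == 4 units, so the baseline instruction lines up on both
   scales.  */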
75 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
77 const
78 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
79 COSTS_N_BYTES (2), /* cost of an add instruction */
80 COSTS_N_BYTES (3), /* cost of a lea instruction */
81 COSTS_N_BYTES (2), /* variable shift costs */
82 COSTS_N_BYTES (3), /* constant shift costs */
83 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
84 COSTS_N_BYTES (3), /* HI */
85 COSTS_N_BYTES (3), /* SI */
86 COSTS_N_BYTES (3), /* DI */
87 COSTS_N_BYTES (5)}, /* other */
88 0, /* cost of multiply per each bit set */
89 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
90 COSTS_N_BYTES (3), /* HI */
91 COSTS_N_BYTES (3), /* SI */
92 COSTS_N_BYTES (3), /* DI */
93 COSTS_N_BYTES (5)}, /* other */
94 COSTS_N_BYTES (3), /* cost of movsx */
95 COSTS_N_BYTES (3), /* cost of movzx */
96 0, /* "large" insn */
97 2, /* MOVE_RATIO */
98 2, /* cost for loading QImode using movzbl */
99 {2, 2, 2}, /* cost of loading integer registers
100 in QImode, HImode and SImode.
101 Relative to reg-reg move (2). */
102 {2, 2, 2}, /* cost of storing integer registers */
103 2, /* cost of reg,reg fld/fst */
104 {2, 2, 2}, /* cost of loading fp registers
105 in SFmode, DFmode and XFmode */
106 {2, 2, 2}, /* cost of storing fp registers
107 in SFmode, DFmode and XFmode */
108 3, /* cost of moving MMX register */
109 {3, 3}, /* cost of loading MMX registers
110 in SImode and DImode */
111 {3, 3}, /* cost of storing MMX registers
112 in SImode and DImode */
113 3, /* cost of moving SSE register */
114 {3, 3, 3}, /* cost of loading SSE registers
115 in SImode, DImode and TImode */
116 {3, 3, 3}, /* cost of storing SSE registers
117 in SImode, DImode and TImode */
118 3, /* MMX or SSE register to integer */
119 0, /* size of l1 cache */
120 0, /* size of l2 cache */
121 0, /* size of prefetch block */
122 0, /* number of parallel prefetches */
123 2, /* Branch cost */
124 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
125 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
126 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
127 COSTS_N_BYTES (2), /* cost of FABS instruction. */
128 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
129 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
130 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
131 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
132 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
133 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
134 1, /* scalar_stmt_cost. */
135 1, /* scalar load_cost. */
136 1, /* scalar_store_cost. */
137 1, /* vec_stmt_cost. */
138 1, /* vec_to_scalar_cost. */
139 1, /* scalar_to_vec_cost. */
140 1, /* vec_align_load_cost. */
141 1, /* vec_unalign_load_cost. */
142 1, /* vec_store_cost. */
143 1, /* cond_taken_branch_cost. */
144 1, /* cond_not_taken_branch_cost. */
145 };
147 /* Processor costs (relative to an add) */
148 static const
149 struct processor_costs i386_cost = { /* 386 specific costs */
150 COSTS_N_INSNS (1), /* cost of an add instruction */
151 COSTS_N_INSNS (1), /* cost of a lea instruction */
152 COSTS_N_INSNS (3), /* variable shift costs */
153 COSTS_N_INSNS (2), /* constant shift costs */
154 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
155 COSTS_N_INSNS (6), /* HI */
156 COSTS_N_INSNS (6), /* SI */
157 COSTS_N_INSNS (6), /* DI */
158 COSTS_N_INSNS (6)}, /* other */
159 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
160 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
161 COSTS_N_INSNS (23), /* HI */
162 COSTS_N_INSNS (23), /* SI */
163 COSTS_N_INSNS (23), /* DI */
164 COSTS_N_INSNS (23)}, /* other */
165 COSTS_N_INSNS (3), /* cost of movsx */
166 COSTS_N_INSNS (2), /* cost of movzx */
167 15, /* "large" insn */
168 3, /* MOVE_RATIO */
169 4, /* cost for loading QImode using movzbl */
170 {2, 4, 2}, /* cost of loading integer registers
171 in QImode, HImode and SImode.
172 Relative to reg-reg move (2). */
173 {2, 4, 2}, /* cost of storing integer registers */
174 2, /* cost of reg,reg fld/fst */
175 {8, 8, 8}, /* cost of loading fp registers
176 in SFmode, DFmode and XFmode */
177 {8, 8, 8}, /* cost of storing fp registers
178 in SFmode, DFmode and XFmode */
179 2, /* cost of moving MMX register */
180 {4, 8}, /* cost of loading MMX registers
181 in SImode and DImode */
182 {4, 8}, /* cost of storing MMX registers
183 in SImode and DImode */
184 2, /* cost of moving SSE register */
185 {4, 8, 16}, /* cost of loading SSE registers
186 in SImode, DImode and TImode */
187 {4, 8, 16}, /* cost of storing SSE registers
188 in SImode, DImode and TImode */
189 3, /* MMX or SSE register to integer */
190 0, /* size of l1 cache */
191 0, /* size of l2 cache */
192 0, /* size of prefetch block */
193 0, /* number of parallel prefetches */
194 1, /* Branch cost */
195 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
196 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
197 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
198 COSTS_N_INSNS (22), /* cost of FABS instruction. */
199 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
200 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
201 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
202 DUMMY_STRINGOP_ALGS},
203 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
204 DUMMY_STRINGOP_ALGS},
205 1, /* scalar_stmt_cost. */
206 1, /* scalar load_cost. */
207 1, /* scalar_store_cost. */
208 1, /* vec_stmt_cost. */
209 1, /* vec_to_scalar_cost. */
210 1, /* scalar_to_vec_cost. */
211 1, /* vec_align_load_cost. */
212 2, /* vec_unalign_load_cost. */
213 1, /* vec_store_cost. */
214 3, /* cond_taken_branch_cost. */
215 1, /* cond_not_taken_branch_cost. */
216 };
218 static const
219 struct processor_costs i486_cost = { /* 486 specific costs */
220 COSTS_N_INSNS (1), /* cost of an add instruction */
221 COSTS_N_INSNS (1), /* cost of a lea instruction */
222 COSTS_N_INSNS (3), /* variable shift costs */
223 COSTS_N_INSNS (2), /* constant shift costs */
224 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
225 COSTS_N_INSNS (12), /* HI */
226 COSTS_N_INSNS (12), /* SI */
227 COSTS_N_INSNS (12), /* DI */
228 COSTS_N_INSNS (12)}, /* other */
229 1, /* cost of multiply per each bit set */
230 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
231 COSTS_N_INSNS (40), /* HI */
232 COSTS_N_INSNS (40), /* SI */
233 COSTS_N_INSNS (40), /* DI */
234 COSTS_N_INSNS (40)}, /* other */
235 COSTS_N_INSNS (3), /* cost of movsx */
236 COSTS_N_INSNS (2), /* cost of movzx */
237 15, /* "large" insn */
238 3, /* MOVE_RATIO */
239 4, /* cost for loading QImode using movzbl */
240 {2, 4, 2}, /* cost of loading integer registers
241 in QImode, HImode and SImode.
242 Relative to reg-reg move (2). */
243 {2, 4, 2}, /* cost of storing integer registers */
244 2, /* cost of reg,reg fld/fst */
245 {8, 8, 8}, /* cost of loading fp registers
246 in SFmode, DFmode and XFmode */
247 {8, 8, 8}, /* cost of storing fp registers
248 in SFmode, DFmode and XFmode */
249 2, /* cost of moving MMX register */
250 {4, 8}, /* cost of loading MMX registers
251 in SImode and DImode */
252 {4, 8}, /* cost of storing MMX registers
253 in SImode and DImode */
254 2, /* cost of moving SSE register */
255 {4, 8, 16}, /* cost of loading SSE registers
256 in SImode, DImode and TImode */
257 {4, 8, 16}, /* cost of storing SSE registers
258 in SImode, DImode and TImode */
259 3, /* MMX or SSE register to integer */
260 4, /* size of l1 cache. 486 has 8kB cache
261 shared for code and data, so 4kB is
262 not really precise. */
263 4, /* size of l2 cache */
264 0, /* size of prefetch block */
265 0, /* number of parallel prefetches */
266 1, /* Branch cost */
267 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
268 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
269 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
270 COSTS_N_INSNS (3), /* cost of FABS instruction. */
271 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
272 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
273 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
274 DUMMY_STRINGOP_ALGS},
275 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
276 DUMMY_STRINGOP_ALGS},
277 1, /* scalar_stmt_cost. */
278 1, /* scalar load_cost. */
279 1, /* scalar_store_cost. */
280 1, /* vec_stmt_cost. */
281 1, /* vec_to_scalar_cost. */
282 1, /* scalar_to_vec_cost. */
283 1, /* vec_align_load_cost. */
284 2, /* vec_unalign_load_cost. */
285 1, /* vec_store_cost. */
286 3, /* cond_taken_branch_cost. */
287 1, /* cond_not_taken_branch_cost. */
288 };
290 static const
291 struct processor_costs pentium_cost = {
292 COSTS_N_INSNS (1), /* cost of an add instruction */
293 COSTS_N_INSNS (1), /* cost of a lea instruction */
294 COSTS_N_INSNS (4), /* variable shift costs */
295 COSTS_N_INSNS (1), /* constant shift costs */
296 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
297 COSTS_N_INSNS (11), /* HI */
298 COSTS_N_INSNS (11), /* SI */
299 COSTS_N_INSNS (11), /* DI */
300 COSTS_N_INSNS (11)}, /* other */
301 0, /* cost of multiply per each bit set */
302 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
303 COSTS_N_INSNS (25), /* HI */
304 COSTS_N_INSNS (25), /* SI */
305 COSTS_N_INSNS (25), /* DI */
306 COSTS_N_INSNS (25)}, /* other */
307 COSTS_N_INSNS (3), /* cost of movsx */
308 COSTS_N_INSNS (2), /* cost of movzx */
309 8, /* "large" insn */
310 6, /* MOVE_RATIO */
311 6, /* cost for loading QImode using movzbl */
312 {2, 4, 2}, /* cost of loading integer registers
313 in QImode, HImode and SImode.
314 Relative to reg-reg move (2). */
315 {2, 4, 2}, /* cost of storing integer registers */
316 2, /* cost of reg,reg fld/fst */
317 {2, 2, 6}, /* cost of loading fp registers
318 in SFmode, DFmode and XFmode */
319 {4, 4, 6}, /* cost of storing fp registers
320 in SFmode, DFmode and XFmode */
321 8, /* cost of moving MMX register */
322 {8, 8}, /* cost of loading MMX registers
323 in SImode and DImode */
324 {8, 8}, /* cost of storing MMX registers
325 in SImode and DImode */
326 2, /* cost of moving SSE register */
327 {4, 8, 16}, /* cost of loading SSE registers
328 in SImode, DImode and TImode */
329 {4, 8, 16}, /* cost of storing SSE registers
330 in SImode, DImode and TImode */
331 3, /* MMX or SSE register to integer */
332 8, /* size of l1 cache. */
333 8, /* size of l2 cache */
334 0, /* size of prefetch block */
335 0, /* number of parallel prefetches */
336 2, /* Branch cost */
337 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
338 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
339 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
340 COSTS_N_INSNS (1), /* cost of FABS instruction. */
341 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
342 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
343 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
344 DUMMY_STRINGOP_ALGS},
345 {{libcall, {{-1, rep_prefix_4_byte}}},
346 DUMMY_STRINGOP_ALGS},
347 1, /* scalar_stmt_cost. */
348 1, /* scalar load_cost. */
349 1, /* scalar_store_cost. */
350 1, /* vec_stmt_cost. */
351 1, /* vec_to_scalar_cost. */
352 1, /* scalar_to_vec_cost. */
353 1, /* vec_align_load_cost. */
354 2, /* vec_unalign_load_cost. */
355 1, /* vec_store_cost. */
356 3, /* cond_taken_branch_cost. */
357 1, /* cond_not_taken_branch_cost. */
358 };
360 static const
361 struct processor_costs pentiumpro_cost = {
362 COSTS_N_INSNS (1), /* cost of an add instruction */
363 COSTS_N_INSNS (1), /* cost of a lea instruction */
364 COSTS_N_INSNS (1), /* variable shift costs */
365 COSTS_N_INSNS (1), /* constant shift costs */
366 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
367 COSTS_N_INSNS (4), /* HI */
368 COSTS_N_INSNS (4), /* SI */
369 COSTS_N_INSNS (4), /* DI */
370 COSTS_N_INSNS (4)}, /* other */
371 0, /* cost of multiply per each bit set */
372 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
373 COSTS_N_INSNS (17), /* HI */
374 COSTS_N_INSNS (17), /* SI */
375 COSTS_N_INSNS (17), /* DI */
376 COSTS_N_INSNS (17)}, /* other */
377 COSTS_N_INSNS (1), /* cost of movsx */
378 COSTS_N_INSNS (1), /* cost of movzx */
379 8, /* "large" insn */
380 6, /* MOVE_RATIO */
381 2, /* cost for loading QImode using movzbl */
382 {4, 4, 4}, /* cost of loading integer registers
383 in QImode, HImode and SImode.
384 Relative to reg-reg move (2). */
385 {2, 2, 2}, /* cost of storing integer registers */
386 2, /* cost of reg,reg fld/fst */
387 {2, 2, 6}, /* cost of loading fp registers
388 in SFmode, DFmode and XFmode */
389 {4, 4, 6}, /* cost of storing fp registers
390 in SFmode, DFmode and XFmode */
391 2, /* cost of moving MMX register */
392 {2, 2}, /* cost of loading MMX registers
393 in SImode and DImode */
394 {2, 2}, /* cost of storing MMX registers
395 in SImode and DImode */
396 2, /* cost of moving SSE register */
397 {2, 2, 8}, /* cost of loading SSE registers
398 in SImode, DImode and TImode */
399 {2, 2, 8}, /* cost of storing SSE registers
400 in SImode, DImode and TImode */
401 3, /* MMX or SSE register to integer */
402 8, /* size of l1 cache. */
403 256, /* size of l2 cache */
404 32, /* size of prefetch block */
405 6, /* number of parallel prefetches */
406 2, /* Branch cost */
407 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
408 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
409 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
410 COSTS_N_INSNS (2), /* cost of FABS instruction. */
411 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
412 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
413 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we
414 ensure the alignment). For small blocks the inline loop is still a noticeable
415 win; for bigger blocks either rep movsl or rep movsb is the way to go. Rep movsb
416 apparently has a more expensive startup time in the CPU, but after 4K the
417 difference is down in the noise. See the illustrative sketch after this table. */
418 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
419 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
420 DUMMY_STRINGOP_ALGS},
421 {{rep_prefix_4_byte, {{1024, unrolled_loop},
422 {8192, rep_prefix_4_byte}, {-1, libcall}}},
423 DUMMY_STRINGOP_ALGS},
424 1, /* scalar_stmt_cost. */
425 1, /* scalar load_cost. */
426 1, /* scalar_store_cost. */
427 1, /* vec_stmt_cost. */
428 1, /* vec_to_scalar_cost. */
429 1, /* scalar_to_vec_cost. */
430 1, /* vec_align_load_cost. */
431 2, /* vec_unalign_load_cost. */
432 1, /* vec_store_cost. */
433 3, /* cond_taken_branch_cost. */
434 1, /* cond_not_taken_branch_cost. */
435 };
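/* The memcpy/memset strategy fields above are {max, alg} threshold tables:
   each pair covers block sizes up to MAX bytes, and a MAX of -1 means "no
   upper bound".  The sketch below only illustrates that reading, assuming
   this interpretation of the initializers; the struct and function names are
   hypothetical and this is not the compiler's own decision routine.  */

struct example_stringop_entry
{
  int max;				/* largest block size handled; -1 = unbounded */
  int alg;				/* e.g. loop, rep_prefix_4_byte, libcall */
};

static int
example_pick_stringop_alg (const struct example_stringop_entry *table,
			   int n, long size)
{
  int i;

  /* Scan the thresholds in order and take the first one that covers SIZE.  */
  for (i = 0; i < n; i++)
    if (table[i].max == -1 || size <= table[i].max)
      return table[i].alg;

  /* Fall back to the last entry (normally the {-1, libcall} one).  */
  return table[n - 1].alg;
}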
437 static const
438 struct processor_costs geode_cost = {
439 COSTS_N_INSNS (1), /* cost of an add instruction */
440 COSTS_N_INSNS (1), /* cost of a lea instruction */
441 COSTS_N_INSNS (2), /* variable shift costs */
442 COSTS_N_INSNS (1), /* constant shift costs */
443 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
444 COSTS_N_INSNS (4), /* HI */
445 COSTS_N_INSNS (7), /* SI */
446 COSTS_N_INSNS (7), /* DI */
447 COSTS_N_INSNS (7)}, /* other */
448 0, /* cost of multiply per each bit set */
449 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
450 COSTS_N_INSNS (23), /* HI */
451 COSTS_N_INSNS (39), /* SI */
452 COSTS_N_INSNS (39), /* DI */
453 COSTS_N_INSNS (39)}, /* other */
454 COSTS_N_INSNS (1), /* cost of movsx */
455 COSTS_N_INSNS (1), /* cost of movzx */
456 8, /* "large" insn */
457 4, /* MOVE_RATIO */
458 1, /* cost for loading QImode using movzbl */
459 {1, 1, 1}, /* cost of loading integer registers
460 in QImode, HImode and SImode.
461 Relative to reg-reg move (2). */
462 {1, 1, 1}, /* cost of storing integer registers */
463 1, /* cost of reg,reg fld/fst */
464 {1, 1, 1}, /* cost of loading fp registers
465 in SFmode, DFmode and XFmode */
466 {4, 6, 6}, /* cost of storing fp registers
467 in SFmode, DFmode and XFmode */
469 1, /* cost of moving MMX register */
470 {1, 1}, /* cost of loading MMX registers
471 in SImode and DImode */
472 {1, 1}, /* cost of storing MMX registers
473 in SImode and DImode */
474 1, /* cost of moving SSE register */
475 {1, 1, 1}, /* cost of loading SSE registers
476 in SImode, DImode and TImode */
477 {1, 1, 1}, /* cost of storing SSE registers
478 in SImode, DImode and TImode */
479 1, /* MMX or SSE register to integer */
480 64, /* size of l1 cache. */
481 128, /* size of l2 cache. */
482 32, /* size of prefetch block */
483 1, /* number of parallel prefetches */
484 1, /* Branch cost */
485 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
486 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
487 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
488 COSTS_N_INSNS (1), /* cost of FABS instruction. */
489 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
490 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
491 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
492 DUMMY_STRINGOP_ALGS},
493 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
494 DUMMY_STRINGOP_ALGS},
495 1, /* scalar_stmt_cost. */
496 1, /* scalar load_cost. */
497 1, /* scalar_store_cost. */
498 1, /* vec_stmt_cost. */
499 1, /* vec_to_scalar_cost. */
500 1, /* scalar_to_vec_cost. */
501 1, /* vec_align_load_cost. */
502 2, /* vec_unalign_load_cost. */
503 1, /* vec_store_cost. */
504 3, /* cond_taken_branch_cost. */
505 1, /* cond_not_taken_branch_cost. */
506 };
508 static const
509 struct processor_costs k6_cost = {
510 COSTS_N_INSNS (1), /* cost of an add instruction */
511 COSTS_N_INSNS (2), /* cost of a lea instruction */
512 COSTS_N_INSNS (1), /* variable shift costs */
513 COSTS_N_INSNS (1), /* constant shift costs */
514 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
515 COSTS_N_INSNS (3), /* HI */
516 COSTS_N_INSNS (3), /* SI */
517 COSTS_N_INSNS (3), /* DI */
518 COSTS_N_INSNS (3)}, /* other */
519 0, /* cost of multiply per each bit set */
520 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
521 COSTS_N_INSNS (18), /* HI */
522 COSTS_N_INSNS (18), /* SI */
523 COSTS_N_INSNS (18), /* DI */
524 COSTS_N_INSNS (18)}, /* other */
525 COSTS_N_INSNS (2), /* cost of movsx */
526 COSTS_N_INSNS (2), /* cost of movzx */
527 8, /* "large" insn */
528 4, /* MOVE_RATIO */
529 3, /* cost for loading QImode using movzbl */
530 {4, 5, 4}, /* cost of loading integer registers
531 in QImode, HImode and SImode.
532 Relative to reg-reg move (2). */
533 {2, 3, 2}, /* cost of storing integer registers */
534 4, /* cost of reg,reg fld/fst */
535 {6, 6, 6}, /* cost of loading fp registers
536 in SFmode, DFmode and XFmode */
537 {4, 4, 4}, /* cost of storing fp registers
538 in SFmode, DFmode and XFmode */
539 2, /* cost of moving MMX register */
540 {2, 2}, /* cost of loading MMX registers
541 in SImode and DImode */
542 {2, 2}, /* cost of storing MMX registers
543 in SImode and DImode */
544 2, /* cost of moving SSE register */
545 {2, 2, 8}, /* cost of loading SSE registers
546 in SImode, DImode and TImode */
547 {2, 2, 8}, /* cost of storing SSE registers
548 in SImode, DImode and TImode */
549 6, /* MMX or SSE register to integer */
550 32, /* size of l1 cache. */
551 32, /* size of l2 cache. Some models
552 have integrated l2 cache, but
553 optimizing for k6 is not important
554 enough to worry about that. */
555 32, /* size of prefetch block */
556 1, /* number of parallel prefetches */
557 1, /* Branch cost */
558 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
559 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
560 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
561 COSTS_N_INSNS (2), /* cost of FABS instruction. */
562 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
563 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
564 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
565 DUMMY_STRINGOP_ALGS},
566 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
567 DUMMY_STRINGOP_ALGS},
568 1, /* scalar_stmt_cost. */
569 1, /* scalar load_cost. */
570 1, /* scalar_store_cost. */
571 1, /* vec_stmt_cost. */
572 1, /* vec_to_scalar_cost. */
573 1, /* scalar_to_vec_cost. */
574 1, /* vec_align_load_cost. */
575 2, /* vec_unalign_load_cost. */
576 1, /* vec_store_cost. */
577 3, /* cond_taken_branch_cost. */
578 1, /* cond_not_taken_branch_cost. */
579 };
581 static const
582 struct processor_costs athlon_cost = {
583 COSTS_N_INSNS (1), /* cost of an add instruction */
584 COSTS_N_INSNS (2), /* cost of a lea instruction */
585 COSTS_N_INSNS (1), /* variable shift costs */
586 COSTS_N_INSNS (1), /* constant shift costs */
587 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
588 COSTS_N_INSNS (5), /* HI */
589 COSTS_N_INSNS (5), /* SI */
590 COSTS_N_INSNS (5), /* DI */
591 COSTS_N_INSNS (5)}, /* other */
592 0, /* cost of multiply per each bit set */
593 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
594 COSTS_N_INSNS (26), /* HI */
595 COSTS_N_INSNS (42), /* SI */
596 COSTS_N_INSNS (74), /* DI */
597 COSTS_N_INSNS (74)}, /* other */
598 COSTS_N_INSNS (1), /* cost of movsx */
599 COSTS_N_INSNS (1), /* cost of movzx */
600 8, /* "large" insn */
601 9, /* MOVE_RATIO */
602 4, /* cost for loading QImode using movzbl */
603 {3, 4, 3}, /* cost of loading integer registers
604 in QImode, HImode and SImode.
605 Relative to reg-reg move (2). */
606 {3, 4, 3}, /* cost of storing integer registers */
607 4, /* cost of reg,reg fld/fst */
608 {4, 4, 12}, /* cost of loading fp registers
609 in SFmode, DFmode and XFmode */
610 {6, 6, 8}, /* cost of storing fp registers
611 in SFmode, DFmode and XFmode */
612 2, /* cost of moving MMX register */
613 {4, 4}, /* cost of loading MMX registers
614 in SImode and DImode */
615 {4, 4}, /* cost of storing MMX registers
616 in SImode and DImode */
617 2, /* cost of moving SSE register */
618 {4, 4, 6}, /* cost of loading SSE registers
619 in SImode, DImode and TImode */
620 {4, 4, 5}, /* cost of storing SSE registers
621 in SImode, DImode and TImode */
622 5, /* MMX or SSE register to integer */
623 64, /* size of l1 cache. */
624 256, /* size of l2 cache. */
625 64, /* size of prefetch block */
626 6, /* number of parallel prefetches */
627 5, /* Branch cost */
628 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
629 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
630 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
631 COSTS_N_INSNS (2), /* cost of FABS instruction. */
632 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
633 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
634 /* For some reason, Athlon deals better with REP prefix (relative to loops)
635 compared to K8. Alignment becomes important after 8 bytes for memcpy and
636 128 bytes for memset. */
637 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
638 DUMMY_STRINGOP_ALGS},
639 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
640 DUMMY_STRINGOP_ALGS},
641 1, /* scalar_stmt_cost. */
642 1, /* scalar load_cost. */
643 1, /* scalar_store_cost. */
644 1, /* vec_stmt_cost. */
645 1, /* vec_to_scalar_cost. */
646 1, /* scalar_to_vec_cost. */
647 1, /* vec_align_load_cost. */
648 2, /* vec_unalign_load_cost. */
649 1, /* vec_store_cost. */
650 3, /* cond_taken_branch_cost. */
651 1, /* cond_not_taken_branch_cost. */
652 };
654 static const
655 struct processor_costs k8_cost = {
656 COSTS_N_INSNS (1), /* cost of an add instruction */
657 COSTS_N_INSNS (2), /* cost of a lea instruction */
658 COSTS_N_INSNS (1), /* variable shift costs */
659 COSTS_N_INSNS (1), /* constant shift costs */
660 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
661 COSTS_N_INSNS (4), /* HI */
662 COSTS_N_INSNS (3), /* SI */
663 COSTS_N_INSNS (4), /* DI */
664 COSTS_N_INSNS (5)}, /* other */
665 0, /* cost of multiply per each bit set */
666 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
667 COSTS_N_INSNS (26), /* HI */
668 COSTS_N_INSNS (42), /* SI */
669 COSTS_N_INSNS (74), /* DI */
670 COSTS_N_INSNS (74)}, /* other */
671 COSTS_N_INSNS (1), /* cost of movsx */
672 COSTS_N_INSNS (1), /* cost of movzx */
673 8, /* "large" insn */
674 9, /* MOVE_RATIO */
675 4, /* cost for loading QImode using movzbl */
676 {3, 4, 3}, /* cost of loading integer registers
677 in QImode, HImode and SImode.
678 Relative to reg-reg move (2). */
679 {3, 4, 3}, /* cost of storing integer registers */
680 4, /* cost of reg,reg fld/fst */
681 {4, 4, 12}, /* cost of loading fp registers
682 in SFmode, DFmode and XFmode */
683 {6, 6, 8}, /* cost of storing fp registers
684 in SFmode, DFmode and XFmode */
685 2, /* cost of moving MMX register */
686 {3, 3}, /* cost of loading MMX registers
687 in SImode and DImode */
688 {4, 4}, /* cost of storing MMX registers
689 in SImode and DImode */
690 2, /* cost of moving SSE register */
691 {4, 3, 6}, /* cost of loading SSE registers
692 in SImode, DImode and TImode */
693 {4, 4, 5}, /* cost of storing SSE registers
694 in SImode, DImode and TImode */
695 5, /* MMX or SSE register to integer */
696 64, /* size of l1 cache. */
697 512, /* size of l2 cache. */
698 64, /* size of prefetch block */
699 /* New AMD processors never drop prefetches; if they cannot be performed
700 immediately, they are queued. We set the number of simultaneous prefetches
701 to a large constant to reflect this (it is probably not a good idea not to
702 limit the number of prefetches at all, as their execution also takes some
703 time). */
704 100, /* number of parallel prefetches */
705 3, /* Branch cost */
706 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
707 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
708 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
709 COSTS_N_INSNS (2), /* cost of FABS instruction. */
710 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
711 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
712 /* K8 has optimized REP instructions for medium sized blocks, but for very small
713 blocks it is better to use a loop. For large blocks, a libcall can do
714 nontemporal accesses and beat the inline code considerably. */
715 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
716 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
717 {{libcall, {{8, loop}, {24, unrolled_loop},
718 {2048, rep_prefix_4_byte}, {-1, libcall}}},
719 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
720 4, /* scalar_stmt_cost. */
721 2, /* scalar load_cost. */
722 2, /* scalar_store_cost. */
723 5, /* vec_stmt_cost. */
724 0, /* vec_to_scalar_cost. */
725 2, /* scalar_to_vec_cost. */
726 2, /* vec_align_load_cost. */
727 3, /* vec_unalign_load_cost. */
728 3, /* vec_store_cost. */
729 3, /* cond_taken_branch_cost. */
730 2, /* cond_not_taken_branch_cost. */
731 };
733 struct processor_costs amdfam10_cost = {
734 COSTS_N_INSNS (1), /* cost of an add instruction */
735 COSTS_N_INSNS (2), /* cost of a lea instruction */
736 COSTS_N_INSNS (1), /* variable shift costs */
737 COSTS_N_INSNS (1), /* constant shift costs */
738 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
739 COSTS_N_INSNS (4), /* HI */
740 COSTS_N_INSNS (3), /* SI */
741 COSTS_N_INSNS (4), /* DI */
742 COSTS_N_INSNS (5)}, /* other */
743 0, /* cost of multiply per each bit set */
744 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
745 COSTS_N_INSNS (35), /* HI */
746 COSTS_N_INSNS (51), /* SI */
747 COSTS_N_INSNS (83), /* DI */
748 COSTS_N_INSNS (83)}, /* other */
749 COSTS_N_INSNS (1), /* cost of movsx */
750 COSTS_N_INSNS (1), /* cost of movzx */
751 8, /* "large" insn */
752 9, /* MOVE_RATIO */
753 4, /* cost for loading QImode using movzbl */
754 {3, 4, 3}, /* cost of loading integer registers
755 in QImode, HImode and SImode.
756 Relative to reg-reg move (2). */
757 {3, 4, 3}, /* cost of storing integer registers */
758 4, /* cost of reg,reg fld/fst */
759 {4, 4, 12}, /* cost of loading fp registers
760 in SFmode, DFmode and XFmode */
761 {6, 6, 8}, /* cost of storing fp registers
762 in SFmode, DFmode and XFmode */
763 2, /* cost of moving MMX register */
764 {3, 3}, /* cost of loading MMX registers
765 in SImode and DImode */
766 {4, 4}, /* cost of storing MMX registers
767 in SImode and DImode */
768 2, /* cost of moving SSE register */
769 {4, 4, 3}, /* cost of loading SSE registers
770 in SImode, DImode and TImode */
771 {4, 4, 5}, /* cost of storing SSE registers
772 in SImode, DImode and TImode */
773 3, /* MMX or SSE register to integer */
774 /* On K8
775 MOVD reg64, xmmreg Double FSTORE 4
776 MOVD reg32, xmmreg Double FSTORE 4
777 On AMDFAM10
778 MOVD reg64, xmmreg Double FADD 3
779 1/1 1/1
780 MOVD reg32, xmmreg Double FADD 3
781 1/1 1/1 */
782 64, /* size of l1 cache. */
783 512, /* size of l2 cache. */
784 64, /* size of prefetch block */
785 /* New AMD processors never drop prefetches; if they cannot be performed
786 immediately, they are queued. We set the number of simultaneous prefetches
787 to a large constant to reflect this (it is probably not a good idea not to
788 limit the number of prefetches at all, as their execution also takes some
789 time). */
790 100, /* number of parallel prefetches */
791 2, /* Branch cost */
792 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
793 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
794 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
795 COSTS_N_INSNS (2), /* cost of FABS instruction. */
796 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
797 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
799 /* AMDFAM10 has optimized REP instructions for medium sized blocks, but for
800 very small blocks it is better to use a loop. For large blocks, a libcall can
801 do nontemporal accesses and beat the inline code considerably. */
802 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
803 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
804 {{libcall, {{8, loop}, {24, unrolled_loop},
805 {2048, rep_prefix_4_byte}, {-1, libcall}}},
806 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
807 4, /* scalar_stmt_cost. */
808 2, /* scalar load_cost. */
809 2, /* scalar_store_cost. */
810 6, /* vec_stmt_cost. */
811 0, /* vec_to_scalar_cost. */
812 2, /* scalar_to_vec_cost. */
813 2, /* vec_align_load_cost. */
814 2, /* vec_unalign_load_cost. */
815 2, /* vec_store_cost. */
816 2, /* cond_taken_branch_cost. */
817 1, /* cond_not_taken_branch_cost. */
818 };
820 static const
821 struct processor_costs pentium4_cost = {
822 COSTS_N_INSNS (1), /* cost of an add instruction */
823 COSTS_N_INSNS (3), /* cost of a lea instruction */
824 COSTS_N_INSNS (4), /* variable shift costs */
825 COSTS_N_INSNS (4), /* constant shift costs */
826 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
827 COSTS_N_INSNS (15), /* HI */
828 COSTS_N_INSNS (15), /* SI */
829 COSTS_N_INSNS (15), /* DI */
830 COSTS_N_INSNS (15)}, /* other */
831 0, /* cost of multiply per each bit set */
832 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
833 COSTS_N_INSNS (56), /* HI */
834 COSTS_N_INSNS (56), /* SI */
835 COSTS_N_INSNS (56), /* DI */
836 COSTS_N_INSNS (56)}, /* other */
837 COSTS_N_INSNS (1), /* cost of movsx */
838 COSTS_N_INSNS (1), /* cost of movzx */
839 16, /* "large" insn */
840 6, /* MOVE_RATIO */
841 2, /* cost for loading QImode using movzbl */
842 {4, 5, 4}, /* cost of loading integer registers
843 in QImode, HImode and SImode.
844 Relative to reg-reg move (2). */
845 {2, 3, 2}, /* cost of storing integer registers */
846 2, /* cost of reg,reg fld/fst */
847 {2, 2, 6}, /* cost of loading fp registers
848 in SFmode, DFmode and XFmode */
849 {4, 4, 6}, /* cost of storing fp registers
850 in SFmode, DFmode and XFmode */
851 2, /* cost of moving MMX register */
852 {2, 2}, /* cost of loading MMX registers
853 in SImode and DImode */
854 {2, 2}, /* cost of storing MMX registers
855 in SImode and DImode */
856 12, /* cost of moving SSE register */
857 {12, 12, 12}, /* cost of loading SSE registers
858 in SImode, DImode and TImode */
859 {2, 2, 8}, /* cost of storing SSE registers
860 in SImode, DImode and TImode */
861 10, /* MMX or SSE register to integer */
862 8, /* size of l1 cache. */
863 256, /* size of l2 cache. */
864 64, /* size of prefetch block */
865 6, /* number of parallel prefetches */
866 2, /* Branch cost */
867 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
868 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
869 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
870 COSTS_N_INSNS (2), /* cost of FABS instruction. */
871 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
872 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
873 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
874 DUMMY_STRINGOP_ALGS},
875 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
876 {-1, libcall}}},
877 DUMMY_STRINGOP_ALGS},
878 1, /* scalar_stmt_cost. */
879 1, /* scalar load_cost. */
880 1, /* scalar_store_cost. */
881 1, /* vec_stmt_cost. */
882 1, /* vec_to_scalar_cost. */
883 1, /* scalar_to_vec_cost. */
884 1, /* vec_align_load_cost. */
885 2, /* vec_unalign_load_cost. */
886 1, /* vec_store_cost. */
887 3, /* cond_taken_branch_cost. */
888 1, /* cond_not_taken_branch_cost. */
889 };
891 static const
892 struct processor_costs nocona_cost = {
893 COSTS_N_INSNS (1), /* cost of an add instruction */
894 COSTS_N_INSNS (1), /* cost of a lea instruction */
895 COSTS_N_INSNS (1), /* variable shift costs */
896 COSTS_N_INSNS (1), /* constant shift costs */
897 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
898 COSTS_N_INSNS (10), /* HI */
899 COSTS_N_INSNS (10), /* SI */
900 COSTS_N_INSNS (10), /* DI */
901 COSTS_N_INSNS (10)}, /* other */
902 0, /* cost of multiply per each bit set */
903 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
904 COSTS_N_INSNS (66), /* HI */
905 COSTS_N_INSNS (66), /* SI */
906 COSTS_N_INSNS (66), /* DI */
907 COSTS_N_INSNS (66)}, /* other */
908 COSTS_N_INSNS (1), /* cost of movsx */
909 COSTS_N_INSNS (1), /* cost of movzx */
910 16, /* "large" insn */
911 17, /* MOVE_RATIO */
912 4, /* cost for loading QImode using movzbl */
913 {4, 4, 4}, /* cost of loading integer registers
914 in QImode, HImode and SImode.
915 Relative to reg-reg move (2). */
916 {4, 4, 4}, /* cost of storing integer registers */
917 3, /* cost of reg,reg fld/fst */
918 {12, 12, 12}, /* cost of loading fp registers
919 in SFmode, DFmode and XFmode */
920 {4, 4, 4}, /* cost of storing fp registers
921 in SFmode, DFmode and XFmode */
922 6, /* cost of moving MMX register */
923 {12, 12}, /* cost of loading MMX registers
924 in SImode and DImode */
925 {12, 12}, /* cost of storing MMX registers
926 in SImode and DImode */
927 6, /* cost of moving SSE register */
928 {12, 12, 12}, /* cost of loading SSE registers
929 in SImode, DImode and TImode */
930 {12, 12, 12}, /* cost of storing SSE registers
931 in SImode, DImode and TImode */
932 8, /* MMX or SSE register to integer */
933 8, /* size of l1 cache. */
934 1024, /* size of l2 cache. */
935 128, /* size of prefetch block */
936 8, /* number of parallel prefetches */
937 1, /* Branch cost */
938 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
939 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
940 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
941 COSTS_N_INSNS (3), /* cost of FABS instruction. */
942 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
943 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
944 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
945 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
946 {100000, unrolled_loop}, {-1, libcall}}}},
947 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
948 {-1, libcall}}},
949 {libcall, {{24, loop}, {64, unrolled_loop},
950 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
951 1, /* scalar_stmt_cost. */
952 1, /* scalar load_cost. */
953 1, /* scalar_store_cost. */
954 1, /* vec_stmt_cost. */
955 1, /* vec_to_scalar_cost. */
956 1, /* scalar_to_vec_cost. */
957 1, /* vec_align_load_cost. */
958 2, /* vec_unalign_load_cost. */
959 1, /* vec_store_cost. */
960 3, /* cond_taken_branch_cost. */
961 1, /* cond_not_taken_branch_cost. */
962 };
964 static const
965 struct processor_costs core2_cost = {
966 COSTS_N_INSNS (1), /* cost of an add instruction */
967 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
968 COSTS_N_INSNS (1), /* variable shift costs */
969 COSTS_N_INSNS (1), /* constant shift costs */
970 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
971 COSTS_N_INSNS (3), /* HI */
972 COSTS_N_INSNS (3), /* SI */
973 COSTS_N_INSNS (3), /* DI */
974 COSTS_N_INSNS (3)}, /* other */
975 0, /* cost of multiply per each bit set */
976 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
977 COSTS_N_INSNS (22), /* HI */
978 COSTS_N_INSNS (22), /* SI */
979 COSTS_N_INSNS (22), /* DI */
980 COSTS_N_INSNS (22)}, /* other */
981 COSTS_N_INSNS (1), /* cost of movsx */
982 COSTS_N_INSNS (1), /* cost of movzx */
983 8, /* "large" insn */
984 16, /* MOVE_RATIO */
985 2, /* cost for loading QImode using movzbl */
986 {6, 6, 6}, /* cost of loading integer registers
987 in QImode, HImode and SImode.
988 Relative to reg-reg move (2). */
989 {4, 4, 4}, /* cost of storing integer registers */
990 2, /* cost of reg,reg fld/fst */
991 {6, 6, 6}, /* cost of loading fp registers
992 in SFmode, DFmode and XFmode */
993 {4, 4, 4}, /* cost of storing fp registers
994 in SFmode, DFmode and XFmode */
995 2, /* cost of moving MMX register */
996 {6, 6}, /* cost of loading MMX registers
997 in SImode and DImode */
998 {4, 4}, /* cost of storing MMX registers
999 in SImode and DImode */
1000 2, /* cost of moving SSE register */
1001 {6, 6, 6}, /* cost of loading SSE registers
1002 in SImode, DImode and TImode */
1003 {4, 4, 4}, /* cost of storing SSE registers
1004 in SImode, DImode and TImode */
1005 2, /* MMX or SSE register to integer */
1006 32, /* size of l1 cache. */
1007 2048, /* size of l2 cache. */
1008 128, /* size of prefetch block */
1009 8, /* number of parallel prefetches */
1010 3, /* Branch cost */
1011 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1012 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1013 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1014 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1015 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1016 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1017 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1018 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1019 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1020 {{libcall, {{8, loop}, {15, unrolled_loop},
1021 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1022 {libcall, {{24, loop}, {32, unrolled_loop},
1023 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1024 1, /* scalar_stmt_cost. */
1025 1, /* scalar load_cost. */
1026 1, /* scalar_store_cost. */
1027 1, /* vec_stmt_cost. */
1028 1, /* vec_to_scalar_cost. */
1029 1, /* scalar_to_vec_cost. */
1030 1, /* vec_align_load_cost. */
1031 2, /* vec_unalign_load_cost. */
1032 1, /* vec_store_cost. */
1033 3, /* cond_taken_branch_cost. */
1034 1, /* cond_not_taken_branch_cost. */
1035 };
1037 static const
1038 struct processor_costs atom_cost = {
1039 COSTS_N_INSNS (1), /* cost of an add instruction */
1040 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1041 COSTS_N_INSNS (1), /* variable shift costs */
1042 COSTS_N_INSNS (1), /* constant shift costs */
1043 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1044 COSTS_N_INSNS (4), /* HI */
1045 COSTS_N_INSNS (3), /* SI */
1046 COSTS_N_INSNS (4), /* DI */
1047 COSTS_N_INSNS (2)}, /* other */
1048 0, /* cost of multiply per each bit set */
1049 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1050 COSTS_N_INSNS (26), /* HI */
1051 COSTS_N_INSNS (42), /* SI */
1052 COSTS_N_INSNS (74), /* DI */
1053 COSTS_N_INSNS (74)}, /* other */
1054 COSTS_N_INSNS (1), /* cost of movsx */
1055 COSTS_N_INSNS (1), /* cost of movzx */
1056 8, /* "large" insn */
1057 17, /* MOVE_RATIO */
1058 2, /* cost for loading QImode using movzbl */
1059 {4, 4, 4}, /* cost of loading integer registers
1060 in QImode, HImode and SImode.
1061 Relative to reg-reg move (2). */
1062 {4, 4, 4}, /* cost of storing integer registers */
1063 4, /* cost of reg,reg fld/fst */
1064 {12, 12, 12}, /* cost of loading fp registers
1065 in SFmode, DFmode and XFmode */
1066 {6, 6, 8}, /* cost of storing fp registers
1067 in SFmode, DFmode and XFmode */
1068 2, /* cost of moving MMX register */
1069 {8, 8}, /* cost of loading MMX registers
1070 in SImode and DImode */
1071 {8, 8}, /* cost of storing MMX registers
1072 in SImode and DImode */
1073 2, /* cost of moving SSE register */
1074 {8, 8, 8}, /* cost of loading SSE registers
1075 in SImode, DImode and TImode */
1076 {8, 8, 8}, /* cost of storing SSE registers
1077 in SImode, DImode and TImode */
1078 5, /* MMX or SSE register to integer */
1079 32, /* size of l1 cache. */
1080 256, /* size of l2 cache. */
1081 64, /* size of prefetch block */
1082 6, /* number of parallel prefetches */
1083 3, /* Branch cost */
1084 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1085 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1086 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1087 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1088 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1089 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1090 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1091 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1092 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1093 {{libcall, {{8, loop}, {15, unrolled_loop},
1094 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1095 {libcall, {{24, loop}, {32, unrolled_loop},
1096 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1097 1, /* scalar_stmt_cost. */
1098 1, /* scalar load_cost. */
1099 1, /* scalar_store_cost. */
1100 1, /* vec_stmt_cost. */
1101 1, /* vec_to_scalar_cost. */
1102 1, /* scalar_to_vec_cost. */
1103 1, /* vec_align_load_cost. */
1104 2, /* vec_unalign_load_cost. */
1105 1, /* vec_store_cost. */
1106 3, /* cond_taken_branch_cost. */
1107 1, /* cond_not_taken_branch_cost. */
1108 };
1110 /* Generic64 should produce code tuned for Nocona and K8. */
1111 static const
1112 struct processor_costs generic64_cost = {
1113 COSTS_N_INSNS (1), /* cost of an add instruction */
1114 /* On all chips taken into consideration, lea takes 2 cycles or more. With
1115 this cost, however, our current implementation of synth_mult results in the
1116 use of unnecessary temporary registers, causing regressions on several
1117 SPECfp benchmarks. */
1118 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1119 COSTS_N_INSNS (1), /* variable shift costs */
1120 COSTS_N_INSNS (1), /* constant shift costs */
1121 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1122 COSTS_N_INSNS (4), /* HI */
1123 COSTS_N_INSNS (3), /* SI */
1124 COSTS_N_INSNS (4), /* DI */
1125 COSTS_N_INSNS (2)}, /* other */
1126 0, /* cost of multiply per each bit set */
1127 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1128 COSTS_N_INSNS (26), /* HI */
1129 COSTS_N_INSNS (42), /* SI */
1130 COSTS_N_INSNS (74), /* DI */
1131 COSTS_N_INSNS (74)}, /* other */
1132 COSTS_N_INSNS (1), /* cost of movsx */
1133 COSTS_N_INSNS (1), /* cost of movzx */
1134 8, /* "large" insn */
1135 17, /* MOVE_RATIO */
1136 4, /* cost for loading QImode using movzbl */
1137 {4, 4, 4}, /* cost of loading integer registers
1138 in QImode, HImode and SImode.
1139 Relative to reg-reg move (2). */
1140 {4, 4, 4}, /* cost of storing integer registers */
1141 4, /* cost of reg,reg fld/fst */
1142 {12, 12, 12}, /* cost of loading fp registers
1143 in SFmode, DFmode and XFmode */
1144 {6, 6, 8}, /* cost of storing fp registers
1145 in SFmode, DFmode and XFmode */
1146 2, /* cost of moving MMX register */
1147 {8, 8}, /* cost of loading MMX registers
1148 in SImode and DImode */
1149 {8, 8}, /* cost of storing MMX registers
1150 in SImode and DImode */
1151 2, /* cost of moving SSE register */
1152 {8, 8, 8}, /* cost of loading SSE registers
1153 in SImode, DImode and TImode */
1154 {8, 8, 8}, /* cost of storing SSE registers
1155 in SImode, DImode and TImode */
1156 5, /* MMX or SSE register to integer */
1157 32, /* size of l1 cache. */
1158 512, /* size of l2 cache. */
1159 64, /* size of prefetch block */
1160 6, /* number of parallel prefetches */
1161 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1162 value is increased to the perhaps more appropriate value of 5. */
1163 3, /* Branch cost */
1164 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1165 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1166 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1167 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1168 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1169 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1170 {DUMMY_STRINGOP_ALGS,
1171 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1172 {DUMMY_STRINGOP_ALGS,
1173 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1174 1, /* scalar_stmt_cost. */
1175 1, /* scalar load_cost. */
1176 1, /* scalar_store_cost. */
1177 1, /* vec_stmt_cost. */
1178 1, /* vec_to_scalar_cost. */
1179 1, /* scalar_to_vec_cost. */
1180 1, /* vec_align_load_cost. */
1181 2, /* vec_unalign_load_cost. */
1182 1, /* vec_store_cost. */
1183 3, /* cond_taken_branch_cost. */
1184 1, /* cond_not_taken_branch_cost. */
1185 };
1187 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1188 static const
1189 struct processor_costs generic32_cost = {
1190 COSTS_N_INSNS (1), /* cost of an add instruction */
1191 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1192 COSTS_N_INSNS (1), /* variable shift costs */
1193 COSTS_N_INSNS (1), /* constant shift costs */
1194 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1195 COSTS_N_INSNS (4), /* HI */
1196 COSTS_N_INSNS (3), /* SI */
1197 COSTS_N_INSNS (4), /* DI */
1198 COSTS_N_INSNS (2)}, /* other */
1199 0, /* cost of multiply per each bit set */
1200 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1201 COSTS_N_INSNS (26), /* HI */
1202 COSTS_N_INSNS (42), /* SI */
1203 COSTS_N_INSNS (74), /* DI */
1204 COSTS_N_INSNS (74)}, /* other */
1205 COSTS_N_INSNS (1), /* cost of movsx */
1206 COSTS_N_INSNS (1), /* cost of movzx */
1207 8, /* "large" insn */
1208 17, /* MOVE_RATIO */
1209 4, /* cost for loading QImode using movzbl */
1210 {4, 4, 4}, /* cost of loading integer registers
1211 in QImode, HImode and SImode.
1212 Relative to reg-reg move (2). */
1213 {4, 4, 4}, /* cost of storing integer registers */
1214 4, /* cost of reg,reg fld/fst */
1215 {12, 12, 12}, /* cost of loading fp registers
1216 in SFmode, DFmode and XFmode */
1217 {6, 6, 8}, /* cost of storing fp registers
1218 in SFmode, DFmode and XFmode */
1219 2, /* cost of moving MMX register */
1220 {8, 8}, /* cost of loading MMX registers
1221 in SImode and DImode */
1222 {8, 8}, /* cost of storing MMX registers
1223 in SImode and DImode */
1224 2, /* cost of moving SSE register */
1225 {8, 8, 8}, /* cost of loading SSE registers
1226 in SImode, DImode and TImode */
1227 {8, 8, 8}, /* cost of storing SSE registers
1228 in SImode, DImode and TImode */
1229 5, /* MMX or SSE register to integer */
1230 32, /* size of l1 cache. */
1231 256, /* size of l2 cache. */
1232 64, /* size of prefetch block */
1233 6, /* number of parallel prefetches */
1234 3, /* Branch cost */
1235 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1236 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1237 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1238 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1239 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1240 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1241 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1242 DUMMY_STRINGOP_ALGS},
1243 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1244 DUMMY_STRINGOP_ALGS},
1245 1, /* scalar_stmt_cost. */
1246 1, /* scalar load_cost. */
1247 1, /* scalar_store_cost. */
1248 1, /* vec_stmt_cost. */
1249 1, /* vec_to_scalar_cost. */
1250 1, /* scalar_to_vec_cost. */
1251 1, /* vec_align_load_cost. */
1252 2, /* vec_unalign_load_cost. */
1253 1, /* vec_store_cost. */
1254 3, /* cond_taken_branch_cost. */
1255 1, /* cond_not_taken_branch_cost. */
1256 };
1258 const struct processor_costs *ix86_cost = &pentium_cost;
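/* ix86_cost starts out pointing at pentium_cost and is expected to be
   redirected during option processing to the table that matches the selected
   tuning, with ix86_size_cost used when optimizing for size.  The helper
   below is a hypothetical illustration of that idea (the function name and
   its parameters are made up); it is not the option-handling code itself.  */

static void
example_select_cost_table (int optimizing_for_size, int tuning_for_k8)
{
  if (optimizing_for_size)
    ix86_cost = &ix86_size_cost;
  else if (tuning_for_k8)
    ix86_cost = &k8_cost;
  /* ... and similarly for the other processor_costs tables above.  */
}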
1260 /* Processor feature/optimization bitmasks. */
1261 #define m_386 (1<<PROCESSOR_I386)
1262 #define m_486 (1<<PROCESSOR_I486)
1263 #define m_PENT (1<<PROCESSOR_PENTIUM)
1264 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1265 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1266 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1267 #define m_CORE2 (1<<PROCESSOR_CORE2)
1268 #define m_ATOM (1<<PROCESSOR_ATOM)
1270 #define m_GEODE (1<<PROCESSOR_GEODE)
1271 #define m_K6 (1<<PROCESSOR_K6)
1272 #define m_K6_GEODE (m_K6 | m_GEODE)
1273 #define m_K8 (1<<PROCESSOR_K8)
1274 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1275 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1276 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1277 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)
1279 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1280 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1282 /* Generic instruction choice should be a common subset of the supported CPUs
1283 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1284 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1286 /* Feature tests against the various tunings. */
1287 unsigned char ix86_tune_features[X86_TUNE_LAST];
1289 /* Feature tests against the various tunings used to create ix86_tune_features
1290 based on the processor mask. */
1291 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1292 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1293 negatively, so enabling it for Generic64 seems like a good code size
1294 tradeoff. We can't enable it for 32bit generic because it does not
1295 work well with PPro based chips. */
1296 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1298 /* X86_TUNE_PUSH_MEMORY */
1299 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1300 | m_NOCONA | m_CORE2 | m_GENERIC,
1302 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1303 m_486 | m_PENT,
1305 /* X86_TUNE_UNROLL_STRLEN */
1306 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1307 | m_CORE2 | m_GENERIC,
1309 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1310 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1312 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1313 on simulation results. But after P4 was made, no performance benefit
1314 was observed with branch hints. It also increases the code size.
1315 As a result, icc never generates branch hints. */
1316 0,
1318 /* X86_TUNE_DOUBLE_WITH_ADD */
1319 ~m_386,
1321 /* X86_TUNE_USE_SAHF */
1322 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1323 | m_NOCONA | m_CORE2 | m_GENERIC,
1325 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1326 partial dependencies. */
1327 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1328 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1330 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1331 register stalls on the Generic32 compilation setting as well. However,
1332 in the current implementation the partial register stalls are not eliminated
1333 very well - they can be introduced via subregs synthesized by combine
1334 and can happen in caller/callee saving sequences. Because this option
1335 pays back little on PPro based chips and is in conflict with the partial reg
1336 dependencies used by Athlon/P4 based chips, it is better to leave it off
1337 for generic32 for now. */
1338 m_PPRO,
1340 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1341 m_CORE2 | m_GENERIC,
1343 /* X86_TUNE_USE_HIMODE_FIOP */
1344 m_386 | m_486 | m_K6_GEODE,
1346 /* X86_TUNE_USE_SIMODE_FIOP */
1347 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1349 /* X86_TUNE_USE_MOV0 */
1350 m_K6,
1352 /* X86_TUNE_USE_CLTD */
1353 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1355 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1356 m_PENT4,
1358 /* X86_TUNE_SPLIT_LONG_MOVES */
1359 m_PPRO,
1361 /* X86_TUNE_READ_MODIFY_WRITE */
1362 ~m_PENT,
1364 /* X86_TUNE_READ_MODIFY */
1365 ~(m_PENT | m_PPRO),
1367 /* X86_TUNE_PROMOTE_QIMODE */
1368 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1369 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1371 /* X86_TUNE_FAST_PREFIX */
1372 ~(m_PENT | m_486 | m_386),
1374 /* X86_TUNE_SINGLE_STRINGOP */
1375 m_386 | m_PENT4 | m_NOCONA,
1377 /* X86_TUNE_QIMODE_MATH */
1378 ~0,
1380 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1381 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1382 might be considered for Generic32 if our scheme for avoiding partial
1383 stalls was more effective. */
1384 ~m_PPRO,
1386 /* X86_TUNE_PROMOTE_QI_REGS */
1387 0,
1389 /* X86_TUNE_PROMOTE_HI_REGS */
1390 m_PPRO,
1392 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1393 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1394 | m_CORE2 | m_GENERIC,
1396 /* X86_TUNE_ADD_ESP_8 */
1397 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1398 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1400 /* X86_TUNE_SUB_ESP_4 */
1401 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1402 | m_GENERIC,
1404 /* X86_TUNE_SUB_ESP_8 */
1405 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1406 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1408 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1409 for DFmode copies */
1410 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1411 | m_GENERIC | m_GEODE),
1413 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1414 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1416 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1417 conflict here between PPro/Pentium4 based chips that treat 128bit
1418 SSE registers as single units and K8 based chips that divide SSE
1419 registers into two 64bit halves. This knob promotes all store destinations
1420 to be 128bit to allow register renaming on 128bit SSE units, but usually
1421 results in one extra micro-op on 64bit SSE units. Experimental results
1422 show that disabling this option on P4 brings over a 20% SPECfp regression,
1423 while enabling it on K8 brings roughly a 2.4% regression that can be partly
1424 masked by careful scheduling of moves. */
1425 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1426 | m_AMDFAM10,
1428 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1429 m_AMDFAM10,
1431 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1432 are resolved on SSE register parts instead of whole registers, so we may
1433 maintain just the lower part of scalar values in proper format, leaving the
1434 upper part undefined. */
1435 m_ATHLON_K8,
1437 /* X86_TUNE_SSE_TYPELESS_STORES */
1438 m_AMD_MULTIPLE,
1440 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1441 m_PPRO | m_PENT4 | m_NOCONA,
1443 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1444 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1446 /* X86_TUNE_PROLOGUE_USING_MOVE */
1447 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1449 /* X86_TUNE_EPILOGUE_USING_MOVE */
1450 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1452 /* X86_TUNE_SHIFT1 */
1453 ~m_486,
1455 /* X86_TUNE_USE_FFREEP */
1456 m_AMD_MULTIPLE,
1458 /* X86_TUNE_INTER_UNIT_MOVES */
1459 ~(m_AMD_MULTIPLE | m_ATOM | m_GENERIC),
1461 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1462 ~(m_AMDFAM10),
1464 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1465 than 4 branch instructions in the 16 byte window. */
1466 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1467 | m_GENERIC,
1469 /* X86_TUNE_SCHEDULE */
1470 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1471 | m_GENERIC,
1473 /* X86_TUNE_USE_BT */
1474 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1476 /* X86_TUNE_USE_INCDEC */
1477 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1479 /* X86_TUNE_PAD_RETURNS */
1480 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1482 /* X86_TUNE_EXT_80387_CONSTANTS */
1483 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1484 | m_CORE2 | m_GENERIC,
1486 /* X86_TUNE_SHORTEN_X87_SSE */
1487 ~m_K8,
1489 /* X86_TUNE_AVOID_VECTOR_DECODE */
1490 m_K8 | m_GENERIC64,
1492 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1493 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1494 ~(m_386 | m_486),
1496 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1497 vector path on AMD machines. */
1498 m_K8 | m_GENERIC64 | m_AMDFAM10,
1500 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1501 machines. */
1502 m_K8 | m_GENERIC64 | m_AMDFAM10,
1504 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1505 than a MOV. */
1506 m_PENT,
1508 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1509 but one byte longer. */
1510 m_PENT,
1512 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
1513 operand that cannot be represented using a modRM byte. The XOR
1514 replacement is long decoded, so this split helps here as well. */
1515 m_K6,
1517 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1518 from FP to FP. */
1519 m_AMDFAM10 | m_GENERIC,
1521 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1522 from integer to FP. */
1523 m_AMDFAM10,
1525 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1526 with a subsequent conditional jump instruction into a single
1527 compare-and-branch uop. */
1528 m_CORE2,
1530 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1531 will impact LEA instruction selection. */
1532 m_ATOM,
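/* Note (descriptive, derived from override_options below): each entry in
   this table is a mask of processor bits such as m_CORE2 or m_ATOM;
   override_options computes ix86_tune_mask = 1u << ix86_tune and sets
   ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask),
   so a tuning flag is enabled exactly when the selected -mtune processor's
   bit is present in its mask.  */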
1535 /* Feature tests against the various architecture variations. */
1536 unsigned char ix86_arch_features[X86_ARCH_LAST];
1538 /* Feature tests against the various architecture variations, used to create
1539 ix86_arch_features based on the processor mask. */
1540 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1541 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1542 ~(m_386 | m_486 | m_PENT | m_K6),
1544 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1545 ~m_386,
1547 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1548 ~(m_386 | m_486),
1550 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1551 ~m_386,
1553 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1554 ~m_386,
1557 static const unsigned int x86_accumulate_outgoing_args
1558 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1559 | m_GENERIC;
1561 static const unsigned int x86_arch_always_fancy_math_387
1562 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1563 | m_NOCONA | m_CORE2 | m_GENERIC;
1565 static enum stringop_alg stringop_alg = no_stringop;
1567 /* In case the average insn count for a single function invocation is
1568 lower than this constant, emit fast (but longer) prologue and
1569 epilogue code. */
1570 #define FAST_PROLOGUE_INSN_COUNT 20
1572 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1573 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1574 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1575 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1577 /* Array of the smallest class containing reg number REGNO, indexed by
1578 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1580 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1582 /* ax, dx, cx, bx */
1583 AREG, DREG, CREG, BREG,
1584 /* si, di, bp, sp */
1585 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1586 /* FP registers */
1587 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1588 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1589 /* arg pointer */
1590 NON_Q_REGS,
1591 /* flags, fpsr, fpcr, frame */
1592 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1593 /* SSE registers */
1594 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1595 SSE_REGS, SSE_REGS,
1596 /* MMX registers */
1597 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1598 MMX_REGS, MMX_REGS,
1599 /* REX registers */
1600 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1601 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1602 /* SSE REX registers */
1603 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1604 SSE_REGS, SSE_REGS,
1607 /* The "default" register map used in 32bit mode. */
1609 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1611 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1612 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1613 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1614 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1615 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1616 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1617 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1620 /* The "default" register map used in 64bit mode. */
1622 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1624 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1625 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1626 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1627 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1628 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1629 8,9,10,11,12,13,14,15, /* extended integer registers */
1630 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1633 /* Define the register numbers to be used in Dwarf debugging information.
1634 The SVR4 reference port C compiler uses the following register numbers
1635 in its Dwarf output code:
1636 0 for %eax (gcc regno = 0)
1637 1 for %ecx (gcc regno = 2)
1638 2 for %edx (gcc regno = 1)
1639 3 for %ebx (gcc regno = 3)
1640 4 for %esp (gcc regno = 7)
1641 5 for %ebp (gcc regno = 6)
1642 6 for %esi (gcc regno = 4)
1643 7 for %edi (gcc regno = 5)
1644 The following three DWARF register numbers are never generated by
1645 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1646 believes these numbers have these meanings.
1647 8 for %eip (no gcc equivalent)
1648 9 for %eflags (gcc regno = 17)
1649 10 for %trapno (no gcc equivalent)
1650 It is not at all clear how we should number the FP stack registers
1651 for the x86 architecture. If the version of SDB on x86/svr4 were
1652 a bit less brain dead with respect to floating-point then we would
1653 have a precedent to follow with respect to DWARF register numbers
1654 for x86 FP registers, but the SDB on x86/svr4 is so completely
1655 broken with respect to FP registers that it is hardly worth thinking
1656 of it as something to strive for compatibility with.
1657 The version of x86/svr4 SDB I have at the moment does (partially)
1658 seem to believe that DWARF register number 11 is associated with
1659 the x86 register %st(0), but that's about all. Higher DWARF
1660 register numbers don't seem to be associated with anything in
1661 particular, and even for DWARF regno 11, SDB only seems to under-
1662 stand that it should say that a variable lives in %st(0) (when
1663 asked via an `=' command) if we said it was in DWARF regno 11,
1664 but SDB still prints garbage when asked for the value of the
1665 variable in question (via a `/' command).
1666 (Also note that the labels SDB prints for various FP stack regs
1667 when doing an `x' command are all wrong.)
1668 Note that these problems generally don't affect the native SVR4
1669 C compiler because it doesn't allow the use of -O with -g and
1670 because when it is *not* optimizing, it allocates a memory
1671 location for each floating-point variable, and the memory
1672 location is what gets described in the DWARF AT_location
1673 attribute for the variable in question.
1674 Regardless of the severe mental illness of the x86/svr4 SDB, we
1675 do something sensible here and we use the following DWARF
1676 register numbers. Note that these are all stack-top-relative
1677 numbers.
1678 11 for %st(0) (gcc regno = 8)
1679 12 for %st(1) (gcc regno = 9)
1680 13 for %st(2) (gcc regno = 10)
1681 14 for %st(3) (gcc regno = 11)
1682 15 for %st(4) (gcc regno = 12)
1683 16 for %st(5) (gcc regno = 13)
1684 17 for %st(6) (gcc regno = 14)
1685 18 for %st(7) (gcc regno = 15)
1687 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1689 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1690 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1691 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1692 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1693 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1694 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1695 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1698 /* Test and compare insns in i386.md store the information needed to
1699 generate branch and scc insns here. */
1701 rtx ix86_compare_op0 = NULL_RTX;
1702 rtx ix86_compare_op1 = NULL_RTX;
1704 /* Define parameter passing and return registers. */
1706 static int const x86_64_int_parameter_registers[6] =
1708 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1711 static int const x86_64_ms_abi_int_parameter_registers[4] =
1713 CX_REG, DX_REG, R8_REG, R9_REG
1716 static int const x86_64_int_return_registers[4] =
1718 AX_REG, DX_REG, DI_REG, SI_REG
1721 /* Define the structure for the machine field in struct function. */
1723 struct GTY(()) stack_local_entry {
1724 unsigned short mode;
1725 unsigned short n;
1726 rtx rtl;
1727 struct stack_local_entry *next;
1730 /* Structure describing stack frame layout.
1731 Stack grows downward:
1733 [arguments]
1734 <- ARG_POINTER
1735 saved pc
1737 saved frame pointer if frame_pointer_needed
1738 <- HARD_FRAME_POINTER
1739 [saved regs]
1741 [padding0]
1743 [saved SSE regs]
1745 [padding1] \
1747 [va_arg registers] (
1748 > to_allocate <- FRAME_POINTER
1749 [frame] (
1751 [padding2] /
1753 struct ix86_frame
1755 int padding0;
1756 int nsseregs;
1757 int nregs;
1758 int padding1;
1759 int va_arg_size;
1760 HOST_WIDE_INT frame;
1761 int padding2;
1762 int outgoing_arguments_size;
1763 int red_zone_size;
1765 HOST_WIDE_INT to_allocate;
1766 /* The offsets relative to ARG_POINTER. */
1767 HOST_WIDE_INT frame_pointer_offset;
1768 HOST_WIDE_INT hard_frame_pointer_offset;
1769 HOST_WIDE_INT stack_pointer_offset;
1771 /* When save_regs_using_mov is set, emit prologue using
1772 move instead of push instructions. */
1773 bool save_regs_using_mov;
1776 /* Code model option. */
1777 enum cmodel ix86_cmodel;
1778 /* Asm dialect. */
1779 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1780 /* TLS dialects. */
1781 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1783 /* Which unit we are generating floating point math for. */
1784 enum fpmath_unit ix86_fpmath;
1786 /* Which cpu are we scheduling for. */
1787 enum attr_cpu ix86_schedule;
1789 /* Which cpu are we optimizing for. */
1790 enum processor_type ix86_tune;
1792 /* Which instruction set architecture to use. */
1793 enum processor_type ix86_arch;
1795 /* true if sse prefetch instruction is not NOOP. */
1796 int x86_prefetch_sse;
1798 /* ix86_regparm_string as a number */
1799 static int ix86_regparm;
1801 /* -mstackrealign option */
1802 extern int ix86_force_align_arg_pointer;
1803 static const char ix86_force_align_arg_pointer_string[]
1804 = "force_align_arg_pointer";
1806 static rtx (*ix86_gen_leave) (void);
1807 static rtx (*ix86_gen_pop1) (rtx);
1808 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1809 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1810 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx, rtx);
1811 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1812 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1813 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
1815 /* Preferred alignment for stack boundary in bits. */
1816 unsigned int ix86_preferred_stack_boundary;
1818 /* Alignment for incoming stack boundary in bits specified at
1819 command line. */
1820 static unsigned int ix86_user_incoming_stack_boundary;
1822 /* Default alignment for incoming stack boundary in bits. */
1823 static unsigned int ix86_default_incoming_stack_boundary;
1825 /* Alignment for incoming stack boundary in bits. */
1826 unsigned int ix86_incoming_stack_boundary;
1828 /* The abi used by target. */
1829 enum calling_abi ix86_abi;
1831 /* Values 1-5: see jump.c */
1832 int ix86_branch_cost;
1834 /* Calling abi specific va_list type nodes. */
1835 static GTY(()) tree sysv_va_list_type_node;
1836 static GTY(()) tree ms_va_list_type_node;
1838 /* Variables which are this size or smaller are put in the data/bss
1839 or ldata/lbss sections. */
1841 int ix86_section_threshold = 65536;
1843 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1844 char internal_label_prefix[16];
1845 int internal_label_prefix_len;
1847 /* Fence to use after loop using movnt. */
1848 tree x86_mfence;
1850 /* Register class used for passing a given 64bit part of the argument.
1851 These represent classes as documented by the PS ABI, with the exception
1852 of SSESF, SSEDF classes, that are basically SSE class, just gcc will
1853 use SF or DFmode move instead of DImode to avoid reformatting penalties.
1855 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1856 whenever possible (upper half does contain padding). */
1857 enum x86_64_reg_class
1859 X86_64_NO_CLASS,
1860 X86_64_INTEGER_CLASS,
1861 X86_64_INTEGERSI_CLASS,
1862 X86_64_SSE_CLASS,
1863 X86_64_SSESF_CLASS,
1864 X86_64_SSEDF_CLASS,
1865 X86_64_SSEUP_CLASS,
1866 X86_64_X87_CLASS,
1867 X86_64_X87UP_CLASS,
1868 X86_64_COMPLEX_X87_CLASS,
1869 X86_64_MEMORY_CLASS
1872 #define MAX_CLASSES 4
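/* Illustrative example of the classification above (a sketch, not part of
   the ABI text): an argument of type struct { double d; long l; } spans two
   eightbytes; the first would classify as X86_64_SSEDF_CLASS (passed in an
   SSE register via a DFmode move) and the second as X86_64_INTEGER_CLASS
   (passed in a general-purpose register).  */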
1874 /* Table of constants used by fldpi, fldln2, etc.... */
1875 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1876 static bool ext_80387_constants_init = 0;
1879 static struct machine_function * ix86_init_machine_status (void);
1880 static rtx ix86_function_value (const_tree, const_tree, bool);
1881 static rtx ix86_static_chain (const_tree, bool);
1882 static int ix86_function_regparm (const_tree, const_tree);
1883 static void ix86_compute_frame_layout (struct ix86_frame *);
1884 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1885 rtx, rtx, int);
1886 static void ix86_add_new_builtins (int);
1887 static rtx ix86_expand_vec_perm_builtin (tree);
1889 enum ix86_function_specific_strings
1891 IX86_FUNCTION_SPECIFIC_ARCH,
1892 IX86_FUNCTION_SPECIFIC_TUNE,
1893 IX86_FUNCTION_SPECIFIC_FPMATH,
1894 IX86_FUNCTION_SPECIFIC_MAX
1897 static char *ix86_target_string (int, int, const char *, const char *,
1898 const char *, bool);
1899 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1900 static void ix86_function_specific_save (struct cl_target_option *);
1901 static void ix86_function_specific_restore (struct cl_target_option *);
1902 static void ix86_function_specific_print (FILE *, int,
1903 struct cl_target_option *);
1904 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1905 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1906 static bool ix86_can_inline_p (tree, tree);
1907 static void ix86_set_current_function (tree);
1908 static unsigned int ix86_minimum_incoming_stack_boundary (bool);
1910 static enum calling_abi ix86_function_abi (const_tree);
1913 /* The svr4 ABI for the i386 says that records and unions are returned
1914 in memory. */
1915 #ifndef DEFAULT_PCC_STRUCT_RETURN
1916 #define DEFAULT_PCC_STRUCT_RETURN 1
1917 #endif
1919 /* Whether -mtune= or -march= were specified */
1920 static int ix86_tune_defaulted;
1921 static int ix86_arch_specified;
1923 /* Bit flags that specify the ISA we are compiling for. */
1924 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1926 /* A mask of ix86_isa_flags that includes bit X if X
1927 was set or cleared on the command line. */
1928 static int ix86_isa_flags_explicit;
1930 /* Define a set of ISAs which are available when a given ISA is
1931 enabled. MMX and SSE ISAs are handled separately. */
1933 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1934 #define OPTION_MASK_ISA_3DNOW_SET \
1935 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1937 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1938 #define OPTION_MASK_ISA_SSE2_SET \
1939 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1940 #define OPTION_MASK_ISA_SSE3_SET \
1941 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1942 #define OPTION_MASK_ISA_SSSE3_SET \
1943 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1944 #define OPTION_MASK_ISA_SSE4_1_SET \
1945 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1946 #define OPTION_MASK_ISA_SSE4_2_SET \
1947 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1948 #define OPTION_MASK_ISA_AVX_SET \
1949 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1950 #define OPTION_MASK_ISA_FMA_SET \
1951 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1953 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1954 as -msse4.2. */
1955 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1957 #define OPTION_MASK_ISA_SSE4A_SET \
1958 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1959 #define OPTION_MASK_ISA_FMA4_SET \
1960 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_SSE4A_SET \
1961 | OPTION_MASK_ISA_AVX_SET)
1962 #define OPTION_MASK_ISA_XOP_SET \
1963 (OPTION_MASK_ISA_XOP | OPTION_MASK_ISA_FMA4_SET)
1964 #define OPTION_MASK_ISA_LWP_SET \
1965 OPTION_MASK_ISA_LWP
1967 /* AES and PCLMUL need SSE2 because they use xmm registers */
1968 #define OPTION_MASK_ISA_AES_SET \
1969 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1970 #define OPTION_MASK_ISA_PCLMUL_SET \
1971 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1973 #define OPTION_MASK_ISA_ABM_SET \
1974 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1976 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1977 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1978 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1979 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1980 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
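/* Example of how the SET macros compose: OPTION_MASK_ISA_SSE3_SET expands
   to OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE,
   so -msse3 implicitly enables SSE2 and SSE as well.  */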
1982 /* Define a set of ISAs which aren't available when a given ISA is
1983 disabled. MMX and SSE ISAs are handled separately. */
1985 #define OPTION_MASK_ISA_MMX_UNSET \
1986 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1987 #define OPTION_MASK_ISA_3DNOW_UNSET \
1988 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1989 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1991 #define OPTION_MASK_ISA_SSE_UNSET \
1992 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1993 #define OPTION_MASK_ISA_SSE2_UNSET \
1994 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1995 #define OPTION_MASK_ISA_SSE3_UNSET \
1996 (OPTION_MASK_ISA_SSE3 \
1997 | OPTION_MASK_ISA_SSSE3_UNSET \
1998 | OPTION_MASK_ISA_SSE4A_UNSET )
1999 #define OPTION_MASK_ISA_SSSE3_UNSET \
2000 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
2001 #define OPTION_MASK_ISA_SSE4_1_UNSET \
2002 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
2003 #define OPTION_MASK_ISA_SSE4_2_UNSET \
2004 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
2005 #define OPTION_MASK_ISA_AVX_UNSET \
2006 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET \
2007 | OPTION_MASK_ISA_FMA4_UNSET)
2008 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2010 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
2011 as -mno-sse4.1. */
2012 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2014 #define OPTION_MASK_ISA_SSE4A_UNSET \
2015 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_FMA4_UNSET)
2017 #define OPTION_MASK_ISA_FMA4_UNSET \
2018 (OPTION_MASK_ISA_FMA4 | OPTION_MASK_ISA_XOP_UNSET)
2019 #define OPTION_MASK_ISA_XOP_UNSET OPTION_MASK_ISA_XOP
2020 #define OPTION_MASK_ISA_LWP_UNSET OPTION_MASK_ISA_LWP
2022 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2023 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2024 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2025 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2026 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2027 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2028 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2029 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
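/* Example of how the UNSET macros compose: OPTION_MASK_ISA_SSE2_UNSET
   covers SSE2, SSE3, SSSE3, SSE4A, SSE4.1, SSE4.2, AVX, FMA, FMA4 and XOP,
   so -mno-sse2 also disables every ISA that depends on SSE2.  */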
2031 /* Vectorization library interface and handlers. */
2032 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2033 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2034 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2036 /* Processor target table, indexed by processor number */
2037 struct ptt
2039 const struct processor_costs *cost; /* Processor costs */
2040 const int align_loop; /* Default alignments. */
2041 const int align_loop_max_skip;
2042 const int align_jump;
2043 const int align_jump_max_skip;
2044 const int align_func;
2047 static const struct ptt processor_target_table[PROCESSOR_max] =
2049 {&i386_cost, 4, 3, 4, 3, 4},
2050 {&i486_cost, 16, 15, 16, 15, 16},
2051 {&pentium_cost, 16, 7, 16, 7, 16},
2052 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2053 {&geode_cost, 0, 0, 0, 0, 0},
2054 {&k6_cost, 32, 7, 32, 7, 32},
2055 {&athlon_cost, 16, 7, 16, 7, 16},
2056 {&pentium4_cost, 0, 0, 0, 0, 0},
2057 {&k8_cost, 16, 7, 16, 7, 16},
2058 {&nocona_cost, 0, 0, 0, 0, 0},
2059 {&core2_cost, 16, 10, 16, 10, 16},
2060 {&generic32_cost, 16, 7, 16, 7, 16},
2061 {&generic64_cost, 16, 10, 16, 10, 16},
2062 {&amdfam10_cost, 32, 24, 32, 7, 32},
2063 {&atom_cost, 16, 7, 16, 7, 16}
2066 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2068 "generic",
2069 "i386",
2070 "i486",
2071 "pentium",
2072 "pentium-mmx",
2073 "pentiumpro",
2074 "pentium2",
2075 "pentium3",
2076 "pentium4",
2077 "pentium-m",
2078 "prescott",
2079 "nocona",
2080 "core2",
2081 "atom",
2082 "geode",
2083 "k6",
2084 "k6-2",
2085 "k6-3",
2086 "athlon",
2087 "athlon-4",
2088 "k8",
2089 "amdfam10"
2092 /* Implement TARGET_HANDLE_OPTION. */
2094 static bool
2095 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2097 switch (code)
2099 case OPT_mmmx:
2100 if (value)
2102 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2103 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2105 else
2107 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2108 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2110 return true;
2112 case OPT_m3dnow:
2113 if (value)
2115 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2116 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2118 else
2120 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2121 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2123 return true;
2125 case OPT_m3dnowa:
2126 return false;
2128 case OPT_msse:
2129 if (value)
2131 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2132 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2134 else
2136 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2137 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2139 return true;
2141 case OPT_msse2:
2142 if (value)
2144 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2145 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2147 else
2149 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2150 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2152 return true;
2154 case OPT_msse3:
2155 if (value)
2157 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2158 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2160 else
2162 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2163 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2165 return true;
2167 case OPT_mssse3:
2168 if (value)
2170 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2171 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2173 else
2175 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2176 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2178 return true;
2180 case OPT_msse4_1:
2181 if (value)
2183 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2184 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2186 else
2188 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2189 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2191 return true;
2193 case OPT_msse4_2:
2194 if (value)
2196 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2197 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2199 else
2201 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2202 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2204 return true;
2206 case OPT_mavx:
2207 if (value)
2209 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2210 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2212 else
2214 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2215 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2217 return true;
2219 case OPT_mfma:
2220 if (value)
2222 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2223 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2225 else
2227 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2228 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2230 return true;
2232 case OPT_msse4:
2233 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2234 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2235 return true;
2237 case OPT_mno_sse4:
2238 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2239 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2240 return true;
2242 case OPT_msse4a:
2243 if (value)
2245 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2246 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2248 else
2250 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2251 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2253 return true;
2255 case OPT_mfma4:
2256 if (value)
2258 ix86_isa_flags |= OPTION_MASK_ISA_FMA4_SET;
2259 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_SET;
2261 else
2263 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA4_UNSET;
2264 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA4_UNSET;
2266 return true;
2268 case OPT_mxop:
2269 if (value)
2271 ix86_isa_flags |= OPTION_MASK_ISA_XOP_SET;
2272 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_SET;
2274 else
2276 ix86_isa_flags &= ~OPTION_MASK_ISA_XOP_UNSET;
2277 ix86_isa_flags_explicit |= OPTION_MASK_ISA_XOP_UNSET;
2279 return true;
2281 case OPT_mlwp:
2282 if (value)
2284 ix86_isa_flags |= OPTION_MASK_ISA_LWP_SET;
2285 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_SET;
2287 else
2289 ix86_isa_flags &= ~OPTION_MASK_ISA_LWP_UNSET;
2290 ix86_isa_flags_explicit |= OPTION_MASK_ISA_LWP_UNSET;
2292 return true;
2294 case OPT_mabm:
2295 if (value)
2297 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2298 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2300 else
2302 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2303 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2305 return true;
2307 case OPT_mpopcnt:
2308 if (value)
2310 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2311 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2313 else
2315 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2316 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2318 return true;
2320 case OPT_msahf:
2321 if (value)
2323 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2324 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2326 else
2328 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2329 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2331 return true;
2333 case OPT_mcx16:
2334 if (value)
2336 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2337 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2339 else
2341 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2342 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2344 return true;
2346 case OPT_mmovbe:
2347 if (value)
2349 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2350 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2352 else
2354 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2355 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2357 return true;
2359 case OPT_mcrc32:
2360 if (value)
2362 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2363 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2365 else
2367 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2368 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2370 return true;
2372 case OPT_maes:
2373 if (value)
2375 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2376 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2378 else
2380 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2381 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2383 return true;
2385 case OPT_mpclmul:
2386 if (value)
2388 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2389 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2391 else
2393 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2394 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2396 return true;
2398 default:
2399 return true;
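/* Note on the pattern used throughout the switch above: each handler both
   updates ix86_isa_flags and records the affected bits in
   ix86_isa_flags_explicit, so that override_options applies -march
   defaults only to ISA bits the user did not set or clear explicitly.  */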
2403 /* Return a string that documents the current -m options. The caller is
2404 responsible for freeing the string. */
2406 static char *
2407 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2408 const char *fpmath, bool add_nl_p)
2410 struct ix86_target_opts
2412 const char *option; /* option string */
2413 int mask; /* isa mask options */
2416 /* This table is ordered so that options like -msse4.2, which imply
2417 preceding options, are matched first. */
2418 static struct ix86_target_opts isa_opts[] =
2420 { "-m64", OPTION_MASK_ISA_64BIT },
2421 { "-mfma4", OPTION_MASK_ISA_FMA4 },
2422 { "-mxop", OPTION_MASK_ISA_XOP },
2423 { "-mlwp", OPTION_MASK_ISA_LWP },
2424 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2425 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2426 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2427 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2428 { "-msse3", OPTION_MASK_ISA_SSE3 },
2429 { "-msse2", OPTION_MASK_ISA_SSE2 },
2430 { "-msse", OPTION_MASK_ISA_SSE },
2431 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2432 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2433 { "-mmmx", OPTION_MASK_ISA_MMX },
2434 { "-mabm", OPTION_MASK_ISA_ABM },
2435 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2436 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2437 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2438 { "-maes", OPTION_MASK_ISA_AES },
2439 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2442 /* Flag options. */
2443 static struct ix86_target_opts flag_opts[] =
2445 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2446 { "-m80387", MASK_80387 },
2447 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2448 { "-malign-double", MASK_ALIGN_DOUBLE },
2449 { "-mcld", MASK_CLD },
2450 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2451 { "-mieee-fp", MASK_IEEE_FP },
2452 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2453 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2454 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2455 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2456 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2457 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2458 { "-mno-red-zone", MASK_NO_RED_ZONE },
2459 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2460 { "-mrecip", MASK_RECIP },
2461 { "-mrtd", MASK_RTD },
2462 { "-msseregparm", MASK_SSEREGPARM },
2463 { "-mstack-arg-probe", MASK_STACK_PROBE },
2464 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2467 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2469 char isa_other[40];
2470 char target_other[40];
2471 unsigned num = 0;
2472 unsigned i, j;
2473 char *ret;
2474 char *ptr;
2475 size_t len;
2476 size_t line_len;
2477 size_t sep_len;
2479 memset (opts, '\0', sizeof (opts));
2481 /* Add -march= option. */
2482 if (arch)
2484 opts[num][0] = "-march=";
2485 opts[num++][1] = arch;
2488 /* Add -mtune= option. */
2489 if (tune)
2491 opts[num][0] = "-mtune=";
2492 opts[num++][1] = tune;
2495 /* Pick out the options in isa options. */
2496 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2498 if ((isa & isa_opts[i].mask) != 0)
2500 opts[num++][0] = isa_opts[i].option;
2501 isa &= ~ isa_opts[i].mask;
2505 if (isa && add_nl_p)
2507 opts[num++][0] = isa_other;
2508 sprintf (isa_other, "(other isa: 0x%x)", isa);
2511 /* Add flag options. */
2512 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2514 if ((flags & flag_opts[i].mask) != 0)
2516 opts[num++][0] = flag_opts[i].option;
2517 flags &= ~ flag_opts[i].mask;
2521 if (flags && add_nl_p)
2523 opts[num++][0] = target_other;
2524 sprintf (target_other, "(other flags: 0x%x)", flags);
2527 /* Add -fpmath= option. */
2528 if (fpmath)
2530 opts[num][0] = "-mfpmath=";
2531 opts[num++][1] = fpmath;
2534 /* Any options? */
2535 if (num == 0)
2536 return NULL;
2538 gcc_assert (num < ARRAY_SIZE (opts));
2540 /* Size the string. */
2541 len = 0;
2542 sep_len = (add_nl_p) ? 3 : 1;
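/* Sizing note: when add_nl_p is true an option may be preceded by up to
   three separator bytes (a space plus "\\\n" at a line break); otherwise
   only a single space is needed, hence sep_len of 3 or 1.  */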
2543 for (i = 0; i < num; i++)
2545 len += sep_len;
2546 for (j = 0; j < 2; j++)
2547 if (opts[i][j])
2548 len += strlen (opts[i][j]);
2551 /* Build the string. */
2552 ret = ptr = (char *) xmalloc (len);
2553 line_len = 0;
2555 for (i = 0; i < num; i++)
2557 size_t len2[2];
2559 for (j = 0; j < 2; j++)
2560 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2562 if (i != 0)
2564 *ptr++ = ' ';
2565 line_len++;
2567 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2569 *ptr++ = '\\';
2570 *ptr++ = '\n';
2571 line_len = 0;
2575 for (j = 0; j < 2; j++)
2576 if (opts[i][j])
2578 memcpy (ptr, opts[i][j], len2[j]);
2579 ptr += len2[j];
2580 line_len += len2[j];
2584 *ptr = '\0';
2585 gcc_assert (ret + len >= ptr);
2587 return ret;
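/* Illustrative sketch of the result (not exact output): for
   -m64 -march=core2 -mtune=core2 the string is built roughly as
   "-march=core2 -mtune=core2 -m64 -mssse3 -msse3 -msse2 -msse -mmmx ...",
   i.e. -march=/-mtune= first, then ISA options in isa_opts order, then
   any flag options and a trailing -mfpmath= when one was given.  */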
2590 /* Function that is callable from the debugger to print the current
2591 options. */
2592 void
2593 ix86_debug_options (void)
2595 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2596 ix86_arch_string, ix86_tune_string,
2597 ix86_fpmath_string, true);
2599 if (opts)
2601 fprintf (stderr, "%s\n\n", opts);
2602 free (opts);
2604 else
2605 fputs ("<no options>\n\n", stderr);
2607 return;
2610 /* Sometimes certain combinations of command options do not make
2611 sense on a particular target machine. You can define a macro
2612 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2613 defined, is executed once just after all the command options have
2614 been parsed.
2616 Don't use this macro to turn on various extra optimizations for
2617 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2619 void
2620 override_options (bool main_args_p)
2622 int i;
2623 unsigned int ix86_arch_mask, ix86_tune_mask;
2624 const char *prefix;
2625 const char *suffix;
2626 const char *sw;
2628 /* Comes from final.c -- no real reason to change it. */
2629 #define MAX_CODE_ALIGN 16
2631 enum pta_flags
2633 PTA_SSE = 1 << 0,
2634 PTA_SSE2 = 1 << 1,
2635 PTA_SSE3 = 1 << 2,
2636 PTA_MMX = 1 << 3,
2637 PTA_PREFETCH_SSE = 1 << 4,
2638 PTA_3DNOW = 1 << 5,
2639 PTA_3DNOW_A = 1 << 6,
2640 PTA_64BIT = 1 << 7,
2641 PTA_SSSE3 = 1 << 8,
2642 PTA_CX16 = 1 << 9,
2643 PTA_POPCNT = 1 << 10,
2644 PTA_ABM = 1 << 11,
2645 PTA_SSE4A = 1 << 12,
2646 PTA_NO_SAHF = 1 << 13,
2647 PTA_SSE4_1 = 1 << 14,
2648 PTA_SSE4_2 = 1 << 15,
2649 PTA_AES = 1 << 16,
2650 PTA_PCLMUL = 1 << 17,
2651 PTA_AVX = 1 << 18,
2652 PTA_FMA = 1 << 19,
2653 PTA_MOVBE = 1 << 20,
2654 PTA_FMA4 = 1 << 21,
2655 PTA_XOP = 1 << 22,
2656 PTA_LWP = 1 << 23
2659 static struct pta
2661 const char *const name; /* processor name or nickname. */
2662 const enum processor_type processor;
2663 const enum attr_cpu schedule;
2664 const unsigned /*enum pta_flags*/ flags;
2666 const processor_alias_table[] =
2668 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2669 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2670 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2671 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2672 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2673 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2674 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2675 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2676 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2677 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2678 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2679 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2680 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2681 PTA_MMX | PTA_SSE},
2682 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2683 PTA_MMX | PTA_SSE},
2684 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2685 PTA_MMX | PTA_SSE | PTA_SSE2},
2686 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2687 PTA_MMX |PTA_SSE | PTA_SSE2},
2688 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2689 PTA_MMX | PTA_SSE | PTA_SSE2},
2690 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2691 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2692 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2693 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2694 | PTA_CX16 | PTA_NO_SAHF},
2695 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2696 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2697 | PTA_SSSE3 | PTA_CX16},
2698 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2699 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2700 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2701 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2702 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2703 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2704 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2705 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2706 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2707 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2708 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2709 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2710 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2711 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2712 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2713 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2714 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2715 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2716 {"x86-64", PROCESSOR_K8, CPU_K8,
2717 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2718 {"k8", PROCESSOR_K8, CPU_K8,
2719 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2720 | PTA_SSE2 | PTA_NO_SAHF},
2721 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2722 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2723 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2724 {"opteron", PROCESSOR_K8, CPU_K8,
2725 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2726 | PTA_SSE2 | PTA_NO_SAHF},
2727 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2728 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2729 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2730 {"athlon64", PROCESSOR_K8, CPU_K8,
2731 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2732 | PTA_SSE2 | PTA_NO_SAHF},
2733 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2734 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2735 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2736 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2737 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2738 | PTA_SSE2 | PTA_NO_SAHF},
2739 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2740 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2741 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2742 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2743 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2744 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2745 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2746 0 /* flags are only used for -march switch. */ },
2747 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2748 PTA_64BIT /* flags are only used for -march switch. */ },
2751 int const pta_size = ARRAY_SIZE (processor_alias_table);
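/* Example of how the alias table is consumed by the loops below: for
   -march=core2 we set ix86_arch = PROCESSOR_CORE2 and
   ix86_schedule = CPU_CORE2, then OR the MMX/SSE/SSE2/SSE3/SSSE3/CX16
   ISA bits into ix86_isa_flags, skipping any bit the user already set or
   cleared explicitly on the command line.  */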
2753 /* Set up prefix/suffix so the error messages refer to either the command
2754 line argument, or the attribute(target). */
2755 if (main_args_p)
2757 prefix = "-m";
2758 suffix = "";
2759 sw = "switch";
2761 else
2763 prefix = "option(\"";
2764 suffix = "\")";
2765 sw = "attribute";
2768 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2769 SUBTARGET_OVERRIDE_OPTIONS;
2770 #endif
2772 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2773 SUBSUBTARGET_OVERRIDE_OPTIONS;
2774 #endif
2776 /* -fPIC is the default for x86_64. */
2777 if (TARGET_MACHO && TARGET_64BIT)
2778 flag_pic = 2;
2780 /* Set the default values for switches whose default depends on TARGET_64BIT
2781 in case they weren't overwritten by command line options. */
2782 if (TARGET_64BIT)
2784 /* Mach-O doesn't support omitting the frame pointer for now. */
2785 if (flag_omit_frame_pointer == 2)
2786 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2787 if (flag_asynchronous_unwind_tables == 2)
2788 flag_asynchronous_unwind_tables = 1;
2789 if (flag_pcc_struct_return == 2)
2790 flag_pcc_struct_return = 0;
2792 else
2794 if (flag_omit_frame_pointer == 2)
2795 flag_omit_frame_pointer = 0;
2796 if (flag_asynchronous_unwind_tables == 2)
2797 flag_asynchronous_unwind_tables = 0;
2798 if (flag_pcc_struct_return == 2)
2799 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2802 /* Need to check -mtune=generic first. */
2803 if (ix86_tune_string)
2805 if (!strcmp (ix86_tune_string, "generic")
2806 || !strcmp (ix86_tune_string, "i686")
2807 /* As special support for cross compilers we read -mtune=native
2808 as -mtune=generic. With native compilers we won't see the
2809 -mtune=native, as it was changed by the driver. */
2810 || !strcmp (ix86_tune_string, "native"))
2812 if (TARGET_64BIT)
2813 ix86_tune_string = "generic64";
2814 else
2815 ix86_tune_string = "generic32";
2817 /* If this call is for setting the option attribute, allow the
2818 generic32/generic64 that was previously set. */
2819 else if (!main_args_p
2820 && (!strcmp (ix86_tune_string, "generic32")
2821 || !strcmp (ix86_tune_string, "generic64")))
2823 else if (!strncmp (ix86_tune_string, "generic", 7))
2824 error ("bad value (%s) for %stune=%s %s",
2825 ix86_tune_string, prefix, suffix, sw);
2827 else
2829 if (ix86_arch_string)
2830 ix86_tune_string = ix86_arch_string;
2831 if (!ix86_tune_string)
2833 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2834 ix86_tune_defaulted = 1;
2837 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2838 need to use a sensible tune option. */
2839 if (!strcmp (ix86_tune_string, "generic")
2840 || !strcmp (ix86_tune_string, "x86-64")
2841 || !strcmp (ix86_tune_string, "i686"))
2843 if (TARGET_64BIT)
2844 ix86_tune_string = "generic64";
2845 else
2846 ix86_tune_string = "generic32";
2849 if (ix86_stringop_string)
2851 if (!strcmp (ix86_stringop_string, "rep_byte"))
2852 stringop_alg = rep_prefix_1_byte;
2853 else if (!strcmp (ix86_stringop_string, "libcall"))
2854 stringop_alg = libcall;
2855 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2856 stringop_alg = rep_prefix_4_byte;
2857 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2858 && TARGET_64BIT)
2859 /* rep; movq isn't available in 32-bit code. */
2860 stringop_alg = rep_prefix_8_byte;
2861 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2862 stringop_alg = loop_1_byte;
2863 else if (!strcmp (ix86_stringop_string, "loop"))
2864 stringop_alg = loop;
2865 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2866 stringop_alg = unrolled_loop;
2867 else
2868 error ("bad value (%s) for %sstringop-strategy=%s %s",
2869 ix86_stringop_string, prefix, suffix, sw);
2871 if (!strcmp (ix86_tune_string, "x86-64"))
2872 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2873 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2874 prefix, suffix, prefix, suffix, prefix, suffix);
2876 if (!ix86_arch_string)
2877 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2878 else
2879 ix86_arch_specified = 1;
2881 if (!strcmp (ix86_arch_string, "generic"))
2882 error ("generic CPU can be used only for %stune=%s %s",
2883 prefix, suffix, sw);
2884 if (!strncmp (ix86_arch_string, "generic", 7))
2885 error ("bad value (%s) for %sarch=%s %s",
2886 ix86_arch_string, prefix, suffix, sw);
2888 /* Validate -mabi= value. */
2889 if (ix86_abi_string)
2891 if (strcmp (ix86_abi_string, "sysv") == 0)
2892 ix86_abi = SYSV_ABI;
2893 else if (strcmp (ix86_abi_string, "ms") == 0)
2894 ix86_abi = MS_ABI;
2895 else
2896 error ("unknown ABI (%s) for %sabi=%s %s",
2897 ix86_abi_string, prefix, suffix, sw);
2899 else
2900 ix86_abi = DEFAULT_ABI;
2902 if (ix86_cmodel_string != 0)
2904 if (!strcmp (ix86_cmodel_string, "small"))
2905 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2906 else if (!strcmp (ix86_cmodel_string, "medium"))
2907 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2908 else if (!strcmp (ix86_cmodel_string, "large"))
2909 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2910 else if (flag_pic)
2911 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2912 else if (!strcmp (ix86_cmodel_string, "32"))
2913 ix86_cmodel = CM_32;
2914 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2915 ix86_cmodel = CM_KERNEL;
2916 else
2917 error ("bad value (%s) for %scmodel=%s %s",
2918 ix86_cmodel_string, prefix, suffix, sw);
2920 else
2922 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2923 use of rip-relative addressing. This eliminates fixups that
2924 would otherwise be needed if this object is to be placed in a
2925 DLL, and is essentially just as efficient as direct addressing. */
2926 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2927 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2928 else if (TARGET_64BIT)
2929 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2930 else
2931 ix86_cmodel = CM_32;
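/* Summary of the defaults chosen above: 32-bit compilation uses CM_32;
   64-bit compilation uses CM_SMALL or CM_SMALL_PIC depending on -fpic,
   and the 64-bit MS ABI forces CM_SMALL_PIC together with flag_pic = 1.  */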
2933 if (ix86_asm_string != 0)
2935 if (! TARGET_MACHO
2936 && !strcmp (ix86_asm_string, "intel"))
2937 ix86_asm_dialect = ASM_INTEL;
2938 else if (!strcmp (ix86_asm_string, "att"))
2939 ix86_asm_dialect = ASM_ATT;
2940 else
2941 error ("bad value (%s) for %sasm=%s %s",
2942 ix86_asm_string, prefix, suffix, sw);
2944 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2945 error ("code model %qs not supported in the %s bit mode",
2946 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2947 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2948 sorry ("%i-bit mode not compiled in",
2949 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2951 for (i = 0; i < pta_size; i++)
2952 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2954 ix86_schedule = processor_alias_table[i].schedule;
2955 ix86_arch = processor_alias_table[i].processor;
2956 /* Default cpu tuning to the architecture. */
2957 ix86_tune = ix86_arch;
2959 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2960 error ("CPU you selected does not support x86-64 "
2961 "instruction set");
2963 if (processor_alias_table[i].flags & PTA_MMX
2964 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2965 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2966 if (processor_alias_table[i].flags & PTA_3DNOW
2967 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2968 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2969 if (processor_alias_table[i].flags & PTA_3DNOW_A
2970 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2971 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2972 if (processor_alias_table[i].flags & PTA_SSE
2973 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2974 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2975 if (processor_alias_table[i].flags & PTA_SSE2
2976 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2977 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2978 if (processor_alias_table[i].flags & PTA_SSE3
2979 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2980 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2981 if (processor_alias_table[i].flags & PTA_SSSE3
2982 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2983 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2984 if (processor_alias_table[i].flags & PTA_SSE4_1
2985 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2986 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2987 if (processor_alias_table[i].flags & PTA_SSE4_2
2988 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2989 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2990 if (processor_alias_table[i].flags & PTA_AVX
2991 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2992 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2993 if (processor_alias_table[i].flags & PTA_FMA
2994 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2995 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2996 if (processor_alias_table[i].flags & PTA_SSE4A
2997 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2998 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2999 if (processor_alias_table[i].flags & PTA_FMA4
3000 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA4))
3001 ix86_isa_flags |= OPTION_MASK_ISA_FMA4;
3002 if (processor_alias_table[i].flags & PTA_XOP
3003 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_XOP))
3004 ix86_isa_flags |= OPTION_MASK_ISA_XOP;
3005 if (processor_alias_table[i].flags & PTA_LWP
3006 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_LWP))
3007 ix86_isa_flags |= OPTION_MASK_ISA_LWP;
3008 if (processor_alias_table[i].flags & PTA_ABM
3009 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
3010 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
3011 if (processor_alias_table[i].flags & PTA_CX16
3012 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
3013 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
3014 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
3015 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
3016 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
3017 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
3018 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
3019 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
3020 if (processor_alias_table[i].flags & PTA_MOVBE
3021 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
3022 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
3023 if (processor_alias_table[i].flags & PTA_AES
3024 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
3025 ix86_isa_flags |= OPTION_MASK_ISA_AES;
3026 if (processor_alias_table[i].flags & PTA_PCLMUL
3027 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
3028 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
3029 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
3030 x86_prefetch_sse = true;
3032 break;
3035 if (i == pta_size)
3036 error ("bad value (%s) for %sarch=%s %s",
3037 ix86_arch_string, prefix, suffix, sw);
3039 ix86_arch_mask = 1u << ix86_arch;
3040 for (i = 0; i < X86_ARCH_LAST; ++i)
3041 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3043 for (i = 0; i < pta_size; i++)
3044 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
3046 ix86_schedule = processor_alias_table[i].schedule;
3047 ix86_tune = processor_alias_table[i].processor;
3048 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
3050 if (ix86_tune_defaulted)
3052 ix86_tune_string = "x86-64";
3053 for (i = 0; i < pta_size; i++)
3054 if (! strcmp (ix86_tune_string,
3055 processor_alias_table[i].name))
3056 break;
3057 ix86_schedule = processor_alias_table[i].schedule;
3058 ix86_tune = processor_alias_table[i].processor;
3060 else
3061 error ("CPU you selected does not support x86-64 "
3062 "instruction set");
3064 /* Intel CPUs have always interpreted SSE prefetch instructions as
3065 NOPs; so, we can enable SSE prefetch instructions even when
3066 -mtune (rather than -march) points us to a processor that has them.
3067 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
3068 higher processors. */
3069 if (TARGET_CMOVE
3070 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3071 x86_prefetch_sse = true;
3072 break;
3074 if (i == pta_size)
3075 error ("bad value (%s) for %stune=%s %s",
3076 ix86_tune_string, prefix, suffix, sw);
3078 ix86_tune_mask = 1u << ix86_tune;
3079 for (i = 0; i < X86_TUNE_LAST; ++i)
3080 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3082 if (optimize_size)
3083 ix86_cost = &ix86_size_cost;
3084 else
3085 ix86_cost = processor_target_table[ix86_tune].cost;
3087 /* Arrange to set up i386_stack_locals for all functions. */
3088 init_machine_status = ix86_init_machine_status;
3090 /* Validate -mregparm= value. */
3091 if (ix86_regparm_string)
3093 if (TARGET_64BIT)
3094 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3095 i = atoi (ix86_regparm_string);
3096 if (i < 0 || i > REGPARM_MAX)
3097 error ("%sregparm=%d%s is not between 0 and %d",
3098 prefix, i, suffix, REGPARM_MAX);
3099 else
3100 ix86_regparm = i;
3102 if (TARGET_64BIT)
3103 ix86_regparm = REGPARM_MAX;
3105 /* If the user has provided any of the -malign-* options,
3106 warn and use that value only if -falign-* is not set.
3107 Remove this code in GCC 3.2 or later. */
3108 if (ix86_align_loops_string)
3110 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3111 prefix, suffix, suffix);
3112 if (align_loops == 0)
3114 i = atoi (ix86_align_loops_string);
3115 if (i < 0 || i > MAX_CODE_ALIGN)
3116 error ("%salign-loops=%d%s is not between 0 and %d",
3117 prefix, i, suffix, MAX_CODE_ALIGN);
3118 else
3119 align_loops = 1 << i;
3123 if (ix86_align_jumps_string)
3125 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3126 prefix, suffix, suffix);
3127 if (align_jumps == 0)
3129 i = atoi (ix86_align_jumps_string);
3130 if (i < 0 || i > MAX_CODE_ALIGN)
3131 error ("%salign-jumps=%d%s is not between 0 and %d",
3132 prefix, i, suffix, MAX_CODE_ALIGN);
3133 else
3134 align_jumps = 1 << i;
3138 if (ix86_align_funcs_string)
3140 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3141 prefix, suffix, suffix);
3142 if (align_functions == 0)
3144 i = atoi (ix86_align_funcs_string);
3145 if (i < 0 || i > MAX_CODE_ALIGN)
3146 error ("%salign-functions=%d%s is not between 0 and %d",
3147 prefix, i, suffix, MAX_CODE_ALIGN);
3148 else
3149 align_functions = 1 << i;
3153 /* Default align_* from the processor table. */
3154 if (align_loops == 0)
3156 align_loops = processor_target_table[ix86_tune].align_loop;
3157 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3159 if (align_jumps == 0)
3161 align_jumps = processor_target_table[ix86_tune].align_jump;
3162 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3164 if (align_functions == 0)
3166 align_functions = processor_target_table[ix86_tune].align_func;
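/* Example: with -mtune=k8 the processor_target_table entry
   {&k8_cost, 16, 7, 16, 7, 16} gives 16-byte loop, jump and function
   alignment with a 7-byte max skip, unless -falign-* options already
   set these values above.  */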
3169 /* Validate -mbranch-cost= value, or provide default. */
3170 ix86_branch_cost = ix86_cost->branch_cost;
3171 if (ix86_branch_cost_string)
3173 i = atoi (ix86_branch_cost_string);
3174 if (i < 0 || i > 5)
3175 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3176 else
3177 ix86_branch_cost = i;
3179 if (ix86_section_threshold_string)
3181 i = atoi (ix86_section_threshold_string);
3182 if (i < 0)
3183 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3184 else
3185 ix86_section_threshold = i;
3188 if (ix86_tls_dialect_string)
3190 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3191 ix86_tls_dialect = TLS_DIALECT_GNU;
3192 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3193 ix86_tls_dialect = TLS_DIALECT_GNU2;
3194 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
3195 ix86_tls_dialect = TLS_DIALECT_SUN;
3196 else
3197 error ("bad value (%s) for %stls-dialect=%s %s",
3198 ix86_tls_dialect_string, prefix, suffix, sw);
3201 if (ix87_precision_string)
3203 i = atoi (ix87_precision_string);
3204 if (i != 32 && i != 64 && i != 80)
3205 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3208 if (TARGET_64BIT)
3210 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3212 /* Enable by default the SSE and MMX builtins. Do allow the user to
3213 explicitly disable any of these. In particular, disabling SSE and
3214 MMX for kernel code is extremely useful. */
3215 if (!ix86_arch_specified)
3216 ix86_isa_flags
3217 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3218 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3220 if (TARGET_RTD)
3221 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3223 else
3225 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3227 if (!ix86_arch_specified)
3228 ix86_isa_flags
3229 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3231 /* The i386 ABI does not specify a red zone.  It still makes sense to use
3232 one when the programmer takes care to keep the stack from being destroyed. */
3233 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3234 target_flags |= MASK_NO_RED_ZONE;
3237 /* Keep nonleaf frame pointers. */
3238 if (flag_omit_frame_pointer)
3239 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3240 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3241 flag_omit_frame_pointer = 1;
3243 /* If we're doing fast math, we don't care about comparison order
3244 wrt NaNs. This lets us use a shorter comparison sequence. */
3245 if (flag_finite_math_only)
3246 target_flags &= ~MASK_IEEE_FP;
3248 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3249 since the insns won't need emulation. */
3250 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3251 target_flags &= ~MASK_NO_FANCY_MATH_387;
3253 /* Likewise, if the target doesn't have a 387, or we've specified
3254 software floating point, don't use 387 inline intrinsics. */
3255 if (!TARGET_80387)
3256 target_flags |= MASK_NO_FANCY_MATH_387;
3258 /* Turn on MMX builtins for -msse. */
3259 if (TARGET_SSE)
3261 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3262 x86_prefetch_sse = true;
3265 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3266 if (TARGET_SSE4_2 || TARGET_ABM)
3267 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3269 /* Validate -mpreferred-stack-boundary= value or default it to
3270 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3271 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3272 if (ix86_preferred_stack_boundary_string)
3274 i = atoi (ix86_preferred_stack_boundary_string);
3275 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3276 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3277 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3278 else
3279 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
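/* Editorial note (illustrative, not part of GCC): this boundary option is
   also a log2 value, but the result above is kept in bits.  For example:

       -mpreferred-stack-boundary=4  ->  (1 << 4) * BITS_PER_UNIT
                                      =  16 * 8 = 128 bits (16-byte alignment)

   with the accepted range checked above being 4..12 for 64-bit code and
   2..12 for 32-bit code.  */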
3282 /* Set the default value for -mstackrealign. */
3283 if (ix86_force_align_arg_pointer == -1)
3284 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3286 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3288 /* Validate -mincoming-stack-boundary= value or default it to
3289 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3290 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3291 if (ix86_incoming_stack_boundary_string)
3293 i = atoi (ix86_incoming_stack_boundary_string);
3294 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3295 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3296 i, TARGET_64BIT ? 4 : 2);
3297 else
3299 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3300 ix86_incoming_stack_boundary
3301 = ix86_user_incoming_stack_boundary;
3305 /* Accept -msseregparm only if at least SSE support is enabled. */
3306 if (TARGET_SSEREGPARM
3307 && ! TARGET_SSE)
3308 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3310 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3311 if (ix86_fpmath_string != 0)
3313 if (! strcmp (ix86_fpmath_string, "387"))
3314 ix86_fpmath = FPMATH_387;
3315 else if (! strcmp (ix86_fpmath_string, "sse"))
3317 if (!TARGET_SSE)
3319 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3320 ix86_fpmath = FPMATH_387;
3322 else
3323 ix86_fpmath = FPMATH_SSE;
3325 else if (! strcmp (ix86_fpmath_string, "387,sse")
3326 || ! strcmp (ix86_fpmath_string, "387+sse")
3327 || ! strcmp (ix86_fpmath_string, "sse,387")
3328 || ! strcmp (ix86_fpmath_string, "sse+387")
3329 || ! strcmp (ix86_fpmath_string, "both"))
3331 if (!TARGET_SSE)
3333 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3334 ix86_fpmath = FPMATH_387;
3336 else if (!TARGET_80387)
3338 warning (0, "387 instruction set disabled, using SSE arithmetics");
3339 ix86_fpmath = FPMATH_SSE;
3341 else
3342 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3344 else
3345 error ("bad value (%s) for %sfpmath=%s %s",
3346 ix86_fpmath_string, prefix, suffix, sw);
3349 /* If the i387 is disabled, then do not return values in it. */
3350 if (!TARGET_80387)
3351 target_flags &= ~MASK_FLOAT_RETURNS;
3353 /* Use external vectorized library in vectorizing intrinsics. */
3354 if (ix86_veclibabi_string)
3356 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3357 ix86_veclib_handler = ix86_veclibabi_svml;
3358 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3359 ix86_veclib_handler = ix86_veclibabi_acml;
3360 else
3361 error ("unknown vectorization library ABI type (%s) for "
3362 "%sveclibabi=%s %s", ix86_veclibabi_string,
3363 prefix, suffix, sw);
3366 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3367 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3368 && !optimize_size)
3369 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3371 /* ??? Unwind info is not correct around the CFG unless either a frame
3372 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3373 unwind info generation to be aware of the CFG and propagating states
3374 around edges. */
3375 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3376 || flag_exceptions || flag_non_call_exceptions)
3377 && flag_omit_frame_pointer
3378 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3380 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3381 warning (0, "unwind tables currently require either a frame pointer "
3382 "or %saccumulate-outgoing-args%s for correctness",
3383 prefix, suffix);
3384 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3387 /* If stack probes are required, the space used for large function
3388 arguments on the stack must also be probed, so enable
3389 -maccumulate-outgoing-args so this happens in the prologue. */
3390 if (TARGET_STACK_PROBE
3391 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3393 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3394 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3395 "for correctness", prefix, suffix);
3396 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3399 /* For sane SSE instruction set generation we need the fcomi instruction.
3400 It is safe to enable all CMOVE instructions. */
3401 if (TARGET_SSE)
3402 TARGET_CMOVE = 1;
3404 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3406 char *p;
3407 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3408 p = strchr (internal_label_prefix, 'X');
3409 internal_label_prefix_len = p - internal_label_prefix;
3410 *p = '\0';
3413 /* When the scheduling description is not available, disable the scheduler
3414 pass so it won't slow down compilation and make x87 code slower. */
3415 if (!TARGET_SCHEDULE)
3416 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3418 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3419 set_param_value ("simultaneous-prefetches",
3420 ix86_cost->simultaneous_prefetches);
3421 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3422 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3423 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3424 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3425 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3426 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3428 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3429 can be optimized to ap = __builtin_next_arg (0). */
3430 if (!TARGET_64BIT)
3431 targetm.expand_builtin_va_start = NULL;
3433 if (TARGET_64BIT)
3435 ix86_gen_leave = gen_leave_rex64;
3436 ix86_gen_pop1 = gen_popdi1;
3437 ix86_gen_add3 = gen_adddi3;
3438 ix86_gen_sub3 = gen_subdi3;
3439 ix86_gen_sub3_carry = gen_subdi3_carry;
3440 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3441 ix86_gen_monitor = gen_sse3_monitor64;
3442 ix86_gen_andsp = gen_anddi3;
3444 else
3446 ix86_gen_leave = gen_leave;
3447 ix86_gen_pop1 = gen_popsi1;
3448 ix86_gen_add3 = gen_addsi3;
3449 ix86_gen_sub3 = gen_subsi3;
3450 ix86_gen_sub3_carry = gen_subsi3_carry;
3451 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3452 ix86_gen_monitor = gen_sse3_monitor;
3453 ix86_gen_andsp = gen_andsi3;
3456 #ifdef USE_IX86_CLD
3457 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3458 if (!TARGET_64BIT)
3459 target_flags |= MASK_CLD & ~target_flags_explicit;
3460 #endif
3462 /* Save the initial options in case the user uses function-specific options. */
3463 if (main_args_p)
3464 target_option_default_node = target_option_current_node
3465 = build_target_option_node ();
3468 /* Update register usage after having seen the compiler flags. */
3470 void
3471 ix86_conditional_register_usage (void)
3473 int i;
3474 unsigned int j;
3476 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3478 if (fixed_regs[i] > 1)
3479 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3480 if (call_used_regs[i] > 1)
3481 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3484 /* The PIC register, if it exists, is fixed. */
3485 j = PIC_OFFSET_TABLE_REGNUM;
3486 if (j != INVALID_REGNUM)
3487 fixed_regs[j] = call_used_regs[j] = 1;
3489 /* The MS_ABI changes the set of call-used registers. */
3490 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3492 call_used_regs[SI_REG] = 0;
3493 call_used_regs[DI_REG] = 0;
3494 call_used_regs[XMM6_REG] = 0;
3495 call_used_regs[XMM7_REG] = 0;
3496 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3497 call_used_regs[i] = 0;
3500 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3501 other call-clobbered regs for 64-bit. */
3502 if (TARGET_64BIT)
3504 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3506 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3507 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3508 && call_used_regs[i])
3509 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3512 /* If MMX is disabled, squash the registers. */
3513 if (! TARGET_MMX)
3514 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3515 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3516 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3518 /* If SSE is disabled, squash the registers. */
3519 if (! TARGET_SSE)
3520 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3521 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3522 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3524 /* If the FPU is disabled, squash the registers. */
3525 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3526 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3527 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3528 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3530 /* If 32-bit, squash the 64-bit registers. */
3531 if (! TARGET_64BIT)
3533 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3534 reg_names[i] = "";
3535 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3536 reg_names[i] = "";
3541 /* Save the current options */
3543 static void
3544 ix86_function_specific_save (struct cl_target_option *ptr)
3546 ptr->arch = ix86_arch;
3547 ptr->schedule = ix86_schedule;
3548 ptr->tune = ix86_tune;
3549 ptr->fpmath = ix86_fpmath;
3550 ptr->branch_cost = ix86_branch_cost;
3551 ptr->tune_defaulted = ix86_tune_defaulted;
3552 ptr->arch_specified = ix86_arch_specified;
3553 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3554 ptr->target_flags_explicit = target_flags_explicit;
3556 /* The fields are char but the variables are not; make sure the
3557 values fit in the fields. */
3558 gcc_assert (ptr->arch == ix86_arch);
3559 gcc_assert (ptr->schedule == ix86_schedule);
3560 gcc_assert (ptr->tune == ix86_tune);
3561 gcc_assert (ptr->fpmath == ix86_fpmath);
3562 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3565 /* Restore the current options */
3567 static void
3568 ix86_function_specific_restore (struct cl_target_option *ptr)
3570 enum processor_type old_tune = ix86_tune;
3571 enum processor_type old_arch = ix86_arch;
3572 unsigned int ix86_arch_mask, ix86_tune_mask;
3573 int i;
3575 ix86_arch = (enum processor_type) ptr->arch;
3576 ix86_schedule = (enum attr_cpu) ptr->schedule;
3577 ix86_tune = (enum processor_type) ptr->tune;
3578 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3579 ix86_branch_cost = ptr->branch_cost;
3580 ix86_tune_defaulted = ptr->tune_defaulted;
3581 ix86_arch_specified = ptr->arch_specified;
3582 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3583 target_flags_explicit = ptr->target_flags_explicit;
3585 /* Recreate the arch feature tests if the arch changed */
3586 if (old_arch != ix86_arch)
3588 ix86_arch_mask = 1u << ix86_arch;
3589 for (i = 0; i < X86_ARCH_LAST; ++i)
3590 ix86_arch_features[i]
3591 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3594 /* Recreate the tune optimization tests */
3595 if (old_tune != ix86_tune)
3597 ix86_tune_mask = 1u << ix86_tune;
3598 for (i = 0; i < X86_TUNE_LAST; ++i)
3599 ix86_tune_features[i]
3600 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3604 /* Print the current options */
3606 static void
3607 ix86_function_specific_print (FILE *file, int indent,
3608 struct cl_target_option *ptr)
3610 char *target_string
3611 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3612 NULL, NULL, NULL, false);
3614 fprintf (file, "%*sarch = %d (%s)\n",
3615 indent, "",
3616 ptr->arch,
3617 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3618 ? cpu_names[ptr->arch]
3619 : "<unknown>"));
3621 fprintf (file, "%*stune = %d (%s)\n",
3622 indent, "",
3623 ptr->tune,
3624 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3625 ? cpu_names[ptr->tune]
3626 : "<unknown>"));
3628 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3629 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3630 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3631 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3633 if (target_string)
3635 fprintf (file, "%*s%s\n", indent, "", target_string);
3636 free (target_string);
3641 /* Inner function to process the attribute((target(...))), take an argument and
3642 set the current options from the argument. If we have a list, recursively go
3643 over the list. */
3645 static bool
3646 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3648 char *next_optstr;
3649 bool ret = true;
3651 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3652 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3653 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3654 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3656 enum ix86_opt_type
3658 ix86_opt_unknown,
3659 ix86_opt_yes,
3660 ix86_opt_no,
3661 ix86_opt_str,
3662 ix86_opt_isa
3665 static const struct
3667 const char *string;
3668 size_t len;
3669 enum ix86_opt_type type;
3670 int opt;
3671 int mask;
3672 } attrs[] = {
3673 /* isa options */
3674 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3675 IX86_ATTR_ISA ("abm", OPT_mabm),
3676 IX86_ATTR_ISA ("aes", OPT_maes),
3677 IX86_ATTR_ISA ("avx", OPT_mavx),
3678 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3679 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3680 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3681 IX86_ATTR_ISA ("sse", OPT_msse),
3682 IX86_ATTR_ISA ("sse2", OPT_msse2),
3683 IX86_ATTR_ISA ("sse3", OPT_msse3),
3684 IX86_ATTR_ISA ("sse4", OPT_msse4),
3685 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3686 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3687 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3688 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3689 IX86_ATTR_ISA ("fma4", OPT_mfma4),
3690 IX86_ATTR_ISA ("xop", OPT_mxop),
3691 IX86_ATTR_ISA ("lwp", OPT_mlwp),
3693 /* string options */
3694 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3695 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3696 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3698 /* flag options */
3699 IX86_ATTR_YES ("cld",
3700 OPT_mcld,
3701 MASK_CLD),
3703 IX86_ATTR_NO ("fancy-math-387",
3704 OPT_mfancy_math_387,
3705 MASK_NO_FANCY_MATH_387),
3707 IX86_ATTR_YES ("ieee-fp",
3708 OPT_mieee_fp,
3709 MASK_IEEE_FP),
3711 IX86_ATTR_YES ("inline-all-stringops",
3712 OPT_minline_all_stringops,
3713 MASK_INLINE_ALL_STRINGOPS),
3715 IX86_ATTR_YES ("inline-stringops-dynamically",
3716 OPT_minline_stringops_dynamically,
3717 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3719 IX86_ATTR_NO ("align-stringops",
3720 OPT_mno_align_stringops,
3721 MASK_NO_ALIGN_STRINGOPS),
3723 IX86_ATTR_YES ("recip",
3724 OPT_mrecip,
3725 MASK_RECIP),
3729 /* If this is a list, recurse to get the options. */
3730 if (TREE_CODE (args) == TREE_LIST)
3732 bool ret = true;
3734 for (; args; args = TREE_CHAIN (args))
3735 if (TREE_VALUE (args)
3736 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3737 ret = false;
3739 return ret;
3742 else if (TREE_CODE (args) != STRING_CST)
3743 gcc_unreachable ();
3745 /* Handle multiple arguments separated by commas. */
3746 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3748 while (next_optstr && *next_optstr != '\0')
3750 char *p = next_optstr;
3751 char *orig_p = p;
3752 char *comma = strchr (next_optstr, ',');
3753 const char *opt_string;
3754 size_t len, opt_len;
3755 int opt;
3756 bool opt_set_p;
3757 char ch;
3758 unsigned i;
3759 enum ix86_opt_type type = ix86_opt_unknown;
3760 int mask = 0;
3762 if (comma)
3764 *comma = '\0';
3765 len = comma - next_optstr;
3766 next_optstr = comma + 1;
3768 else
3770 len = strlen (p);
3771 next_optstr = NULL;
3774 /* Recognize no-xxx. */
3775 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3777 opt_set_p = false;
3778 p += 3;
3779 len -= 3;
3781 else
3782 opt_set_p = true;
3784 /* Find the option. */
3785 ch = *p;
3786 opt = N_OPTS;
3787 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3789 type = attrs[i].type;
3790 opt_len = attrs[i].len;
3791 if (ch == attrs[i].string[0]
3792 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3793 && memcmp (p, attrs[i].string, opt_len) == 0)
3795 opt = attrs[i].opt;
3796 mask = attrs[i].mask;
3797 opt_string = attrs[i].string;
3798 break;
3802 /* Process the option. */
3803 if (opt == N_OPTS)
3805 error ("attribute(target(\"%s\")) is unknown", orig_p);
3806 ret = false;
3809 else if (type == ix86_opt_isa)
3810 ix86_handle_option (opt, p, opt_set_p);
3812 else if (type == ix86_opt_yes || type == ix86_opt_no)
3814 if (type == ix86_opt_no)
3815 opt_set_p = !opt_set_p;
3817 if (opt_set_p)
3818 target_flags |= mask;
3819 else
3820 target_flags &= ~mask;
3823 else if (type == ix86_opt_str)
3825 if (p_strings[opt])
3827 error ("option(\"%s\") was already specified", opt_string);
3828 ret = false;
3830 else
3831 p_strings[opt] = xstrdup (p + opt_len);
3834 else
3835 gcc_unreachable ();
3838 return ret;
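/* Editorial sketch (not part of GCC): user-level code that exercises the
   parser above.  The attribute string is split on commas, a leading "no-"
   clears an option, isa names map to the OPT_m* switches in the attrs[]
   table, and "arch="/"tune="/"fpmath=" are collected as strings.  The
   function names below are hypothetical:

       __attribute__((target ("sse4.2,no-abm")))
       int with_sse42 (int x) { return x + 1; }

       __attribute__((target ("arch=core2,fpmath=sse")))
       int tuned_for_core2 (int x) { return x * 2; }
*/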
3841 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3843 tree
3844 ix86_valid_target_attribute_tree (tree args)
3846 const char *orig_arch_string = ix86_arch_string;
3847 const char *orig_tune_string = ix86_tune_string;
3848 const char *orig_fpmath_string = ix86_fpmath_string;
3849 int orig_tune_defaulted = ix86_tune_defaulted;
3850 int orig_arch_specified = ix86_arch_specified;
3851 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3852 tree t = NULL_TREE;
3853 int i;
3854 struct cl_target_option *def
3855 = TREE_TARGET_OPTION (target_option_default_node);
3857 /* Process each of the options on the chain. */
3858 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3859 return NULL_TREE;
3861 /* If the changed options are different from the default, rerun override_options,
3862 and then save the options away. The string options are attribute options,
3863 and will be undone when we copy the save structure. */
3864 if (ix86_isa_flags != def->ix86_isa_flags
3865 || target_flags != def->target_flags
3866 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3867 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3868 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3870 /* If we are using the default tune= or arch=, undo the string assigned,
3871 and use the default. */
3872 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3873 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3874 else if (!orig_arch_specified)
3875 ix86_arch_string = NULL;
3877 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3878 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3879 else if (orig_tune_defaulted)
3880 ix86_tune_string = NULL;
3882 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3883 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3884 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3885 else if (!TARGET_64BIT && TARGET_SSE)
3886 ix86_fpmath_string = "sse,387";
3888 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3889 override_options (false);
3891 /* Add any builtin functions with the new isa if any. */
3892 ix86_add_new_builtins (ix86_isa_flags);
3894 /* Save the current options unless we are validating options for
3895 #pragma. */
3896 t = build_target_option_node ();
3898 ix86_arch_string = orig_arch_string;
3899 ix86_tune_string = orig_tune_string;
3900 ix86_fpmath_string = orig_fpmath_string;
3902 /* Free up memory allocated to hold the strings */
3903 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3904 if (option_strings[i])
3905 free (option_strings[i]);
3908 return t;
3911 /* Hook to validate attribute((target("string"))). */
3913 static bool
3914 ix86_valid_target_attribute_p (tree fndecl,
3915 tree ARG_UNUSED (name),
3916 tree args,
3917 int ARG_UNUSED (flags))
3919 struct cl_target_option cur_target;
3920 bool ret = true;
3921 tree old_optimize = build_optimization_node ();
3922 tree new_target, new_optimize;
3923 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3925 /* If the function changed the optimization levels as well as setting target
3926 options, start with the optimizations specified. */
3927 if (func_optimize && func_optimize != old_optimize)
3928 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3930 /* The target attributes may also change some optimization flags, so update
3931 the optimization options if necessary. */
3932 cl_target_option_save (&cur_target);
3933 new_target = ix86_valid_target_attribute_tree (args);
3934 new_optimize = build_optimization_node ();
3936 if (!new_target)
3937 ret = false;
3939 else if (fndecl)
3941 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3943 if (old_optimize != new_optimize)
3944 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3947 cl_target_option_restore (&cur_target);
3949 if (old_optimize != new_optimize)
3950 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3952 return ret;
3956 /* Hook to determine if one function can safely inline another. */
3958 static bool
3959 ix86_can_inline_p (tree caller, tree callee)
3961 bool ret = false;
3962 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3963 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3965 /* If callee has no option attributes, then it is ok to inline. */
3966 if (!callee_tree)
3967 ret = true;
3969 /* If caller has no option attributes, but callee does, then it is not ok
3970 to inline. */
3971 else if (!caller_tree)
3972 ret = false;
3974 else
3976 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3977 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3979 /* Callee's isa options should be a subset of the caller's, i.e. an SSE4
3980 function can inline an SSE2 function, but an SSE2 function can't inline
3981 an SSE4 function. */
3982 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3983 != callee_opts->ix86_isa_flags)
3984 ret = false;
3986 /* See if we have the same non-isa options. */
3987 else if (caller_opts->target_flags != callee_opts->target_flags)
3988 ret = false;
3990 /* See if arch, tune, etc. are the same. */
3991 else if (caller_opts->arch != callee_opts->arch)
3992 ret = false;
3994 else if (caller_opts->tune != callee_opts->tune)
3995 ret = false;
3997 else if (caller_opts->fpmath != callee_opts->fpmath)
3998 ret = false;
4000 else if (caller_opts->branch_cost != callee_opts->branch_cost)
4001 ret = false;
4003 else
4004 ret = true;
4007 return ret;
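/* Editorial sketch (not part of GCC): what the subset test above means in
   user code.  With hypothetical functions such as

       __attribute__((target ("sse4.2")))
       static int callee (int x) { return x + 1; }

       static int caller (int x) { return callee (x); }

   the plain caller's ISA flags do not include the callee's SSE4.2 bit, so
   ix86_can_inline_p returns false; a caller carrying the same (or a larger)
   target attribute set could have the callee inlined into it.  */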
4011 /* Remember the last target of ix86_set_current_function. */
4012 static GTY(()) tree ix86_previous_fndecl;
4014 /* Establish appropriate back-end context for processing the function
4015 FNDECL. The argument might be NULL to indicate processing at top
4016 level, outside of any function scope. */
4017 static void
4018 ix86_set_current_function (tree fndecl)
4020 /* Only change the context if the function changes. This hook is called
4021 several times in the course of compiling a function, and we don't want to
4022 slow things down too much or call target_reinit when it isn't safe. */
4023 if (fndecl && fndecl != ix86_previous_fndecl)
4025 tree old_tree = (ix86_previous_fndecl
4026 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
4027 : NULL_TREE);
4029 tree new_tree = (fndecl
4030 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
4031 : NULL_TREE);
4033 ix86_previous_fndecl = fndecl;
4034 if (old_tree == new_tree)
4037 else if (new_tree)
4039 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
4040 target_reinit ();
4043 else if (old_tree)
4045 struct cl_target_option *def
4046 = TREE_TARGET_OPTION (target_option_current_node);
4048 cl_target_option_restore (def);
4049 target_reinit ();
4055 /* Return true if this goes in large data/bss. */
4057 static bool
4058 ix86_in_large_data_p (tree exp)
4060 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
4061 return false;
4063 /* Functions are never large data. */
4064 if (TREE_CODE (exp) == FUNCTION_DECL)
4065 return false;
4067 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
4069 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
4070 if (strcmp (section, ".ldata") == 0
4071 || strcmp (section, ".lbss") == 0)
4072 return true;
4073 return false;
4075 else
4077 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4079 /* If this is an incomplete type with size 0, then we can't put it
4080 in data because it might be too big when completed. */
4081 if (!size || size > ix86_section_threshold)
4082 return true;
4085 return false;
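/* Editorial sketch (not part of GCC): with -mcmodel=medium, a hypothetical
   object larger than the -mlarge-data-threshold value (ix86_section_threshold)
   counts as large data, while small objects do not:

       static char big_table[1 << 20];    large: goes to .lbss (or .ldata
                                          if it were initialized)
       static int small_counter;          small: ordinary .bss

   Objects explicitly placed in ".ldata" or ".lbss" with the section
   attribute are treated as large data by the string check above as well.  */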
4088 /* Switch to the appropriate section for output of DECL.
4089 DECL is either a `VAR_DECL' node or a constant of some sort.
4090 RELOC indicates whether forming the initial value of DECL requires
4091 link-time relocations. */
4093 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4094 ATTRIBUTE_UNUSED;
4096 static section *
4097 x86_64_elf_select_section (tree decl, int reloc,
4098 unsigned HOST_WIDE_INT align)
4100 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4101 && ix86_in_large_data_p (decl))
4103 const char *sname = NULL;
4104 unsigned int flags = SECTION_WRITE;
4105 switch (categorize_decl_for_section (decl, reloc))
4107 case SECCAT_DATA:
4108 sname = ".ldata";
4109 break;
4110 case SECCAT_DATA_REL:
4111 sname = ".ldata.rel";
4112 break;
4113 case SECCAT_DATA_REL_LOCAL:
4114 sname = ".ldata.rel.local";
4115 break;
4116 case SECCAT_DATA_REL_RO:
4117 sname = ".ldata.rel.ro";
4118 break;
4119 case SECCAT_DATA_REL_RO_LOCAL:
4120 sname = ".ldata.rel.ro.local";
4121 break;
4122 case SECCAT_BSS:
4123 sname = ".lbss";
4124 flags |= SECTION_BSS;
4125 break;
4126 case SECCAT_RODATA:
4127 case SECCAT_RODATA_MERGE_STR:
4128 case SECCAT_RODATA_MERGE_STR_INIT:
4129 case SECCAT_RODATA_MERGE_CONST:
4130 sname = ".lrodata";
4131 flags = 0;
4132 break;
4133 case SECCAT_SRODATA:
4134 case SECCAT_SDATA:
4135 case SECCAT_SBSS:
4136 gcc_unreachable ();
4137 case SECCAT_TEXT:
4138 case SECCAT_TDATA:
4139 case SECCAT_TBSS:
4140 /* We don't split these for the medium model. Place them into
4141 default sections and hope for the best. */
4142 break;
4143 case SECCAT_EMUTLS_VAR:
4144 case SECCAT_EMUTLS_TMPL:
4145 gcc_unreachable ();
4147 if (sname)
4149 /* We might get called with string constants, but get_named_section
4150 doesn't like them as they are not DECLs. Also, we need to set
4151 flags in that case. */
4152 if (!DECL_P (decl))
4153 return get_section (sname, flags, NULL);
4154 return get_named_section (decl, sname, reloc);
4157 return default_elf_select_section (decl, reloc, align);
4160 /* Build up a unique section name, expressed as a
4161 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4162 RELOC indicates whether the initial value of EXP requires
4163 link-time relocations. */
4165 static void ATTRIBUTE_UNUSED
4166 x86_64_elf_unique_section (tree decl, int reloc)
4168 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4169 && ix86_in_large_data_p (decl))
4171 const char *prefix = NULL;
4172 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4173 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4175 switch (categorize_decl_for_section (decl, reloc))
4177 case SECCAT_DATA:
4178 case SECCAT_DATA_REL:
4179 case SECCAT_DATA_REL_LOCAL:
4180 case SECCAT_DATA_REL_RO:
4181 case SECCAT_DATA_REL_RO_LOCAL:
4182 prefix = one_only ? ".ld" : ".ldata";
4183 break;
4184 case SECCAT_BSS:
4185 prefix = one_only ? ".lb" : ".lbss";
4186 break;
4187 case SECCAT_RODATA:
4188 case SECCAT_RODATA_MERGE_STR:
4189 case SECCAT_RODATA_MERGE_STR_INIT:
4190 case SECCAT_RODATA_MERGE_CONST:
4191 prefix = one_only ? ".lr" : ".lrodata";
4192 break;
4193 case SECCAT_SRODATA:
4194 case SECCAT_SDATA:
4195 case SECCAT_SBSS:
4196 gcc_unreachable ();
4197 case SECCAT_TEXT:
4198 case SECCAT_TDATA:
4199 case SECCAT_TBSS:
4200 /* We don't split these for the medium model. Place them into
4201 default sections and hope for the best. */
4202 break;
4203 case SECCAT_EMUTLS_VAR:
4204 prefix = targetm.emutls.var_section;
4205 break;
4206 case SECCAT_EMUTLS_TMPL:
4207 prefix = targetm.emutls.tmpl_section;
4208 break;
4210 if (prefix)
4212 const char *name, *linkonce;
4213 char *string;
4215 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4216 name = targetm.strip_name_encoding (name);
4218 /* If we're using one_only, then there needs to be a .gnu.linkonce
4219 prefix to the section name. */
4220 linkonce = one_only ? ".gnu.linkonce" : "";
4222 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4224 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4225 return;
4228 default_unique_section (decl, reloc);
4231 #ifdef COMMON_ASM_OP
4232 /* This says how to output assembler code to declare an
4233 uninitialized external linkage data object.
4235 For medium model x86-64 we need to use the .largecomm pseudo-op for
4236 large objects. */
4237 void
4238 x86_elf_aligned_common (FILE *file,
4239 const char *name, unsigned HOST_WIDE_INT size,
4240 int align)
4242 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4243 && size > (unsigned int)ix86_section_threshold)
4244 fputs (".largecomm\t", file);
4245 else
4246 fputs (COMMON_ASM_OP, file);
4247 assemble_name (file, name);
4248 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4249 size, align / BITS_PER_UNIT);
4251 #endif
4253 /* Utility function for targets to use in implementing
4254 ASM_OUTPUT_ALIGNED_BSS. */
4256 void
4257 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4258 const char *name, unsigned HOST_WIDE_INT size,
4259 int align)
4261 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4262 && size > (unsigned int)ix86_section_threshold)
4263 switch_to_section (get_named_section (decl, ".lbss", 0));
4264 else
4265 switch_to_section (bss_section);
4266 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4267 #ifdef ASM_DECLARE_OBJECT_NAME
4268 last_assemble_variable_decl = decl;
4269 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4270 #else
4271 /* The standard thing is just to output a label for the object. */
4272 ASM_OUTPUT_LABEL (file, name);
4273 #endif /* ASM_DECLARE_OBJECT_NAME */
4274 ASM_OUTPUT_SKIP (file, size ? size : 1);
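/* Editorial sketch (not part of GCC): the effect of the two helpers above
   under -mcmodel=medium with the default large-data threshold.  For a
   hypothetical tentative definition

       char huge_buffer[1 << 20];

   x86_elf_aligned_common emits a ".largecomm" directive instead of the
   usual ".comm", and x86_output_aligned_bss switches to the ".lbss"
   section before emitting the label and skip for a static object of that
   size.  */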
4277 void
4278 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4280 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4281 make the problem with not enough registers even worse. */
4282 #ifdef INSN_SCHEDULING
4283 if (level > 1)
4284 flag_schedule_insns = 0;
4285 #endif
4287 if (TARGET_MACHO)
4288 /* The Darwin libraries never set errno, so we might as well
4289 avoid calling them when that's the only reason we would. */
4290 flag_errno_math = 0;
4292 /* The default values of these switches depend on TARGET_64BIT,
4293 which is not known at this moment. Mark these values with 2 and
4294 let the user override them. In case there is no command line option
4295 specifying them, we will set the defaults in override_options. */
4296 if (optimize >= 1)
4297 flag_omit_frame_pointer = 2;
4298 flag_pcc_struct_return = 2;
4299 flag_asynchronous_unwind_tables = 2;
4300 flag_vect_cost_model = 1;
4301 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4302 SUBTARGET_OPTIMIZATION_OPTIONS;
4303 #endif
4306 /* Decide whether we can make a sibling call to a function. DECL is the
4307 declaration of the function being targeted by the call and EXP is the
4308 CALL_EXPR representing the call. */
4310 static bool
4311 ix86_function_ok_for_sibcall (tree decl, tree exp)
4313 tree type, decl_or_type;
4314 rtx a, b;
4316 /* If we are generating position-independent code, we cannot sibcall
4317 optimize any indirect call, or a direct call to a global function,
4318 as the PLT requires %ebx be live. */
4319 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4320 return false;
4322 /* If we need to align the outgoing stack, then sibcalling would
4323 unalign the stack, which may break the called function. */
4324 if (ix86_minimum_incoming_stack_boundary (true)
4325 < PREFERRED_STACK_BOUNDARY)
4326 return false;
4328 if (decl)
4330 decl_or_type = decl;
4331 type = TREE_TYPE (decl);
4333 else
4335 /* We're looking at the CALL_EXPR, we need the type of the function. */
4336 type = CALL_EXPR_FN (exp); /* pointer expression */
4337 type = TREE_TYPE (type); /* pointer type */
4338 type = TREE_TYPE (type); /* function type */
4339 decl_or_type = type;
4342 /* Check that the return value locations are the same. For example,
4343 if we are returning floats on the 80387 register stack, we cannot
4344 make a sibcall from a function that doesn't return a float to a
4345 function that does or, conversely, from a function that does return
4346 a float to a function that doesn't; the necessary stack adjustment
4347 would not be executed. This is also the place we notice
4348 differences in the return value ABI. Note that it is ok for one
4349 of the functions to have void return type as long as the return
4350 value of the other is passed in a register. */
4351 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4352 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4353 cfun->decl, false);
4354 if (STACK_REG_P (a) || STACK_REG_P (b))
4356 if (!rtx_equal_p (a, b))
4357 return false;
4359 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4361 else if (!rtx_equal_p (a, b))
4362 return false;
4364 if (TARGET_64BIT)
4366 /* The SYSV ABI has more call-clobbered registers;
4367 disallow sibcalls from MS to SYSV. */
4368 if (cfun->machine->call_abi == MS_ABI
4369 && ix86_function_type_abi (type) == SYSV_ABI)
4370 return false;
4372 else
4374 /* If this call is indirect, we'll need to be able to use a
4375 call-clobbered register for the address of the target function.
4376 Make sure that all such registers are not used for passing
4377 parameters. Note that DLLIMPORT functions are indirect. */
4378 if (!decl
4379 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4381 if (ix86_function_regparm (type, NULL) >= 3)
4383 /* ??? Need to count the actual number of registers to be used,
4384 not the possible number of registers. Fix later. */
4385 return false;
4390 /* Otherwise okay. That also includes certain types of indirect calls. */
4391 return true;
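/* Editorial sketch (not part of GCC): the return-value check above in user
   terms, for 32-bit code where floats come back on the 80387 stack.  With
   hypothetical functions

       double produce (void);
       double forward (void) { return produce (); }   sibcall is possible

       void consume (void) { produce (); }            sibcall is refused

   forward and produce return in the same place (st(0)), so a jmp works;
   consume would leave st(0) un-popped if it jumped, so the call must stay
   a real call followed by the fp-stack adjustment.  */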
4394 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4395 calling convention attributes;
4396 arguments as in struct attribute_spec.handler. */
4398 static tree
4399 ix86_handle_cconv_attribute (tree *node, tree name,
4400 tree args,
4401 int flags ATTRIBUTE_UNUSED,
4402 bool *no_add_attrs)
4404 if (TREE_CODE (*node) != FUNCTION_TYPE
4405 && TREE_CODE (*node) != METHOD_TYPE
4406 && TREE_CODE (*node) != FIELD_DECL
4407 && TREE_CODE (*node) != TYPE_DECL)
4409 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4410 name);
4411 *no_add_attrs = true;
4412 return NULL_TREE;
4415 /* Can combine regparm with all attributes but fastcall. */
4416 if (is_attribute_p ("regparm", name))
4418 tree cst;
4420 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4422 error ("fastcall and regparm attributes are not compatible");
4425 cst = TREE_VALUE (args);
4426 if (TREE_CODE (cst) != INTEGER_CST)
4428 warning (OPT_Wattributes,
4429 "%qE attribute requires an integer constant argument",
4430 name);
4431 *no_add_attrs = true;
4433 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4435 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4436 name, REGPARM_MAX);
4437 *no_add_attrs = true;
4440 return NULL_TREE;
4443 if (TARGET_64BIT)
4445 /* Do not warn when emulating the MS ABI. */
4446 if (TREE_CODE (*node) != FUNCTION_TYPE
4447 || ix86_function_type_abi (*node) != MS_ABI)
4448 warning (OPT_Wattributes, "%qE attribute ignored",
4449 name);
4450 *no_add_attrs = true;
4451 return NULL_TREE;
4454 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4455 if (is_attribute_p ("fastcall", name))
4457 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4459 error ("fastcall and cdecl attributes are not compatible");
4461 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4463 error ("fastcall and stdcall attributes are not compatible");
4465 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4467 error ("fastcall and regparm attributes are not compatible");
4471 /* Can combine stdcall with fastcall (redundant), regparm and
4472 sseregparm. */
4473 else if (is_attribute_p ("stdcall", name))
4475 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4477 error ("stdcall and cdecl attributes are not compatible");
4479 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4481 error ("stdcall and fastcall attributes are not compatible");
4485 /* Can combine cdecl with regparm and sseregparm. */
4486 else if (is_attribute_p ("cdecl", name))
4488 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4490 error ("stdcall and cdecl attributes are not compatible");
4492 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4494 error ("fastcall and cdecl attributes are not compatible");
4498 /* Can combine sseregparm with all attributes. */
4500 return NULL_TREE;
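/* Editorial sketch (not part of GCC): attribute combinations the handler
   above accepts or rejects for 32-bit code, using hypothetical prototypes:

       int __attribute__((stdcall, regparm (2))) ok1 (int, int);
       int __attribute__((cdecl, sseregparm))    ok2 (float);
       int __attribute__((fastcall, regparm (2))) bad (int, int);

   The first two are accepted (stdcall/cdecl combine with regparm and
   sseregparm); the last is rejected with "fastcall and regparm attributes
   are not compatible".  In 64-bit mode the handler drops such an attribute,
   warning unless the MS ABI is being emulated.  */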
4503 /* Return 0 if the attributes for two types are incompatible, 1 if they
4504 are compatible, and 2 if they are nearly compatible (which causes a
4505 warning to be generated). */
4507 static int
4508 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4510 /* Check for mismatch of non-default calling convention. */
4511 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4513 if (TREE_CODE (type1) != FUNCTION_TYPE
4514 && TREE_CODE (type1) != METHOD_TYPE)
4515 return 1;
4517 /* Check for mismatched fastcall/regparm types. */
4518 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4519 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4520 || (ix86_function_regparm (type1, NULL)
4521 != ix86_function_regparm (type2, NULL)))
4522 return 0;
4524 /* Check for mismatched sseregparm types. */
4525 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4526 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4527 return 0;
4529 /* Check for mismatched return types (cdecl vs stdcall). */
4530 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4531 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4532 return 0;
4534 return 1;
4537 /* Return the regparm value for a function with the indicated TYPE and DECL.
4538 DECL may be NULL when calling function indirectly
4539 or considering a libcall. */
4541 static int
4542 ix86_function_regparm (const_tree type, const_tree decl)
4544 tree attr;
4545 int regparm;
4547 if (TARGET_64BIT)
4548 return (ix86_function_type_abi (type) == SYSV_ABI
4549 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4551 regparm = ix86_regparm;
4552 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4553 if (attr)
4555 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4556 return regparm;
4559 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4560 return 2;
4562 /* Use register calling convention for local functions when possible. */
4563 if (decl
4564 && TREE_CODE (decl) == FUNCTION_DECL
4565 && optimize
4566 && !profile_flag)
4568 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4569 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE (decl));
4570 if (i && i->local)
4572 int local_regparm, globals = 0, regno;
4574 /* Make sure no regparm register is taken by a
4575 fixed register variable. */
4576 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4577 if (fixed_regs[local_regparm])
4578 break;
4580 /* We don't want to use regparm(3) for nested functions as
4581 these use a static chain pointer in the third argument. */
4582 if (local_regparm == 3 && DECL_STATIC_CHAIN (decl))
4583 local_regparm = 2;
4585 /* Each fixed register usage increases register pressure,
4586 so fewer registers should be used for argument passing.
4587 This functionality can be overridden by an explicit
4588 regparm value. */
4589 for (regno = 0; regno <= DI_REG; regno++)
4590 if (fixed_regs[regno])
4591 globals++;
4593 local_regparm
4594 = globals < local_regparm ? local_regparm - globals : 0;
4596 if (local_regparm > regparm)
4597 regparm = local_regparm;
4601 return regparm;
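/* Editorial sketch (not part of GCC): how the value computed above shows up
   in user code (32-bit).  A hypothetical declaration

       int __attribute__((regparm (3))) sum3 (int a, int b, int c);

   passes a, b and c in registers instead of on the stack, and fastcall
   (handled just above) behaves like regparm (2) with a fixed register
   choice.  Local static functions may additionally be promoted to register
   passing automatically when optimizing, as the cgraph-based code above
   does, minus one register for every call-clobbered register the user has
   fixed.  */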
4604 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4605 DFmode (2) arguments in SSE registers for a function with the
4606 indicated TYPE and DECL. DECL may be NULL when calling function
4607 indirectly or considering a libcall. Otherwise return 0. */
4609 static int
4610 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4612 gcc_assert (!TARGET_64BIT);
4614 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4615 by the sseregparm attribute. */
4616 if (TARGET_SSEREGPARM
4617 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4619 if (!TARGET_SSE)
4621 if (warn)
4623 if (decl)
4624 error ("Calling %qD with attribute sseregparm without "
4625 "SSE/SSE2 enabled", decl);
4626 else
4627 error ("Calling %qT with attribute sseregparm without "
4628 "SSE/SSE2 enabled", type);
4630 return 0;
4633 return 2;
4636 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4637 (and DFmode for SSE2) arguments in SSE registers. */
4638 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4640 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4641 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4642 if (i && i->local)
4643 return TARGET_SSE2 ? 2 : 1;
4646 return 0;
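/* Editorial sketch (not part of GCC): the sseregparm attribute that the
   check above validates, with a hypothetical prototype:

       float __attribute__((sseregparm)) scale (float x, float y);

   With SSE enabled, SFmode and DFmode arguments of such a function travel
   in SSE registers instead of on the stack; without SSE the error emitted
   above is reported.  Local functions compiled with -mfpmath=sse get the
   same treatment automatically when optimizing.  */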
4649 /* Return true if EAX is live at the start of the function. Used by
4650 ix86_expand_prologue to determine if we need special help before
4651 calling allocate_stack_worker. */
4653 static bool
4654 ix86_eax_live_at_start_p (void)
4656 /* Cheat. Don't bother working forward from ix86_function_regparm
4657 to the function type to whether an actual argument is located in
4658 eax. Instead just look at cfg info, which is still close enough
4659 to correct at this point. This gives false positives for broken
4660 functions that might use uninitialized data that happens to be
4661 allocated in eax, but who cares? */
4662 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4665 /* Value is the number of bytes of arguments automatically
4666 popped when returning from a subroutine call.
4667 FUNDECL is the declaration node of the function (as a tree),
4668 FUNTYPE is the data type of the function (as a tree),
4669 or for a library call it is an identifier node for the subroutine name.
4670 SIZE is the number of bytes of arguments passed on the stack.
4672 On the 80386, the RTD insn may be used to pop them if the number
4673 of args is fixed, but if the number is variable then the caller
4674 must pop them all. RTD can't be used for library calls now
4675 because the library is compiled with the Unix compiler.
4676 Use of RTD is a selectable option, since it is incompatible with
4677 standard Unix calling sequences. If the option is not selected,
4678 the caller must always pop the args.
4680 The attribute stdcall is equivalent to RTD on a per module basis. */
4683 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4685 int rtd;
4687 /* None of the 64-bit ABIs pop arguments. */
4688 if (TARGET_64BIT)
4689 return 0;
4691 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4693 /* Cdecl functions override -mrtd, and never pop the stack. */
4694 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4696 /* Stdcall and fastcall functions will pop the stack if not
4697 variable args. */
4698 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4699 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4700 rtd = 1;
4702 if (rtd && ! stdarg_p (funtype))
4703 return size;
4706 /* Lose any fake structure return argument if it is passed on the stack. */
4707 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4708 && !KEEP_AGGREGATE_RETURN_POINTER)
4710 int nregs = ix86_function_regparm (funtype, fundecl);
4711 if (nregs == 0)
4712 return GET_MODE_SIZE (Pmode);
4715 return 0;
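/* Editorial sketch (not part of GCC): what the function above computes for
   a few hypothetical 32-bit prototypes:

       int __attribute__((stdcall)) f (int a, int b);    callee pops 8 bytes
       int g (int a, int b);                             caller pops (cdecl)
       int __attribute__((stdcall)) h (int a, ...);      caller pops (varargs)

   i.e. stdcall and fastcall functions with a fixed argument list pop their
   own stack arguments via "ret n", while cdecl and variadic functions leave
   that to the caller.  */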
4718 /* Argument support functions. */
4720 /* Return true when register may be used to pass function parameters. */
4721 bool
4722 ix86_function_arg_regno_p (int regno)
4724 int i;
4725 const int *parm_regs;
4727 if (!TARGET_64BIT)
4729 if (TARGET_MACHO)
4730 return (regno < REGPARM_MAX
4731 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4732 else
4733 return (regno < REGPARM_MAX
4734 || (TARGET_MMX && MMX_REGNO_P (regno)
4735 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4736 || (TARGET_SSE && SSE_REGNO_P (regno)
4737 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4740 if (TARGET_MACHO)
4742 if (SSE_REGNO_P (regno) && TARGET_SSE)
4743 return true;
4745 else
4747 if (TARGET_SSE && SSE_REGNO_P (regno)
4748 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4749 return true;
4752 /* TODO: The function should depend on current function ABI but
4753 builtins.c would need updating then. Therefore we use the
4754 default ABI. */
4756 /* RAX is used as hidden argument to va_arg functions. */
4757 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4758 return true;
4760 if (ix86_abi == MS_ABI)
4761 parm_regs = x86_64_ms_abi_int_parameter_registers;
4762 else
4763 parm_regs = x86_64_int_parameter_registers;
4764 for (i = 0; i < (ix86_abi == MS_ABI
4765 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4766 if (regno == parm_regs[i])
4767 return true;
4768 return false;
4771 /* Return true if we do not know how to pass TYPE solely in registers. */
4773 static bool
4774 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4776 if (must_pass_in_stack_var_size_or_pad (mode, type))
4777 return true;
4779 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4780 The layout_type routine is crafty and tries to trick us into passing
4781 currently unsupported vector types on the stack by using TImode. */
4782 return (!TARGET_64BIT && mode == TImode
4783 && type && TREE_CODE (type) != VECTOR_TYPE);
4786 /* Return the size, in bytes, of the area reserved for arguments passed
4787 in registers for the function represented by FNDECL, which depends on the
4788 ABI used. */
4790 ix86_reg_parm_stack_space (const_tree fndecl)
4792 enum calling_abi call_abi = SYSV_ABI;
4793 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4794 call_abi = ix86_function_abi (fndecl);
4795 else
4796 call_abi = ix86_function_type_abi (fndecl);
4797 if (call_abi == MS_ABI)
4798 return 32;
4799 return 0;
4802 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4803 call ABI used. */
4804 enum calling_abi
4805 ix86_function_type_abi (const_tree fntype)
4807 if (TARGET_64BIT && fntype != NULL)
4809 enum calling_abi abi = ix86_abi;
4810 if (abi == SYSV_ABI)
4812 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4813 abi = MS_ABI;
4815 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4816 abi = SYSV_ABI;
4817 return abi;
4819 return ix86_abi;
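/* Editorial sketch (not part of GCC): the per-function ABI override that
   the lookup above implements for 64-bit code, with hypothetical
   prototypes:

       void __attribute__((ms_abi))   win_style (int a, int b);
       void __attribute__((sysv_abi)) unix_style (int a, int b);

   On a SYSV default target the first call passes a/b in rcx/rdx and
   reserves the 32-byte MS shadow space; the second keeps rdi/rsi.  On an
   MS default target the attributes work the other way around.  */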
4822 static bool
4823 ix86_function_ms_hook_prologue (const_tree fntype)
4825 if (!TARGET_64BIT)
4827 if (lookup_attribute ("ms_hook_prologue", DECL_ATTRIBUTES (fntype)))
4829 if (decl_function_context (fntype) != NULL_TREE)
4831 error_at (DECL_SOURCE_LOCATION (fntype),
4832 "ms_hook_prologue is not compatible with nested function");
4835 return true;
4838 return false;
4841 static enum calling_abi
4842 ix86_function_abi (const_tree fndecl)
4844 if (! fndecl)
4845 return ix86_abi;
4846 return ix86_function_type_abi (TREE_TYPE (fndecl));
4849 /* Return SYSV_ABI or MS_ABI, depending on cfun, specifying the
4850 call ABI used. */
4851 enum calling_abi
4852 ix86_cfun_abi (void)
4854 if (! cfun || ! TARGET_64BIT)
4855 return ix86_abi;
4856 return cfun->machine->call_abi;
4859 /* regclass.c */
4860 extern void init_regs (void);
4862 /* Implementation of the call ABI switching target hook. For the given
4863 FNDECL the corresponding call register sets are selected. See also
4864 CONDITIONAL_REGISTER_USAGE for more details. */
4865 void
4866 ix86_call_abi_override (const_tree fndecl)
4868 if (fndecl == NULL_TREE)
4869 cfun->machine->call_abi = ix86_abi;
4870 else
4871 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4874 /* MS and SYSV ABI have different sets of call-used registers. Avoid expensive
4875 re-initialization of init_regs each time we switch function context since
4876 this is needed only during RTL expansion. */
4877 static void
4878 ix86_maybe_switch_abi (void)
4880 if (TARGET_64BIT &&
4881 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4882 reinit_regs ();
4885 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4886 for a call to a function whose data type is FNTYPE.
4887 For a library call, FNTYPE is 0. */
4889 void
4890 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4891 tree fntype, /* tree ptr for function decl */
4892 rtx libname, /* SYMBOL_REF of library name or 0 */
4893 tree fndecl)
4895 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4896 memset (cum, 0, sizeof (*cum));
4898 if (fndecl)
4899 cum->call_abi = ix86_function_abi (fndecl);
4900 else
4901 cum->call_abi = ix86_function_type_abi (fntype);
4902 /* Set up the number of registers to use for passing arguments. */
4904 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4905 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4906 "or subtarget optimization implying it");
4907 cum->nregs = ix86_regparm;
4908 if (TARGET_64BIT)
4910 if (cum->call_abi != ix86_abi)
4911 cum->nregs = (ix86_abi != SYSV_ABI
4912 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4914 if (TARGET_SSE)
4916 cum->sse_nregs = SSE_REGPARM_MAX;
4917 if (TARGET_64BIT)
4919 if (cum->call_abi != ix86_abi)
4920 cum->sse_nregs = (ix86_abi != SYSV_ABI
4921 ? X86_64_SSE_REGPARM_MAX
4922 : X86_64_MS_SSE_REGPARM_MAX);
4925 if (TARGET_MMX)
4926 cum->mmx_nregs = MMX_REGPARM_MAX;
4927 cum->warn_avx = true;
4928 cum->warn_sse = true;
4929 cum->warn_mmx = true;
4931 /* Because the type might mismatch between caller and callee, we need to
4932 use the actual type of the function for local calls.
4933 FIXME: cgraph_analyze can be told to actually record if a function uses
4934 va_start, so for local functions maybe_vaarg can be made aggressive,
4935 helping K&R code.
4936 FIXME: once the type system is fixed, we won't need this code anymore. */
4937 if (i && i->local)
4938 fntype = TREE_TYPE (fndecl);
4939 cum->maybe_vaarg = (fntype
4940 ? (!prototype_p (fntype) || stdarg_p (fntype))
4941 : !libname);
4943 if (!TARGET_64BIT)
4945 /* If there are variable arguments, then we won't pass anything
4946 in registers in 32-bit mode. */
4947 if (stdarg_p (fntype))
4949 cum->nregs = 0;
4950 cum->sse_nregs = 0;
4951 cum->mmx_nregs = 0;
4952 cum->warn_avx = 0;
4953 cum->warn_sse = 0;
4954 cum->warn_mmx = 0;
4955 return;
4958 /* Use ecx and edx registers if function has fastcall attribute,
4959 else look for regparm information. */
4960 if (fntype)
4962 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4964 cum->nregs = 2;
4965 cum->fastcall = 1;
4967 else
4968 cum->nregs = ix86_function_regparm (fntype, fndecl);
4971 /* Set up the number of SSE registers used for passing SFmode
4972 and DFmode arguments. Warn for mismatching ABI. */
4973 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4977 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4978 But in the case of vector types, it is some vector mode.
4980 When we have only some of our vector isa extensions enabled, then there
4981 are some modes for which vector_mode_supported_p is false. For these
4982 modes, the generic vector support in gcc will choose some non-vector mode
4983 in order to implement the type. By computing the natural mode, we'll
4984 select the proper ABI location for the operand and not depend on whatever
4985 the middle-end decides to do with these vector types.
4987 The middle-end can't deal with vector types > 16 bytes. In this
4988 case, we return the original mode and warn about the ABI change if CUM
4989 isn't NULL. */
4991 static enum machine_mode
4992 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
4994 enum machine_mode mode = TYPE_MODE (type);
4996 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4998 HOST_WIDE_INT size = int_size_in_bytes (type);
4999 if ((size == 8 || size == 16 || size == 32)
5000 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
5001 && TYPE_VECTOR_SUBPARTS (type) > 1)
5003 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
5005 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
5006 mode = MIN_MODE_VECTOR_FLOAT;
5007 else
5008 mode = MIN_MODE_VECTOR_INT;
5010 /* Get the mode which has this inner mode and number of units. */
5011 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
5012 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
5013 && GET_MODE_INNER (mode) == innermode)
5015 if (size == 32 && !TARGET_AVX)
5017 static bool warnedavx;
5019 if (cum
5020 && !warnedavx
5021 && cum->warn_avx)
5023 warnedavx = true;
5024 warning (0, "AVX vector argument without AVX "
5025 "enabled changes the ABI");
5027 return TYPE_MODE (type);
5029 else
5030 return mode;
5033 gcc_unreachable ();
5037 return mode;
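/* Editorial sketch (not part of GCC): the 32-byte vector case handled
   above, using a hypothetical generic vector type:

       typedef float v8sf __attribute__((vector_size (32)));
       v8sf add8 (v8sf a, v8sf b);

   Compiled without -mavx, V8SFmode is not supported, so the argument keeps
   the non-vector mode the middle-end chose for the type and the one-time
   "AVX vector argument without AVX enabled changes the ABI" warning above
   is issued; with -mavx the natural mode V8SFmode is used and the value is
   passed in a ymm register.  */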
5040 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
5041 this may not agree with the mode that the type system has chosen for the
5042 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
5043 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
5045 static rtx
5046 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
5047 unsigned int regno)
5049 rtx tmp;
5051 if (orig_mode != BLKmode)
5052 tmp = gen_rtx_REG (orig_mode, regno);
5053 else
5055 tmp = gen_rtx_REG (mode, regno);
5056 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
5057 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
5060 return tmp;
5063 /* x86-64 register passing implementation. See the x86-64 ABI for details.
5064 The goal of this code is to classify each eightbyte of an incoming argument
5065 by register class and assign registers accordingly. */
5067 /* Return the union class of CLASS1 and CLASS2.
5068 See the x86-64 PS ABI for details. */
5070 static enum x86_64_reg_class
5071 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5073 /* Rule #1: If both classes are equal, this is the resulting class. */
5074 if (class1 == class2)
5075 return class1;
5077 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5078 the other class. */
5079 if (class1 == X86_64_NO_CLASS)
5080 return class2;
5081 if (class2 == X86_64_NO_CLASS)
5082 return class1;
5084 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5085 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5086 return X86_64_MEMORY_CLASS;
5088 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5089 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5090 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5091 return X86_64_INTEGERSI_CLASS;
5092 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5093 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5094 return X86_64_INTEGER_CLASS;
5096 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5097 MEMORY is used. */
5098 if (class1 == X86_64_X87_CLASS
5099 || class1 == X86_64_X87UP_CLASS
5100 || class1 == X86_64_COMPLEX_X87_CLASS
5101 || class2 == X86_64_X87_CLASS
5102 || class2 == X86_64_X87UP_CLASS
5103 || class2 == X86_64_COMPLEX_X87_CLASS)
5104 return X86_64_MEMORY_CLASS;
5106 /* Rule #6: Otherwise class SSE is used. */
5107 return X86_64_SSE_CLASS;
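/* A worked instance of the rules above (illustrative only): merging
   X86_64_INTEGERSI_CLASS with X86_64_SSESF_CLASS yields
   X86_64_INTEGERSI_CLASS (rule #4), while merging X86_64_SSE_CLASS
   with X86_64_X87_CLASS yields X86_64_MEMORY_CLASS (rule #5).  */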
5110 /* Classify the argument of type TYPE and mode MODE.
5111 CLASSES will be filled by the register class used to pass each word
5112 of the operand. The number of words is returned. In case the parameter
5113 should be passed in memory, 0 is returned. As a special case for zero
5114 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5116 BIT_OFFSET is used internally for handling records; it gives the offset
5117 of the current piece in bits modulo 256, to avoid overflow cases.
5119 See the x86-64 PS ABI for details.
5122 static int
5123 classify_argument (enum machine_mode mode, const_tree type,
5124 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5126 HOST_WIDE_INT bytes =
5127 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5128 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5130 /* Variable sized entities are always passed/returned in memory. */
5131 if (bytes < 0)
5132 return 0;
5134 if (mode != VOIDmode
5135 && targetm.calls.must_pass_in_stack (mode, type))
5136 return 0;
5138 if (type && AGGREGATE_TYPE_P (type))
5140 int i;
5141 tree field;
5142 enum x86_64_reg_class subclasses[MAX_CLASSES];
5144 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5145 if (bytes > 32)
5146 return 0;
5148 for (i = 0; i < words; i++)
5149 classes[i] = X86_64_NO_CLASS;
5151 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5152 signal the memory class, so handle this as a special case. */
5153 if (!words)
5155 classes[0] = X86_64_NO_CLASS;
5156 return 1;
5159 /* Classify each field of record and merge classes. */
5160 switch (TREE_CODE (type))
5162 case RECORD_TYPE:
5163 /* And now merge the fields of structure. */
5164 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5166 if (TREE_CODE (field) == FIELD_DECL)
5168 int num;
5170 if (TREE_TYPE (field) == error_mark_node)
5171 continue;
5173 /* Bitfields are always classified as integer. Handle them
5174 early, since later code would consider them to be
5175 misaligned integers. */
5176 if (DECL_BIT_FIELD (field))
5178 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5179 i < ((int_bit_position (field) + (bit_offset % 64))
5180 + tree_low_cst (DECL_SIZE (field), 0)
5181 + 63) / 8 / 8; i++)
5182 classes[i] =
5183 merge_classes (X86_64_INTEGER_CLASS,
5184 classes[i]);
5186 else
5188 int pos;
5190 type = TREE_TYPE (field);
5192 /* Flexible array member is ignored. */
5193 if (TYPE_MODE (type) == BLKmode
5194 && TREE_CODE (type) == ARRAY_TYPE
5195 && TYPE_SIZE (type) == NULL_TREE
5196 && TYPE_DOMAIN (type) != NULL_TREE
5197 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5198 == NULL_TREE))
5200 static bool warned;
5202 if (!warned && warn_psabi)
5204 warned = true;
5205 inform (input_location,
5206 "The ABI of passing struct with"
5207 " a flexible array member has"
5208 " changed in GCC 4.4");
5210 continue;
5212 num = classify_argument (TYPE_MODE (type), type,
5213 subclasses,
5214 (int_bit_position (field)
5215 + bit_offset) % 256);
5216 if (!num)
5217 return 0;
5218 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5219 for (i = 0; i < num && (i + pos) < words; i++)
5220 classes[i + pos] =
5221 merge_classes (subclasses[i], classes[i + pos]);
5225 break;
5227 case ARRAY_TYPE:
5228 /* Arrays are handled as small records. */
5230 int num;
5231 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5232 TREE_TYPE (type), subclasses, bit_offset);
5233 if (!num)
5234 return 0;
5236 /* The partial classes are now full classes. */
5237 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5238 subclasses[0] = X86_64_SSE_CLASS;
5239 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5240 && !((bit_offset % 64) == 0 && bytes == 4))
5241 subclasses[0] = X86_64_INTEGER_CLASS;
5243 for (i = 0; i < words; i++)
5244 classes[i] = subclasses[i % num];
5246 break;
5248 case UNION_TYPE:
5249 case QUAL_UNION_TYPE:
5250 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
5252 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5254 if (TREE_CODE (field) == FIELD_DECL)
5256 int num;
5258 if (TREE_TYPE (field) == error_mark_node)
5259 continue;
5261 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5262 TREE_TYPE (field), subclasses,
5263 bit_offset);
5264 if (!num)
5265 return 0;
5266 for (i = 0; i < num; i++)
5267 classes[i] = merge_classes (subclasses[i], classes[i]);
5270 break;
5272 default:
5273 gcc_unreachable ();
5276 if (words > 2)
5278 /* When the size is > 16 bytes, if the first class isn't
5279 X86_64_SSE_CLASS or any of the remaining ones isn't
5280 X86_64_SSEUP_CLASS, everything should be passed in
5281 memory. */
5282 if (classes[0] != X86_64_SSE_CLASS)
5283 return 0;
5285 for (i = 1; i < words; i++)
5286 if (classes[i] != X86_64_SSEUP_CLASS)
5287 return 0;
5290 /* Final merger cleanup. */
5291 for (i = 0; i < words; i++)
5293 /* If one class is MEMORY, everything should be passed in
5294 memory. */
5295 if (classes[i] == X86_64_MEMORY_CLASS)
5296 return 0;
5298 /* The X86_64_SSEUP_CLASS should be always preceded by
5299 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5300 if (classes[i] == X86_64_SSEUP_CLASS
5301 && classes[i - 1] != X86_64_SSE_CLASS
5302 && classes[i - 1] != X86_64_SSEUP_CLASS)
5304 /* The first one should never be X86_64_SSEUP_CLASS. */
5305 gcc_assert (i != 0);
5306 classes[i] = X86_64_SSE_CLASS;
5309 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5310 everything should be passed in memory. */
5311 if (classes[i] == X86_64_X87UP_CLASS
5312 && (classes[i - 1] != X86_64_X87_CLASS))
5314 static bool warned;
5316 /* The first one should never be X86_64_X87UP_CLASS. */
5317 gcc_assert (i != 0);
5318 if (!warned && warn_psabi)
5320 warned = true;
5321 inform (input_location,
5322 "The ABI of passing union with long double"
5323 " has changed in GCC 4.4");
5325 return 0;
5328 return words;
5331 /* Compute the alignment needed. We align all types to their natural
5332 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5333 if (mode != VOIDmode && mode != BLKmode)
5335 int mode_alignment = GET_MODE_BITSIZE (mode);
5337 if (mode == XFmode)
5338 mode_alignment = 128;
5339 else if (mode == XCmode)
5340 mode_alignment = 256;
5341 if (COMPLEX_MODE_P (mode))
5342 mode_alignment /= 2;
5343 /* Misaligned fields are always returned in memory. */
5344 if (bit_offset % mode_alignment)
5345 return 0;
5348 /* For V1xx modes, just use the base mode. */
5349 if (VECTOR_MODE_P (mode) && mode != V1DImode && mode != V1TImode
5350 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5351 mode = GET_MODE_INNER (mode);
5353 /* Classification of atomic types. */
5354 switch (mode)
5356 case SDmode:
5357 case DDmode:
5358 classes[0] = X86_64_SSE_CLASS;
5359 return 1;
5360 case TDmode:
5361 classes[0] = X86_64_SSE_CLASS;
5362 classes[1] = X86_64_SSEUP_CLASS;
5363 return 2;
5364 case DImode:
5365 case SImode:
5366 case HImode:
5367 case QImode:
5368 case CSImode:
5369 case CHImode:
5370 case CQImode:
5372 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5374 if (size <= 32)
5376 classes[0] = X86_64_INTEGERSI_CLASS;
5377 return 1;
5379 else if (size <= 64)
5381 classes[0] = X86_64_INTEGER_CLASS;
5382 return 1;
5384 else if (size <= 64+32)
5386 classes[0] = X86_64_INTEGER_CLASS;
5387 classes[1] = X86_64_INTEGERSI_CLASS;
5388 return 2;
5390 else if (size <= 64+64)
5392 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5393 return 2;
5395 else
5396 gcc_unreachable ();
5398 case CDImode:
5399 case TImode:
5400 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5401 return 2;
5402 case COImode:
5403 case OImode:
5404 /* OImode shouldn't be used directly. */
5405 gcc_unreachable ();
5406 case CTImode:
5407 return 0;
5408 case SFmode:
5409 if (!(bit_offset % 64))
5410 classes[0] = X86_64_SSESF_CLASS;
5411 else
5412 classes[0] = X86_64_SSE_CLASS;
5413 return 1;
5414 case DFmode:
5415 classes[0] = X86_64_SSEDF_CLASS;
5416 return 1;
5417 case XFmode:
5418 classes[0] = X86_64_X87_CLASS;
5419 classes[1] = X86_64_X87UP_CLASS;
5420 return 2;
5421 case TFmode:
5422 classes[0] = X86_64_SSE_CLASS;
5423 classes[1] = X86_64_SSEUP_CLASS;
5424 return 2;
5425 case SCmode:
5426 classes[0] = X86_64_SSE_CLASS;
5427 if (!(bit_offset % 64))
5428 return 1;
5429 else
5431 static bool warned;
5433 if (!warned && warn_psabi)
5435 warned = true;
5436 inform (input_location,
5437 "The ABI of passing structure with complex float"
5438 " member has changed in GCC 4.4");
5440 classes[1] = X86_64_SSESF_CLASS;
5441 return 2;
5443 case DCmode:
5444 classes[0] = X86_64_SSEDF_CLASS;
5445 classes[1] = X86_64_SSEDF_CLASS;
5446 return 2;
5447 case XCmode:
5448 classes[0] = X86_64_COMPLEX_X87_CLASS;
5449 return 1;
5450 case TCmode:
5451 /* This mode is larger than 16 bytes. */
5452 return 0;
5453 case V8SFmode:
5454 case V8SImode:
5455 case V32QImode:
5456 case V16HImode:
5457 case V4DFmode:
5458 case V4DImode:
5459 classes[0] = X86_64_SSE_CLASS;
5460 classes[1] = X86_64_SSEUP_CLASS;
5461 classes[2] = X86_64_SSEUP_CLASS;
5462 classes[3] = X86_64_SSEUP_CLASS;
5463 return 4;
5464 case V4SFmode:
5465 case V4SImode:
5466 case V16QImode:
5467 case V8HImode:
5468 case V2DFmode:
5469 case V2DImode:
5470 classes[0] = X86_64_SSE_CLASS;
5471 classes[1] = X86_64_SSEUP_CLASS;
5472 return 2;
5473 case V1TImode:
5474 case V1DImode:
5475 case V2SFmode:
5476 case V2SImode:
5477 case V4HImode:
5478 case V8QImode:
5479 classes[0] = X86_64_SSE_CLASS;
5480 return 1;
5481 case BLKmode:
5482 case VOIDmode:
5483 return 0;
5484 default:
5485 gcc_assert (VECTOR_MODE_P (mode));
5487 if (bytes > 16)
5488 return 0;
5490 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5492 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5493 classes[0] = X86_64_INTEGERSI_CLASS;
5494 else
5495 classes[0] = X86_64_INTEGER_CLASS;
5496 classes[1] = X86_64_INTEGER_CLASS;
5497 return 1 + (bytes > 8);
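/* Illustrative example of the classification above (an assumption about
   a hypothetical type, not text from the ABI document):  for
       struct s { double d; int i; };
   classify_argument returns 2 with classes[0] = X86_64_SSEDF_CLASS
   (the double fills the first eightbyte) and classes[1] =
   X86_64_INTEGERSI_CLASS (the int occupies the low half of the second
   eightbyte), so the struct travels in one SSE and one integer register.  */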
5501 /* Examine the argument and return the number of registers required in each
5502 class. Return 0 iff the parameter should be passed in memory. */
5503 static int
5504 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5505 int *int_nregs, int *sse_nregs)
5507 enum x86_64_reg_class regclass[MAX_CLASSES];
5508 int n = classify_argument (mode, type, regclass, 0);
5510 *int_nregs = 0;
5511 *sse_nregs = 0;
5512 if (!n)
5513 return 0;
5514 for (n--; n >= 0; n--)
5515 switch (regclass[n])
5517 case X86_64_INTEGER_CLASS:
5518 case X86_64_INTEGERSI_CLASS:
5519 (*int_nregs)++;
5520 break;
5521 case X86_64_SSE_CLASS:
5522 case X86_64_SSESF_CLASS:
5523 case X86_64_SSEDF_CLASS:
5524 (*sse_nregs)++;
5525 break;
5526 case X86_64_NO_CLASS:
5527 case X86_64_SSEUP_CLASS:
5528 break;
5529 case X86_64_X87_CLASS:
5530 case X86_64_X87UP_CLASS:
5531 if (!in_return)
5532 return 0;
5533 break;
5534 case X86_64_COMPLEX_X87_CLASS:
5535 return in_return ? 2 : 0;
5536 case X86_64_MEMORY_CLASS:
5537 gcc_unreachable ();
5539 return 1;
5542 /* Construct container for the argument used by GCC interface. See
5543 FUNCTION_ARG for the detailed description. */
5545 static rtx
5546 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5547 const_tree type, int in_return, int nintregs, int nsseregs,
5548 const int *intreg, int sse_regno)
5550 /* The following variables hold the static issued_error state. */
5551 static bool issued_sse_arg_error;
5552 static bool issued_sse_ret_error;
5553 static bool issued_x87_ret_error;
5555 enum machine_mode tmpmode;
5556 int bytes =
5557 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5558 enum x86_64_reg_class regclass[MAX_CLASSES];
5559 int n;
5560 int i;
5561 int nexps = 0;
5562 int needed_sseregs, needed_intregs;
5563 rtx exp[MAX_CLASSES];
5564 rtx ret;
5566 n = classify_argument (mode, type, regclass, 0);
5567 if (!n)
5568 return NULL;
5569 if (!examine_argument (mode, type, in_return, &needed_intregs,
5570 &needed_sseregs))
5571 return NULL;
5572 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5573 return NULL;
5575 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5576 some less clueful developer tries to use floating-point anyway. */
5577 if (needed_sseregs && !TARGET_SSE)
5579 if (in_return)
5581 if (!issued_sse_ret_error)
5583 error ("SSE register return with SSE disabled");
5584 issued_sse_ret_error = true;
5587 else if (!issued_sse_arg_error)
5589 error ("SSE register argument with SSE disabled");
5590 issued_sse_arg_error = true;
5592 return NULL;
5595 /* Likewise, error if the ABI requires us to return values in the
5596 x87 registers and the user specified -mno-80387. */
5597 if (!TARGET_80387 && in_return)
5598 for (i = 0; i < n; i++)
5599 if (regclass[i] == X86_64_X87_CLASS
5600 || regclass[i] == X86_64_X87UP_CLASS
5601 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5603 if (!issued_x87_ret_error)
5605 error ("x87 register return with x87 disabled");
5606 issued_x87_ret_error = true;
5608 return NULL;
5611 /* First construct the simple cases. Avoid SCmode, since we want to use
5612 a single register to pass this type. */
5613 if (n == 1 && mode != SCmode)
5614 switch (regclass[0])
5616 case X86_64_INTEGER_CLASS:
5617 case X86_64_INTEGERSI_CLASS:
5618 return gen_rtx_REG (mode, intreg[0]);
5619 case X86_64_SSE_CLASS:
5620 case X86_64_SSESF_CLASS:
5621 case X86_64_SSEDF_CLASS:
5622 if (mode != BLKmode)
5623 return gen_reg_or_parallel (mode, orig_mode,
5624 SSE_REGNO (sse_regno));
5625 break;
5626 case X86_64_X87_CLASS:
5627 case X86_64_COMPLEX_X87_CLASS:
5628 return gen_rtx_REG (mode, FIRST_STACK_REG);
5629 case X86_64_NO_CLASS:
5630 /* Zero sized array, struct or class. */
5631 return NULL;
5632 default:
5633 gcc_unreachable ();
5635 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5636 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5637 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5638 if (n == 4
5639 && regclass[0] == X86_64_SSE_CLASS
5640 && regclass[1] == X86_64_SSEUP_CLASS
5641 && regclass[2] == X86_64_SSEUP_CLASS
5642 && regclass[3] == X86_64_SSEUP_CLASS
5643 && mode != BLKmode)
5644 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5646 if (n == 2
5647 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5648 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5649 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5650 && regclass[1] == X86_64_INTEGER_CLASS
5651 && (mode == CDImode || mode == TImode || mode == TFmode)
5652 && intreg[0] + 1 == intreg[1])
5653 return gen_rtx_REG (mode, intreg[0]);
5655 /* Otherwise figure out the entries of the PARALLEL. */
5656 for (i = 0; i < n; i++)
5658 int pos;
5660 switch (regclass[i])
5662 case X86_64_NO_CLASS:
5663 break;
5664 case X86_64_INTEGER_CLASS:
5665 case X86_64_INTEGERSI_CLASS:
5666 /* Merge TImodes on aligned occasions here too. */
5667 if (i * 8 + 8 > bytes)
5668 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5669 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5670 tmpmode = SImode;
5671 else
5672 tmpmode = DImode;
5673 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
5674 if (tmpmode == BLKmode)
5675 tmpmode = DImode;
5676 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5677 gen_rtx_REG (tmpmode, *intreg),
5678 GEN_INT (i*8));
5679 intreg++;
5680 break;
5681 case X86_64_SSESF_CLASS:
5682 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5683 gen_rtx_REG (SFmode,
5684 SSE_REGNO (sse_regno)),
5685 GEN_INT (i*8));
5686 sse_regno++;
5687 break;
5688 case X86_64_SSEDF_CLASS:
5689 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5690 gen_rtx_REG (DFmode,
5691 SSE_REGNO (sse_regno)),
5692 GEN_INT (i*8));
5693 sse_regno++;
5694 break;
5695 case X86_64_SSE_CLASS:
5696 pos = i;
5697 switch (n)
5699 case 1:
5700 tmpmode = DImode;
5701 break;
5702 case 2:
5703 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5705 tmpmode = TImode;
5706 i++;
5708 else
5709 tmpmode = DImode;
5710 break;
5711 case 4:
5712 gcc_assert (i == 0
5713 && regclass[1] == X86_64_SSEUP_CLASS
5714 && regclass[2] == X86_64_SSEUP_CLASS
5715 && regclass[3] == X86_64_SSEUP_CLASS);
5716 tmpmode = OImode;
5717 i += 3;
5718 break;
5719 default:
5720 gcc_unreachable ();
5722 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5723 gen_rtx_REG (tmpmode,
5724 SSE_REGNO (sse_regno)),
5725 GEN_INT (pos*8));
5726 sse_regno++;
5727 break;
5728 default:
5729 gcc_unreachable ();
5733 /* Empty aligned struct, union or class. */
5734 if (nexps == 0)
5735 return NULL;
5737 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5738 for (i = 0; i < nexps; i++)
5739 XVECEXP (ret, 0, i) = exp [i];
5740 return ret;
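/* For the struct used in the example above, construct_container would
   (roughly, as a sketch) build a PARALLEL of the form
       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:SI di)   (const_int 8))])
   i.e. the double at offset 0 in an SSE register and the int at offset
   8 in the next free integer register.  */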
5743 /* Update the data in CUM to advance over an argument of mode MODE
5744 and data type TYPE. (TYPE is null for libcalls where that information
5745 may not be available.) */
5747 static void
5748 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5749 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5751 switch (mode)
5753 default:
5754 break;
5756 case BLKmode:
5757 if (bytes < 0)
5758 break;
5759 /* FALLTHRU */
5761 case DImode:
5762 case SImode:
5763 case HImode:
5764 case QImode:
5765 cum->words += words;
5766 cum->nregs -= words;
5767 cum->regno += words;
5769 if (cum->nregs <= 0)
5771 cum->nregs = 0;
5772 cum->regno = 0;
5774 break;
5776 case OImode:
5777 /* OImode shouldn't be used directly. */
5778 gcc_unreachable ();
5780 case DFmode:
5781 if (cum->float_in_sse < 2)
5782 break;
5783 case SFmode:
5784 if (cum->float_in_sse < 1)
5785 break;
5786 /* FALLTHRU */
5788 case V8SFmode:
5789 case V8SImode:
5790 case V32QImode:
5791 case V16HImode:
5792 case V4DFmode:
5793 case V4DImode:
5794 case TImode:
5795 case V16QImode:
5796 case V8HImode:
5797 case V4SImode:
5798 case V2DImode:
5799 case V4SFmode:
5800 case V2DFmode:
5801 if (!type || !AGGREGATE_TYPE_P (type))
5803 cum->sse_words += words;
5804 cum->sse_nregs -= 1;
5805 cum->sse_regno += 1;
5806 if (cum->sse_nregs <= 0)
5808 cum->sse_nregs = 0;
5809 cum->sse_regno = 0;
5812 break;
5814 case V8QImode:
5815 case V4HImode:
5816 case V2SImode:
5817 case V2SFmode:
5818 case V1TImode:
5819 case V1DImode:
5820 if (!type || !AGGREGATE_TYPE_P (type))
5822 cum->mmx_words += words;
5823 cum->mmx_nregs -= 1;
5824 cum->mmx_regno += 1;
5825 if (cum->mmx_nregs <= 0)
5827 cum->mmx_nregs = 0;
5828 cum->mmx_regno = 0;
5831 break;
5835 static void
5836 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5837 tree type, HOST_WIDE_INT words, int named)
5839 int int_nregs, sse_nregs;
5841 /* Unnamed 256bit vector mode parameters are passed on stack. */
5842 if (!named && VALID_AVX256_REG_MODE (mode))
5843 return;
5845 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5846 cum->words += words;
5847 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5849 cum->nregs -= int_nregs;
5850 cum->sse_nregs -= sse_nregs;
5851 cum->regno += int_nregs;
5852 cum->sse_regno += sse_nregs;
5854 else
5855 cum->words += words;
5858 static void
5859 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5860 HOST_WIDE_INT words)
5862 /* Otherwise, this should be passed indirect. */
5863 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5865 cum->words += words;
5866 if (cum->nregs > 0)
5868 cum->nregs -= 1;
5869 cum->regno += 1;
5873 void
5874 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5875 tree type, int named)
5877 HOST_WIDE_INT bytes, words;
5879 if (mode == BLKmode)
5880 bytes = int_size_in_bytes (type);
5881 else
5882 bytes = GET_MODE_SIZE (mode);
5883 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5885 if (type)
5886 mode = type_natural_mode (type, NULL);
5888 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5889 function_arg_advance_ms_64 (cum, bytes, words);
5890 else if (TARGET_64BIT)
5891 function_arg_advance_64 (cum, mode, type, words, named);
5892 else
5893 function_arg_advance_32 (cum, mode, type, bytes, words);
5896 /* Define where to put the arguments to a function.
5897 Value is zero to push the argument on the stack,
5898 or a hard register in which to store the argument.
5900 MODE is the argument's machine mode.
5901 TYPE is the data type of the argument (as a tree).
5902 This is null for libcalls where that information may
5903 not be available.
5904 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5905 the preceding args and about the function being called.
5906 NAMED is nonzero if this argument is a named parameter
5907 (otherwise it is an extra parameter matching an ellipsis). */
5909 static rtx
5910 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5911 enum machine_mode orig_mode, tree type,
5912 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5914 static bool warnedsse, warnedmmx;
5916 /* Avoid the AL settings for the Unix64 ABI. */
5917 if (mode == VOIDmode)
5918 return constm1_rtx;
5920 switch (mode)
5922 default:
5923 break;
5925 case BLKmode:
5926 if (bytes < 0)
5927 break;
5928 /* FALLTHRU */
5929 case DImode:
5930 case SImode:
5931 case HImode:
5932 case QImode:
5933 if (words <= cum->nregs)
5935 int regno = cum->regno;
5937 /* Fastcall allocates the first two DWORD (SImode) or
5938 smaller arguments to ECX and EDX if the argument isn't an
5939 aggregate type. */
5940 if (cum->fastcall)
5942 if (mode == BLKmode
5943 || mode == DImode
5944 || (type && AGGREGATE_TYPE_P (type)))
5945 break;
5947 /* ECX, not EAX, is the first allocated register. */
5948 if (regno == AX_REG)
5949 regno = CX_REG;
5951 return gen_rtx_REG (mode, regno);
5953 break;
5955 case DFmode:
5956 if (cum->float_in_sse < 2)
5957 break;
5958 case SFmode:
5959 if (cum->float_in_sse < 1)
5960 break;
5961 /* FALLTHRU */
5962 case TImode:
5963 /* In 32bit, we pass TImode in xmm registers. */
5964 case V16QImode:
5965 case V8HImode:
5966 case V4SImode:
5967 case V2DImode:
5968 case V4SFmode:
5969 case V2DFmode:
5970 if (!type || !AGGREGATE_TYPE_P (type))
5972 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5974 warnedsse = true;
5975 warning (0, "SSE vector argument without SSE enabled "
5976 "changes the ABI");
5978 if (cum->sse_nregs)
5979 return gen_reg_or_parallel (mode, orig_mode,
5980 cum->sse_regno + FIRST_SSE_REG);
5982 break;
5984 case OImode:
5985 /* OImode shouldn't be used directly. */
5986 gcc_unreachable ();
5988 case V8SFmode:
5989 case V8SImode:
5990 case V32QImode:
5991 case V16HImode:
5992 case V4DFmode:
5993 case V4DImode:
5994 if (!type || !AGGREGATE_TYPE_P (type))
5996 if (cum->sse_nregs)
5997 return gen_reg_or_parallel (mode, orig_mode,
5998 cum->sse_regno + FIRST_SSE_REG);
6000 break;
6002 case V8QImode:
6003 case V4HImode:
6004 case V2SImode:
6005 case V2SFmode:
6006 case V1TImode:
6007 case V1DImode:
6008 if (!type || !AGGREGATE_TYPE_P (type))
6010 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
6012 warnedmmx = true;
6013 warning (0, "MMX vector argument without MMX enabled "
6014 "changes the ABI");
6016 if (cum->mmx_nregs)
6017 return gen_reg_or_parallel (mode, orig_mode,
6018 cum->mmx_regno + FIRST_MMX_REG);
6020 break;
6023 return NULL_RTX;
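/* Illustrative behaviour of the 32-bit logic above (a sketch, assuming a
   plain -m32 compilation):  for
       __attribute__ ((fastcall)) int f (int a, int b, int c);
   the first two SImode arguments land in ECX and EDX and the third is
   pushed on the stack, while without the fastcall or regparm attributes
   all three would be passed on the stack.  */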
6026 static rtx
6027 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6028 enum machine_mode orig_mode, tree type, int named)
6030 /* Handle a hidden AL argument containing the number of SSE registers
6031 used for varargs x86-64 functions. */
6032 if (mode == VOIDmode)
6033 return GEN_INT (cum->maybe_vaarg
6034 ? (cum->sse_nregs < 0
6035 ? (cum->call_abi == ix86_abi
6036 ? SSE_REGPARM_MAX
6037 : (ix86_abi != SYSV_ABI
6038 ? X86_64_SSE_REGPARM_MAX
6039 : X86_64_MS_SSE_REGPARM_MAX))
6040 : cum->sse_regno)
6041 : -1);
6043 switch (mode)
6045 default:
6046 break;
6048 case V8SFmode:
6049 case V8SImode:
6050 case V32QImode:
6051 case V16HImode:
6052 case V4DFmode:
6053 case V4DImode:
6054 /* Unnamed 256bit vector mode parameters are passed on stack. */
6055 if (!named)
6056 return NULL;
6057 break;
6060 return construct_container (mode, orig_mode, type, 0, cum->nregs,
6061 cum->sse_nregs,
6062 &x86_64_int_parameter_registers [cum->regno],
6063 cum->sse_regno);
6066 static rtx
6067 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6068 enum machine_mode orig_mode, int named,
6069 HOST_WIDE_INT bytes)
6071 unsigned int regno;
6073 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6074 We use a value of -2 to specify that the current function call uses the MS ABI. */
6075 if (mode == VOIDmode)
6076 return GEN_INT (-2);
6078 /* If we've run out of registers, it goes on the stack. */
6079 if (cum->nregs == 0)
6080 return NULL_RTX;
6082 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6084 /* Only floating point modes are passed in anything but integer regs. */
6085 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6087 if (named)
6088 regno = cum->regno + FIRST_SSE_REG;
6089 else
6091 rtx t1, t2;
6093 /* Unnamed floating parameters are passed in both the
6094 SSE and integer registers. */
6095 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6096 t2 = gen_rtx_REG (mode, regno);
6097 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6098 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6099 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6102 /* Handle aggregate types passed in registers. */
6103 if (orig_mode == BLKmode)
6105 if (bytes > 0 && bytes <= 8)
6106 mode = (bytes > 4 ? DImode : SImode);
6107 if (mode == BLKmode)
6108 mode = DImode;
6111 return gen_reg_or_parallel (mode, orig_mode, regno);
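/* Example of the unnamed-float rule above (illustrative only): in an
   MS-ABI varargs call such as printf ("%f", 3.14), the double occupies
   the second argument slot, so it is passed both in XMM1 and in RDX,
   letting the callee pick it up from either register file.  */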
6115 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6116 tree type, int named)
6118 enum machine_mode mode = omode;
6119 HOST_WIDE_INT bytes, words;
6121 if (mode == BLKmode)
6122 bytes = int_size_in_bytes (type);
6123 else
6124 bytes = GET_MODE_SIZE (mode);
6125 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6127 /* To simplify the code below, represent vector types with a vector mode
6128 even if MMX/SSE are not active. */
6129 if (type && TREE_CODE (type) == VECTOR_TYPE)
6130 mode = type_natural_mode (type, cum);
6132 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6133 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6134 else if (TARGET_64BIT)
6135 return function_arg_64 (cum, mode, omode, type, named);
6136 else
6137 return function_arg_32 (cum, mode, omode, type, bytes, words);
6140 /* A C expression that indicates when an argument must be passed by
6141 reference. If nonzero for an argument, a copy of that argument is
6142 made in memory and a pointer to the argument is passed instead of
6143 the argument itself. The pointer is passed in whatever way is
6144 appropriate for passing a pointer to that type. */
6146 static bool
6147 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6148 enum machine_mode mode ATTRIBUTE_UNUSED,
6149 const_tree type, bool named ATTRIBUTE_UNUSED)
6151 /* See Windows x64 Software Convention. */
6152 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6154 int msize = (int) GET_MODE_SIZE (mode);
6155 if (type)
6157 /* Arrays are passed by reference. */
6158 if (TREE_CODE (type) == ARRAY_TYPE)
6159 return true;
6161 if (AGGREGATE_TYPE_P (type))
6163 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6164 are passed by reference. */
6165 msize = int_size_in_bytes (type);
6169 /* __m128 is passed by reference. */
6170 switch (msize) {
6171 case 1: case 2: case 4: case 8:
6172 break;
6173 default:
6174 return true;
6177 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6178 return 1;
6180 return 0;
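/* Illustrative consequence of the Windows x64 rule above (a sketch): an
   8-byte struct such as struct { int a, b; } is passed by value in a
   single register, whereas a 12- or 16-byte struct - and any __m128
   argument - is copied to memory and a pointer to the copy is passed
   instead.  */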
6183 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
6184 ABI. */
6185 static bool
6186 contains_aligned_value_p (tree type)
6188 enum machine_mode mode = TYPE_MODE (type);
6189 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6190 || mode == TDmode
6191 || mode == TFmode
6192 || mode == TCmode)
6193 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6194 return true;
6195 if (TYPE_ALIGN (type) < 128)
6196 return false;
6198 if (AGGREGATE_TYPE_P (type))
6200 /* Walk the aggregates recursively. */
6201 switch (TREE_CODE (type))
6203 case RECORD_TYPE:
6204 case UNION_TYPE:
6205 case QUAL_UNION_TYPE:
6207 tree field;
6209 /* Walk all the structure fields. */
6210 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6212 if (TREE_CODE (field) == FIELD_DECL
6213 && contains_aligned_value_p (TREE_TYPE (field)))
6214 return true;
6216 break;
6219 case ARRAY_TYPE:
6220 /* Just in case some language passes arrays by value. */
6221 if (contains_aligned_value_p (TREE_TYPE (type)))
6222 return true;
6223 break;
6225 default:
6226 gcc_unreachable ();
6229 return false;
6232 /* Gives the alignment boundary, in bits, of an argument with the
6233 specified mode and type. */
6236 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6238 int align;
6239 if (type)
6241 /* Since the canonical type is used for the call, convert TYPE to its
6242 canonical type if needed. */
6243 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6244 type = TYPE_CANONICAL (type);
6245 align = TYPE_ALIGN (type);
6247 else
6248 align = GET_MODE_ALIGNMENT (mode);
6249 if (align < PARM_BOUNDARY)
6250 align = PARM_BOUNDARY;
6251 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6252 natural boundaries. */
6253 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6255 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6256 make an exception for SSE modes since these require 128bit
6257 alignment.
6259 The handling here differs from field_alignment. ICC aligns MMX
6260 arguments to 4 byte boundaries, while structure fields are aligned
6261 to 8 byte boundaries. */
6262 if (!type)
6264 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6265 align = PARM_BOUNDARY;
6267 else
6269 if (!contains_aligned_value_p (type))
6270 align = PARM_BOUNDARY;
6273 if (align > BIGGEST_ALIGNMENT)
6274 align = BIGGEST_ALIGNMENT;
6275 return align;
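/* Illustrative values returned by the function above (assumptions about
   typical targets, not normative): on 32-bit, a plain int or double
   argument gets PARM_BOUNDARY (32 bits), while an __m128 or _Decimal128
   argument keeps its natural 128-bit boundary; on 64-bit, the type's own
   alignment is used (at least PARM_BOUNDARY), capped at BIGGEST_ALIGNMENT.  */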
6278 /* Return true if N is a possible register number of function value. */
6280 bool
6281 ix86_function_value_regno_p (int regno)
6283 switch (regno)
6285 case 0:
6286 return true;
6288 case FIRST_FLOAT_REG:
6289 /* TODO: The function should depend on current function ABI but
6290 builtins.c would need updating then. Therefore we use the
6291 default ABI. */
6292 if (TARGET_64BIT && ix86_abi == MS_ABI)
6293 return false;
6294 return TARGET_FLOAT_RETURNS_IN_80387;
6296 case FIRST_SSE_REG:
6297 return TARGET_SSE;
6299 case FIRST_MMX_REG:
6300 if (TARGET_MACHO || TARGET_64BIT)
6301 return false;
6302 return TARGET_MMX;
6305 return false;
6308 /* Define how to find the value returned by a function.
6309 VALTYPE is the data type of the value (as a tree).
6310 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6311 otherwise, FUNC is 0. */
6313 static rtx
6314 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6315 const_tree fntype, const_tree fn)
6317 unsigned int regno;
6319 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6320 we normally prevent this case when mmx is not available. However
6321 some ABIs may require the result to be returned like DImode. */
6322 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6323 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6325 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6326 we prevent this case when sse is not available. However some ABIs
6327 may require the result to be returned like integer TImode. */
6328 else if (mode == TImode
6329 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6330 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6332 /* 32-byte vector modes in %ymm0. */
6333 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6334 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6336 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6337 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6338 regno = FIRST_FLOAT_REG;
6339 else
6340 /* Most things go in %eax. */
6341 regno = AX_REG;
6343 /* Override FP return register with %xmm0 for local functions when
6344 SSE math is enabled or for functions with sseregparm attribute. */
6345 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6347 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6348 if ((sse_level >= 1 && mode == SFmode)
6349 || (sse_level == 2 && mode == DFmode))
6350 regno = FIRST_SSE_REG;
6353 /* OImode shouldn't be used directly. */
6354 gcc_assert (mode != OImode);
6356 return gen_rtx_REG (orig_mode, regno);
6359 static rtx
6360 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6361 const_tree valtype)
6363 rtx ret;
6365 /* Handle libcalls, which don't provide a type node. */
6366 if (valtype == NULL)
6368 switch (mode)
6370 case SFmode:
6371 case SCmode:
6372 case DFmode:
6373 case DCmode:
6374 case TFmode:
6375 case SDmode:
6376 case DDmode:
6377 case TDmode:
6378 return gen_rtx_REG (mode, FIRST_SSE_REG);
6379 case XFmode:
6380 case XCmode:
6381 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6382 case TCmode:
6383 return NULL;
6384 default:
6385 return gen_rtx_REG (mode, AX_REG);
6389 ret = construct_container (mode, orig_mode, valtype, 1,
6390 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6391 x86_64_int_return_registers, 0);
6393 /* For zero sized structures, construct_container returns NULL, but we
6394 need to keep the rest of the compiler happy by returning a meaningful value. */
6395 if (!ret)
6396 ret = gen_rtx_REG (orig_mode, AX_REG);
6398 return ret;
6401 static rtx
6402 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6404 unsigned int regno = AX_REG;
6406 if (TARGET_SSE)
6408 switch (GET_MODE_SIZE (mode))
6410 case 16:
6411 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6412 && !COMPLEX_MODE_P (mode))
6413 regno = FIRST_SSE_REG;
6414 break;
6415 case 8:
6416 case 4:
6417 if (mode == SFmode || mode == DFmode)
6418 regno = FIRST_SSE_REG;
6419 break;
6420 default:
6421 break;
6424 return gen_rtx_REG (orig_mode, regno);
6427 static rtx
6428 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6429 enum machine_mode orig_mode, enum machine_mode mode)
6431 const_tree fn, fntype;
6433 fn = NULL_TREE;
6434 if (fntype_or_decl && DECL_P (fntype_or_decl))
6435 fn = fntype_or_decl;
6436 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6438 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6439 return function_value_ms_64 (orig_mode, mode);
6440 else if (TARGET_64BIT)
6441 return function_value_64 (orig_mode, mode, valtype);
6442 else
6443 return function_value_32 (orig_mode, mode, fntype, fn);
6446 static rtx
6447 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6448 bool outgoing ATTRIBUTE_UNUSED)
6450 enum machine_mode mode, orig_mode;
6452 orig_mode = TYPE_MODE (valtype);
6453 mode = type_natural_mode (valtype, NULL);
6454 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
6458 ix86_libcall_value (enum machine_mode mode)
6460 return ix86_function_value_1 (NULL, NULL, mode, mode);
6463 /* Return true iff type is returned in memory. */
6465 static int ATTRIBUTE_UNUSED
6466 return_in_memory_32 (const_tree type, enum machine_mode mode)
6468 HOST_WIDE_INT size;
6470 if (mode == BLKmode)
6471 return 1;
6473 size = int_size_in_bytes (type);
6475 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6476 return 0;
6478 if (VECTOR_MODE_P (mode) || mode == TImode)
6480 /* User-created vectors small enough to fit in EAX. */
6481 if (size < 8)
6482 return 0;
6484 /* MMX/3dNow values are returned in MM0,
6485 except when it doesn't exist. */
6486 if (size == 8)
6487 return (TARGET_MMX ? 0 : 1);
6489 /* SSE values are returned in XMM0, except when it doesn't exist. */
6490 if (size == 16)
6491 return (TARGET_SSE ? 0 : 1);
6493 /* AVX values are returned in YMM0, except when it doesn't exist. */
6494 if (size == 32)
6495 return TARGET_AVX ? 0 : 1;
6498 if (mode == XFmode)
6499 return 0;
6501 if (size > 12)
6502 return 1;
6504 /* OImode shouldn't be used directly. */
6505 gcc_assert (mode != OImode);
6507 return 0;
6510 static int ATTRIBUTE_UNUSED
6511 return_in_memory_64 (const_tree type, enum machine_mode mode)
6513 int needed_intregs, needed_sseregs;
6514 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6517 static int ATTRIBUTE_UNUSED
6518 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6520 HOST_WIDE_INT size = int_size_in_bytes (type);
6522 /* __m128 is returned in xmm0. */
6523 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6524 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6525 return 0;
6527 /* Otherwise, the size must be exactly 1, 2, 4 or 8 bytes. */
6528 return (size != 1 && size != 2 && size != 4 && size != 8);
6531 static bool
6532 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6534 #ifdef SUBTARGET_RETURN_IN_MEMORY
6535 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6536 #else
6537 const enum machine_mode mode = type_natural_mode (type, NULL);
6539 if (TARGET_64BIT)
6541 if (ix86_function_type_abi (fntype) == MS_ABI)
6542 return return_in_memory_ms_64 (type, mode);
6543 else
6544 return return_in_memory_64 (type, mode);
6546 else
6547 return return_in_memory_32 (type, mode);
6548 #endif
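/* A few illustrative outcomes of the 32-bit rules above (examples only,
   assuming the default ABI): a long double (XFmode) is not forced into
   memory; an 8-byte vector comes back in MM0 only when MMX is enabled;
   a 16-byte non-vector struct is returned in memory, since it is larger
   than 12 bytes.  */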
6551 /* Return true iff TYPE is returned in memory. This version is used
6552 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6553 but differs notably in that when MMX is available, 8-byte vectors
6554 are returned in memory, rather than in MMX registers. */
6556 bool
6557 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6559 int size;
6560 enum machine_mode mode = type_natural_mode (type, NULL);
6562 if (TARGET_64BIT)
6563 return return_in_memory_64 (type, mode);
6565 if (mode == BLKmode)
6566 return 1;
6568 size = int_size_in_bytes (type);
6570 if (VECTOR_MODE_P (mode))
6572 /* Return in memory only if MMX registers *are* available. This
6573 seems backwards, but it is consistent with the existing
6574 Solaris x86 ABI. */
6575 if (size == 8)
6576 return TARGET_MMX;
6577 if (size == 16)
6578 return !TARGET_SSE;
6580 else if (mode == TImode)
6581 return !TARGET_SSE;
6582 else if (mode == XFmode)
6583 return 0;
6585 return size > 12;
6588 /* When returning SSE vector types, we have a choice of either
6589 (1) being abi incompatible with a -march switch, or
6590 (2) generating an error.
6591 Given no good solution, I think the safest thing is one warning.
6592 The user won't be able to use -Werror, but....
6594 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6595 called in response to actually generating a caller or callee that
6596 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6597 via aggregate_value_p for general type probing from tree-ssa. */
6599 static rtx
6600 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6602 static bool warnedsse, warnedmmx;
6604 if (!TARGET_64BIT && type)
6606 /* Look at the return type of the function, not the function type. */
6607 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6609 if (!TARGET_SSE && !warnedsse)
6611 if (mode == TImode
6612 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6614 warnedsse = true;
6615 warning (0, "SSE vector return without SSE enabled "
6616 "changes the ABI");
6620 if (!TARGET_MMX && !warnedmmx)
6622 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6624 warnedmmx = true;
6625 warning (0, "MMX vector return without MMX enabled "
6626 "changes the ABI");
6631 return NULL;
6635 /* Create the va_list data type. */
6637 /* Returns the calling-convention-specific va_list data type.
6638 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6640 static tree
6641 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6643 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6645 /* For i386 we use plain pointer to argument area. */
6646 if (!TARGET_64BIT || abi == MS_ABI)
6647 return build_pointer_type (char_type_node);
6649 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6650 type_decl = build_decl (BUILTINS_LOCATION,
6651 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6653 f_gpr = build_decl (BUILTINS_LOCATION,
6654 FIELD_DECL, get_identifier ("gp_offset"),
6655 unsigned_type_node);
6656 f_fpr = build_decl (BUILTINS_LOCATION,
6657 FIELD_DECL, get_identifier ("fp_offset"),
6658 unsigned_type_node);
6659 f_ovf = build_decl (BUILTINS_LOCATION,
6660 FIELD_DECL, get_identifier ("overflow_arg_area"),
6661 ptr_type_node);
6662 f_sav = build_decl (BUILTINS_LOCATION,
6663 FIELD_DECL, get_identifier ("reg_save_area"),
6664 ptr_type_node);
6666 va_list_gpr_counter_field = f_gpr;
6667 va_list_fpr_counter_field = f_fpr;
6669 DECL_FIELD_CONTEXT (f_gpr) = record;
6670 DECL_FIELD_CONTEXT (f_fpr) = record;
6671 DECL_FIELD_CONTEXT (f_ovf) = record;
6672 DECL_FIELD_CONTEXT (f_sav) = record;
6674 TREE_CHAIN (record) = type_decl;
6675 TYPE_NAME (record) = type_decl;
6676 TYPE_FIELDS (record) = f_gpr;
6677 TREE_CHAIN (f_gpr) = f_fpr;
6678 TREE_CHAIN (f_fpr) = f_ovf;
6679 TREE_CHAIN (f_ovf) = f_sav;
6681 layout_type (record);
6683 /* The correct type is an array type of one element. */
6684 return build_array_type (record, build_index_type (size_zero_node));
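/* For reference, the 64-bit SysV record built above corresponds to the
   familiar declaration (an illustration of the layout, not code emitted
   here):
       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } __builtin_va_list[1];
*/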
6687 /* Set up the builtin va_list data type and, for 64-bit, the additional
6688 calling-convention-specific va_list data types. */
6690 static tree
6691 ix86_build_builtin_va_list (void)
6693 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6695 /* Initialize abi specific va_list builtin types. */
6696 if (TARGET_64BIT)
6698 tree t;
6699 if (ix86_abi == MS_ABI)
6701 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6702 if (TREE_CODE (t) != RECORD_TYPE)
6703 t = build_variant_type_copy (t);
6704 sysv_va_list_type_node = t;
6706 else
6708 t = ret;
6709 if (TREE_CODE (t) != RECORD_TYPE)
6710 t = build_variant_type_copy (t);
6711 sysv_va_list_type_node = t;
6713 if (ix86_abi != MS_ABI)
6715 t = ix86_build_builtin_va_list_abi (MS_ABI);
6716 if (TREE_CODE (t) != RECORD_TYPE)
6717 t = build_variant_type_copy (t);
6718 ms_va_list_type_node = t;
6720 else
6722 t = ret;
6723 if (TREE_CODE (t) != RECORD_TYPE)
6724 t = build_variant_type_copy (t);
6725 ms_va_list_type_node = t;
6729 return ret;
6732 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6734 static void
6735 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6737 rtx save_area, mem;
6738 rtx label;
6739 rtx label_ref;
6740 rtx tmp_reg;
6741 rtx nsse_reg;
6742 alias_set_type set;
6743 int i;
6744 int regparm = ix86_regparm;
6746 if (cum->call_abi != ix86_abi)
6747 regparm = (ix86_abi != SYSV_ABI
6748 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6750 /* GPR size of varargs save area. */
6751 if (cfun->va_list_gpr_size)
6752 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6753 else
6754 ix86_varargs_gpr_size = 0;
6756 /* FPR size of varargs save area. We don't need it if we don't pass
6757 anything in SSE registers. */
6758 if (cum->sse_nregs && cfun->va_list_fpr_size)
6759 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6760 else
6761 ix86_varargs_fpr_size = 0;
6763 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6764 return;
6766 save_area = frame_pointer_rtx;
6767 set = get_varargs_alias_set ();
6769 for (i = cum->regno;
6770 i < regparm
6771 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6772 i++)
6774 mem = gen_rtx_MEM (Pmode,
6775 plus_constant (save_area, i * UNITS_PER_WORD));
6776 MEM_NOTRAP_P (mem) = 1;
6777 set_mem_alias_set (mem, set);
6778 emit_move_insn (mem, gen_rtx_REG (Pmode,
6779 x86_64_int_parameter_registers[i]));
6782 if (ix86_varargs_fpr_size)
6784 /* Now emit code to save SSE registers. The AX parameter contains the number
6785 of SSE parameter registers used to call this function. We use the
6786 sse_prologue_save insn template, which produces a computed jump across
6787 the SSE saves. We need some preparation work to get this working. */
6789 label = gen_label_rtx ();
6790 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6792 /* Compute the address to jump to:
6793 label - eax*4 + nnamed_sse_arguments*4, or
6794 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6795 tmp_reg = gen_reg_rtx (Pmode);
6796 nsse_reg = gen_reg_rtx (Pmode);
6797 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6798 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6799 gen_rtx_MULT (Pmode, nsse_reg,
6800 GEN_INT (4))));
6802 /* vmovaps is one byte longer than movaps. */
6803 if (TARGET_AVX)
6804 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6805 gen_rtx_PLUS (Pmode, tmp_reg,
6806 nsse_reg)));
6808 if (cum->sse_regno)
6809 emit_move_insn
6810 (nsse_reg,
6811 gen_rtx_CONST (DImode,
6812 gen_rtx_PLUS (DImode,
6813 label_ref,
6814 GEN_INT (cum->sse_regno
6815 * (TARGET_AVX ? 5 : 4)))));
6816 else
6817 emit_move_insn (nsse_reg, label_ref);
6818 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6820 /* Compute the address of the memory block we save into. We always use a
6821 pointer pointing 127 bytes after the first byte to store - this is needed
6822 to keep the instruction size limited to 4 bytes (5 bytes for AVX) with a
6823 one byte displacement. */
6824 tmp_reg = gen_reg_rtx (Pmode);
6825 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6826 plus_constant (save_area,
6827 ix86_varargs_gpr_size + 127)));
6828 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6829 MEM_NOTRAP_P (mem) = 1;
6830 set_mem_alias_set (mem, set);
6831 set_mem_align (mem, BITS_PER_WORD);
6833 /* And finally do the dirty job! */
6834 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6835 GEN_INT (cum->sse_regno), label));
6839 static void
6840 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6842 alias_set_type set = get_varargs_alias_set ();
6843 int i;
6845 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6847 rtx reg, mem;
6849 mem = gen_rtx_MEM (Pmode,
6850 plus_constant (virtual_incoming_args_rtx,
6851 i * UNITS_PER_WORD));
6852 MEM_NOTRAP_P (mem) = 1;
6853 set_mem_alias_set (mem, set);
6855 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6856 emit_move_insn (mem, reg);
6860 static void
6861 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6862 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6863 int no_rtl)
6865 CUMULATIVE_ARGS next_cum;
6866 tree fntype;
6868 /* This argument doesn't appear to be used anymore. Which is good,
6869 because the old code here didn't suppress rtl generation. */
6870 gcc_assert (!no_rtl);
6872 if (!TARGET_64BIT)
6873 return;
6875 fntype = TREE_TYPE (current_function_decl);
6877 /* For varargs, we do not want to skip the dummy va_dcl argument.
6878 For stdargs, we do want to skip the last named argument. */
6879 next_cum = *cum;
6880 if (stdarg_p (fntype))
6881 function_arg_advance (&next_cum, mode, type, 1);
6883 if (cum->call_abi == MS_ABI)
6884 setup_incoming_varargs_ms_64 (&next_cum);
6885 else
6886 setup_incoming_varargs_64 (&next_cum);
6889 /* Check whether TYPE is the char * kind of va_list. */
6891 static bool
6892 is_va_list_char_pointer (tree type)
6894 tree canonic;
6896 /* For 32-bit it is always true. */
6897 if (!TARGET_64BIT)
6898 return true;
6899 canonic = ix86_canonical_va_list_type (type);
6900 return (canonic == ms_va_list_type_node
6901 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6904 /* Implement va_start. */
6906 static void
6907 ix86_va_start (tree valist, rtx nextarg)
6909 HOST_WIDE_INT words, n_gpr, n_fpr;
6910 tree f_gpr, f_fpr, f_ovf, f_sav;
6911 tree gpr, fpr, ovf, sav, t;
6912 tree type;
6914 /* Only 64bit target needs something special. */
6915 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6917 std_expand_builtin_va_start (valist, nextarg);
6918 return;
6921 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6922 f_fpr = TREE_CHAIN (f_gpr);
6923 f_ovf = TREE_CHAIN (f_fpr);
6924 f_sav = TREE_CHAIN (f_ovf);
6926 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6927 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6928 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6929 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6930 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6932 /* Count number of gp and fp argument registers used. */
6933 words = crtl->args.info.words;
6934 n_gpr = crtl->args.info.regno;
6935 n_fpr = crtl->args.info.sse_regno;
6937 if (cfun->va_list_gpr_size)
6939 type = TREE_TYPE (gpr);
6940 t = build2 (MODIFY_EXPR, type,
6941 gpr, build_int_cst (type, n_gpr * 8));
6942 TREE_SIDE_EFFECTS (t) = 1;
6943 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6946 if (TARGET_SSE && cfun->va_list_fpr_size)
6948 type = TREE_TYPE (fpr);
6949 t = build2 (MODIFY_EXPR, type, fpr,
6950 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6951 TREE_SIDE_EFFECTS (t) = 1;
6952 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6955 /* Find the overflow area. */
6956 type = TREE_TYPE (ovf);
6957 t = make_tree (type, crtl->args.internal_arg_pointer);
6958 if (words != 0)
6959 t = build2 (POINTER_PLUS_EXPR, type, t,
6960 size_int (words * UNITS_PER_WORD));
6961 t = build2 (MODIFY_EXPR, type, ovf, t);
6962 TREE_SIDE_EFFECTS (t) = 1;
6963 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6965 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6967 /* Find the register save area.
6968 The function prologue saves it right above the stack frame. */
6969 type = TREE_TYPE (sav);
6970 t = make_tree (type, frame_pointer_rtx);
6971 if (!ix86_varargs_gpr_size)
6972 t = build2 (POINTER_PLUS_EXPR, type, t,
6973 size_int (-8 * X86_64_REGPARM_MAX));
6974 t = build2 (MODIFY_EXPR, type, sav, t);
6975 TREE_SIDE_EFFECTS (t) = 1;
6976 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
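/* In effect, the trees built above initialize the va_list roughly like
   this (a sketch assuming the standard SysV register counts):
       ap->gp_offset = n_gpr * 8;                       0 .. 48
       ap->fp_offset = 48 + n_fpr * 16;                 48 .. 176
       ap->overflow_arg_area = arg_ptr + words * UNITS_PER_WORD;
       ap->reg_save_area = save area set up by the prologue;
*/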
6980 /* Implement va_arg. */
6982 static tree
6983 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6984 gimple_seq *post_p)
6986 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6987 tree f_gpr, f_fpr, f_ovf, f_sav;
6988 tree gpr, fpr, ovf, sav, t;
6989 int size, rsize;
6990 tree lab_false, lab_over = NULL_TREE;
6991 tree addr, t2;
6992 rtx container;
6993 int indirect_p = 0;
6994 tree ptrtype;
6995 enum machine_mode nat_mode;
6996 int arg_boundary;
6998 /* Only 64bit target needs something special. */
6999 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
7000 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
7002 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
7003 f_fpr = TREE_CHAIN (f_gpr);
7004 f_ovf = TREE_CHAIN (f_fpr);
7005 f_sav = TREE_CHAIN (f_ovf);
7007 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
7008 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
7009 valist = build_va_arg_indirect_ref (valist);
7010 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
7011 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
7012 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
7014 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
7015 if (indirect_p)
7016 type = build_pointer_type (type);
7017 size = int_size_in_bytes (type);
7018 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7020 nat_mode = type_natural_mode (type, NULL);
7021 switch (nat_mode)
7023 case V8SFmode:
7024 case V8SImode:
7025 case V32QImode:
7026 case V16HImode:
7027 case V4DFmode:
7028 case V4DImode:
7029 /* Unnamed 256bit vector mode parameters are passed on stack. */
7030 if (ix86_cfun_abi () == SYSV_ABI)
7032 container = NULL;
7033 break;
7036 default:
7037 container = construct_container (nat_mode, TYPE_MODE (type),
7038 type, 0, X86_64_REGPARM_MAX,
7039 X86_64_SSE_REGPARM_MAX, intreg,
7041 break;
7044 /* Pull the value out of the saved registers. */
7046 addr = create_tmp_var (ptr_type_node, "addr");
7048 if (container)
7050 int needed_intregs, needed_sseregs;
7051 bool need_temp;
7052 tree int_addr, sse_addr;
7054 lab_false = create_artificial_label (UNKNOWN_LOCATION);
7055 lab_over = create_artificial_label (UNKNOWN_LOCATION);
7057 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
7059 need_temp = (!REG_P (container)
7060 && ((needed_intregs && TYPE_ALIGN (type) > 64)
7061 || TYPE_ALIGN (type) > 128));
7063 /* If we are passing a structure, verify that it is a consecutive block
7064 in the register save area. If not, we need to do moves. */
7065 if (!need_temp && !REG_P (container))
7067 /* Verify that all registers are strictly consecutive. */
7068 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
7070 int i;
7072 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7074 rtx slot = XVECEXP (container, 0, i);
7075 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7076 || INTVAL (XEXP (slot, 1)) != i * 16)
7077 need_temp = 1;
7080 else
7082 int i;
7084 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7086 rtx slot = XVECEXP (container, 0, i);
7087 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7088 || INTVAL (XEXP (slot, 1)) != i * 8)
7089 need_temp = 1;
7093 if (!need_temp)
7095 int_addr = addr;
7096 sse_addr = addr;
7098 else
7100 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7101 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7104 /* First ensure that we fit completely in registers. */
7105 if (needed_intregs)
7107 t = build_int_cst (TREE_TYPE (gpr),
7108 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7109 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7110 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7111 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7112 gimplify_and_add (t, pre_p);
7114 if (needed_sseregs)
7116 t = build_int_cst (TREE_TYPE (fpr),
7117 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7118 + X86_64_REGPARM_MAX * 8);
7119 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7120 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7121 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7122 gimplify_and_add (t, pre_p);
7125 /* Compute index to start of area used for integer regs. */
7126 if (needed_intregs)
7128 /* int_addr = gpr + sav; */
7129 t = fold_convert (sizetype, gpr);
7130 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7131 gimplify_assign (int_addr, t, pre_p);
7133 if (needed_sseregs)
7135 /* sse_addr = fpr + sav; */
7136 t = fold_convert (sizetype, fpr);
7137 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7138 gimplify_assign (sse_addr, t, pre_p);
7140 if (need_temp)
7142 int i;
7143 tree temp = create_tmp_var (type, "va_arg_tmp");
7145 /* addr = &temp; */
7146 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7147 gimplify_assign (addr, t, pre_p);
7149 for (i = 0; i < XVECLEN (container, 0); i++)
7151 rtx slot = XVECEXP (container, 0, i);
7152 rtx reg = XEXP (slot, 0);
7153 enum machine_mode mode = GET_MODE (reg);
7154 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7155 tree addr_type = build_pointer_type (piece_type);
7156 tree daddr_type = build_pointer_type_for_mode (piece_type,
7157 ptr_mode, true);
7158 tree src_addr, src;
7159 int src_offset;
7160 tree dest_addr, dest;
7162 if (SSE_REGNO_P (REGNO (reg)))
7164 src_addr = sse_addr;
7165 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7167 else
7169 src_addr = int_addr;
7170 src_offset = REGNO (reg) * 8;
7172 src_addr = fold_convert (addr_type, src_addr);
7173 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7174 size_int (src_offset));
7175 src = build_va_arg_indirect_ref (src_addr);
7177 dest_addr = fold_convert (daddr_type, addr);
7178 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7179 size_int (INTVAL (XEXP (slot, 1))));
7180 dest = build_va_arg_indirect_ref (dest_addr);
7182 gimplify_assign (dest, src, pre_p);
7186 if (needed_intregs)
7188 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7189 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7190 gimplify_assign (gpr, t, pre_p);
7193 if (needed_sseregs)
7195 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7196 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7197 gimplify_assign (fpr, t, pre_p);
7200 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7202 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7205 /* ... otherwise out of the overflow area. */
7207 /* When we align a parameter on the stack for the caller, if the parameter
7208 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
7209 aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We match the callee
7210 here with the caller.  */
7211 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7212 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7213 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7215 /* Care for on-stack alignment if needed. */
7216 if (arg_boundary <= 64
7217 || integer_zerop (TYPE_SIZE (type)))
7218 t = ovf;
7219 else
7221 HOST_WIDE_INT align = arg_boundary / 8;
7222 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7223 size_int (align - 1));
7224 t = fold_convert (sizetype, t);
7225 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7226 size_int (-align));
7227 t = fold_convert (TREE_TYPE (ovf), t);
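/* T now holds OVF rounded up to the next multiple of ALIGN bytes, using the
   usual (x + align - 1) & -align idiom.  */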
7229 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7230 gimplify_assign (addr, t, pre_p);
7232 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7233 size_int (rsize * UNITS_PER_WORD));
7234 gimplify_assign (unshare_expr (ovf), t, pre_p);
7236 if (container)
7237 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7239 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7240 addr = fold_convert (ptrtype, addr);
7242 if (indirect_p)
7243 addr = build_va_arg_indirect_ref (addr);
7244 return build_va_arg_indirect_ref (addr);
7247 /* Return nonzero if OPNUM's MEM should be matched
7248 in movabs* patterns. */
7251 ix86_check_movabs (rtx insn, int opnum)
7253 rtx set, mem;
7255 set = PATTERN (insn);
7256 if (GET_CODE (set) == PARALLEL)
7257 set = XVECEXP (set, 0, 0);
7258 gcc_assert (GET_CODE (set) == SET);
7259 mem = XEXP (set, opnum);
7260 while (GET_CODE (mem) == SUBREG)
7261 mem = SUBREG_REG (mem);
7262 gcc_assert (MEM_P (mem));
7263 return (volatile_ok || !MEM_VOLATILE_P (mem));
7266 /* Initialize the table of extra 80387 mathematical constants. */
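/* These are, in order, log10(2), ln(2), log2(e), log2(10) and pi: the values
   loaded by the x87 fldlg2, fldln2, fldl2e, fldl2t and fldpi instructions.  */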
7268 static void
7269 init_ext_80387_constants (void)
7271 static const char * cst[5] =
7273 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7274 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7275 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7276 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7277 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7279 int i;
7281 for (i = 0; i < 5; i++)
7283 real_from_string (&ext_80387_constants_table[i], cst[i]);
7284 /* Ensure each constant is rounded to XFmode precision. */
7285 real_convert (&ext_80387_constants_table[i],
7286 XFmode, &ext_80387_constants_table[i]);
7289 ext_80387_constants_init = 1;
7292 /* Return a nonzero code if the constant is something that can be loaded
7293 with a special instruction.  */
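/* The return value encodes the required load sequence: -1 if X is not an
   80387 floating-point constant at all, 0 if it needs an ordinary load,
   1 for fldz, 2 for fld1, 3-7 for the fldlg2/fldln2/fldl2e/fldl2t/fldpi
   table initialized above, and 8/9 for -0.0 and -1.0, which are split into
   fldz;fchs and fld1;fchs.  */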
7296 standard_80387_constant_p (rtx x)
7298 enum machine_mode mode = GET_MODE (x);
7300 REAL_VALUE_TYPE r;
7302 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7303 return -1;
7305 if (x == CONST0_RTX (mode))
7306 return 1;
7307 if (x == CONST1_RTX (mode))
7308 return 2;
7310 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7312 /* For XFmode constants, try to find a special 80387 instruction when
7313 optimizing for size or on those CPUs that benefit from them. */
7314 if (mode == XFmode
7315 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7317 int i;
7319 if (! ext_80387_constants_init)
7320 init_ext_80387_constants ();
7322 for (i = 0; i < 5; i++)
7323 if (real_identical (&r, &ext_80387_constants_table[i]))
7324 return i + 3;
7327 /* A load of the constant -0.0 or -1.0 will be split into an
7328 fldz;fchs or fld1;fchs sequence.  */
7329 if (real_isnegzero (&r))
7330 return 8;
7331 if (real_identical (&r, &dconstm1))
7332 return 9;
7334 return 0;
7337 /* Return the opcode of the special instruction to be used to load
7338 the constant X. */
7340 const char *
7341 standard_80387_constant_opcode (rtx x)
7343 switch (standard_80387_constant_p (x))
7345 case 1:
7346 return "fldz";
7347 case 2:
7348 return "fld1";
7349 case 3:
7350 return "fldlg2";
7351 case 4:
7352 return "fldln2";
7353 case 5:
7354 return "fldl2e";
7355 case 6:
7356 return "fldl2t";
7357 case 7:
7358 return "fldpi";
7359 case 8:
7360 case 9:
7361 return "#";
7362 default:
7363 gcc_unreachable ();
7367 /* Return the CONST_DOUBLE representing the 80387 constant that is
7368 loaded by the specified special instruction. The argument IDX
7369 matches the return value from standard_80387_constant_p. */
7372 standard_80387_constant_rtx (int idx)
7374 int i;
7376 if (! ext_80387_constants_init)
7377 init_ext_80387_constants ();
7379 switch (idx)
7381 case 3:
7382 case 4:
7383 case 5:
7384 case 6:
7385 case 7:
7386 i = idx - 3;
7387 break;
7389 default:
7390 gcc_unreachable ();
7393 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7394 XFmode);
7397 /* Return 1 if X is all 0s and 2 if X is all 1s
7398 in a supported SSE vector mode.  */
7401 standard_sse_constant_p (rtx x)
7403 enum machine_mode mode = GET_MODE (x);
7405 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7406 return 1;
7407 if (vector_all_ones_operand (x, mode))
7408 switch (mode)
7410 case V16QImode:
7411 case V8HImode:
7412 case V4SImode:
7413 case V2DImode:
7414 if (TARGET_SSE2)
7415 return 2;
7416 default:
7417 break;
7420 return 0;
7423 /* Return the opcode of the special instruction to be used to load
7424 the constant X. */
7426 const char *
7427 standard_sse_constant_opcode (rtx insn, rtx x)
7429 switch (standard_sse_constant_p (x))
7431 case 1:
7432 switch (get_attr_mode (insn))
7434 case MODE_V4SF:
7435 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7436 case MODE_V2DF:
7437 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7438 case MODE_TI:
7439 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7440 case MODE_V8SF:
7441 return "vxorps\t%x0, %x0, %x0";
7442 case MODE_V4DF:
7443 return "vxorpd\t%x0, %x0, %x0";
7444 case MODE_OI:
7445 return "vpxor\t%x0, %x0, %x0";
7446 default:
7447 break;
7449 case 2:
7450 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7451 default:
7452 break;
7454 gcc_unreachable ();
7457 /* Returns 1 if OP contains a symbol reference */
7460 symbolic_reference_mentioned_p (rtx op)
7462 const char *fmt;
7463 int i;
7465 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7466 return 1;
7468 fmt = GET_RTX_FORMAT (GET_CODE (op));
7469 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7471 if (fmt[i] == 'E')
7473 int j;
7475 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7476 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7477 return 1;
7480 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7481 return 1;
7484 return 0;
7487 /* Return 1 if it is appropriate to emit `ret' instructions in the
7488 body of a function. Do this only if the epilogue is simple, needing a
7489 couple of insns. Prior to reloading, we can't tell how many registers
7490 must be saved, so return 0 then. Return 0 if there is no frame
7491 marker to de-allocate. */
7494 ix86_can_use_return_insn_p (void)
7496 struct ix86_frame frame;
7498 if (! reload_completed || frame_pointer_needed)
7499 return 0;
7501 /* Don't allow more than a 32k pop, since that's all we can do
7502 with one instruction.  */
7503 if (crtl->args.pops_args
7504 && crtl->args.size >= 32768)
7505 return 0;
7507 ix86_compute_frame_layout (&frame);
7508 return frame.to_allocate == 0 && frame.padding0 == 0
7509 && (frame.nregs + frame.nsseregs) == 0;
7512 /* Value should be nonzero if functions must have frame pointers.
7513 Zero means the frame pointer need not be set up (and parms may
7514 be accessed via the stack pointer) in functions that seem suitable. */
7516 static bool
7517 ix86_frame_pointer_required (void)
7519 /* If we accessed previous frames, then the generated code expects
7520 to be able to access the saved ebp value in our frame. */
7521 if (cfun->machine->accesses_prev_frame)
7522 return true;
7524 /* Several x86 OSes need a frame pointer for other reasons,
7525 usually pertaining to setjmp.  */
7526 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7527 return true;
7529 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7530 the frame pointer by default. Turn it back on now if we've not
7531 got a leaf function. */
7532 if (TARGET_OMIT_LEAF_FRAME_POINTER
7533 && (!current_function_is_leaf
7534 || ix86_current_function_calls_tls_descriptor))
7535 return true;
7537 if (crtl->profile)
7538 return true;
7540 return false;
7543 /* Record that the current function accesses previous call frames. */
7545 void
7546 ix86_setup_frame_addresses (void)
7548 cfun->machine->accesses_prev_frame = 1;
7551 #ifndef USE_HIDDEN_LINKONCE
7552 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7553 # define USE_HIDDEN_LINKONCE 1
7554 # else
7555 # define USE_HIDDEN_LINKONCE 0
7556 # endif
7557 #endif
7559 static int pic_labels_used;
7561 /* Fills in the label name that should be used for a pc thunk for
7562 the given register. */
7564 static void
7565 get_pc_thunk_name (char name[32], unsigned int regno)
7567 gcc_assert (!TARGET_64BIT);
7569 if (USE_HIDDEN_LINKONCE)
7570 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7571 else
7572 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7576 /* This function emits the pc thunks used by -fpic code: each one loads its
7577 register with the return address of the caller and then returns.  */
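/* Each thunk emitted below simply loads the word at the stack pointer (the
   caller's return address) into its register and returns; output_set_got
   later adds the GOT symbol to that PC value to form the PIC base.  */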
7579 void
7580 ix86_file_end (void)
7582 rtx xops[2];
7583 int regno;
7585 for (regno = 0; regno < 8; ++regno)
7587 char name[32];
7589 if (! ((pic_labels_used >> regno) & 1))
7590 continue;
7592 get_pc_thunk_name (name, regno);
7594 #if TARGET_MACHO
7595 if (TARGET_MACHO)
7597 switch_to_section (darwin_sections[text_coal_section]);
7598 fputs ("\t.weak_definition\t", asm_out_file);
7599 assemble_name (asm_out_file, name);
7600 fputs ("\n\t.private_extern\t", asm_out_file);
7601 assemble_name (asm_out_file, name);
7602 fputs ("\n", asm_out_file);
7603 ASM_OUTPUT_LABEL (asm_out_file, name);
7605 else
7606 #endif
7607 if (USE_HIDDEN_LINKONCE)
7609 tree decl;
7611 decl = build_decl (BUILTINS_LOCATION,
7612 FUNCTION_DECL, get_identifier (name),
7613 error_mark_node);
7614 TREE_PUBLIC (decl) = 1;
7615 TREE_STATIC (decl) = 1;
7616 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7618 (*targetm.asm_out.unique_section) (decl, 0);
7619 switch_to_section (get_named_section (decl, NULL, 0));
7621 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7622 fputs ("\t.hidden\t", asm_out_file);
7623 assemble_name (asm_out_file, name);
7624 putc ('\n', asm_out_file);
7625 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7627 else
7629 switch_to_section (text_section);
7630 ASM_OUTPUT_LABEL (asm_out_file, name);
7633 xops[0] = gen_rtx_REG (Pmode, regno);
7634 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7635 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7636 output_asm_insn ("ret", xops);
7639 if (NEED_INDICATE_EXEC_STACK)
7640 file_end_indicate_exec_stack ();
7643 /* Emit code for the SET_GOT patterns. */
7645 const char *
7646 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7648 rtx xops[3];
7650 xops[0] = dest;
7652 if (TARGET_VXWORKS_RTP && flag_pic)
7654 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7655 xops[2] = gen_rtx_MEM (Pmode,
7656 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7657 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7659 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7660 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7661 an unadorned address. */
7662 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7663 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7664 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7665 return "";
7668 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7670 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7672 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7674 if (!flag_pic)
7675 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7676 else
7677 output_asm_insn ("call\t%a2", xops);
7679 #if TARGET_MACHO
7680 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7681 is what will be referenced by the Mach-O PIC subsystem. */
7682 if (!label)
7683 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7684 #endif
7686 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7687 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7689 if (flag_pic)
7690 output_asm_insn ("pop%z0\t%0", xops);
7692 else
7694 char name[32];
7695 get_pc_thunk_name (name, REGNO (dest));
7696 pic_labels_used |= 1 << REGNO (dest);
7698 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7699 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7700 output_asm_insn ("call\t%X2", xops);
7701 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7702 is what will be referenced by the Mach-O PIC subsystem. */
7703 #if TARGET_MACHO
7704 if (!label)
7705 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7706 else
7707 targetm.asm_out.internal_label (asm_out_file, "L",
7708 CODE_LABEL_NUMBER (label));
7709 #endif
7712 if (TARGET_MACHO)
7713 return "";
7715 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7716 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7717 else
7718 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7720 return "";
7723 /* Generate a "push" pattern for input ARG.  */
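/* The push is represented as a store to memory at the pre-decremented stack
   pointer; while the CFA is still the stack pointer, each push also grows
   the CFA offset by one word.  */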
7725 static rtx
7726 gen_push (rtx arg)
7728 if (ix86_cfa_state->reg == stack_pointer_rtx)
7729 ix86_cfa_state->offset += UNITS_PER_WORD;
7731 return gen_rtx_SET (VOIDmode,
7732 gen_rtx_MEM (Pmode,
7733 gen_rtx_PRE_DEC (Pmode,
7734 stack_pointer_rtx)),
7735 arg);
7738 /* Return >= 0 if there is an unused call-clobbered register available
7739 for the entire function. */
7741 static unsigned int
7742 ix86_select_alt_pic_regnum (void)
7744 if (current_function_is_leaf && !crtl->profile
7745 && !ix86_current_function_calls_tls_descriptor)
7747 int i, drap;
7748 /* Can't use the same register for both PIC and DRAP. */
7749 if (crtl->drap_reg)
7750 drap = REGNO (crtl->drap_reg);
7751 else
7752 drap = -1;
7753 for (i = 2; i >= 0; --i)
7754 if (i != drap && !df_regs_ever_live_p (i))
7755 return i;
7758 return INVALID_REGNUM;
7761 /* Return 1 if we need to save REGNO. */
7762 static int
7763 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7765 if (pic_offset_table_rtx
7766 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7767 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7768 || crtl->profile
7769 || crtl->calls_eh_return
7770 || crtl->uses_const_pool))
7772 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7773 return 0;
7774 return 1;
7777 if (crtl->calls_eh_return && maybe_eh_return)
7779 unsigned i;
7780 for (i = 0; ; i++)
7782 unsigned test = EH_RETURN_DATA_REGNO (i);
7783 if (test == INVALID_REGNUM)
7784 break;
7785 if (test == regno)
7786 return 1;
7790 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7791 return 1;
7793 return (df_regs_ever_live_p (regno)
7794 && !call_used_regs[regno]
7795 && !fixed_regs[regno]
7796 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7799 /* Return number of saved general purpose registers.  */
7801 static int
7802 ix86_nsaved_regs (void)
7804 int nregs = 0;
7805 int regno;
7807 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7808 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7809 nregs ++;
7810 return nregs;
7813 /* Return number of saved SSE registers.  */
7815 static int
7816 ix86_nsaved_sseregs (void)
7818 int nregs = 0;
7819 int regno;
7821 if (ix86_cfun_abi () != MS_ABI)
7822 return 0;
7823 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7824 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7825 nregs ++;
7826 return nregs;
7829 /* Given FROM and TO register numbers, say whether this elimination is
7830 allowed. If stack alignment is needed, we can only replace argument
7831 pointer with hard frame pointer, or replace frame pointer with stack
7832 pointer. Otherwise, frame pointer elimination is automatically
7833 handled and all other eliminations are valid. */
7835 static bool
7836 ix86_can_eliminate (const int from, const int to)
7838 if (stack_realign_fp)
7839 return ((from == ARG_POINTER_REGNUM
7840 && to == HARD_FRAME_POINTER_REGNUM)
7841 || (from == FRAME_POINTER_REGNUM
7842 && to == STACK_POINTER_REGNUM));
7843 else
7844 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7847 /* Return the offset between two registers, one to be eliminated, and the other
7848 its replacement, at the start of a routine. */
7850 HOST_WIDE_INT
7851 ix86_initial_elimination_offset (int from, int to)
7853 struct ix86_frame frame;
7854 ix86_compute_frame_layout (&frame);
7856 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7857 return frame.hard_frame_pointer_offset;
7858 else if (from == FRAME_POINTER_REGNUM
7859 && to == HARD_FRAME_POINTER_REGNUM)
7860 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7861 else
7863 gcc_assert (to == STACK_POINTER_REGNUM);
7865 if (from == ARG_POINTER_REGNUM)
7866 return frame.stack_pointer_offset;
7868 gcc_assert (from == FRAME_POINTER_REGNUM);
7869 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7873 /* In a dynamically-aligned function, we can't know the offset from
7874 stack pointer to frame pointer, so we must ensure that setjmp
7875 eliminates fp against the hard fp (%ebp) rather than trying to
7876 index from %esp up to the top of the frame across a gap that is
7877 of unknown (at compile-time) size. */
7878 static rtx
7879 ix86_builtin_setjmp_frame_value (void)
7881 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7884 /* Fill structure ix86_frame describing the frame of the currently compiled function.  */
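/* The layout computed below, from the CFA (the stack pointer at function
   entry) downwards: return address, optional pushed static chain, optional
   saved frame pointer (hard_frame_pointer_offset), general-purpose register
   save area, padding0 plus SSE register save area, va_arg register save
   area, padding1 (frame_pointer_offset), local variables, outgoing argument
   area, padding2 (stack_pointer_offset).  */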
7886 static void
7887 ix86_compute_frame_layout (struct ix86_frame *frame)
7889 unsigned int stack_alignment_needed;
7890 HOST_WIDE_INT offset;
7891 unsigned int preferred_alignment;
7892 HOST_WIDE_INT size = get_frame_size ();
7894 frame->nregs = ix86_nsaved_regs ();
7895 frame->nsseregs = ix86_nsaved_sseregs ();
7897 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7898 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7900 /* The MS ABI seems to require stack alignment to always be 16 except for
7901 function prologues.  */
7902 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7904 preferred_alignment = 16;
7905 stack_alignment_needed = 16;
7906 crtl->preferred_stack_boundary = 128;
7907 crtl->stack_alignment_needed = 128;
7910 gcc_assert (!size || stack_alignment_needed);
7911 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7912 gcc_assert (preferred_alignment <= stack_alignment_needed);
7914 /* During reload iterations the number of registers saved can change.
7915 Recompute the value as needed.  Do not recompute when the number of
7916 registers didn't change, as reload does multiple calls to the function
7917 and does not expect the decision to change within a single iteration.  */
7918 if (!optimize_function_for_size_p (cfun)
7919 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7921 int count = frame->nregs;
7923 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7924 /* The fast prologue uses move instead of push to save registers.  This
7925 is significantly longer, but also executes faster as modern hardware
7926 can execute the moves in parallel, but can't do that for push/pop.
7928 Be careful about choosing which prologue to emit: when the function takes
7929 many instructions to execute we may use the slow version, as well as when
7930 the function is known to be outside a hot spot (this is known with
7931 profile feedback only).  Weight the size of the function by the number of
7932 registers to save, as it is cheap to use one or two push instructions but
7933 very slow to use many of them.  */
7934 if (count)
7935 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7936 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7937 || (flag_branch_probabilities
7938 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7939 cfun->machine->use_fast_prologue_epilogue = false;
7940 else
7941 cfun->machine->use_fast_prologue_epilogue
7942 = !expensive_function_p (count);
7944 if (TARGET_PROLOGUE_USING_MOVE
7945 && cfun->machine->use_fast_prologue_epilogue)
7946 frame->save_regs_using_mov = true;
7947 else
7948 frame->save_regs_using_mov = false;
7950 /* Skip return address. */
7951 offset = UNITS_PER_WORD;
7953 /* Skip pushed static chain. */
7954 if (ix86_static_chain_on_stack)
7955 offset += UNITS_PER_WORD;
7957 /* Skip saved base pointer. */
7958 if (frame_pointer_needed)
7959 offset += UNITS_PER_WORD;
7961 frame->hard_frame_pointer_offset = offset;
7963 /* Round OFFSET up to the needed alignment, because the realigned frame
7964 starts here.  */
7965 if (stack_realign_fp)
7966 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
7968 /* Register save area */
7969 offset += frame->nregs * UNITS_PER_WORD;
7971 /* Align SSE reg save area. */
7972 if (frame->nsseregs)
7973 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
7974 else
7975 frame->padding0 = 0;
7977 /* SSE register save area. */
7978 offset += frame->padding0 + frame->nsseregs * 16;
7980 /* Va-arg area */
7981 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
7982 offset += frame->va_arg_size;
7984 /* Align start of frame for local function. */
7985 frame->padding1 = ((offset + stack_alignment_needed - 1)
7986 & -stack_alignment_needed) - offset;
7988 offset += frame->padding1;
7990 /* Frame pointer points here. */
7991 frame->frame_pointer_offset = offset;
7993 offset += size;
7995 /* Add the outgoing arguments area.  Can be skipped if we eliminated
7996 all the function calls as dead code.
7997 Skipping is, however, impossible when the function calls alloca, as the
7998 alloca expander assumes that the last crtl->outgoing_args_size bytes
7999 of the stack frame are unused.  */
8000 if (ACCUMULATE_OUTGOING_ARGS
8001 && (!current_function_is_leaf || cfun->calls_alloca
8002 || ix86_current_function_calls_tls_descriptor))
8004 offset += crtl->outgoing_args_size;
8005 frame->outgoing_arguments_size = crtl->outgoing_args_size;
8007 else
8008 frame->outgoing_arguments_size = 0;
8010 /* Align stack boundary. Only needed if we're calling another function
8011 or using alloca. */
8012 if (!current_function_is_leaf || cfun->calls_alloca
8013 || ix86_current_function_calls_tls_descriptor)
8014 frame->padding2 = ((offset + preferred_alignment - 1)
8015 & -preferred_alignment) - offset;
8016 else
8017 frame->padding2 = 0;
8019 offset += frame->padding2;
8021 /* We've reached end of stack frame. */
8022 frame->stack_pointer_offset = offset;
8024 /* Size prologue needs to allocate. */
8025 frame->to_allocate =
8026 (size + frame->padding1 + frame->padding2
8027 + frame->outgoing_arguments_size + frame->va_arg_size);
8029 if ((!frame->to_allocate && frame->nregs <= 1)
8030 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
8031 frame->save_regs_using_mov = false;
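/* The red zone is the 128-byte area below the stack pointer that the x86-64
   SysV ABI guarantees signal and interrupt handlers will not clobber, so a
   leaf function that never moves the stack pointer may keep data and
   register saves there without allocating them explicitly.  */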
8033 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8034 && current_function_sp_is_unchanging
8035 && current_function_is_leaf
8036 && !ix86_current_function_calls_tls_descriptor)
8038 frame->red_zone_size = frame->to_allocate;
8039 if (frame->save_regs_using_mov)
8040 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
8041 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
8042 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
8044 else
8045 frame->red_zone_size = 0;
8046 frame->to_allocate -= frame->red_zone_size;
8047 frame->stack_pointer_offset -= frame->red_zone_size;
8050 /* Emit code to save registers in the prologue. */
8052 static void
8053 ix86_emit_save_regs (void)
8055 unsigned int regno;
8056 rtx insn;
8058 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
8059 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8061 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
8062 RTX_FRAME_RELATED_P (insn) = 1;
8066 /* Emit code to save registers using MOV insns.  The first register
8067 is saved at POINTER + OFFSET.  */
8068 static void
8069 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8071 unsigned int regno;
8072 rtx insn;
8074 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8075 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8077 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
8078 Pmode, offset),
8079 gen_rtx_REG (Pmode, regno));
8080 RTX_FRAME_RELATED_P (insn) = 1;
8081 offset += UNITS_PER_WORD;
8085 /* Emit code to save SSE registers using MOV insns.  The first register
8086 is saved at POINTER + OFFSET.  */
8087 static void
8088 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8090 unsigned int regno;
8091 rtx insn;
8092 rtx mem;
8094 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8095 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8097 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8098 set_mem_align (mem, 128);
8099 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8100 RTX_FRAME_RELATED_P (insn) = 1;
8101 offset += 16;
8105 static GTY(()) rtx queued_cfa_restores;
8107 /* Add a REG_CFA_RESTORE note for REG to INSN, or queue it until the next
8108 stack manipulation insn.  Don't add it if the previously
8109 saved value will be left untouched within the stack red zone until return,
8110 as unwinders can find the same value in the register and
8111 on the stack.  */
8113 static void
8114 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8116 if (TARGET_RED_ZONE
8117 && !TARGET_64BIT_MS_ABI
8118 && red_offset + RED_ZONE_SIZE >= 0
8119 && crtl->args.pops_args < 65536)
8120 return;
8122 if (insn)
8124 add_reg_note (insn, REG_CFA_RESTORE, reg);
8125 RTX_FRAME_RELATED_P (insn) = 1;
8127 else
8128 queued_cfa_restores
8129 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8132 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8134 static void
8135 ix86_add_queued_cfa_restore_notes (rtx insn)
8137 rtx last;
8138 if (!queued_cfa_restores)
8139 return;
8140 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8142 XEXP (last, 1) = REG_NOTES (insn);
8143 REG_NOTES (insn) = queued_cfa_restores;
8144 queued_cfa_restores = NULL_RTX;
8145 RTX_FRAME_RELATED_P (insn) = 1;
8148 /* Expand prologue or epilogue stack adjustment.
8149 The pattern exists to put a dependency on all ebp-based memory accesses.
8150 STYLE should be negative if instructions should be marked as frame related,
8151 zero if the %r11 register is live and cannot be freely used, and positive
8152 otherwise.  */
8154 static void
8155 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8156 int style, bool set_cfa)
8158 rtx insn;
8160 if (! TARGET_64BIT)
8161 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8162 else if (x86_64_immediate_operand (offset, DImode))
8163 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8164 else
8166 rtx r11;
8167 /* r11 is used by indirect sibcall return as well, set before the
8168 epilogue and used after the epilogue. ATM indirect sibcall
8169 shouldn't be used together with huge frame sizes in one
8170 function because of the frame_size check in sibcall.c. */
8171 gcc_assert (style);
8172 r11 = gen_rtx_REG (DImode, R11_REG);
8173 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8174 if (style < 0)
8175 RTX_FRAME_RELATED_P (insn) = 1;
8176 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8177 offset));
8180 if (style >= 0)
8181 ix86_add_queued_cfa_restore_notes (insn);
8183 if (set_cfa)
8185 rtx r;
8187 gcc_assert (ix86_cfa_state->reg == src);
8188 ix86_cfa_state->offset += INTVAL (offset);
8189 ix86_cfa_state->reg = dest;
8191 r = gen_rtx_PLUS (Pmode, src, offset);
8192 r = gen_rtx_SET (VOIDmode, dest, r);
8193 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8194 RTX_FRAME_RELATED_P (insn) = 1;
8196 else if (style < 0)
8197 RTX_FRAME_RELATED_P (insn) = 1;
8200 /* Find an available register to be used as a dynamic realign argument
8201 pointer register.  Such a register will be written in the prologue and
8202 used at the beginning of the body, so it must not be
8203 1. a parameter passing register.
8204 2. the GOT pointer.
8205 We reuse the static-chain register if it is available.  Otherwise, we
8206 use DI for i386 and R13 for x86-64.  We chose R13 since it has a
8207 shorter encoding.
8209 Return: the regno of the chosen register.  */
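/* With the logic below this means R10 on x86-64 (R13 when a static chain or
   a pending tail call forbids a caller-saved register) and CX on i386 when
   it is free of parameter-passing duties (DI otherwise).  */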
8211 static unsigned int
8212 find_drap_reg (void)
8214 tree decl = cfun->decl;
8216 if (TARGET_64BIT)
8218 /* Use R13 for a nested function or a function that needs a static chain.
8219 Since a function with a tail call may use any caller-saved
8220 register in the epilogue, DRAP must not use a caller-saved
8221 register in such a case.  */
8222 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8223 return R13_REG;
8225 return R10_REG;
8227 else
8229 /* Use DI for a nested function or a function that needs a static chain.
8230 Since a function with a tail call may use any caller-saved
8231 register in the epilogue, DRAP must not use a caller-saved
8232 register in such a case.  */
8233 if (DECL_STATIC_CHAIN (decl) || crtl->tail_call_emit)
8234 return DI_REG;
8236 /* Reuse static chain register if it isn't used for parameter
8237 passing. */
8238 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8239 && !lookup_attribute ("fastcall",
8240 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8241 return CX_REG;
8242 else
8243 return DI_REG;
8247 /* Return minimum incoming stack alignment. */
8249 static unsigned int
8250 ix86_minimum_incoming_stack_boundary (bool sibcall)
8252 unsigned int incoming_stack_boundary;
8254 /* Prefer the one specified at command line. */
8255 if (ix86_user_incoming_stack_boundary)
8256 incoming_stack_boundary = ix86_user_incoming_stack_boundary;
8257 /* In 32-bit mode, use MIN_STACK_BOUNDARY for the incoming stack boundary
8258 if -mstackrealign is used, this is not the sibcall check, and the
8259 estimated stack alignment is 128 bits.  */
8260 else if (!sibcall
8261 && !TARGET_64BIT
8262 && ix86_force_align_arg_pointer
8263 && crtl->stack_alignment_estimated == 128)
8264 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8265 else
8266 incoming_stack_boundary = ix86_default_incoming_stack_boundary;
8268 /* Incoming stack alignment can be changed on individual functions
8269 via force_align_arg_pointer attribute. We use the smallest
8270 incoming stack boundary. */
8271 if (incoming_stack_boundary > MIN_STACK_BOUNDARY
8272 && lookup_attribute (ix86_force_align_arg_pointer_string,
8273 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8274 incoming_stack_boundary = MIN_STACK_BOUNDARY;
8276 /* The incoming stack frame has to be aligned at least at
8277 parm_stack_boundary. */
8278 if (incoming_stack_boundary < crtl->parm_stack_boundary)
8279 incoming_stack_boundary = crtl->parm_stack_boundary;
8281 /* Stack at entrance of main is aligned by runtime. We use the
8282 smallest incoming stack boundary. */
8283 if (incoming_stack_boundary > MAIN_STACK_BOUNDARY
8284 && DECL_NAME (current_function_decl)
8285 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8286 && DECL_FILE_SCOPE_P (current_function_decl))
8287 incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8289 return incoming_stack_boundary;
8292 /* Update incoming stack boundary and estimated stack alignment. */
8294 static void
8295 ix86_update_stack_boundary (void)
8297 ix86_incoming_stack_boundary
8298 = ix86_minimum_incoming_stack_boundary (false);
8300 /* x86_64 varargs need 16-byte stack alignment for the register save
8301 area.  */
8302 if (TARGET_64BIT
8303 && cfun->stdarg
8304 && crtl->stack_alignment_estimated < 128)
8305 crtl->stack_alignment_estimated = 128;
8308 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8309 needed or an rtx for DRAP otherwise. */
8311 static rtx
8312 ix86_get_drap_rtx (void)
8314 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8315 crtl->need_drap = true;
8317 if (stack_realign_drap)
8319 /* Assign DRAP to vDRAP and return vDRAP.  */
8320 unsigned int regno = find_drap_reg ();
8321 rtx drap_vreg;
8322 rtx arg_ptr;
8323 rtx seq, insn;
8325 arg_ptr = gen_rtx_REG (Pmode, regno);
8326 crtl->drap_reg = arg_ptr;
8328 start_sequence ();
8329 drap_vreg = copy_to_reg (arg_ptr);
8330 seq = get_insns ();
8331 end_sequence ();
8333 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8334 RTX_FRAME_RELATED_P (insn) = 1;
8335 return drap_vreg;
8337 else
8338 return NULL;
8341 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8343 static rtx
8344 ix86_internal_arg_pointer (void)
8346 return virtual_incoming_args_rtx;
8349 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8350 to be generated in correct form. */
8351 static void
8352 ix86_finalize_stack_realign_flags (void)
8354 /* Check if stack realignment is really needed after reload, and
8355 store the result in cfun.  */
8356 unsigned int incoming_stack_boundary
8357 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8358 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8359 unsigned int stack_realign = (incoming_stack_boundary
8360 < (current_function_is_leaf
8361 ? crtl->max_used_stack_slot_alignment
8362 : crtl->stack_alignment_needed));
8364 if (crtl->stack_realign_finalized)
8366 /* After stack_realign_needed is finalized, we can no longer
8367 change it.  */
8368 gcc_assert (crtl->stack_realign_needed == stack_realign);
8370 else
8372 crtl->stack_realign_needed = stack_realign;
8373 crtl->stack_realign_finalized = true;
8377 /* Expand the prologue into a bunch of separate insns. */
8379 void
8380 ix86_expand_prologue (void)
8382 rtx insn;
8383 bool pic_reg_used;
8384 struct ix86_frame frame;
8385 HOST_WIDE_INT allocate;
8386 int gen_frame_pointer = frame_pointer_needed;
8388 ix86_finalize_stack_realign_flags ();
8390 /* DRAP should not coexist with stack_realign_fp */
8391 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8393 /* Initialize CFA state for before the prologue. */
8394 ix86_cfa_state->reg = stack_pointer_rtx;
8395 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8397 ix86_compute_frame_layout (&frame);
8399 if (ix86_function_ms_hook_prologue (current_function_decl))
8401 rtx push, mov;
8403 /* Make sure the function starts with
8404 8b ff movl.s %edi,%edi
8405 55 push %ebp
8406 8b ec movl.s %esp,%ebp
8408 This matches the hookable function prologue in Win32 API
8409 functions in Microsoft Windows XP Service Pack 2 and newer.
8410 Wine uses this to enable Windows apps to hook the Win32 API
8411 functions provided by Wine. */
8412 insn = emit_insn (gen_vswapmov (gen_rtx_REG (SImode, DI_REG),
8413 gen_rtx_REG (SImode, DI_REG)));
8414 push = emit_insn (gen_push (hard_frame_pointer_rtx));
8415 mov = emit_insn (gen_vswapmov (hard_frame_pointer_rtx,
8416 stack_pointer_rtx));
8418 if (frame_pointer_needed && !(crtl->drap_reg
8419 && crtl->stack_realign_needed))
8421 /* The push %ebp and movl.s %esp, %ebp already set up
8422 the frame pointer. No need to do this again. */
8423 gen_frame_pointer = 0;
8424 RTX_FRAME_RELATED_P (push) = 1;
8425 RTX_FRAME_RELATED_P (mov) = 1;
8426 if (ix86_cfa_state->reg == stack_pointer_rtx)
8427 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8429 else
8430 /* If the frame pointer is not needed, pop %ebp again. This
8431 could be optimized for cases where ebp needs to be backed up
8432 for some other reason. If stack realignment is needed, pop
8433 the base pointer again, align the stack, and later regenerate
8434 the frame pointer setup. The frame pointer generated by the
8435 hook prologue is not aligned, so it can't be used. */
8436 insn = emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
8439 /* The first insn of a function that accepts its static chain on the
8440 stack is to push the register that would be filled in by a direct
8441 call. This insn will be skipped by the trampoline. */
8442 if (ix86_static_chain_on_stack)
8444 rtx t;
8446 insn = emit_insn (gen_push (ix86_static_chain (cfun->decl, false)));
8447 emit_insn (gen_blockage ());
8449 /* We don't want to interpret this push insn as a register save,
8450 only as a stack adjustment. The real copy of the register as
8451 a save will be done later, if needed. */
8452 t = plus_constant (stack_pointer_rtx, -UNITS_PER_WORD);
8453 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8454 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8455 RTX_FRAME_RELATED_P (insn) = 1;
8458 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8459 DRAP is needed and stack realignment is really needed after reload.  */
8460 if (crtl->drap_reg && crtl->stack_realign_needed)
8462 rtx x, y;
8463 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8464 int param_ptr_offset = UNITS_PER_WORD;
8466 if (ix86_static_chain_on_stack)
8467 param_ptr_offset += UNITS_PER_WORD;
8468 if (!call_used_regs[REGNO (crtl->drap_reg)])
8469 param_ptr_offset += UNITS_PER_WORD;
8471 gcc_assert (stack_realign_drap);
8473 /* Grab the argument pointer. */
8474 x = plus_constant (stack_pointer_rtx, param_ptr_offset);
8475 y = crtl->drap_reg;
8477 /* Only need to push the parameter pointer reg if it is a
8478 caller-saved reg.  */
8479 if (!call_used_regs[REGNO (crtl->drap_reg)])
8481 /* Push arg pointer reg */
8482 insn = emit_insn (gen_push (y));
8483 RTX_FRAME_RELATED_P (insn) = 1;
8486 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8487 RTX_FRAME_RELATED_P (insn) = 1;
8488 ix86_cfa_state->reg = crtl->drap_reg;
8490 /* Align the stack. */
8491 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8492 stack_pointer_rtx,
8493 GEN_INT (-align_bytes)));
8494 RTX_FRAME_RELATED_P (insn) = 1;
8496 /* Replicate the return address on the stack so that return
8497 address can be reached via (argp - 1) slot. This is needed
8498 to implement macro RETURN_ADDR_RTX and intrinsic function
8499 expand_builtin_return_addr etc. */
8500 x = crtl->drap_reg;
8501 x = gen_frame_mem (Pmode,
8502 plus_constant (x, -UNITS_PER_WORD));
8503 insn = emit_insn (gen_push (x));
8504 RTX_FRAME_RELATED_P (insn) = 1;
8507 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8508 slower on all targets. Also sdb doesn't like it. */
8510 if (gen_frame_pointer)
8512 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8513 RTX_FRAME_RELATED_P (insn) = 1;
8515 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8516 RTX_FRAME_RELATED_P (insn) = 1;
8518 if (ix86_cfa_state->reg == stack_pointer_rtx)
8519 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8522 if (stack_realign_fp)
8524 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8525 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8527 /* Align the stack. */
8528 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8529 stack_pointer_rtx,
8530 GEN_INT (-align_bytes)));
8531 RTX_FRAME_RELATED_P (insn) = 1;
8534 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8536 if (!frame.save_regs_using_mov)
8537 ix86_emit_save_regs ();
8538 else
8539 allocate += frame.nregs * UNITS_PER_WORD;
8541 /* When using the red zone we may start register saving before allocating
8542 the stack frame, saving one cycle of the prologue.  However, we
8543 avoid doing this if we are going to have to probe the stack, since
8544 at least on x86_64 the stack probe can turn into a call that clobbers
8545 a red zone location.  */
8546 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8547 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8548 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8549 && !crtl->stack_realign_needed)
8550 ? hard_frame_pointer_rtx
8551 : stack_pointer_rtx,
8552 -frame.nregs * UNITS_PER_WORD);
8554 if (allocate == 0)
8556 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8557 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8558 GEN_INT (-allocate), -1,
8559 ix86_cfa_state->reg == stack_pointer_rtx);
8560 else
8562 /* Only valid for Win32. */
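/* The requested size is passed to the stack allocation/probing worker in
   %eax; if %eax is live at function start, it is pushed beforehand and
   reloaded from its new stack slot once the allocation is done.  */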
8563 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8564 bool eax_live;
8565 rtx t;
8567 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
8569 if (cfun->machine->call_abi == MS_ABI)
8570 eax_live = false;
8571 else
8572 eax_live = ix86_eax_live_at_start_p ();
8574 if (eax_live)
8576 emit_insn (gen_push (eax));
8577 allocate -= UNITS_PER_WORD;
8580 emit_move_insn (eax, GEN_INT (allocate));
8582 if (TARGET_64BIT)
8583 insn = gen_allocate_stack_worker_64 (eax, eax);
8584 else
8585 insn = gen_allocate_stack_worker_32 (eax, eax);
8586 insn = emit_insn (insn);
8588 if (ix86_cfa_state->reg == stack_pointer_rtx)
8590 ix86_cfa_state->offset += allocate;
8591 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8592 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8593 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8594 RTX_FRAME_RELATED_P (insn) = 1;
8597 if (eax_live)
8599 if (frame_pointer_needed)
8600 t = plus_constant (hard_frame_pointer_rtx,
8601 allocate
8602 - frame.to_allocate
8603 - frame.nregs * UNITS_PER_WORD);
8604 else
8605 t = plus_constant (stack_pointer_rtx, allocate);
8606 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8610 if (frame.save_regs_using_mov
8611 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8612 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8614 if (!frame_pointer_needed
8615 || !(frame.to_allocate + frame.padding0)
8616 || crtl->stack_realign_needed)
8617 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8618 frame.to_allocate
8619 + frame.nsseregs * 16 + frame.padding0);
8620 else
8621 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8622 -frame.nregs * UNITS_PER_WORD);
8624 if (!frame_pointer_needed
8625 || !(frame.to_allocate + frame.padding0)
8626 || crtl->stack_realign_needed)
8627 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8628 frame.to_allocate);
8629 else
8630 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8631 - frame.nregs * UNITS_PER_WORD
8632 - frame.nsseregs * 16
8633 - frame.padding0);
8635 pic_reg_used = false;
8636 if (pic_offset_table_rtx
8637 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8638 || crtl->profile))
8640 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8642 if (alt_pic_reg_used != INVALID_REGNUM)
8643 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8645 pic_reg_used = true;
8648 if (pic_reg_used)
8650 if (TARGET_64BIT)
8652 if (ix86_cmodel == CM_LARGE_PIC)
8654 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8655 rtx label = gen_label_rtx ();
8656 emit_label (label);
8657 LABEL_PRESERVE_P (label) = 1;
8658 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8659 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8660 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8661 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8662 pic_offset_table_rtx, tmp_reg));
8664 else
8665 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8667 else
8668 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8671 /* In the pic_reg_used case, make sure that the GOT load isn't deleted
8672 when mcount needs it.  A blockage to avoid call movement across the mcount
8673 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8674 note.  */
8675 if (crtl->profile && pic_reg_used)
8676 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8678 if (crtl->drap_reg && !crtl->stack_realign_needed)
8680 /* vDRAP is set up, but after reload it turns out stack realignment
8681 isn't necessary; here we emit the prologue to set up DRAP
8682 without the stack realignment adjustment.  */
8683 rtx x;
8684 int drap_bp_offset = UNITS_PER_WORD * 2;
8686 if (ix86_static_chain_on_stack)
8687 drap_bp_offset += UNITS_PER_WORD;
8688 x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8689 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8692 /* Prevent instructions from being scheduled into register save push
8693 sequence when access to the redzone area is done through frame pointer.
8694 The offset between the frame pointer and the stack pointer is calculated
8695 relative to the value of the stack pointer at the end of the function
8696 prologue, and moving instructions that access redzone area via frame
8697 pointer inside push sequence violates this assumption. */
8698 if (frame_pointer_needed && frame.red_zone_size)
8699 emit_insn (gen_memory_blockage ());
8701 /* Emit cld instruction if stringops are used in the function. */
8702 if (TARGET_CLD && ix86_current_function_needs_cld)
8703 emit_insn (gen_cld ());
8706 /* Emit code to restore REG using a POP insn. */
8708 static void
8709 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8711 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8713 if (ix86_cfa_state->reg == crtl->drap_reg
8714 && REGNO (reg) == REGNO (crtl->drap_reg))
8716 /* Previously we'd represented the CFA as an expression
8717 like *(%ebp - 8). We've just popped that value from
8718 the stack, which means we need to reset the CFA to
8719 the drap register. This will remain until we restore
8720 the stack pointer. */
8721 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8722 RTX_FRAME_RELATED_P (insn) = 1;
8723 return;
8726 if (ix86_cfa_state->reg == stack_pointer_rtx)
8728 ix86_cfa_state->offset -= UNITS_PER_WORD;
8729 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8730 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8731 RTX_FRAME_RELATED_P (insn) = 1;
8734 /* When the frame pointer is the CFA, and we pop it, we are
8735 swapping back to the stack pointer as the CFA. This happens
8736 for stack frames that don't allocate other data, so we assume
8737 the stack pointer is now pointing at the return address, i.e.
8738 the function entry state, which makes the offset be 1 word. */
8739 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8740 && reg == hard_frame_pointer_rtx)
8742 ix86_cfa_state->reg = stack_pointer_rtx;
8743 ix86_cfa_state->offset -= UNITS_PER_WORD;
8745 add_reg_note (insn, REG_CFA_DEF_CFA,
8746 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8747 GEN_INT (ix86_cfa_state->offset)));
8748 RTX_FRAME_RELATED_P (insn) = 1;
8751 ix86_add_cfa_restore_note (insn, reg, red_offset);
8754 /* Emit code to restore saved registers using POP insns. */
8756 static void
8757 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8759 int regno;
8761 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8762 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8764 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8765 red_offset);
8766 red_offset += UNITS_PER_WORD;
8770 /* Emit code and notes for the LEAVE instruction. */
8772 static void
8773 ix86_emit_leave (HOST_WIDE_INT red_offset)
8775 rtx insn = emit_insn (ix86_gen_leave ());
8777 ix86_add_queued_cfa_restore_notes (insn);
8779 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8781 ix86_cfa_state->reg = stack_pointer_rtx;
8782 ix86_cfa_state->offset -= UNITS_PER_WORD;
8784 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8785 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8786 RTX_FRAME_RELATED_P (insn) = 1;
8787 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8791 /* Emit code to restore saved registers using MOV insns. First register
8792 is restored from POINTER + OFFSET. */
8793 static void
8794 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8795 HOST_WIDE_INT red_offset,
8796 int maybe_eh_return)
8798 unsigned int regno;
8799 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8800 rtx insn;
8802 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8803 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8805 rtx reg = gen_rtx_REG (Pmode, regno);
8807 /* Ensure that adjust_address won't be forced to produce pointer
8808 out of range allowed by x86-64 instruction set. */
8809 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8811 rtx r11;
8813 r11 = gen_rtx_REG (DImode, R11_REG);
8814 emit_move_insn (r11, GEN_INT (offset));
8815 emit_insn (gen_adddi3 (r11, r11, pointer));
8816 base_address = gen_rtx_MEM (Pmode, r11);
8817 offset = 0;
8819 insn = emit_move_insn (reg,
8820 adjust_address (base_address, Pmode, offset));
8821 offset += UNITS_PER_WORD;
8823 if (ix86_cfa_state->reg == crtl->drap_reg
8824 && regno == REGNO (crtl->drap_reg))
8826 /* Previously we'd represented the CFA as an expression
8827 like *(%ebp - 8). We've just popped that value from
8828 the stack, which means we need to reset the CFA to
8829 the drap register. This will remain until we restore
8830 the stack pointer. */
8831 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8832 RTX_FRAME_RELATED_P (insn) = 1;
8834 else
8835 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8837 red_offset += UNITS_PER_WORD;
8841 /* Emit code to restore saved registers using MOV insns. First register
8842 is restored from POINTER + OFFSET. */
8843 static void
8844 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8845 HOST_WIDE_INT red_offset,
8846 int maybe_eh_return)
8848 int regno;
8849 rtx base_address = gen_rtx_MEM (TImode, pointer);
8850 rtx mem;
8852 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8853 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8855 rtx reg = gen_rtx_REG (TImode, regno);
8857 /* Ensure that adjust_address won't be forced to produce pointer
8858 out of range allowed by x86-64 instruction set. */
8859 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8861 rtx r11;
8863 r11 = gen_rtx_REG (DImode, R11_REG);
8864 emit_move_insn (r11, GEN_INT (offset));
8865 emit_insn (gen_adddi3 (r11, r11, pointer));
8866 base_address = gen_rtx_MEM (TImode, r11);
8867 offset = 0;
8869 mem = adjust_address (base_address, TImode, offset);
8870 set_mem_align (mem, 128);
8871 emit_move_insn (reg, mem);
8872 offset += 16;
8874 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8876 red_offset += 16;
8880 /* Restore function stack, frame, and registers. */
8882 void
8883 ix86_expand_epilogue (int style)
8885 int sp_valid;
8886 struct ix86_frame frame;
8887 HOST_WIDE_INT offset, red_offset;
8888 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
8889 bool using_drap;
8891 ix86_finalize_stack_realign_flags ();
8893 /* When stack is realigned, SP must be valid. */
8894 sp_valid = (!frame_pointer_needed
8895 || current_function_sp_is_unchanging
8896 || stack_realign_fp);
8898 ix86_compute_frame_layout (&frame);
8900 /* See the comment about red zone and frame
8901 pointer usage in ix86_expand_prologue. */
8902 if (frame_pointer_needed && frame.red_zone_size)
8903 emit_insn (gen_memory_blockage ());
8905 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
8906 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
8908 /* Calculate start of saved registers relative to ebp. Special care
8909 must be taken for the normal return case of a function using
8910 eh_return: the eax and edx registers are marked as saved, but not
8911 restored along this path. */
8912 offset = frame.nregs;
8913 if (crtl->calls_eh_return && style != 2)
8914 offset -= 2;
8915 offset *= -UNITS_PER_WORD;
8916 offset -= frame.nsseregs * 16 + frame.padding0;
8918 /* Calculate the start of the saved registers relative to esp at entry to
8919 the function.  When realigning the stack, this needs to be the most
8920 negative value possible at runtime.  */
8921 red_offset = offset;
8922 if (using_drap)
8923 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8924 + UNITS_PER_WORD;
8925 else if (stack_realign_fp)
8926 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8927 - UNITS_PER_WORD;
8928 if (ix86_static_chain_on_stack)
8929 red_offset -= UNITS_PER_WORD;
8930 if (frame_pointer_needed)
8931 red_offset -= UNITS_PER_WORD;
8933 /* If we're only restoring one register and sp is not valid, then
8934 use a move instruction to restore the register, since it's
8935 less work than reloading sp and popping the register.
8937 The default code results in a stack adjustment using an add/lea
8938 instruction, while this code results in a LEAVE instruction (or discrete
8939 equivalent), so it is profitable in some other cases as well, especially
8940 when there are no registers to restore.  We also use this code when
8941 TARGET_USE_LEAVE is set and there is exactly one register to pop.  This
8942 heuristic may need some tuning in the future.  */
8943 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
8944 || (TARGET_EPILOGUE_USING_MOVE
8945 && cfun->machine->use_fast_prologue_epilogue
8946 && ((frame.nregs + frame.nsseregs) > 1
8947 || (frame.to_allocate + frame.padding0) != 0))
8948 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
8949 && (frame.to_allocate + frame.padding0) != 0)
8950 || (frame_pointer_needed && TARGET_USE_LEAVE
8951 && cfun->machine->use_fast_prologue_epilogue
8952 && (frame.nregs + frame.nsseregs) == 1)
8953 || crtl->calls_eh_return)
8955 /* Restore registers.  We can use ebp or esp to address the memory
8956 locations.  If both are available, default to ebp, since offsets
8957 are known to be small.  The only exception is esp pointing directly
8958 to the end of the block of saved registers, where we may simplify
8959 the addressing mode.
8961 If we are realigning the stack with bp and sp, the register restores
8962 cannot be addressed by bp; sp must be used instead.  */
8964 if (!frame_pointer_needed
8965 || (sp_valid && !(frame.to_allocate + frame.padding0))
8966 || stack_realign_fp)
8968 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8969 frame.to_allocate, red_offset,
8970 style == 2);
8971 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
8972 frame.to_allocate
8973 + frame.nsseregs * 16
8974 + frame.padding0,
8975 red_offset
8976 + frame.nsseregs * 16
8977 + frame.padding0, style == 2);
8979 else
8981 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
8982 offset, red_offset,
8983 style == 2);
8984 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
8985 offset
8986 + frame.nsseregs * 16
8987 + frame.padding0,
8988 red_offset
8989 + frame.nsseregs * 16
8990 + frame.padding0, style == 2);
8993 red_offset -= offset;
8995 /* eh_return epilogues need %ecx added to the stack pointer. */
8996 if (style == 2)
8998 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
9000 /* Stack align doesn't work with eh_return. */
9001 gcc_assert (!crtl->stack_realign_needed);
9003 /* Neither do regparm nested functions.  */
9003 gcc_assert (!ix86_static_chain_on_stack);
9005 if (frame_pointer_needed)
9007 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
9008 tmp = plus_constant (tmp, UNITS_PER_WORD);
9009 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
9011 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
9012 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
9014 /* Note that we use SA as a temporary CFA, as the return
9015 address is at the proper place relative to it. We
9016 pretend this happens at the FP restore insn because
9017 prior to this insn the FP would be stored at the wrong
9018 offset relative to SA, and after this insn we have no
9019 other reasonable register to use for the CFA. We don't
9020 bother resetting the CFA to the SP for the duration of
9021 the return insn. */
9022 add_reg_note (tmp, REG_CFA_DEF_CFA,
9023 plus_constant (sa, UNITS_PER_WORD));
9024 ix86_add_queued_cfa_restore_notes (tmp);
9025 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
9026 RTX_FRAME_RELATED_P (tmp) = 1;
9027 ix86_cfa_state->reg = sa;
9028 ix86_cfa_state->offset = UNITS_PER_WORD;
9030 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
9031 const0_rtx, style, false);
9033 else
9035 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
9036 tmp = plus_constant (tmp, (frame.to_allocate
9037 + frame.nregs * UNITS_PER_WORD
9038 + frame.nsseregs * 16
9039 + frame.padding0));
9040 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
9041 ix86_add_queued_cfa_restore_notes (tmp);
9043 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9044 if (ix86_cfa_state->offset != UNITS_PER_WORD)
9046 ix86_cfa_state->offset = UNITS_PER_WORD;
9047 add_reg_note (tmp, REG_CFA_DEF_CFA,
9048 plus_constant (stack_pointer_rtx,
9049 UNITS_PER_WORD));
9050 RTX_FRAME_RELATED_P (tmp) = 1;
9054 else if (!frame_pointer_needed)
9055 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9056 GEN_INT (frame.to_allocate
9057 + frame.nregs * UNITS_PER_WORD
9058 + frame.nsseregs * 16
9059 + frame.padding0),
9060 style, !using_drap);
9061 /* If not an i386, mov & pop is faster than "leave". */
9062 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
9063 || !cfun->machine->use_fast_prologue_epilogue)
9064 ix86_emit_leave (red_offset);
9065 else
9067 pro_epilogue_adjust_stack (stack_pointer_rtx,
9068 hard_frame_pointer_rtx,
9069 const0_rtx, style, !using_drap);
9071 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
9074 else
9076 /* The first step is to deallocate the stack frame so that we can
9077 pop the registers.
9079 If we realign the stack with the frame pointer, the stack pointer
9080 can't be recovered via lea $offset(%bp), %sp, because there is a
9081 padding area between bp and sp due to the realignment.
9082 "add $to_allocate, %sp" must be used instead. */
9083 if (!sp_valid)
9085 gcc_assert (frame_pointer_needed);
9086 gcc_assert (!stack_realign_fp);
9087 pro_epilogue_adjust_stack (stack_pointer_rtx,
9088 hard_frame_pointer_rtx,
9089 GEN_INT (offset), style, false);
9090 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9091 0, red_offset,
9092 style == 2);
9093 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9094 GEN_INT (frame.nsseregs * 16
9095 + frame.padding0),
9096 style, false);
9098 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
9100 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
9101 frame.to_allocate, red_offset,
9102 style == 2);
9103 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9104 GEN_INT (frame.to_allocate
9105 + frame.nsseregs * 16
9106 + frame.padding0), style,
9107 !using_drap && !frame_pointer_needed);
9110 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
9111 + frame.padding0);
9112 red_offset -= offset;
9114 if (frame_pointer_needed)
9116 /* Leave results in shorter dependency chains on CPUs that are
9117 able to grok it fast. */
9118 if (TARGET_USE_LEAVE)
9119 ix86_emit_leave (red_offset);
9120 else
9122 /* If stack realignment really happened, restoring the stack
9123 pointer from the hard frame pointer is a must when not using
9124 leave. */
9125 if (stack_realign_fp)
9126 pro_epilogue_adjust_stack (stack_pointer_rtx,
9127 hard_frame_pointer_rtx,
9128 const0_rtx, style, !using_drap);
9129 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
9130 red_offset);
9135 if (using_drap)
9137 int param_ptr_offset = UNITS_PER_WORD;
9138 rtx insn;
9140 gcc_assert (stack_realign_drap);
9142 if (ix86_static_chain_on_stack)
9143 param_ptr_offset += UNITS_PER_WORD;
9144 if (!call_used_regs[REGNO (crtl->drap_reg)])
9145 param_ptr_offset += UNITS_PER_WORD;
9147 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
9148 crtl->drap_reg,
9149 GEN_INT (-param_ptr_offset)));
9151 ix86_cfa_state->reg = stack_pointer_rtx;
9152 ix86_cfa_state->offset = param_ptr_offset;
9154 add_reg_note (insn, REG_CFA_DEF_CFA,
9155 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
9156 GEN_INT (ix86_cfa_state->offset)));
9157 RTX_FRAME_RELATED_P (insn) = 1;
9159 if (!call_used_regs[REGNO (crtl->drap_reg)])
9160 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
9163 /* Remove the saved static chain from the stack. The use of ECX is
9164 merely as a scratch register, not as the actual static chain. */
9165 if (ix86_static_chain_on_stack)
9167 rtx r, insn;
9169 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
9170 ix86_cfa_state->offset += UNITS_PER_WORD;
9172 r = gen_rtx_REG (Pmode, CX_REG);
9173 insn = emit_insn (ix86_gen_pop1 (r));
9175 r = plus_constant (stack_pointer_rtx, UNITS_PER_WORD);
9176 r = gen_rtx_SET (VOIDmode, stack_pointer_rtx, r);
9177 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
9178 RTX_FRAME_RELATED_P (insn) = 1;
9181 /* Sibcall epilogues don't want a return instruction. */
9182 if (style == 0)
9184 *ix86_cfa_state = cfa_state_save;
9185 return;
9188 if (crtl->args.pops_args && crtl->args.size)
9190 rtx popc = GEN_INT (crtl->args.pops_args);
9192 /* i386 can only pop 64K bytes. If asked to pop more, pop return
9193 address, do explicit add, and jump indirectly to the caller. */
9195 if (crtl->args.pops_args >= 65536)
9197 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9198 rtx insn;
9200 /* There is no "pascal" calling convention in any 64bit ABI. */
9201 gcc_assert (!TARGET_64BIT);
9203 insn = emit_insn (gen_popsi1 (ecx));
9204 ix86_cfa_state->offset -= UNITS_PER_WORD;
9206 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9207 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9208 add_reg_note (insn, REG_CFA_REGISTER,
9209 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9210 RTX_FRAME_RELATED_P (insn) = 1;
9212 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9213 popc, -1, true);
9214 emit_jump_insn (gen_return_indirect_internal (ecx));
9216 else
9217 emit_jump_insn (gen_return_pop_internal (popc));
9219 else
9220 emit_jump_insn (gen_return_internal ());
9222 /* Restore the state back to the state from the prologue,
9223 so that it's correct for the next epilogue. */
9224 *ix86_cfa_state = cfa_state_save;
9227 /* Reset from the function's potential modifications. */
9229 static void
9230 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9231 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9233 if (pic_offset_table_rtx)
9234 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9235 #if TARGET_MACHO
9236 /* Mach-O doesn't support labels at the end of objects, so if
9237 it looks like we might want one, insert a NOP. */
9239 rtx insn = get_last_insn ();
9240 while (insn
9241 && NOTE_P (insn)
9242 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9243 insn = PREV_INSN (insn);
9244 if (insn
9245 && (LABEL_P (insn)
9246 || (NOTE_P (insn)
9247 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9248 fputs ("\tnop\n", file);
9250 #endif
9254 /* Extract the parts of an RTL expression that is a valid memory address
9255 for an instruction. Return 0 if the structure of the address is
9256 grossly off. Return -1 if the address contains ASHIFT, so it is not
9257 strictly valid, but is still used for computing the length of an lea instruction. */
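/* Illustrative sketch (editorial addition, not from the original source):
   a typical x86 memory operand decomposes into base + index*scale + disp.
   For example, the RTL address

     (plus (plus (reg %ebx) (mult (reg %eax) (const_int 4))) (const_int 16))

   fills OUT with base = %ebx, index = %eax, scale = 4, disp = 16, i.e.
   the operand 16(%ebx,%eax,4) in AT&T syntax or [ebx+eax*4+16] in Intel
   syntax.  */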
9260 ix86_decompose_address (rtx addr, struct ix86_address *out)
9262 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9263 rtx base_reg, index_reg;
9264 HOST_WIDE_INT scale = 1;
9265 rtx scale_rtx = NULL_RTX;
9266 int retval = 1;
9267 enum ix86_address_seg seg = SEG_DEFAULT;
9269 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9270 base = addr;
9271 else if (GET_CODE (addr) == PLUS)
9273 rtx addends[4], op;
9274 int n = 0, i;
9276 op = addr;
9279 if (n >= 4)
9280 return 0;
9281 addends[n++] = XEXP (op, 1);
9282 op = XEXP (op, 0);
9284 while (GET_CODE (op) == PLUS);
9285 if (n >= 4)
9286 return 0;
9287 addends[n] = op;
9289 for (i = n; i >= 0; --i)
9291 op = addends[i];
9292 switch (GET_CODE (op))
9294 case MULT:
9295 if (index)
9296 return 0;
9297 index = XEXP (op, 0);
9298 scale_rtx = XEXP (op, 1);
9299 break;
9301 case UNSPEC:
9302 if (XINT (op, 1) == UNSPEC_TP
9303 && TARGET_TLS_DIRECT_SEG_REFS
9304 && seg == SEG_DEFAULT)
9305 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9306 else
9307 return 0;
9308 break;
9310 case REG:
9311 case SUBREG:
9312 if (!base)
9313 base = op;
9314 else if (!index)
9315 index = op;
9316 else
9317 return 0;
9318 break;
9320 case CONST:
9321 case CONST_INT:
9322 case SYMBOL_REF:
9323 case LABEL_REF:
9324 if (disp)
9325 return 0;
9326 disp = op;
9327 break;
9329 default:
9330 return 0;
9334 else if (GET_CODE (addr) == MULT)
9336 index = XEXP (addr, 0); /* index*scale */
9337 scale_rtx = XEXP (addr, 1);
9339 else if (GET_CODE (addr) == ASHIFT)
9341 rtx tmp;
9343 /* We're called for lea too, which implements ashift on occasion. */
9344 index = XEXP (addr, 0);
9345 tmp = XEXP (addr, 1);
9346 if (!CONST_INT_P (tmp))
9347 return 0;
9348 scale = INTVAL (tmp);
9349 if ((unsigned HOST_WIDE_INT) scale > 3)
9350 return 0;
9351 scale = 1 << scale;
9352 retval = -1;
9354 else
9355 disp = addr; /* displacement */
9357 /* Extract the integral value of scale. */
9358 if (scale_rtx)
9360 if (!CONST_INT_P (scale_rtx))
9361 return 0;
9362 scale = INTVAL (scale_rtx);
9365 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9366 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9368 /* Avoid useless 0 displacement. */
9369 if (disp == const0_rtx && (base || index))
9370 disp = NULL_RTX;
9372 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
9373 if (base_reg && index_reg && scale == 1
9374 && (index_reg == arg_pointer_rtx
9375 || index_reg == frame_pointer_rtx
9376 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9378 rtx tmp;
9379 tmp = base, base = index, index = tmp;
9380 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9383 /* Special case: %ebp cannot be encoded as a base without a displacement.
9384 Similarly %r13. */
9385 if (!disp
9386 && base_reg
9387 && (base_reg == hard_frame_pointer_rtx
9388 || base_reg == frame_pointer_rtx
9389 || base_reg == arg_pointer_rtx
9390 || (REG_P (base_reg)
9391 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9392 || REGNO (base_reg) == R13_REG))))
9393 disp = const0_rtx;
9395 /* Special case: on K6, [%esi] makes the instruction vector decoded.
9396 Avoid this by transforming to [%esi+0].
9397 Reload calls address legitimization without cfun defined, so we need
9398 to test cfun for being non-NULL. */
9399 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9400 && base_reg && !index_reg && !disp
9401 && REG_P (base_reg)
9402 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9403 disp = const0_rtx;
9405 /* Special case: encode reg+reg instead of reg*2. */
9406 if (!base && index && scale == 2)
9407 base = index, base_reg = index_reg, scale = 1;
9409 /* Special case: scaling cannot be encoded without base or displacement. */
9410 if (!base && !disp && index && scale != 1)
9411 disp = const0_rtx;
9413 out->base = base;
9414 out->index = index;
9415 out->disp = disp;
9416 out->scale = scale;
9417 out->seg = seg;
9419 return retval;
9422 /* Return cost of the memory address x.
9423 For i386, it is better to use a complex address than let gcc copy
9424 the address into a reg and make a new pseudo. But not if the address
9425 requires two regs - that would mean more pseudos with longer
9426 lifetimes. */
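/* Illustrative sketch (editorial addition): with the heuristic below, an
   address such as (%eax) built from a single hard register keeps the base
   cost of 1, while something like 8(%reg1,%reg2) where both operands are
   still pseudo registers is charged extra, since it ties up two registers
   for the lifetime of the address.  */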
9427 static int
9428 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9430 struct ix86_address parts;
9431 int cost = 1;
9432 int ok = ix86_decompose_address (x, &parts);
9434 gcc_assert (ok);
9436 if (parts.base && GET_CODE (parts.base) == SUBREG)
9437 parts.base = SUBREG_REG (parts.base);
9438 if (parts.index && GET_CODE (parts.index) == SUBREG)
9439 parts.index = SUBREG_REG (parts.index);
9441 /* Attempt to minimize number of registers in the address. */
9442 if ((parts.base
9443 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9444 || (parts.index
9445 && (!REG_P (parts.index)
9446 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9447 cost++;
9449 if (parts.base
9450 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9451 && parts.index
9452 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9453 && parts.base != parts.index)
9454 cost++;
9456 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9457 since its predecode logic can't detect the length of such instructions
9458 and decoding degenerates to vector decoding. Increase the cost of such
9459 addresses here. The penalty is at least 2 cycles. It may be worthwhile
9460 to split such addresses or even refuse them entirely.
9462 The following addressing modes are affected:
9463 [base+scale*index]
9464 [scale*index+disp]
9465 [base+index]
9467 The first and last case may be avoidable by explicitly coding a zero
9468 displacement into the memory address, but I don't have an AMD-K6 machine
9469 handy to check this theory. */
9471 if (TARGET_K6
9472 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9473 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9474 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9475 cost += 10;
9477 return cost;
9480 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
9481 this is used to form addresses to local data when -fPIC is in
9482 use. */
9484 static bool
9485 darwin_local_data_pic (rtx disp)
9487 return (GET_CODE (disp) == UNSPEC
9488 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9491 /* Determine if a given RTX is a valid constant. We already know this
9492 satisfies CONSTANT_P. */
9494 bool
9495 legitimate_constant_p (rtx x)
9497 switch (GET_CODE (x))
9499 case CONST:
9500 x = XEXP (x, 0);
9502 if (GET_CODE (x) == PLUS)
9504 if (!CONST_INT_P (XEXP (x, 1)))
9505 return false;
9506 x = XEXP (x, 0);
9509 if (TARGET_MACHO && darwin_local_data_pic (x))
9510 return true;
9512 /* Only some unspecs are valid as "constants". */
9513 if (GET_CODE (x) == UNSPEC)
9514 switch (XINT (x, 1))
9516 case UNSPEC_GOT:
9517 case UNSPEC_GOTOFF:
9518 case UNSPEC_PLTOFF:
9519 return TARGET_64BIT;
9520 case UNSPEC_TPOFF:
9521 case UNSPEC_NTPOFF:
9522 x = XVECEXP (x, 0, 0);
9523 return (GET_CODE (x) == SYMBOL_REF
9524 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9525 case UNSPEC_DTPOFF:
9526 x = XVECEXP (x, 0, 0);
9527 return (GET_CODE (x) == SYMBOL_REF
9528 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9529 default:
9530 return false;
9533 /* We must have drilled down to a symbol. */
9534 if (GET_CODE (x) == LABEL_REF)
9535 return true;
9536 if (GET_CODE (x) != SYMBOL_REF)
9537 return false;
9538 /* FALLTHRU */
9540 case SYMBOL_REF:
9541 /* TLS symbols are never valid. */
9542 if (SYMBOL_REF_TLS_MODEL (x))
9543 return false;
9545 /* DLLIMPORT symbols are never valid. */
9546 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9547 && SYMBOL_REF_DLLIMPORT_P (x))
9548 return false;
9549 break;
9551 case CONST_DOUBLE:
9552 if (GET_MODE (x) == TImode
9553 && x != CONST0_RTX (TImode)
9554 && !TARGET_64BIT)
9555 return false;
9556 break;
9558 case CONST_VECTOR:
9559 if (!standard_sse_constant_p (x))
9560 return false;
9562 default:
9563 break;
9566 /* Otherwise we handle everything else in the move patterns. */
9567 return true;
9570 /* Determine if it's legal to put X into the constant pool. This
9571 is not possible for the address of thread-local symbols, which
9572 is checked above. */
9574 static bool
9575 ix86_cannot_force_const_mem (rtx x)
9577 /* We can always put integral constants and vectors in memory. */
9578 switch (GET_CODE (x))
9580 case CONST_INT:
9581 case CONST_DOUBLE:
9582 case CONST_VECTOR:
9583 return false;
9585 default:
9586 break;
9588 return !legitimate_constant_p (x);
9592 /* Nonzero if the constant value X is a legitimate general operand
9593 when generating PIC code. It is given that flag_pic is on and
9594 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9596 bool
9597 legitimate_pic_operand_p (rtx x)
9599 rtx inner;
9601 switch (GET_CODE (x))
9603 case CONST:
9604 inner = XEXP (x, 0);
9605 if (GET_CODE (inner) == PLUS
9606 && CONST_INT_P (XEXP (inner, 1)))
9607 inner = XEXP (inner, 0);
9609 /* Only some unspecs are valid as "constants". */
9610 if (GET_CODE (inner) == UNSPEC)
9611 switch (XINT (inner, 1))
9613 case UNSPEC_GOT:
9614 case UNSPEC_GOTOFF:
9615 case UNSPEC_PLTOFF:
9616 return TARGET_64BIT;
9617 case UNSPEC_TPOFF:
9618 x = XVECEXP (inner, 0, 0);
9619 return (GET_CODE (x) == SYMBOL_REF
9620 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9621 case UNSPEC_MACHOPIC_OFFSET:
9622 return legitimate_pic_address_disp_p (x);
9623 default:
9624 return false;
9626 /* FALLTHRU */
9628 case SYMBOL_REF:
9629 case LABEL_REF:
9630 return legitimate_pic_address_disp_p (x);
9632 default:
9633 return true;
9637 /* Determine if a given CONST RTX is a valid memory displacement
9638 in PIC mode. */
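/* Illustrative sketch (editorial addition): in 32-bit PIC code a valid
   displacement is typically a single UNSPEC wrapping the symbol, e.g.

     (const (unspec [(symbol_ref "bar")] UNSPEC_GOTOFF))

   optionally with a small constant offset added, whereas a bare
   (symbol_ref "bar") is rejected here because it would bypass the GOT
   machinery.  */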
9641 legitimate_pic_address_disp_p (rtx disp)
9643 bool saw_plus;
9645 /* In 64bit mode we can allow direct addresses of symbols and labels
9646 when they are not dynamic symbols. */
9647 if (TARGET_64BIT)
9649 rtx op0 = disp, op1;
9651 switch (GET_CODE (disp))
9653 case LABEL_REF:
9654 return true;
9656 case CONST:
9657 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9658 break;
9659 op0 = XEXP (XEXP (disp, 0), 0);
9660 op1 = XEXP (XEXP (disp, 0), 1);
9661 if (!CONST_INT_P (op1)
9662 || INTVAL (op1) >= 16*1024*1024
9663 || INTVAL (op1) < -16*1024*1024)
9664 break;
9665 if (GET_CODE (op0) == LABEL_REF)
9666 return true;
9667 if (GET_CODE (op0) != SYMBOL_REF)
9668 break;
9669 /* FALLTHRU */
9671 case SYMBOL_REF:
9672 /* TLS references should always be enclosed in UNSPEC. */
9673 if (SYMBOL_REF_TLS_MODEL (op0))
9674 return false;
9675 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9676 && ix86_cmodel != CM_LARGE_PIC)
9677 return true;
9678 break;
9680 default:
9681 break;
9684 if (GET_CODE (disp) != CONST)
9685 return 0;
9686 disp = XEXP (disp, 0);
9688 if (TARGET_64BIT)
9690 /* It is not safe to allow PLUS expressions here; this limits the allowed
9691 distance of GOT table references. We should not need these anyway. */
9692 if (GET_CODE (disp) != UNSPEC
9693 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9694 && XINT (disp, 1) != UNSPEC_GOTOFF
9695 && XINT (disp, 1) != UNSPEC_PLTOFF))
9696 return 0;
9698 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9699 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9700 return 0;
9701 return 1;
9704 saw_plus = false;
9705 if (GET_CODE (disp) == PLUS)
9707 if (!CONST_INT_P (XEXP (disp, 1)))
9708 return 0;
9709 disp = XEXP (disp, 0);
9710 saw_plus = true;
9713 if (TARGET_MACHO && darwin_local_data_pic (disp))
9714 return 1;
9716 if (GET_CODE (disp) != UNSPEC)
9717 return 0;
9719 switch (XINT (disp, 1))
9721 case UNSPEC_GOT:
9722 if (saw_plus)
9723 return false;
9724 /* We need to check for both symbols and labels because VxWorks loads
9725 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9726 details. */
9727 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9728 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9729 case UNSPEC_GOTOFF:
9730 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9731 While the ABI also specifies a 32bit relocation, we don't produce it
9732 in the small PIC model at all. */
9733 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9734 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9735 && !TARGET_64BIT)
9736 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9737 return false;
9738 case UNSPEC_GOTTPOFF:
9739 case UNSPEC_GOTNTPOFF:
9740 case UNSPEC_INDNTPOFF:
9741 if (saw_plus)
9742 return false;
9743 disp = XVECEXP (disp, 0, 0);
9744 return (GET_CODE (disp) == SYMBOL_REF
9745 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9746 case UNSPEC_NTPOFF:
9747 disp = XVECEXP (disp, 0, 0);
9748 return (GET_CODE (disp) == SYMBOL_REF
9749 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9750 case UNSPEC_DTPOFF:
9751 disp = XVECEXP (disp, 0, 0);
9752 return (GET_CODE (disp) == SYMBOL_REF
9753 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9756 return 0;
9759 /* Recognizes RTL expressions that are valid memory addresses for an
9760 instruction. The MODE argument is the machine mode for the MEM
9761 expression that wants to use this address.
9763 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9764 convert common non-canonical forms to canonical form so that they will
9765 be recognized. */
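/* Illustrative sketch (editorial addition): "canonical form" here means the
   base/index/scale/disp shape produced by ix86_decompose_address, e.g.
   (plus (mult (reg) (const_int 4)) (reg)) rather than the equivalent
   (plus (reg) (ashift (reg) (const_int 2))); ix86_legitimize_address below
   rewrites the latter into the former.  */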
9767 static bool
9768 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9769 rtx addr, bool strict)
9771 struct ix86_address parts;
9772 rtx base, index, disp;
9773 HOST_WIDE_INT scale;
9775 if (ix86_decompose_address (addr, &parts) <= 0)
9776 /* Decomposition failed. */
9777 return false;
9779 base = parts.base;
9780 index = parts.index;
9781 disp = parts.disp;
9782 scale = parts.scale;
9784 /* Validate base register.
9786 Don't allow SUBREG's that span more than a word here. It can lead to spill
9787 failures when the base is one word out of a two word structure, which is
9788 represented internally as a DImode int. */
9790 if (base)
9792 rtx reg;
9794 if (REG_P (base))
9795 reg = base;
9796 else if (GET_CODE (base) == SUBREG
9797 && REG_P (SUBREG_REG (base))
9798 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9799 <= UNITS_PER_WORD)
9800 reg = SUBREG_REG (base);
9801 else
9802 /* Base is not a register. */
9803 return false;
9805 if (GET_MODE (base) != Pmode)
9806 /* Base is not in Pmode. */
9807 return false;
9809 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9810 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9811 /* Base is not valid. */
9812 return false;
9815 /* Validate index register.
9817 Don't allow SUBREG's that span more than a word here -- same as above. */
9819 if (index)
9821 rtx reg;
9823 if (REG_P (index))
9824 reg = index;
9825 else if (GET_CODE (index) == SUBREG
9826 && REG_P (SUBREG_REG (index))
9827 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9828 <= UNITS_PER_WORD)
9829 reg = SUBREG_REG (index);
9830 else
9831 /* Index is not a register. */
9832 return false;
9834 if (GET_MODE (index) != Pmode)
9835 /* Index is not in Pmode. */
9836 return false;
9838 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9839 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9840 /* Index is not valid. */
9841 return false;
9844 /* Validate scale factor. */
9845 if (scale != 1)
9847 if (!index)
9848 /* Scale without index. */
9849 return false;
9851 if (scale != 2 && scale != 4 && scale != 8)
9852 /* Scale is not a valid multiplier. */
9853 return false;
9856 /* Validate displacement. */
9857 if (disp)
9859 if (GET_CODE (disp) == CONST
9860 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9861 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9862 switch (XINT (XEXP (disp, 0), 1))
9864 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
9865 used. While the ABI also specifies 32bit relocations, we don't produce
9866 them at all and use IP-relative addressing instead. */
9867 case UNSPEC_GOT:
9868 case UNSPEC_GOTOFF:
9869 gcc_assert (flag_pic);
9870 if (!TARGET_64BIT)
9871 goto is_legitimate_pic;
9873 /* 64bit address unspec. */
9874 return false;
9876 case UNSPEC_GOTPCREL:
9877 gcc_assert (flag_pic);
9878 goto is_legitimate_pic;
9880 case UNSPEC_GOTTPOFF:
9881 case UNSPEC_GOTNTPOFF:
9882 case UNSPEC_INDNTPOFF:
9883 case UNSPEC_NTPOFF:
9884 case UNSPEC_DTPOFF:
9885 break;
9887 default:
9888 /* Invalid address unspec. */
9889 return false;
9892 else if (SYMBOLIC_CONST (disp)
9893 && (flag_pic
9894 || (TARGET_MACHO
9895 #if TARGET_MACHO
9896 && MACHOPIC_INDIRECT
9897 && !machopic_operand_p (disp)
9898 #endif
9902 is_legitimate_pic:
9903 if (TARGET_64BIT && (index || base))
9905 /* foo@dtpoff(%rX) is ok. */
9906 if (GET_CODE (disp) != CONST
9907 || GET_CODE (XEXP (disp, 0)) != PLUS
9908 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9909 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9910 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9911 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9912 /* Non-constant pic memory reference. */
9913 return false;
9915 else if (! legitimate_pic_address_disp_p (disp))
9916 /* Displacement is an invalid pic construct. */
9917 return false;
9919 /* This code used to verify that a symbolic pic displacement
9920 includes the pic_offset_table_rtx register.
9922 While this is a good idea, unfortunately these constructs may
9923 be created by the "adds using lea" optimization for incorrect
9924 code like:
9926 int a;
9927 int foo(int i)
9929 return *(&a+i);
9932 This code is nonsensical, but results in addressing the
9933 GOT table with pic_offset_table_rtx as the base. We can't
9934 just refuse it easily, since it gets matched by the
9935 "addsi3" pattern, which later gets split to lea when the
9936 output register differs from the input. While this could
9937 be handled by a separate addsi pattern for this case
9938 that never results in lea, disabling this test seems to be
9939 the easier and correct fix for the crash. */
9941 else if (GET_CODE (disp) != LABEL_REF
9942 && !CONST_INT_P (disp)
9943 && (GET_CODE (disp) != CONST
9944 || !legitimate_constant_p (disp))
9945 && (GET_CODE (disp) != SYMBOL_REF
9946 || !legitimate_constant_p (disp)))
9947 /* Displacement is not constant. */
9948 return false;
9949 else if (TARGET_64BIT
9950 && !x86_64_immediate_operand (disp, VOIDmode))
9951 /* Displacement is out of range. */
9952 return false;
9955 /* Everything looks valid. */
9956 return true;
9959 /* Determine if a given RTX is a valid constant address. */
9961 bool
9962 constant_address_p (rtx x)
9964 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
9967 /* Return a unique alias set for the GOT. */
9969 static alias_set_type
9970 ix86_GOT_alias_set (void)
9972 static alias_set_type set = -1;
9973 if (set == -1)
9974 set = new_alias_set ();
9975 return set;
9978 /* Return a legitimate reference for ORIG (an address) using the
9979 register REG. If REG is 0, a new pseudo is generated.
9981 There are two types of references that must be handled:
9983 1. Global data references must load the address from the GOT, via
9984 the PIC reg. An insn is emitted to do this load, and the reg is
9985 returned.
9987 2. Static data references, constant pool addresses, and code labels
9988 compute the address as an offset from the GOT, whose base is in
9989 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
9990 differentiate them from global data objects. The returned
9991 address is the PIC reg + an unspec constant.
9993 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
9994 reg also appears in the address. */
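/* Illustrative sketch (editorial addition, assuming 32-bit ELF PIC): a
   global symbol is reached through a GOT load such as

     movl  foo@GOT(%ebx), %eax        # address of foo loaded from the GOT

   while local/static data is addressed as an offset from the GOT base,

     leal  bar@GOTOFF(%ebx), %eax     # PIC reg + constant offset

   which corresponds to cases 1 and 2 described above.  */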
9996 static rtx
9997 legitimize_pic_address (rtx orig, rtx reg)
9999 rtx addr = orig;
10000 rtx new_rtx = orig;
10001 rtx base;
10003 #if TARGET_MACHO
10004 if (TARGET_MACHO && !TARGET_64BIT)
10006 if (reg == 0)
10007 reg = gen_reg_rtx (Pmode);
10008 /* Use the generic Mach-O PIC machinery. */
10009 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
10011 #endif
10013 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
10014 new_rtx = addr;
10015 else if (TARGET_64BIT
10016 && ix86_cmodel != CM_SMALL_PIC
10017 && gotoff_operand (addr, Pmode))
10019 rtx tmpreg;
10020 /* This symbol may be referenced via a displacement from the PIC
10021 base address (@GOTOFF). */
10023 if (reload_in_progress)
10024 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10025 if (GET_CODE (addr) == CONST)
10026 addr = XEXP (addr, 0);
10027 if (GET_CODE (addr) == PLUS)
10029 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10030 UNSPEC_GOTOFF);
10031 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10033 else
10034 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10035 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10036 if (!reg)
10037 tmpreg = gen_reg_rtx (Pmode);
10038 else
10039 tmpreg = reg;
10040 emit_move_insn (tmpreg, new_rtx);
10042 if (reg != 0)
10044 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
10045 tmpreg, 1, OPTAB_DIRECT);
10046 new_rtx = reg;
10048 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
10050 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
10052 /* This symbol may be referenced via a displacement from the PIC
10053 base address (@GOTOFF). */
10055 if (reload_in_progress)
10056 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10057 if (GET_CODE (addr) == CONST)
10058 addr = XEXP (addr, 0);
10059 if (GET_CODE (addr) == PLUS)
10061 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
10062 UNSPEC_GOTOFF);
10063 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
10065 else
10066 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
10067 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10068 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10070 if (reg != 0)
10072 emit_move_insn (reg, new_rtx);
10073 new_rtx = reg;
10076 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
10077 /* We can't use @GOTOFF for text labels on VxWorks;
10078 see gotoff_operand. */
10079 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
10081 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10083 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
10084 return legitimize_dllimport_symbol (addr, true);
10085 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
10086 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
10087 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
10089 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
10090 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
10094 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
10096 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
10097 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10098 new_rtx = gen_const_mem (Pmode, new_rtx);
10099 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10101 if (reg == 0)
10102 reg = gen_reg_rtx (Pmode);
10103 /* Use gen_movsi directly, otherwise the address is loaded
10104 into a register for CSE. We don't want to CSE these addresses;
10105 instead we CSE addresses from the GOT table, so skip this. */
10106 emit_insn (gen_movsi (reg, new_rtx));
10107 new_rtx = reg;
10109 else
10111 /* This symbol must be referenced via a load from the
10112 Global Offset Table (@GOT). */
10114 if (reload_in_progress)
10115 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10116 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
10117 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10118 if (TARGET_64BIT)
10119 new_rtx = force_reg (Pmode, new_rtx);
10120 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10121 new_rtx = gen_const_mem (Pmode, new_rtx);
10122 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
10124 if (reg == 0)
10125 reg = gen_reg_rtx (Pmode);
10126 emit_move_insn (reg, new_rtx);
10127 new_rtx = reg;
10130 else
10132 if (CONST_INT_P (addr)
10133 && !x86_64_immediate_operand (addr, VOIDmode))
10135 if (reg)
10137 emit_move_insn (reg, addr);
10138 new_rtx = reg;
10140 else
10141 new_rtx = force_reg (Pmode, addr);
10143 else if (GET_CODE (addr) == CONST)
10145 addr = XEXP (addr, 0);
10147 /* We must match stuff we generate before. Assume the only
10148 unspecs that can get here are ours. Not that we could do
10149 anything with them anyway.... */
10150 if (GET_CODE (addr) == UNSPEC
10151 || (GET_CODE (addr) == PLUS
10152 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
10153 return orig;
10154 gcc_assert (GET_CODE (addr) == PLUS);
10156 if (GET_CODE (addr) == PLUS)
10158 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
10160 /* Check first to see if this is a constant offset from a @GOTOFF
10161 symbol reference. */
10162 if (gotoff_operand (op0, Pmode)
10163 && CONST_INT_P (op1))
10165 if (!TARGET_64BIT)
10167 if (reload_in_progress)
10168 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10169 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
10170 UNSPEC_GOTOFF);
10171 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
10172 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
10173 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
10175 if (reg != 0)
10177 emit_move_insn (reg, new_rtx);
10178 new_rtx = reg;
10181 else
10183 if (INTVAL (op1) < -16*1024*1024
10184 || INTVAL (op1) >= 16*1024*1024)
10186 if (!x86_64_immediate_operand (op1, Pmode))
10187 op1 = force_reg (Pmode, op1);
10188 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
10192 else
10194 base = legitimize_pic_address (XEXP (addr, 0), reg);
10195 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10196 base == reg ? NULL_RTX : reg);
10198 if (CONST_INT_P (new_rtx))
10199 new_rtx = plus_constant (base, INTVAL (new_rtx));
10200 else
10202 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10204 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10205 new_rtx = XEXP (new_rtx, 1);
10207 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10212 return new_rtx;
10215 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10217 static rtx
10218 get_thread_pointer (int to_reg)
10220 rtx tp, reg, insn;
10222 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10223 if (!to_reg)
10224 return tp;
10226 reg = gen_reg_rtx (Pmode);
10227 insn = gen_rtx_SET (VOIDmode, reg, tp);
10228 insn = emit_insn (insn);
10230 return reg;
10233 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10234 false if we expect this to be used for a memory address and true if
10235 we expect to load the address into a register. */
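/* Illustrative sketch (editorial addition): the four TLS models handled
   below map roughly to the usual x86 access sequences -- global-dynamic
   and local-dynamic call the tls_get_addr runtime helper to obtain the
   address at run time, initial-exec loads the variable's offset from the
   GOT and adds it to the thread pointer (%gs/%fs base), and local-exec
   uses a link-time constant offset from the thread pointer directly.  */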
10237 static rtx
10238 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10240 rtx dest, base, off, pic, tp;
10241 int type;
10243 switch (model)
10245 case TLS_MODEL_GLOBAL_DYNAMIC:
10246 dest = gen_reg_rtx (Pmode);
10247 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10249 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10251 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10253 start_sequence ();
10254 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10255 insns = get_insns ();
10256 end_sequence ();
10258 RTL_CONST_CALL_P (insns) = 1;
10259 emit_libcall_block (insns, dest, rax, x);
10261 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10262 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10263 else
10264 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10266 if (TARGET_GNU2_TLS)
10268 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10270 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10272 break;
10274 case TLS_MODEL_LOCAL_DYNAMIC:
10275 base = gen_reg_rtx (Pmode);
10276 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10278 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10280 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10282 start_sequence ();
10283 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10284 insns = get_insns ();
10285 end_sequence ();
10287 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10288 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10289 RTL_CONST_CALL_P (insns) = 1;
10290 emit_libcall_block (insns, base, rax, note);
10292 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10293 emit_insn (gen_tls_local_dynamic_base_64 (base));
10294 else
10295 emit_insn (gen_tls_local_dynamic_base_32 (base));
10297 if (TARGET_GNU2_TLS)
10299 rtx x = ix86_tls_module_base ();
10301 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10302 gen_rtx_MINUS (Pmode, x, tp));
10305 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10306 off = gen_rtx_CONST (Pmode, off);
10308 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10310 if (TARGET_GNU2_TLS)
10312 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10314 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10317 break;
10319 case TLS_MODEL_INITIAL_EXEC:
10320 if (TARGET_64BIT)
10322 pic = NULL;
10323 type = UNSPEC_GOTNTPOFF;
10325 else if (flag_pic)
10327 if (reload_in_progress)
10328 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10329 pic = pic_offset_table_rtx;
10330 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10332 else if (!TARGET_ANY_GNU_TLS)
10334 pic = gen_reg_rtx (Pmode);
10335 emit_insn (gen_set_got (pic));
10336 type = UNSPEC_GOTTPOFF;
10338 else
10340 pic = NULL;
10341 type = UNSPEC_INDNTPOFF;
10344 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10345 off = gen_rtx_CONST (Pmode, off);
10346 if (pic)
10347 off = gen_rtx_PLUS (Pmode, pic, off);
10348 off = gen_const_mem (Pmode, off);
10349 set_mem_alias_set (off, ix86_GOT_alias_set ());
10351 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10353 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10354 off = force_reg (Pmode, off);
10355 return gen_rtx_PLUS (Pmode, base, off);
10357 else
10359 base = get_thread_pointer (true);
10360 dest = gen_reg_rtx (Pmode);
10361 emit_insn (gen_subsi3 (dest, base, off));
10363 break;
10365 case TLS_MODEL_LOCAL_EXEC:
10366 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10367 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10368 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10369 off = gen_rtx_CONST (Pmode, off);
10371 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10373 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10374 return gen_rtx_PLUS (Pmode, base, off);
10376 else
10378 base = get_thread_pointer (true);
10379 dest = gen_reg_rtx (Pmode);
10380 emit_insn (gen_subsi3 (dest, base, off));
10382 break;
10384 default:
10385 gcc_unreachable ();
10388 return dest;
10391 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10392 to symbol DECL. */
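/* Illustrative sketch (editorial addition): for a dllimport'ed symbol
   "foo" this builds an artificial pointer-typed declaration whose
   assembler name is "__imp__foo" (or "__imp_foo" when no user label
   prefix or a fastcall prefix is involved); references to foo are then
   rewritten as loads through that import pointer.  */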
10394 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10395 htab_t dllimport_map;
10397 static tree
10398 get_dllimport_decl (tree decl)
10400 struct tree_map *h, in;
10401 void **loc;
10402 const char *name;
10403 const char *prefix;
10404 size_t namelen, prefixlen;
10405 char *imp_name;
10406 tree to;
10407 rtx rtl;
10409 if (!dllimport_map)
10410 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10412 in.hash = htab_hash_pointer (decl);
10413 in.base.from = decl;
10414 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10415 h = (struct tree_map *) *loc;
10416 if (h)
10417 return h->to;
10419 *loc = h = GGC_NEW (struct tree_map);
10420 h->hash = in.hash;
10421 h->base.from = decl;
10422 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10423 VAR_DECL, NULL, ptr_type_node);
10424 DECL_ARTIFICIAL (to) = 1;
10425 DECL_IGNORED_P (to) = 1;
10426 DECL_EXTERNAL (to) = 1;
10427 TREE_READONLY (to) = 1;
10429 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10430 name = targetm.strip_name_encoding (name);
10431 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10432 ? "*__imp_" : "*__imp__";
10433 namelen = strlen (name);
10434 prefixlen = strlen (prefix);
10435 imp_name = (char *) alloca (namelen + prefixlen + 1);
10436 memcpy (imp_name, prefix, prefixlen);
10437 memcpy (imp_name + prefixlen, name, namelen + 1);
10439 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10440 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10441 SET_SYMBOL_REF_DECL (rtl, to);
10442 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10444 rtl = gen_const_mem (Pmode, rtl);
10445 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10447 SET_DECL_RTL (to, rtl);
10448 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10450 return to;
10453 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10454 true if we require the result be a register. */
10456 static rtx
10457 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10459 tree imp_decl;
10460 rtx x;
10462 gcc_assert (SYMBOL_REF_DECL (symbol));
10463 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10465 x = DECL_RTL (imp_decl);
10466 if (want_reg)
10467 x = force_reg (Pmode, x);
10468 return x;
10471 /* Try machine-dependent ways of modifying an illegitimate address
10472 to be legitimate. If we find one, return the new, valid address.
10473 This macro is used in only one place: `memory_address' in explow.c.
10475 OLDX is the address as it was before break_out_memory_refs was called.
10476 In some cases it is useful to look at this to decide what needs to be done.
10478 It is always safe for this macro to do nothing. It exists to recognize
10479 opportunities to optimize the output.
10481 For the 80386, we handle X+REG by loading X into a register R and
10482 using R+REG. R will go in a general reg and indexing will be used.
10483 However, if REG is a broken-out memory address or multiplication,
10484 nothing needs to be done because REG can certainly go in a general reg.
10486 When -fpic is used, special handling is needed for symbolic references.
10487 See comments by legitimize_pic_address in i386.c for details. */
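/* Illustrative sketch (editorial addition): one of the rewrites performed
   below turns a shift into the multiply form the addressing hardware can
   encode, e.g.

     (plus (reg R1) (ashift (reg R2) (const_int 2)))
       -->  (plus (mult (reg R2) (const_int 4)) (reg R1))

   which then matches the base + index*scale pattern accepted by
   ix86_legitimate_address_p.  */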
10489 static rtx
10490 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10491 enum machine_mode mode)
10493 int changed = 0;
10494 unsigned log;
10496 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10497 if (log)
10498 return legitimize_tls_address (x, (enum tls_model) log, false);
10499 if (GET_CODE (x) == CONST
10500 && GET_CODE (XEXP (x, 0)) == PLUS
10501 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10502 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10504 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10505 (enum tls_model) log, false);
10506 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10509 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10511 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10512 return legitimize_dllimport_symbol (x, true);
10513 if (GET_CODE (x) == CONST
10514 && GET_CODE (XEXP (x, 0)) == PLUS
10515 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10516 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10518 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10519 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10523 if (flag_pic && SYMBOLIC_CONST (x))
10524 return legitimize_pic_address (x, 0);
10526 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10527 if (GET_CODE (x) == ASHIFT
10528 && CONST_INT_P (XEXP (x, 1))
10529 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10531 changed = 1;
10532 log = INTVAL (XEXP (x, 1));
10533 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10534 GEN_INT (1 << log));
10537 if (GET_CODE (x) == PLUS)
10539 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10541 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10542 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10543 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10545 changed = 1;
10546 log = INTVAL (XEXP (XEXP (x, 0), 1));
10547 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10548 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10549 GEN_INT (1 << log));
10552 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10553 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10554 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10556 changed = 1;
10557 log = INTVAL (XEXP (XEXP (x, 1), 1));
10558 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10559 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10560 GEN_INT (1 << log));
10563 /* Put multiply first if it isn't already. */
10564 if (GET_CODE (XEXP (x, 1)) == MULT)
10566 rtx tmp = XEXP (x, 0);
10567 XEXP (x, 0) = XEXP (x, 1);
10568 XEXP (x, 1) = tmp;
10569 changed = 1;
10572 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10573 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10574 created by virtual register instantiation, register elimination, and
10575 similar optimizations. */
10576 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10578 changed = 1;
10579 x = gen_rtx_PLUS (Pmode,
10580 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10581 XEXP (XEXP (x, 1), 0)),
10582 XEXP (XEXP (x, 1), 1));
10585 /* Canonicalize
10586 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10587 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10588 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10589 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10590 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10591 && CONSTANT_P (XEXP (x, 1)))
10593 rtx constant;
10594 rtx other = NULL_RTX;
10596 if (CONST_INT_P (XEXP (x, 1)))
10598 constant = XEXP (x, 1);
10599 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10601 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10603 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10604 other = XEXP (x, 1);
10606 else
10607 constant = 0;
10609 if (constant)
10611 changed = 1;
10612 x = gen_rtx_PLUS (Pmode,
10613 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10614 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10615 plus_constant (other, INTVAL (constant)));
10619 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10620 return x;
10622 if (GET_CODE (XEXP (x, 0)) == MULT)
10624 changed = 1;
10625 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10628 if (GET_CODE (XEXP (x, 1)) == MULT)
10630 changed = 1;
10631 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10634 if (changed
10635 && REG_P (XEXP (x, 1))
10636 && REG_P (XEXP (x, 0)))
10637 return x;
10639 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10641 changed = 1;
10642 x = legitimize_pic_address (x, 0);
10645 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10646 return x;
10648 if (REG_P (XEXP (x, 0)))
10650 rtx temp = gen_reg_rtx (Pmode);
10651 rtx val = force_operand (XEXP (x, 1), temp);
10652 if (val != temp)
10653 emit_move_insn (temp, val);
10655 XEXP (x, 1) = temp;
10656 return x;
10659 else if (REG_P (XEXP (x, 1)))
10661 rtx temp = gen_reg_rtx (Pmode);
10662 rtx val = force_operand (XEXP (x, 0), temp);
10663 if (val != temp)
10664 emit_move_insn (temp, val);
10666 XEXP (x, 0) = temp;
10667 return x;
10671 return x;
10674 /* Print an integer constant expression in assembler syntax. Addition
10675 and subtraction are the only arithmetic that may appear in these
10676 expressions. FILE is the stdio stream to write to, X is the rtx, and
10677 CODE is the operand print code from the output string. */
10679 static void
10680 output_pic_addr_const (FILE *file, rtx x, int code)
10682 char buf[256];
10684 switch (GET_CODE (x))
10686 case PC:
10687 gcc_assert (flag_pic);
10688 putc ('.', file);
10689 break;
10691 case SYMBOL_REF:
10692 if (! TARGET_MACHO || TARGET_64BIT)
10693 output_addr_const (file, x);
10694 else
10696 const char *name = XSTR (x, 0);
10698 /* Mark the decl as referenced so that cgraph will
10699 output the function. */
10700 if (SYMBOL_REF_DECL (x))
10701 mark_decl_referenced (SYMBOL_REF_DECL (x));
10703 #if TARGET_MACHO
10704 if (MACHOPIC_INDIRECT
10705 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10706 name = machopic_indirection_name (x, /*stub_p=*/true);
10707 #endif
10708 assemble_name (file, name);
10710 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10711 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10712 fputs ("@PLT", file);
10713 break;
10715 case LABEL_REF:
10716 x = XEXP (x, 0);
10717 /* FALLTHRU */
10718 case CODE_LABEL:
10719 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10720 assemble_name (asm_out_file, buf);
10721 break;
10723 case CONST_INT:
10724 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10725 break;
10727 case CONST:
10728 /* This used to output parentheses around the expression,
10729 but that does not work on the 386 (either ATT or BSD assembler). */
10730 output_pic_addr_const (file, XEXP (x, 0), code);
10731 break;
10733 case CONST_DOUBLE:
10734 if (GET_MODE (x) == VOIDmode)
10736 /* We can use %d if the number is <32 bits and positive. */
10737 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10738 fprintf (file, "0x%lx%08lx",
10739 (unsigned long) CONST_DOUBLE_HIGH (x),
10740 (unsigned long) CONST_DOUBLE_LOW (x));
10741 else
10742 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10744 else
10745 /* We can't handle floating point constants;
10746 PRINT_OPERAND must handle them. */
10747 output_operand_lossage ("floating constant misused");
10748 break;
10750 case PLUS:
10751 /* Some assemblers need integer constants to appear first. */
10752 if (CONST_INT_P (XEXP (x, 0)))
10754 output_pic_addr_const (file, XEXP (x, 0), code);
10755 putc ('+', file);
10756 output_pic_addr_const (file, XEXP (x, 1), code);
10758 else
10760 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10761 output_pic_addr_const (file, XEXP (x, 1), code);
10762 putc ('+', file);
10763 output_pic_addr_const (file, XEXP (x, 0), code);
10765 break;
10767 case MINUS:
10768 if (!TARGET_MACHO)
10769 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10770 output_pic_addr_const (file, XEXP (x, 0), code);
10771 putc ('-', file);
10772 output_pic_addr_const (file, XEXP (x, 1), code);
10773 if (!TARGET_MACHO)
10774 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10775 break;
10777 case UNSPEC:
10778 gcc_assert (XVECLEN (x, 0) == 1);
10779 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10780 switch (XINT (x, 1))
10782 case UNSPEC_GOT:
10783 fputs ("@GOT", file);
10784 break;
10785 case UNSPEC_GOTOFF:
10786 fputs ("@GOTOFF", file);
10787 break;
10788 case UNSPEC_PLTOFF:
10789 fputs ("@PLTOFF", file);
10790 break;
10791 case UNSPEC_GOTPCREL:
10792 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10793 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10794 break;
10795 case UNSPEC_GOTTPOFF:
10796 /* FIXME: This might be @TPOFF in Sun ld too. */
10797 fputs ("@GOTTPOFF", file);
10798 break;
10799 case UNSPEC_TPOFF:
10800 fputs ("@TPOFF", file);
10801 break;
10802 case UNSPEC_NTPOFF:
10803 if (TARGET_64BIT)
10804 fputs ("@TPOFF", file);
10805 else
10806 fputs ("@NTPOFF", file);
10807 break;
10808 case UNSPEC_DTPOFF:
10809 fputs ("@DTPOFF", file);
10810 break;
10811 case UNSPEC_GOTNTPOFF:
10812 if (TARGET_64BIT)
10813 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10814 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
10815 else
10816 fputs ("@GOTNTPOFF", file);
10817 break;
10818 case UNSPEC_INDNTPOFF:
10819 fputs ("@INDNTPOFF", file);
10820 break;
10821 #if TARGET_MACHO
10822 case UNSPEC_MACHOPIC_OFFSET:
10823 putc ('-', file);
10824 machopic_output_function_base_name (file);
10825 break;
10826 #endif
10827 default:
10828 output_operand_lossage ("invalid UNSPEC as operand");
10829 break;
10831 break;
10833 default:
10834 output_operand_lossage ("invalid expression as operand");
10838 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10839 We need to emit DTP-relative relocations. */
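/* Illustrative sketch (editorial addition): for a 4-byte request this emits
   something like ".long x@DTPOFF", and for an 8-byte request the same
   directive followed by ", 0" for the upper half, as the switch below
   shows.  */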
10841 static void ATTRIBUTE_UNUSED
10842 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10844 fputs (ASM_LONG, file);
10845 output_addr_const (file, x);
10846 fputs ("@DTPOFF", file);
10847 switch (size)
10849 case 4:
10850 break;
10851 case 8:
10852 fputs (", 0", file);
10853 break;
10854 default:
10855 gcc_unreachable ();
10859 /* Return true if X is a representation of the PIC register. This copes
10860 with calls from ix86_find_base_term, where the register might have
10861 been replaced by a cselib value. */
10863 static bool
10864 ix86_pic_register_p (rtx x)
10866 if (GET_CODE (x) == VALUE && CSELIB_VAL_PTR (x))
10867 return (pic_offset_table_rtx
10868 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10869 else
10870 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10873 /* In the name of slightly smaller debug output, and to cater to
10874 general assembler lossage, recognize PIC+GOTOFF and turn it back
10875 into a direct symbol reference.
10877 On Darwin, this is necessary to avoid a crash, because Darwin
10878 has a different PIC label for each routine but the DWARF debugging
10879 information is not associated with any particular routine, so it's
10880 necessary to remove references to the PIC label from RTL stored by
10881 the DWARF output code. */
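/* Illustrative sketch (editorial addition): the transformation undone here
   is e.g.

     (plus (reg %ebx) (const (unspec [(symbol_ref "bar")] UNSPEC_GOTOFF)))
       -->  (symbol_ref "bar")

   with any register and constant addends re-applied to the recovered
   symbol.  */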
10883 static rtx
10884 ix86_delegitimize_address (rtx x)
10886 rtx orig_x = delegitimize_mem_from_attrs (x);
10887 /* reg_addend is NULL or a multiple of some register. */
10888 rtx reg_addend = NULL_RTX;
10889 /* const_addend is NULL or a const_int. */
10890 rtx const_addend = NULL_RTX;
10891 /* This is the result, or NULL. */
10892 rtx result = NULL_RTX;
10894 x = orig_x;
10896 if (MEM_P (x))
10897 x = XEXP (x, 0);
10899 if (TARGET_64BIT)
10901 if (GET_CODE (x) != CONST
10902 || GET_CODE (XEXP (x, 0)) != UNSPEC
10903 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10904 || !MEM_P (orig_x))
10905 return orig_x;
10906 return XVECEXP (XEXP (x, 0), 0, 0);
10909 if (GET_CODE (x) != PLUS
10910 || GET_CODE (XEXP (x, 1)) != CONST)
10911 return orig_x;
10913 if (ix86_pic_register_p (XEXP (x, 0)))
10914 /* %ebx + GOT/GOTOFF */
10916 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10918 /* %ebx + %reg * scale + GOT/GOTOFF */
10919 reg_addend = XEXP (x, 0);
10920 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10921 reg_addend = XEXP (reg_addend, 1);
10922 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10923 reg_addend = XEXP (reg_addend, 0);
10924 else
10925 return orig_x;
10926 if (!REG_P (reg_addend)
10927 && GET_CODE (reg_addend) != MULT
10928 && GET_CODE (reg_addend) != ASHIFT)
10929 return orig_x;
10931 else
10932 return orig_x;
10934 x = XEXP (XEXP (x, 1), 0);
10935 if (GET_CODE (x) == PLUS
10936 && CONST_INT_P (XEXP (x, 1)))
10938 const_addend = XEXP (x, 1);
10939 x = XEXP (x, 0);
10942 if (GET_CODE (x) == UNSPEC
10943 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
10944 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
10945 result = XVECEXP (x, 0, 0);
10947 if (TARGET_MACHO && darwin_local_data_pic (x)
10948 && !MEM_P (orig_x))
10949 result = XVECEXP (x, 0, 0);
10951 if (! result)
10952 return orig_x;
10954 if (const_addend)
10955 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
10956 if (reg_addend)
10957 result = gen_rtx_PLUS (Pmode, reg_addend, result);
10958 return result;
10961 /* If X is a machine specific address (i.e. a symbol or label being
10962 referenced as a displacement from the GOT implemented using an
10963 UNSPEC), then return the base term. Otherwise return X. */
10966 ix86_find_base_term (rtx x)
10968 rtx term;
10970 if (TARGET_64BIT)
10972 if (GET_CODE (x) != CONST)
10973 return x;
10974 term = XEXP (x, 0);
10975 if (GET_CODE (term) == PLUS
10976 && (CONST_INT_P (XEXP (term, 1))
10977 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
10978 term = XEXP (term, 0);
10979 if (GET_CODE (term) != UNSPEC
10980 || XINT (term, 1) != UNSPEC_GOTPCREL)
10981 return x;
10983 return XVECEXP (term, 0, 0);
10986 return ix86_delegitimize_address (x);
10989 static void
10990 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
10991 int fp, FILE *file)
10993 const char *suffix;
10995 if (mode == CCFPmode || mode == CCFPUmode)
10997 code = ix86_fp_compare_code_to_integer (code);
10998 mode = CCmode;
11000 if (reverse)
11001 code = reverse_condition (code);
11003 switch (code)
11005 case EQ:
11006 switch (mode)
11008 case CCAmode:
11009 suffix = "a";
11010 break;
11012 case CCCmode:
11013 suffix = "c";
11014 break;
11016 case CCOmode:
11017 suffix = "o";
11018 break;
11020 case CCSmode:
11021 suffix = "s";
11022 break;
11024 default:
11025 suffix = "e";
11027 break;
11028 case NE:
11029 switch (mode)
11031 case CCAmode:
11032 suffix = "na";
11033 break;
11035 case CCCmode:
11036 suffix = "nc";
11037 break;
11039 case CCOmode:
11040 suffix = "no";
11041 break;
11043 case CCSmode:
11044 suffix = "ns";
11045 break;
11047 default:
11048 suffix = "ne";
11050 break;
11051 case GT:
11052 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
11053 suffix = "g";
11054 break;
11055 case GTU:
11056 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
11057 Those same assemblers have the same but opposite lossage on cmov. */
11058 if (mode == CCmode)
11059 suffix = fp ? "nbe" : "a";
11060 else if (mode == CCCmode)
11061 suffix = "b";
11062 else
11063 gcc_unreachable ();
11064 break;
11065 case LT:
11066 switch (mode)
11068 case CCNOmode:
11069 case CCGOCmode:
11070 suffix = "s";
11071 break;
11073 case CCmode:
11074 case CCGCmode:
11075 suffix = "l";
11076 break;
11078 default:
11079 gcc_unreachable ();
11081 break;
11082 case LTU:
11083 gcc_assert (mode == CCmode || mode == CCCmode);
11084 suffix = "b";
11085 break;
11086 case GE:
11087 switch (mode)
11089 case CCNOmode:
11090 case CCGOCmode:
11091 suffix = "ns";
11092 break;
11094 case CCmode:
11095 case CCGCmode:
11096 suffix = "ge";
11097 break;
11099 default:
11100 gcc_unreachable ();
11102 break;
11103 case GEU:
11104 /* ??? As above. */
11105 gcc_assert (mode == CCmode || mode == CCCmode);
11106 suffix = fp ? "nb" : "ae";
11107 break;
11108 case LE:
11109 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
11110 suffix = "le";
11111 break;
11112 case LEU:
11113 /* ??? As above. */
11114 if (mode == CCmode)
11115 suffix = "be";
11116 else if (mode == CCCmode)
11117 suffix = fp ? "nb" : "ae";
11118 else
11119 gcc_unreachable ();
11120 break;
11121 case UNORDERED:
11122 suffix = fp ? "u" : "p";
11123 break;
11124 case ORDERED:
11125 suffix = fp ? "nu" : "np";
11126 break;
11127 default:
11128 gcc_unreachable ();
11130 fputs (suffix, file);
11133 /* Print the name of register X to FILE based on its machine mode and number.
11134 If CODE is 'w', pretend the mode is HImode.
11135 If CODE is 'b', pretend the mode is QImode.
11136 If CODE is 'k', pretend the mode is SImode.
11137 If CODE is 'q', pretend the mode is DImode.
11138 If CODE is 'x', pretend the mode is V4SFmode.
11139 If CODE is 't', pretend the mode is V8SFmode.
11140 If CODE is 'h', pretend the reg is the 'high' byte register.
11141 If CODE is 'y', print "st(0)" instead of "st", if the reg is a stack op.
11142 If CODE is 'd', duplicate the operand for an AVX instruction.
11145 void
11146 print_reg (rtx x, int code, FILE *file)
11148 const char *reg;
11149 bool duplicated = code == 'd' && TARGET_AVX;
11151 gcc_assert (x == pc_rtx
11152 || (REGNO (x) != ARG_POINTER_REGNUM
11153 && REGNO (x) != FRAME_POINTER_REGNUM
11154 && REGNO (x) != FLAGS_REG
11155 && REGNO (x) != FPSR_REG
11156 && REGNO (x) != FPCR_REG));
11158 if (ASSEMBLER_DIALECT == ASM_ATT)
11159 putc ('%', file);
11161 if (x == pc_rtx)
11163 gcc_assert (TARGET_64BIT);
11164 fputs ("rip", file);
11165 return;
11168 if (code == 'w' || MMX_REG_P (x))
11169 code = 2;
11170 else if (code == 'b')
11171 code = 1;
11172 else if (code == 'k')
11173 code = 4;
11174 else if (code == 'q')
11175 code = 8;
11176 else if (code == 'y')
11177 code = 3;
11178 else if (code == 'h')
11179 code = 0;
11180 else if (code == 'x')
11181 code = 16;
11182 else if (code == 't')
11183 code = 32;
11184 else
11185 code = GET_MODE_SIZE (GET_MODE (x));
11187 /* Irritatingly, AMD extended registers use a different naming convention
11188 from the normal registers. */
11189 if (REX_INT_REG_P (x))
11191 gcc_assert (TARGET_64BIT);
11192 switch (code)
11194 case 0:
11195 error ("extended registers have no high halves");
11196 break;
11197 case 1:
11198 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11199 break;
11200 case 2:
11201 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11202 break;
11203 case 4:
11204 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11205 break;
11206 case 8:
11207 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11208 break;
11209 default:
11210 error ("unsupported operand size for extended register");
11211 break;
11213 return;
11216 reg = NULL;
11217 switch (code)
11219 case 3:
11220 if (STACK_TOP_P (x))
11222 reg = "st(0)";
11223 break;
11225 /* FALLTHRU */
11226 case 8:
11227 case 4:
11228 case 12:
11229 if (! ANY_FP_REG_P (x))
11230 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11231 /* FALLTHRU */
11232 case 16:
11233 case 2:
11234 normal:
11235 reg = hi_reg_name[REGNO (x)];
11236 break;
11237 case 1:
11238 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11239 goto normal;
11240 reg = qi_reg_name[REGNO (x)];
11241 break;
11242 case 0:
11243 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11244 goto normal;
11245 reg = qi_high_reg_name[REGNO (x)];
11246 break;
11247 case 32:
11248 if (SSE_REG_P (x))
11250 gcc_assert (!duplicated);
11251 putc ('y', file);
11252 fputs (hi_reg_name[REGNO (x)] + 1, file);
11253 return;
11255 break;
11256 default:
11257 gcc_unreachable ();
11260 fputs (reg, file);
11261 if (duplicated)
11263 if (ASSEMBLER_DIALECT == ASM_ATT)
11264 fprintf (file, ", %%%s", reg);
11265 else
11266 fprintf (file, ", %s", reg);
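/* Illustrative sketch (not GCC code, excluded from the build): for an
   operand living in the a-register, the size codes documented above make
   print_reg emit the following AT&T names; the 'q' form assumes a 64-bit
   target.  */
#if 0
static const char *const example_ax_names[] = {
  "al",   /* code 'b', QImode  */
  "ax",   /* code 'w', HImode  */
  "eax",  /* code 'k', SImode  */
  "rax",  /* code 'q', DImode, TARGET_64BIT only  */
  "ah"    /* code 'h', high byte  */
};
#endif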
11270 /* Locate some local-dynamic symbol still in use by this function
11271 so that we can print its name in some tls_local_dynamic_base
11272 pattern. */
11274 static int
11275 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11277 rtx x = *px;
11279 if (GET_CODE (x) == SYMBOL_REF
11280 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11282 cfun->machine->some_ld_name = XSTR (x, 0);
11283 return 1;
11286 return 0;
11289 static const char *
11290 get_some_local_dynamic_name (void)
11292 rtx insn;
11294 if (cfun->machine->some_ld_name)
11295 return cfun->machine->some_ld_name;
11297 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11298 if (INSN_P (insn)
11299 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11300 return cfun->machine->some_ld_name;
11302 return NULL;
11305 /* Meaning of CODE:
11306 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11307 C -- print opcode suffix for set/cmov insn.
11308 c -- like C, but print reversed condition
11309 E,e -- likewise, but for compare-and-branch fused insn.
11310 F,f -- likewise, but for floating-point.
11311 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11312 otherwise nothing
11313 R -- print the prefix for register names.
11314 z -- print the opcode suffix for the size of the current operand.
11315 Z -- likewise, with special suffixes for x87 instructions.
11316 * -- print a star (in certain assembler syntax)
11317 A -- print an absolute memory reference.
11318 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11319 s -- print a shift double count, followed by the assembler's argument
11320 delimiter.
11321 b -- print the QImode name of the register for the indicated operand.
11322 %b0 would print %al if operands[0] is reg 0.
11323 w -- likewise, print the HImode name of the register.
11324 k -- likewise, print the SImode name of the register.
11325 q -- likewise, print the DImode name of the register.
11326 x -- likewise, print the V4SFmode name of the register.
11327 t -- likewise, print the V8SFmode name of the register.
11328 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11329 y -- print "st(0)" instead of "st" as a register.
11330 d -- print duplicated register operand for AVX instruction.
11331 D -- print condition for SSE cmp instruction.
11332 P -- if PIC, print an @PLT suffix.
11333 X -- don't print any sort of PIC '@' suffix for a symbol.
11334 & -- print some in-use local-dynamic symbol name.
11335 H -- print a memory address offset by 8; used for SSE high parts
11336 Y -- print condition for XOP pcom* instruction.
11337 + -- print a branch hint as 'cs' or 'ds' prefix
11338 ; -- print a semicolon (after prefixes due to a bug in older gas).
11341 void
11342 print_operand (FILE *file, rtx x, int code)
11344 if (code)
11346 switch (code)
11348 case '*':
11349 if (ASSEMBLER_DIALECT == ASM_ATT)
11350 putc ('*', file);
11351 return;
11353 case '&':
11355 const char *name = get_some_local_dynamic_name ();
11356 if (name == NULL)
11357 output_operand_lossage ("'%%&' used without any "
11358 "local dynamic TLS references");
11359 else
11360 assemble_name (file, name);
11361 return;
11364 case 'A':
11365 switch (ASSEMBLER_DIALECT)
11367 case ASM_ATT:
11368 putc ('*', file);
11369 break;
11371 case ASM_INTEL:
11372 /* Intel syntax. For absolute addresses, registers should not
11373 be surrounded by brackets. */
11374 if (!REG_P (x))
11376 putc ('[', file);
11377 PRINT_OPERAND (file, x, 0);
11378 putc (']', file);
11379 return;
11381 break;
11383 default:
11384 gcc_unreachable ();
11387 PRINT_OPERAND (file, x, 0);
11388 return;
11391 case 'L':
11392 if (ASSEMBLER_DIALECT == ASM_ATT)
11393 putc ('l', file);
11394 return;
11396 case 'W':
11397 if (ASSEMBLER_DIALECT == ASM_ATT)
11398 putc ('w', file);
11399 return;
11401 case 'B':
11402 if (ASSEMBLER_DIALECT == ASM_ATT)
11403 putc ('b', file);
11404 return;
11406 case 'Q':
11407 if (ASSEMBLER_DIALECT == ASM_ATT)
11408 putc ('l', file);
11409 return;
11411 case 'S':
11412 if (ASSEMBLER_DIALECT == ASM_ATT)
11413 putc ('s', file);
11414 return;
11416 case 'T':
11417 if (ASSEMBLER_DIALECT == ASM_ATT)
11418 putc ('t', file);
11419 return;
11421 case 'z':
11422 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11424 /* Opcodes don't get size suffixes if using Intel syntax. */
11425 if (ASSEMBLER_DIALECT == ASM_INTEL)
11426 return;
11428 switch (GET_MODE_SIZE (GET_MODE (x)))
11430 case 1:
11431 putc ('b', file);
11432 return;
11434 case 2:
11435 putc ('w', file);
11436 return;
11438 case 4:
11439 putc ('l', file);
11440 return;
11442 case 8:
11443 putc ('q', file);
11444 return;
11446 default:
11447 output_operand_lossage
11448 ("invalid operand size for operand code '%c'", code);
11449 return;
11453 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11454 warning
11455 (0, "non-integer operand used with operand code '%c'", code);
11456 /* FALLTHRU */
11458 case 'Z':
11459 /* 387 opcodes don't get size suffixes if using Intel syntax. */
11460 if (ASSEMBLER_DIALECT == ASM_INTEL)
11461 return;
11463 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11465 switch (GET_MODE_SIZE (GET_MODE (x)))
11467 case 2:
11468 #ifdef HAVE_AS_IX86_FILDS
11469 putc ('s', file);
11470 #endif
11471 return;
11473 case 4:
11474 putc ('l', file);
11475 return;
11477 case 8:
11478 #ifdef HAVE_AS_IX86_FILDQ
11479 putc ('q', file);
11480 #else
11481 fputs ("ll", file);
11482 #endif
11483 return;
11485 default:
11486 break;
11489 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11491 /* 387 opcodes don't get size suffixes
11492 if the operands are registers. */
11493 if (STACK_REG_P (x))
11494 return;
11496 switch (GET_MODE_SIZE (GET_MODE (x)))
11498 case 4:
11499 putc ('s', file);
11500 return;
11502 case 8:
11503 putc ('l', file);
11504 return;
11506 case 12:
11507 case 16:
11508 putc ('t', file);
11509 return;
11511 default:
11512 break;
11515 else
11517 output_operand_lossage
11518 ("invalid operand type used with operand code '%c'", code);
11519 return;
11522 output_operand_lossage
11523 ("invalid operand size for operand code '%c'", code);
11524 return;
11526 case 'd':
11527 case 'b':
11528 case 'w':
11529 case 'k':
11530 case 'q':
11531 case 'h':
11532 case 't':
11533 case 'y':
11534 case 'x':
11535 case 'X':
11536 case 'P':
11537 break;
11539 case 's':
11540 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11542 PRINT_OPERAND (file, x, 0);
11543 fputs (", ", file);
11545 return;
11547 case 'D':
11548 /* Little bit of braindamage here. The SSE compare instructions
11549 use completely different names for the comparisons than the
11550 fp conditional moves do. */
11551 if (TARGET_AVX)
11553 switch (GET_CODE (x))
11555 case EQ:
11556 fputs ("eq", file);
11557 break;
11558 case UNEQ:
11559 fputs ("eq_us", file);
11560 break;
11561 case LT:
11562 fputs ("lt", file);
11563 break;
11564 case UNLT:
11565 fputs ("nge", file);
11566 break;
11567 case LE:
11568 fputs ("le", file);
11569 break;
11570 case UNLE:
11571 fputs ("ngt", file);
11572 break;
11573 case UNORDERED:
11574 fputs ("unord", file);
11575 break;
11576 case NE:
11577 fputs ("neq", file);
11578 break;
11579 case LTGT:
11580 fputs ("neq_oq", file);
11581 break;
11582 case GE:
11583 fputs ("ge", file);
11584 break;
11585 case UNGE:
11586 fputs ("nlt", file);
11587 break;
11588 case GT:
11589 fputs ("gt", file);
11590 break;
11591 case UNGT:
11592 fputs ("nle", file);
11593 break;
11594 case ORDERED:
11595 fputs ("ord", file);
11596 break;
11597 default:
11598 output_operand_lossage ("operand is not a condition code, "
11599 "invalid operand code 'D'");
11600 return;
11603 else
11605 switch (GET_CODE (x))
11607 case EQ:
11608 case UNEQ:
11609 fputs ("eq", file);
11610 break;
11611 case LT:
11612 case UNLT:
11613 fputs ("lt", file);
11614 break;
11615 case LE:
11616 case UNLE:
11617 fputs ("le", file);
11618 break;
11619 case UNORDERED:
11620 fputs ("unord", file);
11621 break;
11622 case NE:
11623 case LTGT:
11624 fputs ("neq", file);
11625 break;
11626 case UNGE:
11627 case GE:
11628 fputs ("nlt", file);
11629 break;
11630 case UNGT:
11631 case GT:
11632 fputs ("nle", file);
11633 break;
11634 case ORDERED:
11635 fputs ("ord", file);
11636 break;
11637 default:
11638 output_operand_lossage ("operand is not a condition code, "
11639 "invalid operand code 'D'");
11640 return;
11643 return;
11644 case 'O':
11645 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11646 if (ASSEMBLER_DIALECT == ASM_ATT)
11648 switch (GET_MODE (x))
11650 case HImode: putc ('w', file); break;
11651 case SImode:
11652 case SFmode: putc ('l', file); break;
11653 case DImode:
11654 case DFmode: putc ('q', file); break;
11655 default: gcc_unreachable ();
11657 putc ('.', file);
11659 #endif
11660 return;
11661 case 'C':
11662 if (!COMPARISON_P (x))
11664 output_operand_lossage ("operand is neither a constant nor a "
11665 "condition code, invalid operand code "
11666 "'C'");
11667 return;
11669 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11670 return;
11671 case 'F':
11672 if (!COMPARISON_P (x))
11674 output_operand_lossage ("operand is neither a constant nor a "
11675 "condition code, invalid operand code "
11676 "'F'");
11677 return;
11679 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11680 if (ASSEMBLER_DIALECT == ASM_ATT)
11681 putc ('.', file);
11682 #endif
11683 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11684 return;
11686 /* Like above, but reverse condition */
11687 case 'c':
11688 /* Check to see if argument to %c is really a constant
11689 and not a condition code which needs to be reversed. */
11690 if (!COMPARISON_P (x))
11692 output_operand_lossage ("operand is neither a constant nor a "
11693 "condition code, invalid operand "
11694 "code 'c'");
11695 return;
11697 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11698 return;
11699 case 'f':
11700 if (!COMPARISON_P (x))
11702 output_operand_lossage ("operand is neither a constant nor a "
11703 "condition code, invalid operand "
11704 "code 'f'");
11705 return;
11707 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11708 if (ASSEMBLER_DIALECT == ASM_ATT)
11709 putc ('.', file);
11710 #endif
11711 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11712 return;
11714 case 'E':
11715 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
11716 return;
11718 case 'e':
11719 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
11720 return;
11722 case 'H':
11723 /* It doesn't actually matter what mode we use here, as we're
11724 only going to use this for printing. */
11725 x = adjust_address_nv (x, DImode, 8);
11726 break;
11728 case '+':
11730 rtx x;
11732 if (!optimize
11733 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11734 return;
11736 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11737 if (x)
11739 int pred_val = INTVAL (XEXP (x, 0));
11741 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11742 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11744 int taken = pred_val > REG_BR_PROB_BASE / 2;
11745 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11747 /* Emit hints only in the case where the default branch prediction
11748 heuristics would fail. */
11749 if (taken != cputaken)
11751 /* We use 3e (DS) prefix for taken branches and
11752 2e (CS) prefix for not taken branches. */
11753 if (taken)
11754 fputs ("ds ; ", file);
11755 else
11756 fputs ("cs ; ", file);
11760 return;
11763 case 'Y':
11764 switch (GET_CODE (x))
11766 case NE:
11767 fputs ("neq", file);
11768 break;
11769 case EQ:
11770 fputs ("eq", file);
11771 break;
11772 case GE:
11773 case GEU:
11774 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
11775 break;
11776 case GT:
11777 case GTU:
11778 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
11779 break;
11780 case LE:
11781 case LEU:
11782 fputs ("le", file);
11783 break;
11784 case LT:
11785 case LTU:
11786 fputs ("lt", file);
11787 break;
11788 case UNORDERED:
11789 fputs ("unord", file);
11790 break;
11791 case ORDERED:
11792 fputs ("ord", file);
11793 break;
11794 case UNEQ:
11795 fputs ("ueq", file);
11796 break;
11797 case UNGE:
11798 fputs ("nlt", file);
11799 break;
11800 case UNGT:
11801 fputs ("nle", file);
11802 break;
11803 case UNLE:
11804 fputs ("ule", file);
11805 break;
11806 case UNLT:
11807 fputs ("ult", file);
11808 break;
11809 case LTGT:
11810 fputs ("une", file);
11811 break;
11812 default:
11813 output_operand_lossage ("operand is not a condition code, "
11814 "invalid operand code 'Y'");
11815 return;
11817 return;
11819 case ';':
11820 #if TARGET_MACHO
11821 fputs (" ; ", file);
11822 #else
11823 putc (' ', file);
11824 #endif
11825 return;
11827 default:
11828 output_operand_lossage ("invalid operand code '%c'", code);
11832 if (REG_P (x))
11833 print_reg (x, code, file);
11835 else if (MEM_P (x))
11837 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11838 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11839 && GET_MODE (x) != BLKmode)
11841 const char * size;
11842 switch (GET_MODE_SIZE (GET_MODE (x)))
11844 case 1: size = "BYTE"; break;
11845 case 2: size = "WORD"; break;
11846 case 4: size = "DWORD"; break;
11847 case 8: size = "QWORD"; break;
11848 case 12: size = "TBYTE"; break;
11849 case 16:
11850 if (GET_MODE (x) == XFmode)
11851 size = "TBYTE";
11852 else
11853 size = "XMMWORD";
11854 break;
11855 case 32: size = "YMMWORD"; break;
11856 default:
11857 gcc_unreachable ();
11860 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11861 if (code == 'b')
11862 size = "BYTE";
11863 else if (code == 'w')
11864 size = "WORD";
11865 else if (code == 'k')
11866 size = "DWORD";
11868 fputs (size, file);
11869 fputs (" PTR ", file);
11872 x = XEXP (x, 0);
11873 /* Avoid (%rip) for call operands. */
11874 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11875 && !CONST_INT_P (x))
11876 output_addr_const (file, x);
11877 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11878 output_operand_lossage ("invalid constraints for operand");
11879 else
11880 output_address (x);
11883 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
11885 REAL_VALUE_TYPE r;
11886 long l;
11888 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11889 REAL_VALUE_TO_TARGET_SINGLE (r, l);
11891 if (ASSEMBLER_DIALECT == ASM_ATT)
11892 putc ('$', file);
11893 fprintf (file, "0x%08lx", (long unsigned int) l);
11896 /* These float cases don't actually occur as immediate operands. */
11897 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
11899 char dstr[30];
11901 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11902 fputs (dstr, file);
11905 else if (GET_CODE (x) == CONST_DOUBLE
11906 && GET_MODE (x) == XFmode)
11908 char dstr[30];
11910 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11911 fputs (dstr, file);
11914 else
11916 /* We have patterns that allow zero sets of memory, for instance.
11917 In 64-bit mode, we should probably support all 8-byte vectors,
11918 since we can in fact encode that into an immediate. */
11919 if (GET_CODE (x) == CONST_VECTOR)
11921 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
11922 x = const0_rtx;
11925 if (code != 'P')
11927 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
11929 if (ASSEMBLER_DIALECT == ASM_ATT)
11930 putc ('$', file);
11932 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
11933 || GET_CODE (x) == LABEL_REF)
11935 if (ASSEMBLER_DIALECT == ASM_ATT)
11936 putc ('$', file);
11937 else
11938 fputs ("OFFSET FLAT:", file);
11941 if (CONST_INT_P (x))
11942 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11943 else if (flag_pic)
11944 output_pic_addr_const (file, x, code);
11945 else
11946 output_addr_const (file, x);
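/* Illustrative sketch (assumed templates, not taken from i386.md): how the
   operand codes documented above typically appear in output templates.
   %z0 picks the b/w/l/q suffix from operand 0's mode, %b1 and %k0 force
   QImode and SImode register names, and %C2 expands to a condition
   suffix.  */
#if 0
static const char *const example_templates[] = {
  "mov%z0\t{%1, %0|%0, %1}",   /* size suffix taken from the operand mode  */
  "movzbl\t%b1, %k0",          /* explicit QImode source, SImode destination  */
  "set%C2\t%b0"                /* condition suffix from a comparison operand  */
};
#endif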
11950 /* Print a memory operand whose address is ADDR. */
11952 void
11953 print_operand_address (FILE *file, rtx addr)
11955 struct ix86_address parts;
11956 rtx base, index, disp;
11957 int scale;
11958 int ok = ix86_decompose_address (addr, &parts);
11960 gcc_assert (ok);
11962 base = parts.base;
11963 index = parts.index;
11964 disp = parts.disp;
11965 scale = parts.scale;
11967 switch (parts.seg)
11969 case SEG_DEFAULT:
11970 break;
11971 case SEG_FS:
11972 case SEG_GS:
11973 if (ASSEMBLER_DIALECT == ASM_ATT)
11974 putc ('%', file);
11975 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
11976 break;
11977 default:
11978 gcc_unreachable ();
11981 /* Use one byte shorter RIP relative addressing for 64bit mode. */
11982 if (TARGET_64BIT && !base && !index)
11984 rtx symbol = disp;
11986 if (GET_CODE (disp) == CONST
11987 && GET_CODE (XEXP (disp, 0)) == PLUS
11988 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11989 symbol = XEXP (XEXP (disp, 0), 0);
11991 if (GET_CODE (symbol) == LABEL_REF
11992 || (GET_CODE (symbol) == SYMBOL_REF
11993 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
11994 base = pc_rtx;
11996 if (!base && !index)
11998 /* A displacement-only address requires special attention. */
12000 if (CONST_INT_P (disp))
12002 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
12003 fputs ("ds:", file);
12004 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
12006 else if (flag_pic)
12007 output_pic_addr_const (file, disp, 0);
12008 else
12009 output_addr_const (file, disp);
12011 else
12013 if (ASSEMBLER_DIALECT == ASM_ATT)
12015 if (disp)
12017 if (flag_pic)
12018 output_pic_addr_const (file, disp, 0);
12019 else if (GET_CODE (disp) == LABEL_REF)
12020 output_asm_label (disp);
12021 else
12022 output_addr_const (file, disp);
12025 putc ('(', file);
12026 if (base)
12027 print_reg (base, 0, file);
12028 if (index)
12030 putc (',', file);
12031 print_reg (index, 0, file);
12032 if (scale != 1)
12033 fprintf (file, ",%d", scale);
12035 putc (')', file);
12037 else
12039 rtx offset = NULL_RTX;
12041 if (disp)
12043 /* Pull out the offset of a symbol; print any symbol itself. */
12044 if (GET_CODE (disp) == CONST
12045 && GET_CODE (XEXP (disp, 0)) == PLUS
12046 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
12048 offset = XEXP (XEXP (disp, 0), 1);
12049 disp = gen_rtx_CONST (VOIDmode,
12050 XEXP (XEXP (disp, 0), 0));
12053 if (flag_pic)
12054 output_pic_addr_const (file, disp, 0);
12055 else if (GET_CODE (disp) == LABEL_REF)
12056 output_asm_label (disp);
12057 else if (CONST_INT_P (disp))
12058 offset = disp;
12059 else
12060 output_addr_const (file, disp);
12063 putc ('[', file);
12064 if (base)
12066 print_reg (base, 0, file);
12067 if (offset)
12069 if (INTVAL (offset) >= 0)
12070 putc ('+', file);
12071 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12074 else if (offset)
12075 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
12076 else
12077 putc ('0', file);
12079 if (index)
12081 putc ('+', file);
12082 print_reg (index, 0, file);
12083 if (scale != 1)
12084 fprintf (file, "*%d", scale);
12086 putc (']', file);
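/* Illustrative sketch (assumed address with base %rax, index %rbx, scale 4
   and displacement 16): the same decomposed address as printed by the two
   dialect branches above.  */
#if 0
static const char *const example_address[] = {
  "16(%rax,%rbx,4)",   /* ASM_ATT  */
  "[rax+16+rbx*4]"     /* ASM_INTEL  */
};
#endif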
12091 bool
12092 output_addr_const_extra (FILE *file, rtx x)
12094 rtx op;
12096 if (GET_CODE (x) != UNSPEC)
12097 return false;
12099 op = XVECEXP (x, 0, 0);
12100 switch (XINT (x, 1))
12102 case UNSPEC_GOTTPOFF:
12103 output_addr_const (file, op);
12104 /* FIXME: This might be @TPOFF in Sun ld. */
12105 fputs ("@GOTTPOFF", file);
12106 break;
12107 case UNSPEC_TPOFF:
12108 output_addr_const (file, op);
12109 fputs ("@TPOFF", file);
12110 break;
12111 case UNSPEC_NTPOFF:
12112 output_addr_const (file, op);
12113 if (TARGET_64BIT)
12114 fputs ("@TPOFF", file);
12115 else
12116 fputs ("@NTPOFF", file);
12117 break;
12118 case UNSPEC_DTPOFF:
12119 output_addr_const (file, op);
12120 fputs ("@DTPOFF", file);
12121 break;
12122 case UNSPEC_GOTNTPOFF:
12123 output_addr_const (file, op);
12124 if (TARGET_64BIT)
12125 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
12126 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
12127 else
12128 fputs ("@GOTNTPOFF", file);
12129 break;
12130 case UNSPEC_INDNTPOFF:
12131 output_addr_const (file, op);
12132 fputs ("@INDNTPOFF", file);
12133 break;
12134 #if TARGET_MACHO
12135 case UNSPEC_MACHOPIC_OFFSET:
12136 output_addr_const (file, op);
12137 putc ('-', file);
12138 machopic_output_function_base_name (file);
12139 break;
12140 #endif
12142 default:
12143 return false;
12146 return true;
12149 /* Split one or more DImode RTL references into pairs of SImode
12150 references. The RTL can be REG, offsettable MEM, integer constant, or
12151 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
12152 split and "num" is its length. lo_half and hi_half are output arrays
12153 that parallel "operands". */
12155 void
12156 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12158 while (num--)
12160 rtx op = operands[num];
12162 /* simplify_subreg refuses to split volatile memory addresses,
12163 but we still have to handle them. */
12164 if (MEM_P (op))
12166 lo_half[num] = adjust_address (op, SImode, 0);
12167 hi_half[num] = adjust_address (op, SImode, 4);
12169 else
12171 lo_half[num] = simplify_gen_subreg (SImode, op,
12172 GET_MODE (op) == VOIDmode
12173 ? DImode : GET_MODE (op), 0);
12174 hi_half[num] = simplify_gen_subreg (SImode, op,
12175 GET_MODE (op) == VOIDmode
12176 ? DImode : GET_MODE (op), 4);
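/* Illustrative sketch (standalone C, excluded from the build): the MEM case
   above relies on the little-endian layout of DImode values, with the low
   SImode half at byte offset 0 and the high half at byte offset 4.  */
#if 0
#include <stdint.h>
#include <string.h>

static void
split_di_example (uint64_t value, uint32_t *lo, uint32_t *hi)
{
  unsigned char bytes[8];

  memcpy (bytes, &value, 8);   /* little-endian byte order assumed  */
  memcpy (lo, bytes, 4);       /* offset 0: low 32 bits  */
  memcpy (hi, bytes + 4, 4);   /* offset 4: high 32 bits  */
}
#endif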
12180 /* Split one or more TImode RTL references into pairs of DImode
12181 references. The RTL can be REG, offsettable MEM, integer constant, or
12182 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
12183 split and "num" is its length. lo_half and hi_half are output arrays
12184 that parallel "operands". */
12186 void
12187 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
12189 while (num--)
12191 rtx op = operands[num];
12193 /* simplify_subreg refuses to split volatile memory addresses, but we
12194 still have to handle them. */
12195 if (MEM_P (op))
12197 lo_half[num] = adjust_address (op, DImode, 0);
12198 hi_half[num] = adjust_address (op, DImode, 8);
12200 else
12202 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
12203 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
12208 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
12209 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
12210 is the expression of the binary operation. The output may either be
12211 emitted here, or returned to the caller, like all output_* functions.
12213 There is no guarantee that the operands are the same mode, as they
12214 might be within FLOAT or FLOAT_EXTEND expressions. */
12216 #ifndef SYSV386_COMPAT
12217 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
12218 wants to fix the assemblers because that causes incompatibility
12219 with gcc. No-one wants to fix gcc because that causes
12220 incompatibility with assemblers... You can use the option of
12221 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
12222 #define SYSV386_COMPAT 1
12223 #endif
12225 const char *
12226 output_387_binary_op (rtx insn, rtx *operands)
12228 static char buf[40];
12229 const char *p;
12230 const char *ssep;
12231 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
12233 #ifdef ENABLE_CHECKING
12234 /* Even if we do not want to check the inputs, this documents the input
12235 constraints, which helps in understanding the following code. */
12236 if (STACK_REG_P (operands[0])
12237 && ((REG_P (operands[1])
12238 && REGNO (operands[0]) == REGNO (operands[1])
12239 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
12240 || (REG_P (operands[2])
12241 && REGNO (operands[0]) == REGNO (operands[2])
12242 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
12243 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
12244 ; /* ok */
12245 else
12246 gcc_assert (is_sse);
12247 #endif
12249 switch (GET_CODE (operands[3]))
12251 case PLUS:
12252 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12253 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12254 p = "fiadd";
12255 else
12256 p = "fadd";
12257 ssep = "vadd";
12258 break;
12260 case MINUS:
12261 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12262 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12263 p = "fisub";
12264 else
12265 p = "fsub";
12266 ssep = "vsub";
12267 break;
12269 case MULT:
12270 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12271 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12272 p = "fimul";
12273 else
12274 p = "fmul";
12275 ssep = "vmul";
12276 break;
12278 case DIV:
12279 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12280 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12281 p = "fidiv";
12282 else
12283 p = "fdiv";
12284 ssep = "vdiv";
12285 break;
12287 default:
12288 gcc_unreachable ();
12291 if (is_sse)
12293 if (TARGET_AVX)
12295 strcpy (buf, ssep);
12296 if (GET_MODE (operands[0]) == SFmode)
12297 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12298 else
12299 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12301 else
12303 strcpy (buf, ssep + 1);
12304 if (GET_MODE (operands[0]) == SFmode)
12305 strcat (buf, "ss\t{%2, %0|%0, %2}");
12306 else
12307 strcat (buf, "sd\t{%2, %0|%0, %2}");
12309 return buf;
12311 strcpy (buf, p);
12313 switch (GET_CODE (operands[3]))
12315 case MULT:
12316 case PLUS:
12317 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12319 rtx temp = operands[2];
12320 operands[2] = operands[1];
12321 operands[1] = temp;
12324 /* We know operands[0] == operands[1]. */
12326 if (MEM_P (operands[2]))
12328 p = "%Z2\t%2";
12329 break;
12332 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12334 if (STACK_TOP_P (operands[0]))
12335 /* How is it that we are storing to a dead operand[2]?
12336 Well, presumably operands[1] is dead too. We can't
12337 store the result to st(0) as st(0) gets popped on this
12338 instruction. Instead store to operands[2] (which I
12339 think has to be st(1)). st(1) will be popped later.
12340 gcc <= 2.8.1 didn't have this check and generated
12341 assembly code that the Unixware assembler rejected. */
12342 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12343 else
12344 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12345 break;
12348 if (STACK_TOP_P (operands[0]))
12349 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12350 else
12351 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12352 break;
12354 case MINUS:
12355 case DIV:
12356 if (MEM_P (operands[1]))
12358 p = "r%Z1\t%1";
12359 break;
12362 if (MEM_P (operands[2]))
12364 p = "%Z2\t%2";
12365 break;
12368 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12370 #if SYSV386_COMPAT
12371 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12372 derived assemblers, confusingly reverse the direction of
12373 the operation for fsub{r} and fdiv{r} when the
12374 destination register is not st(0). The Intel assembler
12375 doesn't have this brain damage. Read !SYSV386_COMPAT to
12376 figure out what the hardware really does. */
12377 if (STACK_TOP_P (operands[0]))
12378 p = "{p\t%0, %2|rp\t%2, %0}";
12379 else
12380 p = "{rp\t%2, %0|p\t%0, %2}";
12381 #else
12382 if (STACK_TOP_P (operands[0]))
12383 /* As above for fmul/fadd, we can't store to st(0). */
12384 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12385 else
12386 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12387 #endif
12388 break;
12391 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12393 #if SYSV386_COMPAT
12394 if (STACK_TOP_P (operands[0]))
12395 p = "{rp\t%0, %1|p\t%1, %0}";
12396 else
12397 p = "{p\t%1, %0|rp\t%0, %1}";
12398 #else
12399 if (STACK_TOP_P (operands[0]))
12400 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12401 else
12402 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12403 #endif
12404 break;
12407 if (STACK_TOP_P (operands[0]))
12409 if (STACK_TOP_P (operands[1]))
12410 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12411 else
12412 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12413 break;
12415 else if (STACK_TOP_P (operands[1]))
12417 #if SYSV386_COMPAT
12418 p = "{\t%1, %0|r\t%0, %1}";
12419 #else
12420 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12421 #endif
12423 else
12425 #if SYSV386_COMPAT
12426 p = "{r\t%2, %0|\t%0, %2}";
12427 #else
12428 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12429 #endif
12431 break;
12433 default:
12434 gcc_unreachable ();
12437 strcat (buf, p);
12438 return buf;
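/* Illustrative sketch (assumed DFmode PLUS with operand 0 on the top of the
   x87 stack and no dead operands): typical templates returned above, before
   operand substitution.  */
#if 0
static const char *const example_fadd_templates[] = {
  "vaddsd\t{%2, %1, %0|%0, %1, %2}",   /* AVX three-operand form  */
  "addsd\t{%2, %0|%0, %2}",            /* SSE2 scalar double  */
  "fadd\t{%y2, %0|%0, %y2}"            /* x87: st(0) = st(0) op st(r2)  */
};
#endif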
12441 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
12444 ix86_mode_needed (int entity, rtx insn)
12446 enum attr_i387_cw mode;
12448 /* The mode UNINITIALIZED is used to store the control word after a
12449 function call or ASM pattern. The mode ANY specifies that the function
12450 has no requirements on the control word and makes no changes in the
12451 bits we are interested in. */
12453 if (CALL_P (insn)
12454 || (NONJUMP_INSN_P (insn)
12455 && (asm_noperands (PATTERN (insn)) >= 0
12456 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12457 return I387_CW_UNINITIALIZED;
12459 if (recog_memoized (insn) < 0)
12460 return I387_CW_ANY;
12462 mode = get_attr_i387_cw (insn);
12464 switch (entity)
12466 case I387_TRUNC:
12467 if (mode == I387_CW_TRUNC)
12468 return mode;
12469 break;
12471 case I387_FLOOR:
12472 if (mode == I387_CW_FLOOR)
12473 return mode;
12474 break;
12476 case I387_CEIL:
12477 if (mode == I387_CW_CEIL)
12478 return mode;
12479 break;
12481 case I387_MASK_PM:
12482 if (mode == I387_CW_MASK_PM)
12483 return mode;
12484 break;
12486 default:
12487 gcc_unreachable ();
12490 return I387_CW_ANY;
12493 /* Output code to initialize the control word copies used by the trunc?f?i
12494 and rounding patterns. CURRENT_MODE is set to the current control word,
12495 while NEW_MODE is set to the new control word. */
12497 void
12498 emit_i387_cw_initialization (int mode)
12500 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12501 rtx new_mode;
12503 enum ix86_stack_slot slot;
12505 rtx reg = gen_reg_rtx (HImode);
12507 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12508 emit_move_insn (reg, copy_rtx (stored_mode));
12510 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12511 || optimize_function_for_size_p (cfun))
12513 switch (mode)
12515 case I387_CW_TRUNC:
12516 /* round toward zero (truncate) */
12517 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12518 slot = SLOT_CW_TRUNC;
12519 break;
12521 case I387_CW_FLOOR:
12522 /* round down toward -oo */
12523 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12524 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12525 slot = SLOT_CW_FLOOR;
12526 break;
12528 case I387_CW_CEIL:
12529 /* round up toward +oo */
12530 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12531 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12532 slot = SLOT_CW_CEIL;
12533 break;
12535 case I387_CW_MASK_PM:
12536 /* mask precision exception for nearbyint() */
12537 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12538 slot = SLOT_CW_MASK_PM;
12539 break;
12541 default:
12542 gcc_unreachable ();
12545 else
12547 switch (mode)
12549 case I387_CW_TRUNC:
12550 /* round toward zero (truncate) */
12551 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12552 slot = SLOT_CW_TRUNC;
12553 break;
12555 case I387_CW_FLOOR:
12556 /* round down toward -oo */
12557 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12558 slot = SLOT_CW_FLOOR;
12559 break;
12561 case I387_CW_CEIL:
12562 /* round up toward +oo */
12563 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12564 slot = SLOT_CW_CEIL;
12565 break;
12567 case I387_CW_MASK_PM:
12568 /* mask precision exception for nearbyint() */
12569 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12570 slot = SLOT_CW_MASK_PM;
12571 break;
12573 default:
12574 gcc_unreachable ();
12578 gcc_assert (slot < MAX_386_STACK_LOCALS);
12580 new_mode = assign_386_stack_local (HImode, slot);
12581 emit_move_insn (new_mode, reg);
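/* Illustrative sketch (standalone defines, excluded from the build) of the
   x87 control word bits manipulated above: bits 10-11 form the rounding
   control field and bit 5 masks the precision exception.  */
#if 0
#define X87_CW_RC_MASK  0x0c00   /* rounding control field  */
#define X87_CW_RC_DOWN  0x0400   /* round toward -inf (floor)  */
#define X87_CW_RC_UP    0x0800   /* round toward +inf (ceil)  */
#define X87_CW_RC_CHOP  0x0c00   /* round toward zero (trunc)  */
#define X87_CW_PM       0x0020   /* precision exception mask  */
#endif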
12584 /* Output code for INSN to convert a float to a signed int. OPERANDS
12585 are the insn operands. The output may be [HSD]Imode and the input
12586 operand may be [SDX]Fmode. */
12588 const char *
12589 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12591 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12592 int dimode_p = GET_MODE (operands[0]) == DImode;
12593 int round_mode = get_attr_i387_cw (insn);
12595 /* Jump through a hoop or two for DImode, since the hardware has no
12596 non-popping instruction. We used to do this a different way, but
12597 that was somewhat fragile and broke with post-reload splitters. */
12598 if ((dimode_p || fisttp) && !stack_top_dies)
12599 output_asm_insn ("fld\t%y1", operands);
12601 gcc_assert (STACK_TOP_P (operands[1]));
12602 gcc_assert (MEM_P (operands[0]));
12603 gcc_assert (GET_MODE (operands[1]) != TFmode);
12605 if (fisttp)
12606 output_asm_insn ("fisttp%Z0\t%0", operands);
12607 else
12609 if (round_mode != I387_CW_ANY)
12610 output_asm_insn ("fldcw\t%3", operands);
12611 if (stack_top_dies || dimode_p)
12612 output_asm_insn ("fistp%Z0\t%0", operands);
12613 else
12614 output_asm_insn ("fist%Z0\t%0", operands);
12615 if (round_mode != I387_CW_ANY)
12616 output_asm_insn ("fldcw\t%2", operands);
12619 return "";
12622 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12623 have the values zero or one, indicates the ffreep insn's operand
12624 from the OPERANDS array. */
12626 static const char *
12627 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12629 if (TARGET_USE_FFREEP)
12630 #ifdef HAVE_AS_IX86_FFREEP
12631 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12632 #else
12634 static char retval[32];
12635 int regno = REGNO (operands[opno]);
12637 gcc_assert (FP_REGNO_P (regno));
12639 regno -= FIRST_STACK_REG;
12641 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12642 return retval;
12644 #endif
12646 return opno ? "fstp\t%y1" : "fstp\t%y0";
12650 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12651 should be used. UNORDERED_P is true when fucom should be used. */
12653 const char *
12654 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12656 int stack_top_dies;
12657 rtx cmp_op0, cmp_op1;
12658 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12660 if (eflags_p)
12662 cmp_op0 = operands[0];
12663 cmp_op1 = operands[1];
12665 else
12667 cmp_op0 = operands[1];
12668 cmp_op1 = operands[2];
12671 if (is_sse)
12673 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12674 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12675 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12676 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12678 if (GET_MODE (operands[0]) == SFmode)
12679 if (unordered_p)
12680 return &ucomiss[TARGET_AVX ? 0 : 1];
12681 else
12682 return &comiss[TARGET_AVX ? 0 : 1];
12683 else
12684 if (unordered_p)
12685 return &ucomisd[TARGET_AVX ? 0 : 1];
12686 else
12687 return &comisd[TARGET_AVX ? 0 : 1];
12690 gcc_assert (STACK_TOP_P (cmp_op0));
12692 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12694 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12696 if (stack_top_dies)
12698 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12699 return output_387_ffreep (operands, 1);
12701 else
12702 return "ftst\n\tfnstsw\t%0";
12705 if (STACK_REG_P (cmp_op1)
12706 && stack_top_dies
12707 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12708 && REGNO (cmp_op1) != FIRST_STACK_REG)
12710 /* If both the top of the 387 stack and the other operand (itself
12711 a stack register) die, then this must be a
12712 `fcompp' float compare. */
12714 if (eflags_p)
12716 /* There is no double popping fcomi variant. Fortunately,
12717 eflags is immune from the fstp's cc clobbering. */
12718 if (unordered_p)
12719 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12720 else
12721 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12722 return output_387_ffreep (operands, 0);
12724 else
12726 if (unordered_p)
12727 return "fucompp\n\tfnstsw\t%0";
12728 else
12729 return "fcompp\n\tfnstsw\t%0";
12732 else
12734 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12736 static const char * const alt[16] =
12738 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12739 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12740 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12741 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12743 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12744 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12745 NULL,
12746 NULL,
12748 "fcomi\t{%y1, %0|%0, %y1}",
12749 "fcomip\t{%y1, %0|%0, %y1}",
12750 "fucomi\t{%y1, %0|%0, %y1}",
12751 "fucomip\t{%y1, %0|%0, %y1}",
12753 NULL,
12754 NULL,
12755 NULL,
12756 NULL
12759 int mask;
12760 const char *ret;
12762 mask = eflags_p << 3;
12763 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12764 mask |= unordered_p << 1;
12765 mask |= stack_top_dies;
12767 gcc_assert (mask < 16);
12768 ret = alt[mask];
12769 gcc_assert (ret);
12771 return ret;
12775 void
12776 ix86_output_addr_vec_elt (FILE *file, int value)
12778 const char *directive = ASM_LONG;
12780 #ifdef ASM_QUAD
12781 if (TARGET_64BIT)
12782 directive = ASM_QUAD;
12783 #else
12784 gcc_assert (!TARGET_64BIT);
12785 #endif
12787 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12790 void
12791 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12793 const char *directive = ASM_LONG;
12795 #ifdef ASM_QUAD
12796 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12797 directive = ASM_QUAD;
12798 #else
12799 gcc_assert (!TARGET_64BIT);
12800 #endif
12801 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12802 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12803 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12804 directive, value, rel);
12805 else if (HAVE_AS_GOTOFF_IN_DATA)
12806 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12807 #if TARGET_MACHO
12808 else if (TARGET_MACHO)
12810 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12811 machopic_output_function_base_name (file);
12812 putc ('\n', file);
12814 #endif
12815 else
12816 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12817 GOT_SYMBOL_NAME, value);
12820 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12821 for the target. */
12823 void
12824 ix86_expand_clear (rtx dest)
12826 rtx tmp;
12828 /* We play register width games, which are only valid after reload. */
12829 gcc_assert (reload_completed);
12831 /* Avoid HImode and its attendant prefix byte. */
12832 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12833 dest = gen_rtx_REG (SImode, REGNO (dest));
12834 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12836 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12837 if (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ())
12839 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12840 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12843 emit_insn (tmp);
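/* Illustrative sketch (assumed SImode destination in %eax): the two forms
   the expansion above can produce.  */
#if 0
static const char *const example_clear_eax[] = {
  "xorl\t%eax, %eax",   /* shortest form; needs the flags clobber added above  */
  "movl\t$0, %eax"      /* TARGET_USE_MOV0 form; leaves the flags intact  */
};
#endif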
12846 /* X is an unchanging MEM. If it is a constant pool reference, return
12847 the constant pool rtx, else NULL. */
12850 maybe_get_pool_constant (rtx x)
12852 x = ix86_delegitimize_address (XEXP (x, 0));
12854 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12855 return get_pool_constant (x);
12857 return NULL_RTX;
12860 void
12861 ix86_expand_move (enum machine_mode mode, rtx operands[])
12863 rtx op0, op1;
12864 enum tls_model model;
12866 op0 = operands[0];
12867 op1 = operands[1];
12869 if (GET_CODE (op1) == SYMBOL_REF)
12871 model = SYMBOL_REF_TLS_MODEL (op1);
12872 if (model)
12874 op1 = legitimize_tls_address (op1, model, true);
12875 op1 = force_operand (op1, op0);
12876 if (op1 == op0)
12877 return;
12879 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12880 && SYMBOL_REF_DLLIMPORT_P (op1))
12881 op1 = legitimize_dllimport_symbol (op1, false);
12883 else if (GET_CODE (op1) == CONST
12884 && GET_CODE (XEXP (op1, 0)) == PLUS
12885 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
12887 rtx addend = XEXP (XEXP (op1, 0), 1);
12888 rtx symbol = XEXP (XEXP (op1, 0), 0);
12889 rtx tmp = NULL;
12891 model = SYMBOL_REF_TLS_MODEL (symbol);
12892 if (model)
12893 tmp = legitimize_tls_address (symbol, model, true);
12894 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12895 && SYMBOL_REF_DLLIMPORT_P (symbol))
12896 tmp = legitimize_dllimport_symbol (symbol, true);
12898 if (tmp)
12900 tmp = force_operand (tmp, NULL);
12901 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
12902 op0, 1, OPTAB_DIRECT);
12903 if (tmp == op0)
12904 return;
12908 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
12910 if (TARGET_MACHO && !TARGET_64BIT)
12912 #if TARGET_MACHO
12913 if (MACHOPIC_PURE)
12915 rtx temp = ((reload_in_progress
12916 || ((op0 && REG_P (op0))
12917 && mode == Pmode))
12918 ? op0 : gen_reg_rtx (Pmode));
12919 op1 = machopic_indirect_data_reference (op1, temp);
12920 op1 = machopic_legitimize_pic_address (op1, mode,
12921 temp == op1 ? 0 : temp);
12923 else if (MACHOPIC_INDIRECT)
12924 op1 = machopic_indirect_data_reference (op1, 0);
12925 if (op0 == op1)
12926 return;
12927 #endif
12929 else
12931 if (MEM_P (op0))
12932 op1 = force_reg (Pmode, op1);
12933 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
12935 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
12936 op1 = legitimize_pic_address (op1, reg);
12937 if (op0 == op1)
12938 return;
12942 else
12944 if (MEM_P (op0)
12945 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
12946 || !push_operand (op0, mode))
12947 && MEM_P (op1))
12948 op1 = force_reg (mode, op1);
12950 if (push_operand (op0, mode)
12951 && ! general_no_elim_operand (op1, mode))
12952 op1 = copy_to_mode_reg (mode, op1);
12954 /* Force large constants in 64bit compilation into a register
12955 to get them CSEed. */
12956 if (can_create_pseudo_p ()
12957 && (mode == DImode) && TARGET_64BIT
12958 && immediate_operand (op1, mode)
12959 && !x86_64_zext_immediate_operand (op1, VOIDmode)
12960 && !register_operand (op0, mode)
12961 && optimize)
12962 op1 = copy_to_mode_reg (mode, op1);
12964 if (can_create_pseudo_p ()
12965 && FLOAT_MODE_P (mode)
12966 && GET_CODE (op1) == CONST_DOUBLE)
12968 /* If we are loading a floating point constant to a register,
12969 force the value to memory now, since we'll get better code
12970 out of the back end. */
12972 op1 = validize_mem (force_const_mem (mode, op1));
12973 if (!register_operand (op0, mode))
12975 rtx temp = gen_reg_rtx (mode);
12976 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
12977 emit_move_insn (op0, temp);
12978 return;
12983 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12986 void
12987 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
12989 rtx op0 = operands[0], op1 = operands[1];
12990 unsigned int align = GET_MODE_ALIGNMENT (mode);
12992 /* Force constants other than zero into memory. We do not know how
12993 the instructions used to build constants modify the upper 64 bits
12994 of the register; once we have that information we may be able
12995 to handle some of them more efficiently. */
12996 if (can_create_pseudo_p ()
12997 && register_operand (op0, mode)
12998 && (CONSTANT_P (op1)
12999 || (GET_CODE (op1) == SUBREG
13000 && CONSTANT_P (SUBREG_REG (op1))))
13001 && !standard_sse_constant_p (op1))
13002 op1 = validize_mem (force_const_mem (mode, op1));
13004 /* We need to check memory alignment for SSE mode since an attribute
13005 can make operands unaligned. */
13006 if (can_create_pseudo_p ()
13007 && SSE_REG_MODE_P (mode)
13008 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
13009 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
13011 rtx tmp[2];
13013 /* ix86_expand_vector_move_misalign() does not like constants ... */
13014 if (CONSTANT_P (op1)
13015 || (GET_CODE (op1) == SUBREG
13016 && CONSTANT_P (SUBREG_REG (op1))))
13017 op1 = validize_mem (force_const_mem (mode, op1));
13019 /* ... nor both arguments in memory. */
13020 if (!register_operand (op0, mode)
13021 && !register_operand (op1, mode))
13022 op1 = force_reg (mode, op1);
13024 tmp[0] = op0; tmp[1] = op1;
13025 ix86_expand_vector_move_misalign (mode, tmp);
13026 return;
13029 /* Make operand1 a register if it isn't already. */
13030 if (can_create_pseudo_p ()
13031 && !register_operand (op0, mode)
13032 && !register_operand (op1, mode))
13034 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
13035 return;
13038 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
13041 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
13042 straight to ix86_expand_vector_move. */
13043 /* Code generation for scalar reg-reg moves of single and double precision data:
13044 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
13045 movaps reg, reg
13046 else
13047 movss reg, reg
13048 if (x86_sse_partial_reg_dependency == true)
13049 movapd reg, reg
13050 else
13051 movsd reg, reg
13053 Code generation for scalar loads of double precision data:
13054 if (x86_sse_split_regs == true)
13055 movlpd mem, reg (gas syntax)
13056 else
13057 movsd mem, reg
13059 Code generation for unaligned packed loads of single precision data
13060 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
13061 if (x86_sse_unaligned_move_optimal)
13062 movups mem, reg
13064 if (x86_sse_partial_reg_dependency == true)
13066 xorps reg, reg
13067 movlps mem, reg
13068 movhps mem+8, reg
13070 else
13072 movlps mem, reg
13073 movhps mem+8, reg
13076 Code generation for unaligned packed loads of double precision data
13077 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
13078 if (x86_sse_unaligned_move_optimal)
13079 movupd mem, reg
13081 if (x86_sse_split_regs == true)
13083 movlpd mem, reg
13084 movhpd mem+8, reg
13086 else
13088 movsd mem, reg
13089 movhpd mem+8, reg
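/* Illustrative sketch (standalone C using SSE2 intrinsics rather than GCC
   internals, excluded from the build): the two unaligned V2DF load
   strategies described above.  */
#if 0
#include <emmintrin.h>

static __m128d
load_unaligned_v2df (const double *p)
{
  /* x86_sse_unaligned_move_optimal: a single movupd.  */
  return _mm_loadu_pd (p);
}

static __m128d
load_split_v2df (const double *p)
{
  /* x86_sse_split_regs: build the vector a half at a time,
     matching the movlpd/movhpd sequence.  */
  __m128d x = _mm_setzero_pd ();
  x = _mm_loadl_pd (x, p);
  x = _mm_loadh_pd (x, p + 1);
  return x;
}
#endif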
13093 void
13094 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
13096 rtx op0, op1, m;
13098 op0 = operands[0];
13099 op1 = operands[1];
13101 if (TARGET_AVX)
13103 switch (GET_MODE_CLASS (mode))
13105 case MODE_VECTOR_INT:
13106 case MODE_INT:
13107 switch (GET_MODE_SIZE (mode))
13109 case 16:
13110 op0 = gen_lowpart (V16QImode, op0);
13111 op1 = gen_lowpart (V16QImode, op1);
13112 emit_insn (gen_avx_movdqu (op0, op1));
13113 break;
13114 case 32:
13115 op0 = gen_lowpart (V32QImode, op0);
13116 op1 = gen_lowpart (V32QImode, op1);
13117 emit_insn (gen_avx_movdqu256 (op0, op1));
13118 break;
13119 default:
13120 gcc_unreachable ();
13122 break;
13123 case MODE_VECTOR_FLOAT:
13124 op0 = gen_lowpart (mode, op0);
13125 op1 = gen_lowpart (mode, op1);
13127 switch (mode)
13129 case V4SFmode:
13130 emit_insn (gen_avx_movups (op0, op1));
13131 break;
13132 case V8SFmode:
13133 emit_insn (gen_avx_movups256 (op0, op1));
13134 break;
13135 case V2DFmode:
13136 emit_insn (gen_avx_movupd (op0, op1));
13137 break;
13138 case V4DFmode:
13139 emit_insn (gen_avx_movupd256 (op0, op1));
13140 break;
13141 default:
13142 gcc_unreachable ();
13144 break;
13146 default:
13147 gcc_unreachable ();
13150 return;
13153 if (MEM_P (op1))
13155 /* If we're optimizing for size, movups is the smallest. */
13156 if (optimize_insn_for_size_p ())
13158 op0 = gen_lowpart (V4SFmode, op0);
13159 op1 = gen_lowpart (V4SFmode, op1);
13160 emit_insn (gen_sse_movups (op0, op1));
13161 return;
13164 /* ??? If we have typed data, then it would appear that using
13165 movdqu is the only way to get unaligned data loaded with
13166 integer type. */
13167 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13169 op0 = gen_lowpart (V16QImode, op0);
13170 op1 = gen_lowpart (V16QImode, op1);
13171 emit_insn (gen_sse2_movdqu (op0, op1));
13172 return;
13175 if (TARGET_SSE2 && mode == V2DFmode)
13177 rtx zero;
13179 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13181 op0 = gen_lowpart (V2DFmode, op0);
13182 op1 = gen_lowpart (V2DFmode, op1);
13183 emit_insn (gen_sse2_movupd (op0, op1));
13184 return;
13187 /* When SSE registers are split into halves, we can avoid
13188 writing to the top half twice. */
13189 if (TARGET_SSE_SPLIT_REGS)
13191 emit_clobber (op0);
13192 zero = op0;
13194 else
13196 /* ??? Not sure about the best option for the Intel chips.
13197 The following would seem to satisfy; the register is
13198 entirely cleared, breaking the dependency chain. We
13199 then store to the upper half, with a dependency depth
13200 of one. A rumor has it that Intel recommends two movsd
13201 followed by an unpacklpd, but this is unconfirmed. And
13202 given that the dependency depth of the unpacklpd would
13203 still be one, I'm not sure why this would be better. */
13204 zero = CONST0_RTX (V2DFmode);
13207 m = adjust_address (op1, DFmode, 0);
13208 emit_insn (gen_sse2_loadlpd (op0, zero, m));
13209 m = adjust_address (op1, DFmode, 8);
13210 emit_insn (gen_sse2_loadhpd (op0, op0, m));
13212 else
13214 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
13216 op0 = gen_lowpart (V4SFmode, op0);
13217 op1 = gen_lowpart (V4SFmode, op1);
13218 emit_insn (gen_sse_movups (op0, op1));
13219 return;
13222 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
13223 emit_move_insn (op0, CONST0_RTX (mode));
13224 else
13225 emit_clobber (op0);
13227 if (mode != V4SFmode)
13228 op0 = gen_lowpart (V4SFmode, op0);
13229 m = adjust_address (op1, V2SFmode, 0);
13230 emit_insn (gen_sse_loadlps (op0, op0, m));
13231 m = adjust_address (op1, V2SFmode, 8);
13232 emit_insn (gen_sse_loadhps (op0, op0, m));
13235 else if (MEM_P (op0))
13237 /* If we're optimizing for size, movups is the smallest. */
13238 if (optimize_insn_for_size_p ())
13240 op0 = gen_lowpart (V4SFmode, op0);
13241 op1 = gen_lowpart (V4SFmode, op1);
13242 emit_insn (gen_sse_movups (op0, op1));
13243 return;
13246 /* ??? Similar to above, only less clear because of quote
13247 typeless stores unquote. */
13248 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
13249 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
13251 op0 = gen_lowpart (V16QImode, op0);
13252 op1 = gen_lowpart (V16QImode, op1);
13253 emit_insn (gen_sse2_movdqu (op0, op1));
13254 return;
13257 if (TARGET_SSE2 && mode == V2DFmode)
13259 m = adjust_address (op0, DFmode, 0);
13260 emit_insn (gen_sse2_storelpd (m, op1));
13261 m = adjust_address (op0, DFmode, 8);
13262 emit_insn (gen_sse2_storehpd (m, op1));
13264 else
13266 if (mode != V4SFmode)
13267 op1 = gen_lowpart (V4SFmode, op1);
13268 m = adjust_address (op0, V2SFmode, 0);
13269 emit_insn (gen_sse_storelps (m, op1));
13270 m = adjust_address (op0, V2SFmode, 8);
13271 emit_insn (gen_sse_storehps (m, op1));
13274 else
13275 gcc_unreachable ();
13278 /* Expand a push in MODE. This is some mode for which we do not support
13279 proper push instructions, at least from the registers that we expect
13280 the value to live in. */
13282 void
13283 ix86_expand_push (enum machine_mode mode, rtx x)
13285 rtx tmp;
13287 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13288 GEN_INT (-GET_MODE_SIZE (mode)),
13289 stack_pointer_rtx, 1, OPTAB_DIRECT);
13290 if (tmp != stack_pointer_rtx)
13291 emit_move_insn (stack_pointer_rtx, tmp);
13293 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13295 /* When we push an operand onto the stack, it has to be aligned at least
13296 at the function argument boundary. However, since we don't have
13297 the argument type, we can't determine the actual argument
13298 boundary. */
13299 emit_move_insn (tmp, x);
13302 /* Helper function of ix86_fixup_binary_operands to canonicalize
13303 operand order. Returns true if the operands should be swapped. */
13305 static bool
13306 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13307 rtx operands[])
13309 rtx dst = operands[0];
13310 rtx src1 = operands[1];
13311 rtx src2 = operands[2];
13313 /* If the operation is not commutative, we can't do anything. */
13314 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13315 return false;
13317 /* Highest priority is that src1 should match dst. */
13318 if (rtx_equal_p (dst, src1))
13319 return false;
13320 if (rtx_equal_p (dst, src2))
13321 return true;
13323 /* Next highest priority is that immediate constants come second. */
13324 if (immediate_operand (src2, mode))
13325 return false;
13326 if (immediate_operand (src1, mode))
13327 return true;
13329 /* Lowest priority is that memory references should come second. */
13330 if (MEM_P (src2))
13331 return false;
13332 if (MEM_P (src1))
13333 return true;
13335 return false;
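/* Illustrative sketch (assumed operands): for a commutative PLUS the
   priorities above mean, e.g.:
     (set r1 (plus r2 r1))             -> swap so that src1 matches the dest
     (set r1 (plus (const_int 4) r2))  -> swap so the constant comes second
     (set r1 (plus (mem) r2))          -> swap so the memory ref comes second  */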
13339 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13340 destination to use for the operation. If different from the true
13341 destination in operands[0], a copy operation will be required. */
13344 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13345 rtx operands[])
13347 rtx dst = operands[0];
13348 rtx src1 = operands[1];
13349 rtx src2 = operands[2];
13351 /* Canonicalize operand order. */
13352 if (ix86_swap_binary_operands_p (code, mode, operands))
13354 rtx temp;
13356 /* It is invalid to swap operands of different modes. */
13357 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13359 temp = src1;
13360 src1 = src2;
13361 src2 = temp;
13364 /* Both source operands cannot be in memory. */
13365 if (MEM_P (src1) && MEM_P (src2))
13367 /* Optimization: Only read from memory once. */
13368 if (rtx_equal_p (src1, src2))
13370 src2 = force_reg (mode, src2);
13371 src1 = src2;
13373 else
13374 src2 = force_reg (mode, src2);
13377 /* If the destination is memory, and we do not have matching source
13378 operands, do things in registers. */
13379 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13380 dst = gen_reg_rtx (mode);
13382 /* Source 1 cannot be a constant. */
13383 if (CONSTANT_P (src1))
13384 src1 = force_reg (mode, src1);
13386 /* Source 1 cannot be a non-matching memory. */
13387 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13388 src1 = force_reg (mode, src1);
13390 operands[1] = src1;
13391 operands[2] = src2;
13392 return dst;
13395 /* Similarly, but assume that the destination has already been
13396 set up properly. */
13398 void
13399 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13400 enum machine_mode mode, rtx operands[])
13402 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13403 gcc_assert (dst == operands[0]);
13406 /* Attempt to expand a binary operator. Make the expansion closer to the
13407 actual machine, than just general_operand, which will allow 3 separate
13408 memory references (one output, two inputs) in a single insn. */
13410 void
13411 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13412 rtx operands[])
13414 rtx src1, src2, dst, op, clob;
13416 dst = ix86_fixup_binary_operands (code, mode, operands);
13417 src1 = operands[1];
13418 src2 = operands[2];
13420 /* Emit the instruction. */
13422 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13423 if (reload_in_progress)
13425 /* Reload doesn't know about the flags register, and doesn't know that
13426 it doesn't want to clobber it. We can only do this with PLUS. */
13427 gcc_assert (code == PLUS);
13428 emit_insn (op);
13430 else
13432 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13433 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13436 /* Fix up the destination if needed. */
13437 if (dst != operands[0])
13438 emit_move_insn (operands[0], dst);
13441 /* Return TRUE or FALSE depending on whether the binary operator meets the
13442 appropriate constraints. */
13445 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13446 rtx operands[3])
13448 rtx dst = operands[0];
13449 rtx src1 = operands[1];
13450 rtx src2 = operands[2];
13452 /* Both source operands cannot be in memory. */
13453 if (MEM_P (src1) && MEM_P (src2))
13454 return 0;
13456 /* Canonicalize operand order for commutative operators. */
13457 if (ix86_swap_binary_operands_p (code, mode, operands))
13459 rtx temp = src1;
13460 src1 = src2;
13461 src2 = temp;
13464 /* If the destination is memory, we must have a matching source operand. */
13465 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13466 return 0;
13468 /* Source 1 cannot be a constant. */
13469 if (CONSTANT_P (src1))
13470 return 0;
13472 /* Source 1 cannot be a non-matching memory. */
13473 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13474 return 0;
13476 return 1;
13479 /* Attempt to expand a unary operator. Make the expansion closer to the
13480 actual machine, than just general_operand, which will allow 2 separate
13481 memory references (one output, one input) in a single insn. */
13483 void
13484 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13485 rtx operands[])
13487 int matching_memory;
13488 rtx src, dst, op, clob;
13490 dst = operands[0];
13491 src = operands[1];
13493 /* If the destination is memory, and we do not have matching source
13494 operands, do things in registers. */
13495 matching_memory = 0;
13496 if (MEM_P (dst))
13498 if (rtx_equal_p (dst, src))
13499 matching_memory = 1;
13500 else
13501 dst = gen_reg_rtx (mode);
13504 /* When source operand is memory, destination must match. */
13505 if (MEM_P (src) && !matching_memory)
13506 src = force_reg (mode, src);
13508 /* Emit the instruction. */
13510 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13511 if (reload_in_progress || code == NOT)
13513 /* Reload doesn't know about the flags register, and doesn't know that
13514 it doesn't want to clobber it. */
13515 gcc_assert (code == NOT);
13516 emit_insn (op);
13518 else
13520 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13521 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13524 /* Fix up the destination if needed. */
13525 if (dst != operands[0])
13526 emit_move_insn (operands[0], dst);
13529 #define LEA_SEARCH_THRESHOLD 12
13531 /* Search backward for non-agu definition of register number REGNO1
13532 or register number REGNO2 in INSN's basic block until
13533 1. Pass LEA_SEARCH_THRESHOLD instructions, or
13534 2. Reach BB boundary, or
13535 3. Reach agu definition.
13536 Returns the distance between the non-agu definition point and INSN.
13537 If no definition point, returns -1. */
13539 static int
13540 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13541 rtx insn)
13543 basic_block bb = BLOCK_FOR_INSN (insn);
13544 int distance = 0;
13545 df_ref *def_rec;
13546 enum attr_type insn_type;
13548 if (insn != BB_HEAD (bb))
13550 rtx prev = PREV_INSN (insn);
13551 while (prev && distance < LEA_SEARCH_THRESHOLD)
13553 if (INSN_P (prev))
13555 distance++;
13556 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13557 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13558 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13559 && (regno1 == DF_REF_REGNO (*def_rec)
13560 || regno2 == DF_REF_REGNO (*def_rec)))
13562 insn_type = get_attr_type (prev);
13563 if (insn_type != TYPE_LEA)
13564 goto done;
13567 if (prev == BB_HEAD (bb))
13568 break;
13569 prev = PREV_INSN (prev);
13573 if (distance < LEA_SEARCH_THRESHOLD)
13575 edge e;
13576 edge_iterator ei;
13577 bool simple_loop = false;
13579 FOR_EACH_EDGE (e, ei, bb->preds)
13580 if (e->src == bb)
13582 simple_loop = true;
13583 break;
13586 if (simple_loop)
13588 rtx prev = BB_END (bb);
13589 while (prev
13590 && prev != insn
13591 && distance < LEA_SEARCH_THRESHOLD)
13593 if (INSN_P (prev))
13595 distance++;
13596 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13597 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13598 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13599 && (regno1 == DF_REF_REGNO (*def_rec)
13600 || regno2 == DF_REF_REGNO (*def_rec)))
13602 insn_type = get_attr_type (prev);
13603 if (insn_type != TYPE_LEA)
13604 goto done;
13607 prev = PREV_INSN (prev);
13612 distance = -1;
13614 done:
13615 /* get_attr_type may modify recog data. We want to make sure
13616 that recog data is valid for instruction INSN, on which
13617 distance_non_agu_define is called. INSN is unchanged here. */
13618 extract_insn_cached (insn);
13619 return distance;
13622 /* Return the distance between INSN and the next insn that uses
13623 register number REGNO0 in a memory address. Return -1 if no such
13624 use is found within LEA_SEARCH_THRESHOLD instructions or if REGNO0 is set. */
13626 static int
13627 distance_agu_use (unsigned int regno0, rtx insn)
13629 basic_block bb = BLOCK_FOR_INSN (insn);
13630 int distance = 0;
13631 df_ref *def_rec;
13632 df_ref *use_rec;
13634 if (insn != BB_END (bb))
13636 rtx next = NEXT_INSN (insn);
13637 while (next && distance < LEA_SEARCH_THRESHOLD)
13639 if (INSN_P (next))
13641 distance++;
13643 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13644 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13645 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13646 && regno0 == DF_REF_REGNO (*use_rec))
13648 /* Return DISTANCE if OP0 is used in memory
13649 address in NEXT. */
13650 return distance;
13653 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13654 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13655 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13656 && regno0 == DF_REF_REGNO (*def_rec))
13658 /* Return -1 if OP0 is set in NEXT. */
13659 return -1;
13662 if (next == BB_END (bb))
13663 break;
13664 next = NEXT_INSN (next);
13668 if (distance < LEA_SEARCH_THRESHOLD)
13670 edge e;
13671 edge_iterator ei;
13672 bool simple_loop = false;
13674 FOR_EACH_EDGE (e, ei, bb->succs)
13675 if (e->dest == bb)
13677 simple_loop = true;
13678 break;
13681 if (simple_loop)
13683 rtx next = BB_HEAD (bb);
13684 while (next
13685 && next != insn
13686 && distance < LEA_SEARCH_THRESHOLD)
13688 if (INSN_P (next))
13690 distance++;
13692 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13693 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13694 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13695 && regno0 == DF_REF_REGNO (*use_rec))
13697 /* Return DISTANCE if OP0 is used in memory
13698 address in NEXT. */
13699 return distance;
13702 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13703 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13704 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13705 && regno0 == DF_REF_REGNO (*def_rec))
13707 /* Return -1 if OP0 is set in NEXT. */
13708 return -1;
13712 next = NEXT_INSN (next);
13717 return -1;
13720 /* Define this macro to tune LEA priority vs ADD; it takes effect when
13721 there is a choice between emitting LEA or ADD.
13722 Negative value: ADD is preferred over LEA
13723 Zero: Neutral
13724 Positive value: LEA is preferred over ADD */
13725 #define IX86_LEA_PRIORITY 2
13727 /* Return true if it is ok to optimize an ADD operation to an LEA
13728 operation in order to avoid consuming the flags register. For
13729 processors like Atom, if the destination register of the LEA holds an
13730 actual address which will be used soon, LEA is better; otherwise ADD
13731 is better. */
13733 bool
13734 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13735 rtx insn, rtx operands[])
13737 unsigned int regno0 = true_regnum (operands[0]);
13738 unsigned int regno1 = true_regnum (operands[1]);
13739 unsigned int regno2;
13741 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13742 return regno0 != regno1;
13744 regno2 = true_regnum (operands[2]);
13746 /* If a = b + c, (a!=b && a!=c), must use lea form. */
13747 if (regno0 != regno1 && regno0 != regno2)
13748 return true;
13749 else
13751 int dist_define, dist_use;
13752 dist_define = distance_non_agu_define (regno1, regno2, insn);
13753 if (dist_define <= 0)
13754 return true;
13756 /* If this insn has both a backward non-AGU dependence and a forward
13757 AGU dependence, the one with the shorter distance takes effect. */
13758 dist_use = distance_agu_use (regno0, insn);
13759 if (dist_use <= 0
13760 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
13761 return false;
13763 return true;
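/* For example, with IX86_LEA_PRIORITY of 2: if the nearest non-AGU
   definition of the sources is 3 insns back and the first AGU use of the
   result is 8 insns ahead, then 3 + 2 < 8 and false is returned (ADD is
   used); if the AGU use were only 4 insns ahead, true would be returned
   and the LEA form would be kept.  */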
13767 /* Return true if destination reg of SET_BODY is shift count of
13768 USE_BODY. */
13770 static bool
13771 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13773 rtx set_dest;
13774 rtx shift_rtx;
13775 int i;
13777 /* Retrieve destination of SET_BODY. */
13778 switch (GET_CODE (set_body))
13780 case SET:
13781 set_dest = SET_DEST (set_body);
13782 if (!set_dest || !REG_P (set_dest))
13783 return false;
13784 break;
13785 case PARALLEL:
13786 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13787 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13788 use_body))
13789 return true;
13790 default:
13791 return false;
13792 break;
13795 /* Retrieve shift count of USE_BODY. */
13796 switch (GET_CODE (use_body))
13798 case SET:
13799 shift_rtx = XEXP (use_body, 1);
13800 break;
13801 case PARALLEL:
13802 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13803 if (ix86_dep_by_shift_count_body (set_body,
13804 XVECEXP (use_body, 0, i)))
13805 return true;
13806 default:
13807 return false;
13808 break;
13811 if (shift_rtx
13812 && (GET_CODE (shift_rtx) == ASHIFT
13813 || GET_CODE (shift_rtx) == LSHIFTRT
13814 || GET_CODE (shift_rtx) == ASHIFTRT
13815 || GET_CODE (shift_rtx) == ROTATE
13816 || GET_CODE (shift_rtx) == ROTATERT))
13818 rtx shift_count = XEXP (shift_rtx, 1);
13820 /* Return true if shift count is dest of SET_BODY. */
13821 if (REG_P (shift_count)
13822 && true_regnum (set_dest) == true_regnum (shift_count))
13823 return true;
13826 return false;
13829 /* Return true if destination reg of SET_INSN is shift count of
13830 USE_INSN. */
13832 bool
13833 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13835 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13836 PATTERN (use_insn));
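/* For example, this returns true when SET_INSN sets the register that
   USE_INSN then uses as the variable count of an ashift, lshiftrt,
   ashiftrt, rotate or rotatert operation.  */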
13839 /* Return TRUE or FALSE depending on whether the unary operator meets the
13840 appropriate constraints. */
13843 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13844 enum machine_mode mode ATTRIBUTE_UNUSED,
13845 rtx operands[2] ATTRIBUTE_UNUSED)
13847 /* If one of operands is memory, source and destination must match. */
13848 if ((MEM_P (operands[0])
13849 || MEM_P (operands[1]))
13850 && ! rtx_equal_p (operands[0], operands[1]))
13851 return FALSE;
13852 return TRUE;
13855 /* Return TRUE if the operands to a vec_interleave_{high,low}v2df
13856 are ok, keeping in mind the possible movddup alternative. */
13858 bool
13859 ix86_vec_interleave_v2df_operator_ok (rtx operands[3], bool high)
13861 if (MEM_P (operands[0]))
13862 return rtx_equal_p (operands[0], operands[1 + high]);
13863 if (MEM_P (operands[1]) && MEM_P (operands[2]))
13864 return TARGET_SSE3 && rtx_equal_p (operands[1], operands[2]);
13865 return true;
13868 /* Post-reload splitter for converting an SF or DFmode value in an
13869 SSE register into an unsigned SImode. */
13871 void
13872 ix86_split_convert_uns_si_sse (rtx operands[])
13874 enum machine_mode vecmode;
13875 rtx value, large, zero_or_two31, input, two31, x;
13877 large = operands[1];
13878 zero_or_two31 = operands[2];
13879 input = operands[3];
13880 two31 = operands[4];
13881 vecmode = GET_MODE (large);
13882 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
13884 /* Load up the value into the low element. We must ensure that the other
13885 elements are valid floats -- zero is the easiest such value. */
13886 if (MEM_P (input))
13888 if (vecmode == V4SFmode)
13889 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
13890 else
13891 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
13893 else
13895 input = gen_rtx_REG (vecmode, REGNO (input));
13896 emit_move_insn (value, CONST0_RTX (vecmode));
13897 if (vecmode == V4SFmode)
13898 emit_insn (gen_sse_movss (value, value, input));
13899 else
13900 emit_insn (gen_sse2_movsd (value, value, input));
13903 emit_move_insn (large, two31);
13904 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
13906 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
13907 emit_insn (gen_rtx_SET (VOIDmode, large, x));
13909 x = gen_rtx_AND (vecmode, zero_or_two31, large);
13910 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
13912 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
13913 emit_insn (gen_rtx_SET (VOIDmode, value, x));
13915 large = gen_rtx_REG (V4SImode, REGNO (large));
13916 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
13918 x = gen_rtx_REG (V4SImode, REGNO (value));
13919 if (vecmode == V4SFmode)
13920 emit_insn (gen_sse2_cvttps2dq (x, value));
13921 else
13922 emit_insn (gen_sse2_cvttpd2dq (x, value));
13923 value = x;
13925 emit_insn (gen_xorv4si3 (value, value, large));
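/* For example, converting 3000000000.0 (which is >= 2**31): the comparison
   sets the "large" mask, 2**31 is subtracted leaving 852516352.0, the
   truncating conversion gives 852516352, and xoring with the mask shifted
   into bit 31 restores the unsigned result 3000000000.  */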
13928 /* Convert an unsigned DImode value into a DFmode, using only SSE.
13929 Expects the 64-bit DImode to be supplied in a pair of integral
13930 registers. Requires SSE2; will use SSE3 if available. For x86_32,
13931 -mfpmath=sse, !optimize_size only. */
13933 void
13934 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
13936 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
13937 rtx int_xmm, fp_xmm;
13938 rtx biases, exponents;
13939 rtx x;
13941 int_xmm = gen_reg_rtx (V4SImode);
13942 if (TARGET_INTER_UNIT_MOVES)
13943 emit_insn (gen_movdi_to_sse (int_xmm, input));
13944 else if (TARGET_SSE_SPLIT_REGS)
13946 emit_clobber (int_xmm);
13947 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
13949 else
13951 x = gen_reg_rtx (V2DImode);
13952 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
13953 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
13956 x = gen_rtx_CONST_VECTOR (V4SImode,
13957 gen_rtvec (4, GEN_INT (0x43300000UL),
13958 GEN_INT (0x45300000UL),
13959 const0_rtx, const0_rtx));
13960 exponents = validize_mem (force_const_mem (V4SImode, x));
13962 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
13963 emit_insn (gen_vec_interleave_lowv4si (int_xmm, int_xmm, exponents));
13965 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
13966 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
13967 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
13968 (0x1.0p84 + double(fp_value_hi_xmm)).
13969 Note these exponents differ by 32. */
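/* For example, for the input 0x0000000100000002 the low word 2 becomes
   0x1.0p52 + 2.0 and the high word 1 becomes 0x1.0p84 + 0x1.0p32; after
   the bias subtraction below the two doubles are 2.0 and 4294967296.0,
   and their sum 4294967298.0 is exactly the 64-bit input.  */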
13971 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
13973 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
13974 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
13975 real_ldexp (&bias_lo_rvt, &dconst1, 52);
13976 real_ldexp (&bias_hi_rvt, &dconst1, 84);
13977 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
13978 x = const_double_from_real_value (bias_hi_rvt, DFmode);
13979 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
13980 biases = validize_mem (force_const_mem (V2DFmode, biases));
13981 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
13983 /* Add the upper and lower DFmode values together. */
13984 if (TARGET_SSE3)
13985 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
13986 else
13988 x = copy_to_mode_reg (V2DFmode, fp_xmm);
13989 emit_insn (gen_vec_interleave_highv2df (fp_xmm, fp_xmm, fp_xmm));
13990 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
13993 ix86_expand_vector_extract (false, target, fp_xmm, 0);
13996 /* Not used, but eases macroization of patterns. */
13997 void
13998 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
13999 rtx input ATTRIBUTE_UNUSED)
14001 gcc_unreachable ();
14004 /* Convert an unsigned SImode value into a DFmode. Only currently used
14005 for SSE, but applicable anywhere. */
14007 void
14008 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
14010 REAL_VALUE_TYPE TWO31r;
14011 rtx x, fp;
14013 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
14014 NULL, 1, OPTAB_DIRECT);
14016 fp = gen_reg_rtx (DFmode);
14017 emit_insn (gen_floatsidf2 (fp, x));
14019 real_ldexp (&TWO31r, &dconst1, 31);
14020 x = const_double_from_real_value (TWO31r, DFmode);
14022 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
14023 if (x != target)
14024 emit_move_insn (target, x);
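/* For example, for the input 0xffffffff the PLUS above wraps to
   0x7fffffff, which converts exactly to 2147483647.0; adding 2**31 back
   yields 4294967295.0, the unsigned value of the input.  */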
14027 /* Convert a signed DImode value into a DFmode. Only used for SSE in
14028 32-bit mode; otherwise we have a direct convert instruction. */
14030 void
14031 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
14033 REAL_VALUE_TYPE TWO32r;
14034 rtx fp_lo, fp_hi, x;
14036 fp_lo = gen_reg_rtx (DFmode);
14037 fp_hi = gen_reg_rtx (DFmode);
14039 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
14041 real_ldexp (&TWO32r, &dconst1, 32);
14042 x = const_double_from_real_value (TWO32r, DFmode);
14043 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
14045 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
14047 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
14048 0, OPTAB_DIRECT);
14049 if (x != target)
14050 emit_move_insn (target, x);
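/* In other words, the result is hi * 2**32 + (unsigned) lo, where the high
   SImode half is converted as a signed value (supplying the sign of the
   whole DImode input) and the low half is converted as unsigned.  */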
14053 /* Convert an unsigned SImode value into a SFmode, using only SSE.
14054 For x86_32, -mfpmath=sse, !optimize_size only. */
14055 void
14056 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
14058 REAL_VALUE_TYPE ONE16r;
14059 rtx fp_hi, fp_lo, int_hi, int_lo, x;
14061 real_ldexp (&ONE16r, &dconst1, 16);
14062 x = const_double_from_real_value (ONE16r, SFmode);
14063 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
14064 NULL, 0, OPTAB_DIRECT);
14065 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
14066 NULL, 0, OPTAB_DIRECT);
14067 fp_hi = gen_reg_rtx (SFmode);
14068 fp_lo = gen_reg_rtx (SFmode);
14069 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
14070 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
14071 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
14072 0, OPTAB_DIRECT);
14073 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
14074 0, OPTAB_DIRECT);
14075 if (!rtx_equal_p (target, fp_hi))
14076 emit_move_insn (target, fp_hi);
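/* For example, the input 0x12345678 is split into 0x1234 and 0x5678, which
   convert exactly to 4660.0 and 22136.0; the multiply by 65536.0 is also
   exact, so the only rounding happens in the final addition of
   4660*65536 + 22136 = 305419896, giving the correctly rounded result.  */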
14079 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
14080 then replicate the value for all elements of the vector
14081 register. */
14084 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
14086 rtvec v;
14087 switch (mode)
14089 case SImode:
14090 gcc_assert (vect);
14091 v = gen_rtvec (4, value, value, value, value);
14092 return gen_rtx_CONST_VECTOR (V4SImode, v);
14094 case DImode:
14095 gcc_assert (vect);
14096 v = gen_rtvec (2, value, value);
14097 return gen_rtx_CONST_VECTOR (V2DImode, v);
14099 case SFmode:
14100 if (vect)
14101 v = gen_rtvec (4, value, value, value, value);
14102 else
14103 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
14104 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
14105 return gen_rtx_CONST_VECTOR (V4SFmode, v);
14107 case DFmode:
14108 if (vect)
14109 v = gen_rtvec (2, value, value);
14110 else
14111 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
14112 return gen_rtx_CONST_VECTOR (V2DFmode, v);
14114 default:
14115 gcc_unreachable ();
14119 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
14120 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
14121 for an SSE register. If VECT is true, then replicate the mask for
14122 all elements of the vector register. If INVERT is true, then create
14123 a mask excluding the sign bit. */
14126 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
14128 enum machine_mode vec_mode, imode;
14129 HOST_WIDE_INT hi, lo;
14130 int shift = 63;
14131 rtx v;
14132 rtx mask;
14134 /* Find the sign bit, sign extended to 2*HWI. */
14135 switch (mode)
14137 case SImode:
14138 case SFmode:
14139 imode = SImode;
14140 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
14141 lo = 0x80000000, hi = lo < 0;
14142 break;
14144 case DImode:
14145 case DFmode:
14146 imode = DImode;
14147 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
14148 if (HOST_BITS_PER_WIDE_INT >= 64)
14149 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
14150 else
14151 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14152 break;
14154 case TImode:
14155 case TFmode:
14156 vec_mode = VOIDmode;
14157 if (HOST_BITS_PER_WIDE_INT >= 64)
14159 imode = TImode;
14160 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
14162 else
14164 rtvec vec;
14166 imode = DImode;
14167 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
14169 if (invert)
14171 lo = ~lo, hi = ~hi;
14172 v = constm1_rtx;
14174 else
14175 v = const0_rtx;
14177 mask = immed_double_const (lo, hi, imode);
14179 vec = gen_rtvec (2, v, mask);
14180 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
14181 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
14183 return v;
14185 break;
14187 default:
14188 gcc_unreachable ();
14191 if (invert)
14192 lo = ~lo, hi = ~hi;
14194 /* Force this value into the low part of a fp vector constant. */
14195 mask = immed_double_const (lo, hi, imode);
14196 mask = gen_lowpart (mode, mask);
14198 if (vec_mode == VOIDmode)
14199 return force_reg (mode, mask);
14201 v = ix86_build_const_vector (mode, vect, mask);
14202 return force_reg (vec_mode, v);
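/* For example, for DFmode with INVERT false the low element of the mask is
   0x8000000000000000 (just the sign bit); with INVERT true it is
   0x7fffffffffffffff, the mask used for absolute value.  If VECT is true
   the mask value is replicated into every element of the vector.  */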
14205 /* Generate code for floating point ABS or NEG. */
14207 void
14208 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
14209 rtx operands[])
14211 rtx mask, set, use, clob, dst, src;
14212 bool use_sse = false;
14213 bool vector_mode = VECTOR_MODE_P (mode);
14214 enum machine_mode elt_mode = mode;
14216 if (vector_mode)
14218 elt_mode = GET_MODE_INNER (mode);
14219 use_sse = true;
14221 else if (mode == TFmode)
14222 use_sse = true;
14223 else if (TARGET_SSE_MATH)
14224 use_sse = SSE_FLOAT_MODE_P (mode);
14226 /* NEG and ABS performed with SSE use bitwise mask operations.
14227 Create the appropriate mask now. */
14228 if (use_sse)
14229 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
14230 else
14231 mask = NULL_RTX;
14233 dst = operands[0];
14234 src = operands[1];
14236 if (vector_mode)
14238 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
14239 set = gen_rtx_SET (VOIDmode, dst, set);
14240 emit_insn (set);
14242 else
14244 set = gen_rtx_fmt_e (code, mode, src);
14245 set = gen_rtx_SET (VOIDmode, dst, set);
14246 if (mask)
14248 use = gen_rtx_USE (VOIDmode, mask);
14249 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
14250 emit_insn (gen_rtx_PARALLEL (VOIDmode,
14251 gen_rtvec (3, set, use, clob)));
14253 else
14254 emit_insn (set);
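/* The SSE forms of NEG and ABS are purely bitwise: NEG xors the value
   with the sign-bit mask and ABS ands it with the inverted mask built
   above.  */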
14258 /* Expand a copysign operation. Special case operand 0 being a constant. */
14260 void
14261 ix86_expand_copysign (rtx operands[])
14263 enum machine_mode mode;
14264 rtx dest, op0, op1, mask, nmask;
14266 dest = operands[0];
14267 op0 = operands[1];
14268 op1 = operands[2];
14270 mode = GET_MODE (dest);
14272 if (GET_CODE (op0) == CONST_DOUBLE)
14274 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14276 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14277 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14279 if (mode == SFmode || mode == DFmode)
14281 enum machine_mode vmode;
14283 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14285 if (op0 == CONST0_RTX (mode))
14286 op0 = CONST0_RTX (vmode);
14287 else
14289 rtx v = ix86_build_const_vector (mode, false, op0);
14291 op0 = force_reg (vmode, v);
14294 else if (op0 != CONST0_RTX (mode))
14295 op0 = force_reg (mode, op0);
14297 mask = ix86_build_signbit_mask (mode, 0, 0);
14299 if (mode == SFmode)
14300 copysign_insn = gen_copysignsf3_const;
14301 else if (mode == DFmode)
14302 copysign_insn = gen_copysigndf3_const;
14303 else
14304 copysign_insn = gen_copysigntf3_const;
14306 emit_insn (copysign_insn (dest, op0, op1, mask));
14308 else
14310 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14312 nmask = ix86_build_signbit_mask (mode, 0, 1);
14313 mask = ix86_build_signbit_mask (mode, 0, 0);
14315 if (mode == SFmode)
14316 copysign_insn = gen_copysignsf3_var;
14317 else if (mode == DFmode)
14318 copysign_insn = gen_copysigndf3_var;
14319 else
14320 copysign_insn = gen_copysigntf3_var;
14322 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
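/* The splitters below implement copysign (x, y) as
   (x & ~SIGNMASK) | (y & SIGNMASK), using only bitwise operations on the
   masks built above.  */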
14326 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14327 be a constant, and so has already been expanded into a vector constant. */
14329 void
14330 ix86_split_copysign_const (rtx operands[])
14332 enum machine_mode mode, vmode;
14333 rtx dest, op0, mask, x;
14335 dest = operands[0];
14336 op0 = operands[1];
14337 mask = operands[3];
14339 mode = GET_MODE (dest);
14340 vmode = GET_MODE (mask);
14342 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14343 x = gen_rtx_AND (vmode, dest, mask);
14344 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14346 if (op0 != CONST0_RTX (vmode))
14348 x = gen_rtx_IOR (vmode, dest, op0);
14349 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14353 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14354 so we have to do two masks. */
14356 void
14357 ix86_split_copysign_var (rtx operands[])
14359 enum machine_mode mode, vmode;
14360 rtx dest, scratch, op0, op1, mask, nmask, x;
14362 dest = operands[0];
14363 scratch = operands[1];
14364 op0 = operands[2];
14365 op1 = operands[3];
14366 nmask = operands[4];
14367 mask = operands[5];
14369 mode = GET_MODE (dest);
14370 vmode = GET_MODE (mask);
14372 if (rtx_equal_p (op0, op1))
14374 /* Shouldn't happen often (it's useless, obviously), but when it does
14375 we'd generate incorrect code if we continue below. */
14376 emit_move_insn (dest, op0);
14377 return;
14380 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14382 gcc_assert (REGNO (op1) == REGNO (scratch));
14384 x = gen_rtx_AND (vmode, scratch, mask);
14385 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14387 dest = mask;
14388 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14389 x = gen_rtx_NOT (vmode, dest);
14390 x = gen_rtx_AND (vmode, x, op0);
14391 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14393 else
14395 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14397 x = gen_rtx_AND (vmode, scratch, mask);
14399 else /* alternative 2,4 */
14401 gcc_assert (REGNO (mask) == REGNO (scratch));
14402 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14403 x = gen_rtx_AND (vmode, scratch, op1);
14405 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14407 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14409 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14410 x = gen_rtx_AND (vmode, dest, nmask);
14412 else /* alternative 3,4 */
14414 gcc_assert (REGNO (nmask) == REGNO (dest));
14415 dest = nmask;
14416 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14417 x = gen_rtx_AND (vmode, dest, op0);
14419 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14422 x = gen_rtx_IOR (vmode, dest, scratch);
14423 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14426 /* Return TRUE or FALSE depending on whether the first SET in INSN
14427 has source and destination with matching CC modes, and that the
14428 CC mode is at least as constrained as REQ_MODE. */
14431 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14433 rtx set;
14434 enum machine_mode set_mode;
14436 set = PATTERN (insn);
14437 if (GET_CODE (set) == PARALLEL)
14438 set = XVECEXP (set, 0, 0);
14439 gcc_assert (GET_CODE (set) == SET);
14440 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14442 set_mode = GET_MODE (SET_DEST (set));
14443 switch (set_mode)
14445 case CCNOmode:
14446 if (req_mode != CCNOmode
14447 && (req_mode != CCmode
14448 || XEXP (SET_SRC (set), 1) != const0_rtx))
14449 return 0;
14450 break;
14451 case CCmode:
14452 if (req_mode == CCGCmode)
14453 return 0;
14454 /* FALLTHRU */
14455 case CCGCmode:
14456 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14457 return 0;
14458 /* FALLTHRU */
14459 case CCGOCmode:
14460 if (req_mode == CCZmode)
14461 return 0;
14462 /* FALLTHRU */
14463 case CCAmode:
14464 case CCCmode:
14465 case CCOmode:
14466 case CCSmode:
14467 case CCZmode:
14468 break;
14470 default:
14471 gcc_unreachable ();
14474 return (GET_MODE (SET_SRC (set)) == set_mode);
14477 /* Generate insn patterns to do an integer compare of OPERANDS. */
14479 static rtx
14480 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14482 enum machine_mode cmpmode;
14483 rtx tmp, flags;
14485 cmpmode = SELECT_CC_MODE (code, op0, op1);
14486 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14488 /* This is very simple, but making the interface the same as in the
14489 FP case makes the rest of the code easier. */
14490 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14491 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14493 /* Return the test that should be put into the flags user, i.e.
14494 the bcc, scc, or cmov instruction. */
14495 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14498 /* Figure out whether to use ordered or unordered fp comparisons.
14499 Return the appropriate mode to use. */
14501 enum machine_mode
14502 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14504 /* ??? In order to make all comparisons reversible, we do all comparisons
14505 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14506 all forms of trapping and nontrapping comparisons, we can make inequality
14507 comparisons trapping again, since it results in better code when using
14508 FCOM based compares. */
14509 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14512 enum machine_mode
14513 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14515 enum machine_mode mode = GET_MODE (op0);
14517 if (SCALAR_FLOAT_MODE_P (mode))
14519 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14520 return ix86_fp_compare_mode (code);
14523 switch (code)
14525 /* Only zero flag is needed. */
14526 case EQ: /* ZF=0 */
14527 case NE: /* ZF!=0 */
14528 return CCZmode;
14529 /* Codes needing carry flag. */
14530 case GEU: /* CF=0 */
14531 case LTU: /* CF=1 */
14532 /* Detect overflow checks. They need just the carry flag. */
14533 if (GET_CODE (op0) == PLUS
14534 && rtx_equal_p (op1, XEXP (op0, 0)))
14535 return CCCmode;
14536 else
14537 return CCmode;
14538 case GTU: /* CF=0 & ZF=0 */
14539 case LEU: /* CF=1 | ZF=1 */
14540 /* Detect overflow checks. They need just the carry flag. */
14541 if (GET_CODE (op0) == MINUS
14542 && rtx_equal_p (op1, XEXP (op0, 0)))
14543 return CCCmode;
14544 else
14545 return CCmode;
14546 /* Codes possibly doable only with sign flag when
14547 comparing against zero. */
14548 case GE: /* SF=OF or SF=0 */
14549 case LT: /* SF<>OF or SF=1 */
14550 if (op1 == const0_rtx)
14551 return CCGOCmode;
14552 else
14553 /* For other cases the carry flag is not required. */
14554 return CCGCmode;
14555 /* Codes doable only with the sign flag when comparing
14556 against zero, but we lack a jump instruction for that,
14557 so we need to use relational tests against the overflow
14558 flag, which thus needs to be zero. */
14559 case GT: /* ZF=0 & SF=OF */
14560 case LE: /* ZF=1 | SF<>OF */
14561 if (op1 == const0_rtx)
14562 return CCNOmode;
14563 else
14564 return CCGCmode;
14565 /* The strcmp pattern does (use flags), and combine may ask us for the
14566 proper mode. */
14567 case USE:
14568 return CCmode;
14569 default:
14570 gcc_unreachable ();
14574 /* Return the fixed registers used for condition codes. */
14576 static bool
14577 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14579 *p1 = FLAGS_REG;
14580 *p2 = FPSR_REG;
14581 return true;
14584 /* If two condition code modes are compatible, return a condition code
14585 mode which is compatible with both. Otherwise, return
14586 VOIDmode. */
14588 static enum machine_mode
14589 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14591 if (m1 == m2)
14592 return m1;
14594 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14595 return VOIDmode;
14597 if ((m1 == CCGCmode && m2 == CCGOCmode)
14598 || (m1 == CCGOCmode && m2 == CCGCmode))
14599 return CCGCmode;
14601 switch (m1)
14603 default:
14604 gcc_unreachable ();
14606 case CCmode:
14607 case CCGCmode:
14608 case CCGOCmode:
14609 case CCNOmode:
14610 case CCAmode:
14611 case CCCmode:
14612 case CCOmode:
14613 case CCSmode:
14614 case CCZmode:
14615 switch (m2)
14617 default:
14618 return VOIDmode;
14620 case CCmode:
14621 case CCGCmode:
14622 case CCGOCmode:
14623 case CCNOmode:
14624 case CCAmode:
14625 case CCCmode:
14626 case CCOmode:
14627 case CCSmode:
14628 case CCZmode:
14629 return CCmode;
14632 case CCFPmode:
14633 case CCFPUmode:
14634 /* These are only compatible with themselves, which we already
14635 checked above. */
14636 return VOIDmode;
14641 /* Return a comparison we can do and that it is equivalent to
14642 swap_condition (code) apart possibly from orderedness.
14643 But, never change orderedness if TARGET_IEEE_FP, returning
14644 UNKNOWN in that case if necessary. */
14646 static enum rtx_code
14647 ix86_fp_swap_condition (enum rtx_code code)
14649 switch (code)
14651 case GT: /* GTU - CF=0 & ZF=0 */
14652 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14653 case GE: /* GEU - CF=0 */
14654 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14655 case UNLT: /* LTU - CF=1 */
14656 return TARGET_IEEE_FP ? UNKNOWN : GT;
14657 case UNLE: /* LEU - CF=1 | ZF=1 */
14658 return TARGET_IEEE_FP ? UNKNOWN : GE;
14659 default:
14660 return swap_condition (code);
14664 /* Return the cost of comparison CODE using the best strategy for performance.
14665 All of the following functions use the number of instructions as the cost metric.
14666 In the future this should be tweaked to compute bytes for optimize_size and
14667 take into account the performance of various instructions on various CPUs. */
14669 static int
14670 ix86_fp_comparison_cost (enum rtx_code code)
14672 int arith_cost;
14674 /* The cost of code using bit-twiddling on %ah. */
14675 switch (code)
14677 case UNLE:
14678 case UNLT:
14679 case LTGT:
14680 case GT:
14681 case GE:
14682 case UNORDERED:
14683 case ORDERED:
14684 case UNEQ:
14685 arith_cost = 4;
14686 break;
14687 case LT:
14688 case NE:
14689 case EQ:
14690 case UNGE:
14691 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14692 break;
14693 case LE:
14694 case UNGT:
14695 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14696 break;
14697 default:
14698 gcc_unreachable ();
14701 switch (ix86_fp_comparison_strategy (code))
14703 case IX86_FPCMP_COMI:
14704 return arith_cost > 4 ? 3 : 2;
14705 case IX86_FPCMP_SAHF:
14706 return arith_cost > 4 ? 4 : 3;
14707 default:
14708 return arith_cost;
14712 /* Return the strategy to use for floating-point comparisons. We assume that fcomi
14713 is always preferable where available, since that is also true when looking at size
14714 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14716 enum ix86_fpcmp_strategy
14717 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14719 /* Do fcomi/sahf based test when profitable. */
14721 if (TARGET_CMOVE)
14722 return IX86_FPCMP_COMI;
14724 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14725 return IX86_FPCMP_SAHF;
14727 return IX86_FPCMP_ARITH;
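/* IX86_FPCMP_COMI corresponds to fcomi/fucomi, IX86_FPCMP_SAHF to fnstsw
   followed by sahf, and IX86_FPCMP_ARITH to fnstsw followed by bit tests
   on %ah, as done in ix86_expand_fp_compare below.  */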
14730 /* Swap, force into registers, or otherwise massage the two operands
14731 to a fp comparison. The operands are updated in place; the new
14732 comparison code is returned. */
14734 static enum rtx_code
14735 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14737 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14738 rtx op0 = *pop0, op1 = *pop1;
14739 enum machine_mode op_mode = GET_MODE (op0);
14740 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14742 /* All of the unordered compare instructions only work on registers.
14743 The same is true of the fcomi compare instructions. The XFmode
14744 compare instructions require registers except when comparing
14745 against zero or when converting operand 1 from fixed point to
14746 floating point. */
14748 if (!is_sse
14749 && (fpcmp_mode == CCFPUmode
14750 || (op_mode == XFmode
14751 && ! (standard_80387_constant_p (op0) == 1
14752 || standard_80387_constant_p (op1) == 1)
14753 && GET_CODE (op1) != FLOAT)
14754 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14756 op0 = force_reg (op_mode, op0);
14757 op1 = force_reg (op_mode, op1);
14759 else
14761 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14762 things around if they appear profitable, otherwise force op0
14763 into a register. */
14765 if (standard_80387_constant_p (op0) == 0
14766 || (MEM_P (op0)
14767 && ! (standard_80387_constant_p (op1) == 0
14768 || MEM_P (op1))))
14770 enum rtx_code new_code = ix86_fp_swap_condition (code);
14771 if (new_code != UNKNOWN)
14773 rtx tmp;
14774 tmp = op0, op0 = op1, op1 = tmp;
14775 code = new_code;
14779 if (!REG_P (op0))
14780 op0 = force_reg (op_mode, op0);
14782 if (CONSTANT_P (op1))
14784 int tmp = standard_80387_constant_p (op1);
14785 if (tmp == 0)
14786 op1 = validize_mem (force_const_mem (op_mode, op1));
14787 else if (tmp == 1)
14789 if (TARGET_CMOVE)
14790 op1 = force_reg (op_mode, op1);
14792 else
14793 op1 = force_reg (op_mode, op1);
14797 /* Try to rearrange the comparison to make it cheaper. */
14798 if (ix86_fp_comparison_cost (code)
14799 > ix86_fp_comparison_cost (swap_condition (code))
14800 && (REG_P (op1) || can_create_pseudo_p ()))
14802 rtx tmp;
14803 tmp = op0, op0 = op1, op1 = tmp;
14804 code = swap_condition (code);
14805 if (!REG_P (op0))
14806 op0 = force_reg (op_mode, op0);
14809 *pop0 = op0;
14810 *pop1 = op1;
14811 return code;
14814 /* Convert comparison codes we use to represent FP comparison to integer
14815 code that will result in proper branch. Return UNKNOWN if no such code
14816 is available. */
14818 enum rtx_code
14819 ix86_fp_compare_code_to_integer (enum rtx_code code)
14821 switch (code)
14823 case GT:
14824 return GTU;
14825 case GE:
14826 return GEU;
14827 case ORDERED:
14828 case UNORDERED:
14829 return code;
14830 break;
14831 case UNEQ:
14832 return EQ;
14833 break;
14834 case UNLT:
14835 return LTU;
14836 break;
14837 case UNLE:
14838 return LEU;
14839 break;
14840 case LTGT:
14841 return NE;
14842 break;
14843 default:
14844 return UNKNOWN;
14848 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14850 static rtx
14851 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14853 enum machine_mode fpcmp_mode, intcmp_mode;
14854 rtx tmp, tmp2;
14856 fpcmp_mode = ix86_fp_compare_mode (code);
14857 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14859 /* Do fcomi/sahf based test when profitable. */
14860 switch (ix86_fp_comparison_strategy (code))
14862 case IX86_FPCMP_COMI:
14863 intcmp_mode = fpcmp_mode;
14864 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14865 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14866 tmp);
14867 emit_insn (tmp);
14868 break;
14870 case IX86_FPCMP_SAHF:
14871 intcmp_mode = fpcmp_mode;
14872 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14873 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14874 tmp);
14876 if (!scratch)
14877 scratch = gen_reg_rtx (HImode);
14878 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14879 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14880 break;
14882 case IX86_FPCMP_ARITH:
14883 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
14884 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14885 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
14886 if (!scratch)
14887 scratch = gen_reg_rtx (HImode);
14888 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
14890 /* In the unordered case, we have to check C2 for NaN's, which
14891 doesn't happen to work out to anything nice combination-wise.
14892 So do some bit twiddling on the value we've got in AH to come
14893 up with an appropriate set of condition codes. */
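/* After the fnstsw above, AH holds the FPU status word bits C0 (0x01),
   C2 (0x04) and C3 (0x40); e.g. the 0x45 masks below test C3|C2|C0, and
   0x04 alone tests the C2 bit that signals an unordered result.  */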
14895 intcmp_mode = CCNOmode;
14896 switch (code)
14898 case GT:
14899 case UNGT:
14900 if (code == GT || !TARGET_IEEE_FP)
14902 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14903 code = EQ;
14905 else
14907 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14908 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14909 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
14910 intcmp_mode = CCmode;
14911 code = GEU;
14913 break;
14914 case LT:
14915 case UNLT:
14916 if (code == LT && TARGET_IEEE_FP)
14918 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14919 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
14920 intcmp_mode = CCmode;
14921 code = EQ;
14923 else
14925 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
14926 code = NE;
14928 break;
14929 case GE:
14930 case UNGE:
14931 if (code == GE || !TARGET_IEEE_FP)
14933 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
14934 code = EQ;
14936 else
14938 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14939 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
14940 code = NE;
14942 break;
14943 case LE:
14944 case UNLE:
14945 if (code == LE && TARGET_IEEE_FP)
14947 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14948 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14949 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14950 intcmp_mode = CCmode;
14951 code = LTU;
14953 else
14955 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14956 code = NE;
14958 break;
14959 case EQ:
14960 case UNEQ:
14961 if (code == EQ && TARGET_IEEE_FP)
14963 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14964 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14965 intcmp_mode = CCmode;
14966 code = EQ;
14968 else
14970 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14971 code = NE;
14973 break;
14974 case NE:
14975 case LTGT:
14976 if (code == NE && TARGET_IEEE_FP)
14978 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14979 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
14980 GEN_INT (0x40)));
14981 code = NE;
14983 else
14985 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14986 code = EQ;
14988 break;
14990 case UNORDERED:
14991 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14992 code = NE;
14993 break;
14994 case ORDERED:
14995 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14996 code = EQ;
14997 break;
14999 default:
15000 gcc_unreachable ();
15002 break;
15004 default:
15005 gcc_unreachable();
15008 /* Return the test that should be put into the flags user, i.e.
15009 the bcc, scc, or cmov instruction. */
15010 return gen_rtx_fmt_ee (code, VOIDmode,
15011 gen_rtx_REG (intcmp_mode, FLAGS_REG),
15012 const0_rtx);
15016 ix86_expand_compare (enum rtx_code code)
15018 rtx op0, op1, ret;
15019 op0 = ix86_compare_op0;
15020 op1 = ix86_compare_op1;
15022 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
15023 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
15025 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
15027 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
15028 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15030 else
15031 ret = ix86_expand_int_compare (code, op0, op1);
15033 return ret;
15036 void
15037 ix86_expand_branch (enum rtx_code code, rtx label)
15039 rtx tmp;
15041 switch (GET_MODE (ix86_compare_op0))
15043 case SFmode:
15044 case DFmode:
15045 case XFmode:
15046 case QImode:
15047 case HImode:
15048 case SImode:
15049 simple:
15050 tmp = ix86_expand_compare (code);
15051 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
15052 gen_rtx_LABEL_REF (VOIDmode, label),
15053 pc_rtx);
15054 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
15055 return;
15057 case DImode:
15058 if (TARGET_64BIT)
15059 goto simple;
15060 case TImode:
15061 /* Expand DImode branch into multiple compare+branch. */
15063 rtx lo[2], hi[2], label2;
15064 enum rtx_code code1, code2, code3;
15065 enum machine_mode submode;
15067 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
15069 tmp = ix86_compare_op0;
15070 ix86_compare_op0 = ix86_compare_op1;
15071 ix86_compare_op1 = tmp;
15072 code = swap_condition (code);
15074 if (GET_MODE (ix86_compare_op0) == DImode)
15076 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
15077 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
15078 submode = SImode;
15080 else
15082 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
15083 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
15084 submode = DImode;
15087 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
15088 avoid two branches. This costs one extra insn, so disable when
15089 optimizing for size. */
15091 if ((code == EQ || code == NE)
15092 && (!optimize_insn_for_size_p ()
15093 || hi[1] == const0_rtx || lo[1] == const0_rtx))
15095 rtx xor0, xor1;
15097 xor1 = hi[0];
15098 if (hi[1] != const0_rtx)
15099 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
15100 NULL_RTX, 0, OPTAB_WIDEN);
15102 xor0 = lo[0];
15103 if (lo[1] != const0_rtx)
15104 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
15105 NULL_RTX, 0, OPTAB_WIDEN);
15107 tmp = expand_binop (submode, ior_optab, xor1, xor0,
15108 NULL_RTX, 0, OPTAB_WIDEN);
15110 ix86_compare_op0 = tmp;
15111 ix86_compare_op1 = const0_rtx;
15112 ix86_expand_branch (code, label);
15113 return;
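/* For example, a DImode "a == b" on ia32 is compiled this way as two xors,
   an ior and a single compare against zero with one branch, instead of two
   separate compare-and-branch sequences.  */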
15116 /* Otherwise, if we are doing less-than or greater-or-equal,
15117 op1 is a constant and the low word is zero, then we can just
15118 examine the high word. Similarly for a low word of -1 and
15119 less-or-equal or greater-than. */
15121 if (CONST_INT_P (hi[1]))
15122 switch (code)
15124 case LT: case LTU: case GE: case GEU:
15125 if (lo[1] == const0_rtx)
15127 ix86_compare_op0 = hi[0];
15128 ix86_compare_op1 = hi[1];
15129 ix86_expand_branch (code, label);
15130 return;
15132 break;
15133 case LE: case LEU: case GT: case GTU:
15134 if (lo[1] == constm1_rtx)
15136 ix86_compare_op0 = hi[0];
15137 ix86_compare_op1 = hi[1];
15138 ix86_expand_branch (code, label);
15139 return;
15141 break;
15142 default:
15143 break;
15146 /* Otherwise, we need two or three jumps. */
15148 label2 = gen_label_rtx ();
15150 code1 = code;
15151 code2 = swap_condition (code);
15152 code3 = unsigned_condition (code);
15154 switch (code)
15156 case LT: case GT: case LTU: case GTU:
15157 break;
15159 case LE: code1 = LT; code2 = GT; break;
15160 case GE: code1 = GT; code2 = LT; break;
15161 case LEU: code1 = LTU; code2 = GTU; break;
15162 case GEU: code1 = GTU; code2 = LTU; break;
15164 case EQ: code1 = UNKNOWN; code2 = NE; break;
15165 case NE: code2 = UNKNOWN; break;
15167 default:
15168 gcc_unreachable ();
15172 * a < b =>
15173 * if (hi(a) < hi(b)) goto true;
15174 * if (hi(a) > hi(b)) goto false;
15175 * if (lo(a) < lo(b)) goto true;
15176 * false:
15179 ix86_compare_op0 = hi[0];
15180 ix86_compare_op1 = hi[1];
15182 if (code1 != UNKNOWN)
15183 ix86_expand_branch (code1, label);
15184 if (code2 != UNKNOWN)
15185 ix86_expand_branch (code2, label2);
15187 ix86_compare_op0 = lo[0];
15188 ix86_compare_op1 = lo[1];
15189 ix86_expand_branch (code3, label);
15191 if (code2 != UNKNOWN)
15192 emit_label (label2);
15193 return;
15196 default:
15197 /* If we have already emitted a compare insn, go straight to simple.
15198 ix86_expand_compare won't emit anything if ix86_compare_emitted
15199 is non-NULL. */
15200 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
15201 goto simple;
15205 /* Split branch based on floating point condition. */
15206 void
15207 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
15208 rtx target1, rtx target2, rtx tmp, rtx pushed)
15210 rtx condition;
15211 rtx i;
15213 if (target2 != pc_rtx)
15215 rtx tmp = target2;
15216 code = reverse_condition_maybe_unordered (code);
15217 target2 = target1;
15218 target1 = tmp;
15221 condition = ix86_expand_fp_compare (code, op1, op2,
15222 tmp);
15224 /* Remove pushed operand from stack. */
15225 if (pushed)
15226 ix86_free_from_memory (GET_MODE (pushed));
15228 i = emit_jump_insn (gen_rtx_SET
15229 (VOIDmode, pc_rtx,
15230 gen_rtx_IF_THEN_ELSE (VOIDmode,
15231 condition, target1, target2)));
15232 if (split_branch_probability >= 0)
15233 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
15236 void
15237 ix86_expand_setcc (enum rtx_code code, rtx dest)
15239 rtx ret;
15241 gcc_assert (GET_MODE (dest) == QImode);
15243 ret = ix86_expand_compare (code);
15244 PUT_MODE (ret, QImode);
15245 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
15248 /* Expand comparison setting or clearing carry flag. Return true when
15249 successful and set pop for the operation. */
15250 static bool
15251 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
15253 enum machine_mode mode =
15254 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
15256 /* Do not handle DImode compares that go through special path. */
15257 if (mode == (TARGET_64BIT ? TImode : DImode))
15258 return false;
15260 if (SCALAR_FLOAT_MODE_P (mode))
15262 rtx compare_op, compare_seq;
15264 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
15266 /* Shortcut: the following common codes never translate
15267 into carry flag compares. */
15268 if (code == EQ || code == NE || code == UNEQ || code == LTGT
15269 || code == ORDERED || code == UNORDERED)
15270 return false;
15272 /* These comparisons require the zero flag; swap the operands so they won't. */
15273 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15274 && !TARGET_IEEE_FP)
15276 rtx tmp = op0;
15277 op0 = op1;
15278 op1 = tmp;
15279 code = swap_condition (code);
15282 /* Try to expand the comparison and verify that we end up with
15283 a carry-flag-based comparison. This fails only when we decide
15284 to expand the comparison using arithmetic, which is not a
15285 common scenario. */
15286 start_sequence ();
15287 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15288 compare_seq = get_insns ();
15289 end_sequence ();
15291 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15292 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15293 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15294 else
15295 code = GET_CODE (compare_op);
15297 if (code != LTU && code != GEU)
15298 return false;
15300 emit_insn (compare_seq);
15301 *pop = compare_op;
15302 return true;
15305 if (!INTEGRAL_MODE_P (mode))
15306 return false;
15308 switch (code)
15310 case LTU:
15311 case GEU:
15312 break;
15314 /* Convert a==0 into (unsigned)a<1. */
15315 case EQ:
15316 case NE:
15317 if (op1 != const0_rtx)
15318 return false;
15319 op1 = const1_rtx;
15320 code = (code == EQ ? LTU : GEU);
15321 break;
15323 /* Convert a>b into b<a or a>=b-1. */
15324 case GTU:
15325 case LEU:
15326 if (CONST_INT_P (op1))
15328 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15329 /* Bail out on overflow. We can still swap the operands, but that
15330 would force loading the constant into a register. */
15331 if (op1 == const0_rtx
15332 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15333 return false;
15334 code = (code == GTU ? GEU : LTU);
15336 else
15338 rtx tmp = op1;
15339 op1 = op0;
15340 op0 = tmp;
15341 code = (code == GTU ? LTU : GEU);
15343 break;
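/* For example, "(unsigned) a > 41" becomes "(unsigned) a >= 42" and
   "(unsigned) a <= 41" becomes "(unsigned) a < 42", both of which are
   decided by the carry flag alone.  */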
15345 /* Convert a>=0 into (unsigned)a<0x80000000. */
15346 case LT:
15347 case GE:
15348 if (mode == DImode || op1 != const0_rtx)
15349 return false;
15350 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15351 code = (code == LT ? GEU : LTU);
15352 break;
15353 case LE:
15354 case GT:
15355 if (mode == DImode || op1 != constm1_rtx)
15356 return false;
15357 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15358 code = (code == LE ? GEU : LTU);
15359 break;
15361 default:
15362 return false;
15364 /* Swapping operands may cause constant to appear as first operand. */
15365 if (!nonimmediate_operand (op0, VOIDmode))
15367 if (!can_create_pseudo_p ())
15368 return false;
15369 op0 = force_reg (mode, op0);
15371 ix86_compare_op0 = op0;
15372 ix86_compare_op1 = op1;
15373 *pop = ix86_expand_compare (code);
15374 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15375 return true;
15379 ix86_expand_int_movcc (rtx operands[])
15381 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15382 rtx compare_seq, compare_op;
15383 enum machine_mode mode = GET_MODE (operands[0]);
15384 bool sign_bit_compare_p = false;
15386 start_sequence ();
15387 ix86_compare_op0 = XEXP (operands[1], 0);
15388 ix86_compare_op1 = XEXP (operands[1], 1);
15389 compare_op = ix86_expand_compare (code);
15390 compare_seq = get_insns ();
15391 end_sequence ();
15393 compare_code = GET_CODE (compare_op);
15395 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15396 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15397 sign_bit_compare_p = true;
15399 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15400 HImode insns, we'd be swallowed in word prefix ops. */
15402 if ((mode != HImode || TARGET_FAST_PREFIX)
15403 && (mode != (TARGET_64BIT ? TImode : DImode))
15404 && CONST_INT_P (operands[2])
15405 && CONST_INT_P (operands[3]))
15407 rtx out = operands[0];
15408 HOST_WIDE_INT ct = INTVAL (operands[2]);
15409 HOST_WIDE_INT cf = INTVAL (operands[3]);
15410 HOST_WIDE_INT diff;
15412 diff = ct - cf;
15413 /* Sign bit compares are better done using shifts than using
15414 sbb. */
15415 if (sign_bit_compare_p
15416 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15417 ix86_compare_op1, &compare_op))
15419 /* Detect overlap between destination and compare sources. */
15420 rtx tmp = out;
15422 if (!sign_bit_compare_p)
15424 rtx flags;
15425 bool fpcmp = false;
15427 compare_code = GET_CODE (compare_op);
15429 flags = XEXP (compare_op, 0);
15431 if (GET_MODE (flags) == CCFPmode
15432 || GET_MODE (flags) == CCFPUmode)
15434 fpcmp = true;
15435 compare_code
15436 = ix86_fp_compare_code_to_integer (compare_code);
15439 /* To simplify rest of code, restrict to the GEU case. */
15440 if (compare_code == LTU)
15442 HOST_WIDE_INT tmp = ct;
15443 ct = cf;
15444 cf = tmp;
15445 compare_code = reverse_condition (compare_code);
15446 code = reverse_condition (code);
15448 else
15450 if (fpcmp)
15451 PUT_CODE (compare_op,
15452 reverse_condition_maybe_unordered
15453 (GET_CODE (compare_op)));
15454 else
15455 PUT_CODE (compare_op,
15456 reverse_condition (GET_CODE (compare_op)));
15458 diff = ct - cf;
15460 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15461 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15462 tmp = gen_reg_rtx (mode);
15464 if (mode == DImode)
15465 emit_insn (gen_x86_movdicc_0_m1 (tmp, flags, compare_op));
15466 else
15467 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp),
15468 flags, compare_op));
15470 else
15472 if (code == GT || code == GE)
15473 code = reverse_condition (code);
15474 else
15476 HOST_WIDE_INT tmp = ct;
15477 ct = cf;
15478 cf = tmp;
15479 diff = ct - cf;
15481 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15482 ix86_compare_op1, VOIDmode, 0, -1);
15485 if (diff == 1)
15488 * cmpl op0,op1
15489 * sbbl dest,dest
15490 * [addl dest, ct]
15492 * Size 5 - 8.
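*
* As a concrete illustration (not from the original source; register
* choices and condition direction are arbitrary), computing the
* unsigned (a < b) ? 4 : 5 with a in %eax and b in %edx:
*
*	cmpl	%edx, %eax		CF  = (a < b)
*	sbbl	%eax, %eax		eax = (a < b) ? -1 : 0
*	addl	$5, %eax		eax = (a < b) ?  4 : 5
*
* i.e. the sbb materializes the carry as a 0/-1 mask and a single
* add folds in the constants, which works because they differ by 1.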
15494 if (ct)
15495 tmp = expand_simple_binop (mode, PLUS,
15496 tmp, GEN_INT (ct),
15497 copy_rtx (tmp), 1, OPTAB_DIRECT);
15499 else if (cf == -1)
15502 * cmpl op0,op1
15503 * sbbl dest,dest
15504 * orl $ct, dest
15506 * Size 8.
15508 tmp = expand_simple_binop (mode, IOR,
15509 tmp, GEN_INT (ct),
15510 copy_rtx (tmp), 1, OPTAB_DIRECT);
15512 else if (diff == -1 && ct)
15515 * cmpl op0,op1
15516 * sbbl dest,dest
15517 * notl dest
15518 * [addl dest, cf]
15520 * Size 8 - 11.
15522 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15523 if (cf)
15524 tmp = expand_simple_binop (mode, PLUS,
15525 copy_rtx (tmp), GEN_INT (cf),
15526 copy_rtx (tmp), 1, OPTAB_DIRECT);
15528 else
15531 * cmpl op0,op1
15532 * sbbl dest,dest
15533 * [notl dest]
15534 * andl cf - ct, dest
15535 * [addl dest, ct]
15537 * Size 8 - 11.
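*
* A concrete instance of the masking form (illustrative only; the
* expander may first reverse the condition or swap ct and cf):
* unsigned (a < b) ? 3 : 7 with a in %eax and b in %edx:
*
*	cmpl	%edx, %eax		CF  = (a < b)
*	sbbl	%eax, %eax		eax = (a < b) ? -1 : 0
*	andl	$-4, %eax		eax = (a < b) ? -4 : 0   (-4 == 3 - 7)
*	addl	$7, %eax		eax = (a < b) ?  3 : 7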
15540 if (cf == 0)
15542 cf = ct;
15543 ct = 0;
15544 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15547 tmp = expand_simple_binop (mode, AND,
15548 copy_rtx (tmp),
15549 gen_int_mode (cf - ct, mode),
15550 copy_rtx (tmp), 1, OPTAB_DIRECT);
15551 if (ct)
15552 tmp = expand_simple_binop (mode, PLUS,
15553 copy_rtx (tmp), GEN_INT (ct),
15554 copy_rtx (tmp), 1, OPTAB_DIRECT);
15557 if (!rtx_equal_p (tmp, out))
15558 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15560 return 1; /* DONE */
15563 if (diff < 0)
15565 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15567 HOST_WIDE_INT tmp;
15568 tmp = ct, ct = cf, cf = tmp;
15569 diff = -diff;
15571 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15573 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15575 /* We may be reversing an unordered compare into a normal compare, which
15576 is not valid in general (we may convert a non-trapping condition
15577 into a trapping one); however, on i386 we currently emit all
15578 comparisons unordered. */
15579 compare_code = reverse_condition_maybe_unordered (compare_code);
15580 code = reverse_condition_maybe_unordered (code);
15582 else
15584 compare_code = reverse_condition (compare_code);
15585 code = reverse_condition (code);
15589 compare_code = UNKNOWN;
15590 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15591 && CONST_INT_P (ix86_compare_op1))
15593 if (ix86_compare_op1 == const0_rtx
15594 && (code == LT || code == GE))
15595 compare_code = code;
15596 else if (ix86_compare_op1 == constm1_rtx)
15598 if (code == LE)
15599 compare_code = LT;
15600 else if (code == GT)
15601 compare_code = GE;
15605 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15606 if (compare_code != UNKNOWN
15607 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15608 && (cf == -1 || ct == -1))
15610 /* If lea code below could be used, only optimize
15611 if it results in a 2 insn sequence. */
15613 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15614 || diff == 3 || diff == 5 || diff == 9)
15615 || (compare_code == LT && ct == -1)
15616 || (compare_code == GE && cf == -1))
15619 * notl op1 (if necessary)
15620 * sarl $31, op1
15621 * orl cf, op1
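*
* For example, with cf == 5 and a sign-bit test on x in %eax
* (illustrative; not from the original source):
*
*	sarl	$31, %eax		eax = (x < 0) ? -1 : 0
*	orl	$5, %eax		eax = (x < 0) ? -1 : 5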
15623 if (ct != -1)
15625 cf = ct;
15626 ct = -1;
15627 code = reverse_condition (code);
15630 out = emit_store_flag (out, code, ix86_compare_op0,
15631 ix86_compare_op1, VOIDmode, 0, -1);
15633 out = expand_simple_binop (mode, IOR,
15634 out, GEN_INT (cf),
15635 out, 1, OPTAB_DIRECT);
15636 if (out != operands[0])
15637 emit_move_insn (operands[0], out);
15639 return 1; /* DONE */
15644 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15645 || diff == 3 || diff == 5 || diff == 9)
15646 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15647 && (mode != DImode
15648 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15651 * xorl dest,dest
15652 * cmpl op1,op2
15653 * setcc dest
15654 * lea cf(dest*(ct-cf)),dest
15656 * Size 14.
15658 * This also catches the degenerate setcc-only case.
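*
* For instance (illustrative only), ct == 7, cf == 3, diff == 4,
* with the comparison operands in %ecx and %edx:
*
*	xorl	%eax, %eax
*	cmpl	%edx, %ecx
*	setb	%al			al  = (c < d) ? 1 : 0
*	leal	3(,%eax,4), %eax	eax = (c < d) ? 7 : 3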
15661 rtx tmp;
15662 int nops;
15664 out = emit_store_flag (out, code, ix86_compare_op0,
15665 ix86_compare_op1, VOIDmode, 0, 1);
15667 nops = 0;
15668 /* On x86_64 the lea instruction operates on Pmode, so we need
15669 to get the arithmetic done in the proper mode to match. */
15670 if (diff == 1)
15671 tmp = copy_rtx (out);
15672 else
15674 rtx out1;
15675 out1 = copy_rtx (out);
15676 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15677 nops++;
15678 if (diff & 1)
15680 tmp = gen_rtx_PLUS (mode, tmp, out1);
15681 nops++;
15684 if (cf != 0)
15686 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15687 nops++;
15689 if (!rtx_equal_p (tmp, out))
15691 if (nops == 1)
15692 out = force_operand (tmp, copy_rtx (out));
15693 else
15694 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15696 if (!rtx_equal_p (out, operands[0]))
15697 emit_move_insn (operands[0], copy_rtx (out));
15699 return 1; /* DONE */
15703 * General case:			Jumpful:
15704 *   xorl dest,dest		cmpl op1, op2
15705 *   cmpl op1, op2		movl ct, dest
15706 *   setcc dest			jcc 1f
15707 *   decl dest			movl cf, dest
15708 *   andl (cf-ct),dest		1:
15709 *   addl ct,dest
15711 * Size 20.			Size 14.
15713 * This is reasonably steep, but branch mispredict costs are
15714 * high on modern cpus, so consider failing only if optimizing
15715 * for space.
15718 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15719 && BRANCH_COST (optimize_insn_for_speed_p (),
15720 false) >= 2)
15722 if (cf == 0)
15724 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15726 cf = ct;
15727 ct = 0;
15729 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15731 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15733 /* We may be reversing an unordered compare into a normal compare,
15734 which is not valid in general (we may convert a non-trapping
15735 condition into a trapping one); however, on i386 we currently
15736 emit all comparisons unordered. */
15737 code = reverse_condition_maybe_unordered (code);
15739 else
15741 code = reverse_condition (code);
15742 if (compare_code != UNKNOWN)
15743 compare_code = reverse_condition (compare_code);
15747 if (compare_code != UNKNOWN)
15749 /* notl op1 (if needed)
15750 sarl $31, op1
15751 andl (cf-ct), op1
15752 addl ct, op1
15754 For x < 0 (resp. x <= -1) there will be no notl,
15755 so if possible swap the constants to get rid of the
15756 complement.
15757 True/false will be -1/0 while code below (store flag
15758 followed by decrement) is 0/-1, so the constants need
15759 to be exchanged once more. */
15761 if (compare_code == GE || !cf)
15763 code = reverse_condition (code);
15764 compare_code = LT;
15766 else
15768 HOST_WIDE_INT tmp = cf;
15769 cf = ct;
15770 ct = tmp;
15773 out = emit_store_flag (out, code, ix86_compare_op0,
15774 ix86_compare_op1, VOIDmode, 0, -1);
15776 else
15778 out = emit_store_flag (out, code, ix86_compare_op0,
15779 ix86_compare_op1, VOIDmode, 0, 1);
15781 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15782 copy_rtx (out), 1, OPTAB_DIRECT);
15785 out = expand_simple_binop (mode, AND, copy_rtx (out),
15786 gen_int_mode (cf - ct, mode),
15787 copy_rtx (out), 1, OPTAB_DIRECT);
15788 if (ct)
15789 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15790 copy_rtx (out), 1, OPTAB_DIRECT);
15791 if (!rtx_equal_p (out, operands[0]))
15792 emit_move_insn (operands[0], copy_rtx (out));
15794 return 1; /* DONE */
15798 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15800 /* Try a few things more with specific constants and a variable. */
15802 optab op;
15803 rtx var, orig_out, out, tmp;
15805 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15806 return 0; /* FAIL */
15808 /* If one of the two operands is an interesting constant, load a
15809 constant with the above and mask it in with a logical operation. */
15811 if (CONST_INT_P (operands[2]))
15813 var = operands[3];
15814 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15815 operands[3] = constm1_rtx, op = and_optab;
15816 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15817 operands[3] = const0_rtx, op = ior_optab;
15818 else
15819 return 0; /* FAIL */
15821 else if (CONST_INT_P (operands[3]))
15823 var = operands[2];
15824 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15825 operands[2] = constm1_rtx, op = and_optab;
15826 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15827 operands[2] = const0_rtx, op = ior_optab;
15828 else
15829 return 0; /* FAIL */
15831 else
15832 return 0; /* FAIL */
15834 orig_out = operands[0];
15835 tmp = gen_reg_rtx (mode);
15836 operands[0] = tmp;
15838 /* Recurse to get the constant loaded. */
15839 if (ix86_expand_int_movcc (operands) == 0)
15840 return 0; /* FAIL */
15842 /* Mask in the interesting variable. */
15843 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15844 OPTAB_WIDEN);
15845 if (!rtx_equal_p (out, orig_out))
15846 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15848 return 1; /* DONE */
15852 * For comparison with above,
15854 * movl cf,dest
15855 * movl ct,tmp
15856 * cmpl op1,op2
15857 * cmovcc tmp,dest
15859 * Size 15.
15862 if (! nonimmediate_operand (operands[2], mode))
15863 operands[2] = force_reg (mode, operands[2]);
15864 if (! nonimmediate_operand (operands[3], mode))
15865 operands[3] = force_reg (mode, operands[3]);
15867 if (! register_operand (operands[2], VOIDmode)
15868 && (mode == QImode
15869 || ! register_operand (operands[3], VOIDmode)))
15870 operands[2] = force_reg (mode, operands[2]);
15872 if (mode == QImode
15873 && ! register_operand (operands[3], VOIDmode))
15874 operands[3] = force_reg (mode, operands[3]);
15876 emit_insn (compare_seq);
15877 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15878 gen_rtx_IF_THEN_ELSE (mode,
15879 compare_op, operands[2],
15880 operands[3])));
15882 return 1; /* DONE */
15885 /* Swap, force into registers, or otherwise massage the two operands
15886 to an sse comparison with a mask result. Thus we differ a bit from
15887 ix86_prepare_fp_compare_args which expects to produce a flags result.
15889 The DEST operand exists to help determine whether to commute commutative
15890 operators. The POP0/POP1 operands are updated in place. The new
15891 comparison code is returned, or UNKNOWN if not implementable. */
15893 static enum rtx_code
15894 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
15895 rtx *pop0, rtx *pop1)
15897 rtx tmp;
15899 switch (code)
15901 case LTGT:
15902 case UNEQ:
15903 /* We have no LTGT as an operator. We could implement it with
15904 NE & ORDERED, but this requires an extra temporary. It's
15905 not clear that it's worth it. */
15906 return UNKNOWN;
15908 case LT:
15909 case LE:
15910 case UNGT:
15911 case UNGE:
15912 /* These are supported directly. */
15913 break;
15915 case EQ:
15916 case NE:
15917 case UNORDERED:
15918 case ORDERED:
15919 /* For commutative operators, try to canonicalize the destination
15920 operand to be first in the comparison - this helps reload to
15921 avoid extra moves. */
15922 if (!dest || !rtx_equal_p (dest, *pop1))
15923 break;
15924 /* FALLTHRU */
15926 case GE:
15927 case GT:
15928 case UNLE:
15929 case UNLT:
15930 /* These are not supported directly. Swap the comparison operands
15931 to transform into something that is supported. */
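/* For instance, a GT comparison a > b is handled here by rewriting it
   as b < a (swap_condition plus the operand swap), so the directly
   supported LT patterns (e.g. cmpltss/cmpltps) cover both directions.
   Illustrative summary, not from the original source. */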
15932 tmp = *pop0;
15933 *pop0 = *pop1;
15934 *pop1 = tmp;
15935 code = swap_condition (code);
15936 break;
15938 default:
15939 gcc_unreachable ();
15942 return code;
15945 /* Detect conditional moves that exactly match min/max operational
15946 semantics. Note that this is IEEE safe, as long as we don't
15947 interchange the operands.
15949 Returns FALSE if this conditional move doesn't match a MIN/MAX,
15950 and TRUE if the operation is successful and instructions are emitted. */
15952 static bool
15953 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
15954 rtx cmp_op1, rtx if_true, rtx if_false)
15956 enum machine_mode mode;
15957 bool is_min;
15958 rtx tmp;
15960 if (code == LT)
15961 ;
15962 else if (code == UNGE)
15964 tmp = if_true;
15965 if_true = if_false;
15966 if_false = tmp;
15968 else
15969 return false;
15971 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
15972 is_min = true;
15973 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
15974 is_min = false;
15975 else
15976 return false;
15978 mode = GET_MODE (dest);
15980 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
15981 but MODE may be a vector mode and thus not appropriate. */
15982 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
15984 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
15985 rtvec v;
15987 if_true = force_reg (mode, if_true);
15988 v = gen_rtvec (2, if_true, if_false);
15989 tmp = gen_rtx_UNSPEC (mode, v, u);
15991 else
15993 code = is_min ? SMIN : SMAX;
15994 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
15997 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
15998 return true;
16001 /* Expand an sse vector comparison. Return the register with the result. */
16003 static rtx
16004 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
16005 rtx op_true, rtx op_false)
16007 enum machine_mode mode = GET_MODE (dest);
16008 rtx x;
16010 cmp_op0 = force_reg (mode, cmp_op0);
16011 if (!nonimmediate_operand (cmp_op1, mode))
16012 cmp_op1 = force_reg (mode, cmp_op1);
16014 if (optimize
16015 || reg_overlap_mentioned_p (dest, op_true)
16016 || reg_overlap_mentioned_p (dest, op_false))
16017 dest = gen_reg_rtx (mode);
16019 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
16020 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16022 return dest;
16025 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
16026 operations. This is used for both scalar and vector conditional moves. */
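/* The expansion below relies on the usual SSE mask-blend identity
   dest = (cmp AND op_true) OR ((NOT cmp) AND op_false), where every
   lane of CMP is either all-ones or all-zeros.  For example, a V4SF
   lane with cmp == 0xffffffff selects the op_true element, and a lane
   with cmp == 0 selects the op_false element.  (Illustrative note; the
   code also uses the shorter AND or ANDN-only forms when one arm is
   zero, and the TARGET_XOP path emits a single IF_THEN_ELSE pattern
   instead.) */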
16028 static void
16029 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
16031 enum machine_mode mode = GET_MODE (dest);
16032 rtx t2, t3, x;
16034 if (op_false == CONST0_RTX (mode))
16036 op_true = force_reg (mode, op_true);
16037 x = gen_rtx_AND (mode, cmp, op_true);
16038 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16040 else if (op_true == CONST0_RTX (mode))
16042 op_false = force_reg (mode, op_false);
16043 x = gen_rtx_NOT (mode, cmp);
16044 x = gen_rtx_AND (mode, x, op_false);
16045 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16047 else if (TARGET_XOP)
16049 rtx pcmov = gen_rtx_SET (mode, dest,
16050 gen_rtx_IF_THEN_ELSE (mode, cmp,
16051 op_true,
16052 op_false));
16053 emit_insn (pcmov);
16055 else
16057 op_true = force_reg (mode, op_true);
16058 op_false = force_reg (mode, op_false);
16060 t2 = gen_reg_rtx (mode);
16061 if (optimize)
16062 t3 = gen_reg_rtx (mode);
16063 else
16064 t3 = dest;
16066 x = gen_rtx_AND (mode, op_true, cmp);
16067 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
16069 x = gen_rtx_NOT (mode, cmp);
16070 x = gen_rtx_AND (mode, x, op_false);
16071 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
16073 x = gen_rtx_IOR (mode, t3, t2);
16074 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
16078 /* Expand a floating-point conditional move. Return true if successful. */
16081 ix86_expand_fp_movcc (rtx operands[])
16083 enum machine_mode mode = GET_MODE (operands[0]);
16084 enum rtx_code code = GET_CODE (operands[1]);
16085 rtx tmp, compare_op;
16087 ix86_compare_op0 = XEXP (operands[1], 0);
16088 ix86_compare_op1 = XEXP (operands[1], 1);
16089 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
16091 enum machine_mode cmode;
16093 /* Since we've no cmove for sse registers, don't force bad register
16094 allocation just to gain access to it. Deny movcc when the
16095 comparison mode doesn't match the move mode. */
16096 cmode = GET_MODE (ix86_compare_op0);
16097 if (cmode == VOIDmode)
16098 cmode = GET_MODE (ix86_compare_op1);
16099 if (cmode != mode)
16100 return 0;
16102 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16103 &ix86_compare_op0,
16104 &ix86_compare_op1);
16105 if (code == UNKNOWN)
16106 return 0;
16108 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
16109 ix86_compare_op1, operands[2],
16110 operands[3]))
16111 return 1;
16113 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
16114 ix86_compare_op1, operands[2], operands[3]);
16115 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
16116 return 1;
16119 /* The floating point conditional move instructions don't directly
16120 support conditions resulting from a signed integer comparison. */
16122 compare_op = ix86_expand_compare (code);
16123 if (!fcmov_comparison_operator (compare_op, VOIDmode))
16125 tmp = gen_reg_rtx (QImode);
16126 ix86_expand_setcc (code, tmp);
16127 code = NE;
16128 ix86_compare_op0 = tmp;
16129 ix86_compare_op1 = const0_rtx;
16130 compare_op = ix86_expand_compare (code);
16133 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
16134 gen_rtx_IF_THEN_ELSE (mode, compare_op,
16135 operands[2], operands[3])));
16137 return 1;
16140 /* Expand a floating-point vector conditional move; a vcond operation
16141 rather than a movcc operation. */
16143 bool
16144 ix86_expand_fp_vcond (rtx operands[])
16146 enum rtx_code code = GET_CODE (operands[3]);
16147 rtx cmp;
16149 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
16150 &operands[4], &operands[5]);
16151 if (code == UNKNOWN)
16152 return false;
16154 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
16155 operands[5], operands[1], operands[2]))
16156 return true;
16158 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
16159 operands[1], operands[2]);
16160 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
16161 return true;
16164 /* Expand a signed/unsigned integral vector conditional move. */
16166 bool
16167 ix86_expand_int_vcond (rtx operands[])
16169 enum machine_mode mode = GET_MODE (operands[0]);
16170 enum rtx_code code = GET_CODE (operands[3]);
16171 bool negate = false;
16172 rtx x, cop0, cop1;
16174 cop0 = operands[4];
16175 cop1 = operands[5];
16177 /* XOP supports all of the comparisons on all vector int types. */
16178 if (!TARGET_XOP)
16180 /* Canonicalize the comparison to EQ, GT, GTU. */
16181 switch (code)
16183 case EQ:
16184 case GT:
16185 case GTU:
16186 break;
16188 case NE:
16189 case LE:
16190 case LEU:
16191 code = reverse_condition (code);
16192 negate = true;
16193 break;
16195 case GE:
16196 case GEU:
16197 code = reverse_condition (code);
16198 negate = true;
16199 /* FALLTHRU */
16201 case LT:
16202 case LTU:
16203 code = swap_condition (code);
16204 x = cop0, cop0 = cop1, cop1 = x;
16205 break;
16207 default:
16208 gcc_unreachable ();
16211 /* Only SSE4.1/SSE4.2 supports V2DImode. */
16212 if (mode == V2DImode)
16214 switch (code)
16216 case EQ:
16217 /* SSE4.1 supports EQ. */
16218 if (!TARGET_SSE4_1)
16219 return false;
16220 break;
16222 case GT:
16223 case GTU:
16224 /* SSE4.2 supports GT/GTU. */
16225 if (!TARGET_SSE4_2)
16226 return false;
16227 break;
16229 default:
16230 gcc_unreachable ();
16234 /* Unsigned parallel compare is not supported by the hardware.
16235 Play some tricks to turn this into a signed comparison
16236 against 0. */
16237 if (code == GTU)
16239 cop0 = force_reg (mode, cop0);
16241 switch (mode)
16243 case V4SImode:
16244 case V2DImode:
16246 rtx t1, t2, mask;
16247 rtx (*gen_sub3) (rtx, rtx, rtx);
16249 /* Subtract (-(INT MAX) - 1), i.e. the sign-bit value, from both operands
16250 so that the unsigned comparison becomes an equivalent signed one. */
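/* Worked example (illustrative): for 32-bit lanes the bias is 0x80000000,
   and subtracting it is the same as flipping the sign bit.  With
   a == 0xffffffff and b == 0x00000001, the biased values are 0x7fffffff
   and 0x80000001; the signed compare 0x7fffffff > (int) 0x80000001 holds,
   which matches the desired unsigned a > b. */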
16251 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
16252 true, false);
16253 gen_sub3 = (mode == V4SImode
16254 ? gen_subv4si3 : gen_subv2di3);
16255 t1 = gen_reg_rtx (mode);
16256 emit_insn (gen_sub3 (t1, cop0, mask));
16258 t2 = gen_reg_rtx (mode);
16259 emit_insn (gen_sub3 (t2, cop1, mask));
16261 cop0 = t1;
16262 cop1 = t2;
16263 code = GT;
16265 break;
16267 case V16QImode:
16268 case V8HImode:
16269 /* Perform a parallel unsigned saturating subtraction. */
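/* Illustrative example: per element, a > b (unsigned) holds exactly when
   the saturating difference a -us b is nonzero, e.g. 9 -us 5 == 4 but
   5 -us 9 == 0.  Only an EQ compare against zero is available here, so
   the result is computed inverted and NEGATE is flipped to compensate. */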
16270 x = gen_reg_rtx (mode);
16271 emit_insn (gen_rtx_SET (VOIDmode, x,
16272 gen_rtx_US_MINUS (mode, cop0, cop1)));
16274 cop0 = x;
16275 cop1 = CONST0_RTX (mode);
16276 code = EQ;
16277 negate = !negate;
16278 break;
16280 default:
16281 gcc_unreachable ();
16286 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16287 operands[1+negate], operands[2-negate]);
16289 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16290 operands[2-negate]);
16291 return true;
16294 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16295 true if we should do zero extension, else sign extension. HIGH_P is
16296 true if we want the N/2 high elements, else the low elements. */
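/* For example (illustrative), sign-extending the low half of a V8HI
   into a V4SI: for an element 0xfffe (-2), the GT compare of zero
   against the operand yields the all-ones word 0xffff, and interleaving
   the element with that word produces 0xfffffffe, i.e. -2 as an SImode
   element; for zero extension the interleave partner is simply 0. */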
16298 void
16299 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16301 enum machine_mode imode = GET_MODE (operands[1]);
16302 rtx (*unpack)(rtx, rtx, rtx);
16303 rtx se, dest;
16305 switch (imode)
16307 case V16QImode:
16308 if (high_p)
16309 unpack = gen_vec_interleave_highv16qi;
16310 else
16311 unpack = gen_vec_interleave_lowv16qi;
16312 break;
16313 case V8HImode:
16314 if (high_p)
16315 unpack = gen_vec_interleave_highv8hi;
16316 else
16317 unpack = gen_vec_interleave_lowv8hi;
16318 break;
16319 case V4SImode:
16320 if (high_p)
16321 unpack = gen_vec_interleave_highv4si;
16322 else
16323 unpack = gen_vec_interleave_lowv4si;
16324 break;
16325 default:
16326 gcc_unreachable ();
16329 dest = gen_lowpart (imode, operands[0]);
16331 if (unsigned_p)
16332 se = force_reg (imode, CONST0_RTX (imode));
16333 else
16334 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16335 operands[1], pc_rtx, pc_rtx);
16337 emit_insn (unpack (dest, operands[1], se));
16340 /* This function performs the same task as ix86_expand_sse_unpack,
16341 but with SSE4.1 instructions. */
16343 void
16344 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16346 enum machine_mode imode = GET_MODE (operands[1]);
16347 rtx (*unpack)(rtx, rtx);
16348 rtx src, dest;
16350 switch (imode)
16352 case V16QImode:
16353 if (unsigned_p)
16354 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16355 else
16356 unpack = gen_sse4_1_extendv8qiv8hi2;
16357 break;
16358 case V8HImode:
16359 if (unsigned_p)
16360 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16361 else
16362 unpack = gen_sse4_1_extendv4hiv4si2;
16363 break;
16364 case V4SImode:
16365 if (unsigned_p)
16366 unpack = gen_sse4_1_zero_extendv2siv2di2;
16367 else
16368 unpack = gen_sse4_1_extendv2siv2di2;
16369 break;
16370 default:
16371 gcc_unreachable ();
16374 dest = operands[0];
16375 if (high_p)
16377 /* Shift higher 8 bytes to lower 8 bytes. */
16378 src = gen_reg_rtx (imode);
16379 emit_insn (gen_sse2_lshrv1ti3 (gen_lowpart (V1TImode, src),
16380 gen_lowpart (V1TImode, operands[1]),
16381 GEN_INT (64)));
16383 else
16384 src = operands[1];
16386 emit_insn (unpack (dest, src));
16389 /* Expand conditional increment or decrement using adc/sbb instructions.
16390 The default case using setcc followed by the conditional move can be
16391 done by generic code. */
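/* For instance (illustrative register choices), a conditional increment
   such as x += (a < b) for unsigned a, b can be emitted as

	cmpl	%ebx, %eax		CF = (a < b)
	adcl	$0, %ecx		x += CF

   and the mirrored sbb form handles the conditional decrement. */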
16393 ix86_expand_int_addcc (rtx operands[])
16395 enum rtx_code code = GET_CODE (operands[1]);
16396 rtx flags;
16397 rtx (*insn)(rtx, rtx, rtx, rtx, rtx);
16398 rtx compare_op;
16399 rtx val = const0_rtx;
16400 bool fpcmp = false;
16401 enum machine_mode mode;
16403 ix86_compare_op0 = XEXP (operands[1], 0);
16404 ix86_compare_op1 = XEXP (operands[1], 1);
16405 if (operands[3] != const1_rtx
16406 && operands[3] != constm1_rtx)
16407 return 0;
16408 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16409 ix86_compare_op1, &compare_op))
16410 return 0;
16411 code = GET_CODE (compare_op);
16413 flags = XEXP (compare_op, 0);
16415 if (GET_MODE (flags) == CCFPmode
16416 || GET_MODE (flags) == CCFPUmode)
16418 fpcmp = true;
16419 code = ix86_fp_compare_code_to_integer (code);
16422 if (code != LTU)
16424 val = constm1_rtx;
16425 if (fpcmp)
16426 PUT_CODE (compare_op,
16427 reverse_condition_maybe_unordered
16428 (GET_CODE (compare_op)));
16429 else
16430 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
16433 mode = GET_MODE (operands[0]);
16435 /* Construct either adc or sbb insn. */
16436 if ((code == LTU) == (operands[3] == constm1_rtx))
16438 switch (mode)
16440 case QImode:
16441 insn = gen_subqi3_carry;
16442 break;
16443 case HImode:
16444 insn = gen_subhi3_carry;
16445 break;
16446 case SImode:
16447 insn = gen_subsi3_carry;
16448 break;
16449 case DImode:
16450 insn = gen_subdi3_carry;
16451 break;
16452 default:
16453 gcc_unreachable ();
16456 else
16458 switch (mode)
16460 case QImode:
16461 insn = gen_addqi3_carry;
16462 break;
16463 case HImode:
16464 insn = gen_addhi3_carry;
16465 break;
16466 case SImode:
16467 insn = gen_addsi3_carry;
16468 break;
16469 case DImode:
16470 insn = gen_adddi3_carry;
16471 break;
16472 default:
16473 gcc_unreachable ();
16476 emit_insn (insn (operands[0], operands[2], val, flags, compare_op));
16478 return 1; /* DONE */
16482 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16483 works for floating point parameters and non-offsettable memories.
16484 For pushes, it returns just stack offsets; the values will be saved
16485 in the right order. At most four parts are generated. */
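/* For example (illustrative), on a 32-bit target a DImode immediate
   0x1122334455667788 is returned as two SImode parts,
   parts[0] == 0x55667788 (low word) and parts[1] == 0x11223344
   (high word); an XFmode value produces three SImode parts. */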
16487 static int
16488 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16490 int size;
16492 if (!TARGET_64BIT)
16493 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16494 else
16495 size = (GET_MODE_SIZE (mode) + 4) / 8;
16497 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16498 gcc_assert (size >= 2 && size <= 4);
16500 /* Optimize constant pool reference to immediates. This is used by fp
16501 moves, that force all constants to memory to allow combining. */
16502 if (MEM_P (operand) && MEM_READONLY_P (operand))
16504 rtx tmp = maybe_get_pool_constant (operand);
16505 if (tmp)
16506 operand = tmp;
16509 if (MEM_P (operand) && !offsettable_memref_p (operand))
16511 /* The only non-offsettable memories we handle are pushes. */
16512 int ok = push_operand (operand, VOIDmode);
16514 gcc_assert (ok);
16516 operand = copy_rtx (operand);
16517 PUT_MODE (operand, Pmode);
16518 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16519 return size;
16522 if (GET_CODE (operand) == CONST_VECTOR)
16524 enum machine_mode imode = int_mode_for_mode (mode);
16525 /* Caution: if we looked through a constant pool memory above,
16526 the operand may actually have a different mode now. That's
16527 ok, since we want to pun this all the way back to an integer. */
16528 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16529 gcc_assert (operand != NULL);
16530 mode = imode;
16533 if (!TARGET_64BIT)
16535 if (mode == DImode)
16536 split_di (&operand, 1, &parts[0], &parts[1]);
16537 else
16539 int i;
16541 if (REG_P (operand))
16543 gcc_assert (reload_completed);
16544 for (i = 0; i < size; i++)
16545 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16547 else if (offsettable_memref_p (operand))
16549 operand = adjust_address (operand, SImode, 0);
16550 parts[0] = operand;
16551 for (i = 1; i < size; i++)
16552 parts[i] = adjust_address (operand, SImode, 4 * i);
16554 else if (GET_CODE (operand) == CONST_DOUBLE)
16556 REAL_VALUE_TYPE r;
16557 long l[4];
16559 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16560 switch (mode)
16562 case TFmode:
16563 real_to_target (l, &r, mode);
16564 parts[3] = gen_int_mode (l[3], SImode);
16565 parts[2] = gen_int_mode (l[2], SImode);
16566 break;
16567 case XFmode:
16568 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16569 parts[2] = gen_int_mode (l[2], SImode);
16570 break;
16571 case DFmode:
16572 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16573 break;
16574 default:
16575 gcc_unreachable ();
16577 parts[1] = gen_int_mode (l[1], SImode);
16578 parts[0] = gen_int_mode (l[0], SImode);
16580 else
16581 gcc_unreachable ();
16584 else
16586 if (mode == TImode)
16587 split_ti (&operand, 1, &parts[0], &parts[1]);
16588 if (mode == XFmode || mode == TFmode)
16590 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
16591 if (REG_P (operand))
16593 gcc_assert (reload_completed);
16594 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16595 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16597 else if (offsettable_memref_p (operand))
16599 operand = adjust_address (operand, DImode, 0);
16600 parts[0] = operand;
16601 parts[1] = adjust_address (operand, upper_mode, 8);
16603 else if (GET_CODE (operand) == CONST_DOUBLE)
16605 REAL_VALUE_TYPE r;
16606 long l[4];
16608 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16609 real_to_target (l, &r, mode);
16611 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16612 if (HOST_BITS_PER_WIDE_INT >= 64)
16613 parts[0]
16614 = gen_int_mode
16615 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16616 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16617 DImode);
16618 else
16619 parts[0] = immed_double_const (l[0], l[1], DImode);
16621 if (upper_mode == SImode)
16622 parts[1] = gen_int_mode (l[2], SImode);
16623 else if (HOST_BITS_PER_WIDE_INT >= 64)
16624 parts[1]
16625 = gen_int_mode
16626 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16627 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16628 DImode);
16629 else
16630 parts[1] = immed_double_const (l[2], l[3], DImode);
16632 else
16633 gcc_unreachable ();
16637 return size;
16640 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16641 The value is split into parts; operands 2-5 receive the destination
16642 parts and operands 6-9 the source parts, in the correct order, and
16643 the moves are emitted from there. */
16645 void
16646 ix86_split_long_move (rtx operands[])
16648 rtx part[2][4];
16649 int nparts, i, j;
16650 int push = 0;
16651 int collisions = 0;
16652 enum machine_mode mode = GET_MODE (operands[0]);
16653 bool collisionparts[4];
16655 /* The DFmode expanders may ask us to move a double.
16656 For a 64-bit target this is a single move. By hiding that fact
16657 here we simplify the i386.md splitters. */
16658 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16660 /* Optimize constant pool reference to immediates. This is used by
16661 fp moves, that force all constants to memory to allow combining. */
16663 if (MEM_P (operands[1])
16664 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16665 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16666 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16667 if (push_operand (operands[0], VOIDmode))
16669 operands[0] = copy_rtx (operands[0]);
16670 PUT_MODE (operands[0], Pmode);
16672 else
16673 operands[0] = gen_lowpart (DImode, operands[0]);
16674 operands[1] = gen_lowpart (DImode, operands[1]);
16675 emit_move_insn (operands[0], operands[1]);
16676 return;
16679 /* The only non-offsettable memory we handle is push. */
16680 if (push_operand (operands[0], VOIDmode))
16681 push = 1;
16682 else
16683 gcc_assert (!MEM_P (operands[0])
16684 || offsettable_memref_p (operands[0]));
16686 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16687 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16689 /* When emitting a push, be careful with source operands that live on the stack. */
16690 if (push && MEM_P (operands[1])
16691 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16693 rtx src_base = XEXP (part[1][nparts - 1], 0);
16695 /* Compensate for the stack decrement by 4. */
16696 if (!TARGET_64BIT && nparts == 3
16697 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16698 src_base = plus_constant (src_base, 4);
16700 /* src_base refers to the stack pointer and is
16701 automatically decreased by emitted push. */
16702 for (i = 0; i < nparts; i++)
16703 part[1][i] = change_address (part[1][i],
16704 GET_MODE (part[1][i]), src_base);
16707 /* We need to do the copy in the right order in case an address register
16708 of the source overlaps the destination. */
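/* For example (illustrative), splitting a DImode load from (%eax) into
   the register pair %eax/%edx must emit

	movl	4(%eax), %edx
	movl	(%eax), %eax

   i.e. the part that overwrites the base register is copied last; when
   reordering alone cannot avoid the overlap, an lea into the last
   destination part is used instead. */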
16709 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16711 rtx tmp;
16713 for (i = 0; i < nparts; i++)
16715 collisionparts[i]
16716 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16717 if (collisionparts[i])
16718 collisions++;
16721 /* Collision in the middle part can be handled by reordering. */
16722 if (collisions == 1 && nparts == 3 && collisionparts [1])
16724 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16725 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16727 else if (collisions == 1
16728 && nparts == 4
16729 && (collisionparts [1] || collisionparts [2]))
16731 if (collisionparts [1])
16733 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16734 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16736 else
16738 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16739 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16743 /* If there are more collisions, we can't handle it by reordering.
16744 Do an lea to the last part and use only one colliding move. */
16745 else if (collisions > 1)
16747 rtx base;
16749 collisions = 1;
16751 base = part[0][nparts - 1];
16753 /* Handle the case when the last part isn't valid for lea.
16754 Happens in 64-bit mode storing the 12-byte XFmode. */
16755 if (GET_MODE (base) != Pmode)
16756 base = gen_rtx_REG (Pmode, REGNO (base));
16758 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16759 part[1][0] = replace_equiv_address (part[1][0], base);
16760 for (i = 1; i < nparts; i++)
16762 tmp = plus_constant (base, UNITS_PER_WORD * i);
16763 part[1][i] = replace_equiv_address (part[1][i], tmp);
16768 if (push)
16770 if (!TARGET_64BIT)
16772 if (nparts == 3)
16774 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16775 emit_insn (gen_addsi3 (stack_pointer_rtx,
16776 stack_pointer_rtx, GEN_INT (-4)));
16777 emit_move_insn (part[0][2], part[1][2]);
16779 else if (nparts == 4)
16781 emit_move_insn (part[0][3], part[1][3]);
16782 emit_move_insn (part[0][2], part[1][2]);
16785 else
16787 /* In 64bit mode we don't have a 32bit push available. In case this is
16788 a register, it is OK - we will just use the larger counterpart. We also
16789 retype the memory - these come from an attempt to avoid a REX prefix on
16790 moving of the second half of a TFmode value. */
16791 if (GET_MODE (part[1][1]) == SImode)
16793 switch (GET_CODE (part[1][1]))
16795 case MEM:
16796 part[1][1] = adjust_address (part[1][1], DImode, 0);
16797 break;
16799 case REG:
16800 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16801 break;
16803 default:
16804 gcc_unreachable ();
16807 if (GET_MODE (part[1][0]) == SImode)
16808 part[1][0] = part[1][1];
16811 emit_move_insn (part[0][1], part[1][1]);
16812 emit_move_insn (part[0][0], part[1][0]);
16813 return;
16816 /* Choose the correct order so as not to overwrite the source before it is copied. */
16817 if ((REG_P (part[0][0])
16818 && REG_P (part[1][1])
16819 && (REGNO (part[0][0]) == REGNO (part[1][1])
16820 || (nparts == 3
16821 && REGNO (part[0][0]) == REGNO (part[1][2]))
16822 || (nparts == 4
16823 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16824 || (collisions > 0
16825 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16827 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16829 operands[2 + i] = part[0][j];
16830 operands[6 + i] = part[1][j];
16833 else
16835 for (i = 0; i < nparts; i++)
16837 operands[2 + i] = part[0][i];
16838 operands[6 + i] = part[1][i];
16842 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16843 if (optimize_insn_for_size_p ())
16845 for (j = 0; j < nparts - 1; j++)
16846 if (CONST_INT_P (operands[6 + j])
16847 && operands[6 + j] != const0_rtx
16848 && REG_P (operands[2 + j]))
16849 for (i = j; i < nparts - 1; i++)
16850 if (CONST_INT_P (operands[7 + i])
16851 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16852 operands[7 + i] = operands[2 + j];
16855 for (i = 0; i < nparts; i++)
16856 emit_move_insn (operands[2 + i], operands[6 + i]);
16858 return;
16861 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16862 left shift by a constant, either using a single shift or
16863 a sequence of add instructions. */
16865 static void
16866 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16868 if (count == 1)
16870 emit_insn ((mode == DImode
16871 ? gen_addsi3
16872 : gen_adddi3) (operand, operand, operand));
16874 else if (!optimize_insn_for_size_p ()
16875 && count * ix86_cost->add <= ix86_cost->shift_const)
16877 int i;
16878 for (i=0; i<count; i++)
16880 emit_insn ((mode == DImode
16881 ? gen_addsi3
16882 : gen_adddi3) (operand, operand, operand));
16885 else
16886 emit_insn ((mode == DImode
16887 ? gen_ashlsi3
16888 : gen_ashldi3) (operand, operand, GEN_INT (count)));
16891 void
16892 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16894 rtx low[2], high[2];
16895 int count;
16896 const int single_width = mode == DImode ? 32 : 64;
16898 if (CONST_INT_P (operands[2]))
16900 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16901 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16903 if (count >= single_width)
16905 emit_move_insn (high[0], low[1]);
16906 emit_move_insn (low[0], const0_rtx);
16908 if (count > single_width)
16909 ix86_expand_ashl_const (high[0], count - single_width, mode);
16911 else
16913 if (!rtx_equal_p (operands[0], operands[1]))
16914 emit_move_insn (operands[0], operands[1]);
16915 emit_insn ((mode == DImode
16916 ? gen_x86_shld
16917 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16918 ix86_expand_ashl_const (low[0], count, mode);
16920 return;
16923 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16925 if (operands[1] == const1_rtx)
16927 /* Assuming we've chosen QImode-capable registers, then 1 << N
16928 can be done with two 32/64-bit shifts, no branches, no cmoves. */
16929 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
16931 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
16933 ix86_expand_clear (low[0]);
16934 ix86_expand_clear (high[0]);
16935 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
16937 d = gen_lowpart (QImode, low[0]);
16938 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16939 s = gen_rtx_EQ (QImode, flags, const0_rtx);
16940 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16942 d = gen_lowpart (QImode, high[0]);
16943 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16944 s = gen_rtx_NE (QImode, flags, const0_rtx);
16945 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16948 /* Otherwise, we can get the same results by manually performing
16949 a bit extract operation on bit 5/6, and then performing the two
16950 shifts. The two methods of getting 0/1 into low/high are exactly
16951 the same size. Avoiding the shift in the bit extract case helps
16952 pentium4 a bit; no one else seems to care much either way. */
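/* Worked example (illustrative): for a DImode 1 << n on a 32-bit target,
   bit 5 of n decides which half receives the set bit.  With n == 33 the
   high half gets the 1 and the low half is cleared; the final 32-bit
   shifts then use n & 31 == 1, giving high:low == 0x2:0x0, i.e.
   1 << 33 == 0x200000000. */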
16953 else
16955 rtx x;
16957 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
16958 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
16959 else
16960 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
16961 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
16963 emit_insn ((mode == DImode
16964 ? gen_lshrsi3
16965 : gen_lshrdi3) (high[0], high[0],
16966 GEN_INT (mode == DImode ? 5 : 6)));
16967 emit_insn ((mode == DImode
16968 ? gen_andsi3
16969 : gen_anddi3) (high[0], high[0], const1_rtx));
16970 emit_move_insn (low[0], high[0]);
16971 emit_insn ((mode == DImode
16972 ? gen_xorsi3
16973 : gen_xordi3) (low[0], low[0], const1_rtx));
16976 emit_insn ((mode == DImode
16977 ? gen_ashlsi3
16978 : gen_ashldi3) (low[0], low[0], operands[2]));
16979 emit_insn ((mode == DImode
16980 ? gen_ashlsi3
16981 : gen_ashldi3) (high[0], high[0], operands[2]));
16982 return;
16985 if (operands[1] == constm1_rtx)
16987 /* For -1 << N, we can avoid the shld instruction, because we
16988 know that we're shifting 0...31/63 ones into a -1. */
16989 emit_move_insn (low[0], constm1_rtx);
16990 if (optimize_insn_for_size_p ())
16991 emit_move_insn (high[0], low[0]);
16992 else
16993 emit_move_insn (high[0], constm1_rtx);
16995 else
16997 if (!rtx_equal_p (operands[0], operands[1]))
16998 emit_move_insn (operands[0], operands[1]);
17000 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17001 emit_insn ((mode == DImode
17002 ? gen_x86_shld
17003 : gen_x86_64_shld) (high[0], low[0], operands[2]));
17006 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
17008 if (TARGET_CMOVE && scratch)
17010 ix86_expand_clear (scratch);
17011 emit_insn ((mode == DImode
17012 ? gen_x86_shift_adj_1
17013 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
17014 scratch));
17016 else
17017 emit_insn ((mode == DImode
17018 ? gen_x86_shift_adj_2
17019 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
17022 void
17023 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
17025 rtx low[2], high[2];
17026 int count;
17027 const int single_width = mode == DImode ? 32 : 64;
17029 if (CONST_INT_P (operands[2]))
17031 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17032 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17034 if (count == single_width * 2 - 1)
17036 emit_move_insn (high[0], high[1]);
17037 emit_insn ((mode == DImode
17038 ? gen_ashrsi3
17039 : gen_ashrdi3) (high[0], high[0],
17040 GEN_INT (single_width - 1)));
17041 emit_move_insn (low[0], high[0]);
17044 else if (count >= single_width)
17046 emit_move_insn (low[0], high[1]);
17047 emit_move_insn (high[0], low[0]);
17048 emit_insn ((mode == DImode
17049 ? gen_ashrsi3
17050 : gen_ashrdi3) (high[0], high[0],
17051 GEN_INT (single_width - 1)));
17052 if (count > single_width)
17053 emit_insn ((mode == DImode
17054 ? gen_ashrsi3
17055 : gen_ashrdi3) (low[0], low[0],
17056 GEN_INT (count - single_width)));
17058 else
17060 if (!rtx_equal_p (operands[0], operands[1]))
17061 emit_move_insn (operands[0], operands[1]);
17062 emit_insn ((mode == DImode
17063 ? gen_x86_shrd
17064 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17065 emit_insn ((mode == DImode
17066 ? gen_ashrsi3
17067 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
17070 else
17072 if (!rtx_equal_p (operands[0], operands[1]))
17073 emit_move_insn (operands[0], operands[1]);
17075 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17077 emit_insn ((mode == DImode
17078 ? gen_x86_shrd
17079 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17080 emit_insn ((mode == DImode
17081 ? gen_ashrsi3
17082 : gen_ashrdi3) (high[0], high[0], operands[2]));
17084 if (TARGET_CMOVE && scratch)
17086 emit_move_insn (scratch, high[0]);
17087 emit_insn ((mode == DImode
17088 ? gen_ashrsi3
17089 : gen_ashrdi3) (scratch, scratch,
17090 GEN_INT (single_width - 1)));
17091 emit_insn ((mode == DImode
17092 ? gen_x86_shift_adj_1
17093 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17094 scratch));
17096 else
17097 emit_insn ((mode == DImode
17098 ? gen_x86_shift_adj_3
17099 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
17103 void
17104 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
17106 rtx low[2], high[2];
17107 int count;
17108 const int single_width = mode == DImode ? 32 : 64;
17110 if (CONST_INT_P (operands[2]))
17112 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
17113 count = INTVAL (operands[2]) & (single_width * 2 - 1);
17115 if (count >= single_width)
17117 emit_move_insn (low[0], high[1]);
17118 ix86_expand_clear (high[0]);
17120 if (count > single_width)
17121 emit_insn ((mode == DImode
17122 ? gen_lshrsi3
17123 : gen_lshrdi3) (low[0], low[0],
17124 GEN_INT (count - single_width)));
17126 else
17128 if (!rtx_equal_p (operands[0], operands[1]))
17129 emit_move_insn (operands[0], operands[1]);
17130 emit_insn ((mode == DImode
17131 ? gen_x86_shrd
17132 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
17133 emit_insn ((mode == DImode
17134 ? gen_lshrsi3
17135 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
17138 else
17140 if (!rtx_equal_p (operands[0], operands[1]))
17141 emit_move_insn (operands[0], operands[1]);
17143 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
17145 emit_insn ((mode == DImode
17146 ? gen_x86_shrd
17147 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
17148 emit_insn ((mode == DImode
17149 ? gen_lshrsi3
17150 : gen_lshrdi3) (high[0], high[0], operands[2]));
17152 /* Heh. By reversing the arguments, we can reuse this pattern. */
17153 if (TARGET_CMOVE && scratch)
17155 ix86_expand_clear (scratch);
17156 emit_insn ((mode == DImode
17157 ? gen_x86_shift_adj_1
17158 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
17159 scratch));
17161 else
17162 emit_insn ((mode == DImode
17163 ? gen_x86_shift_adj_2
17164 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
17168 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
17169 static void
17170 predict_jump (int prob)
17172 rtx insn = get_last_insn ();
17173 gcc_assert (JUMP_P (insn));
17174 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
17177 /* Helper function for the string operations below. Test VARIABLE whether
17178 it is aligned to VALUE bytes. If true, jump to the label. */
17179 static rtx
17180 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
17182 rtx label = gen_label_rtx ();
17183 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
17184 if (GET_MODE (variable) == DImode)
17185 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
17186 else
17187 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
17188 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
17189 1, label);
17190 if (epilogue)
17191 predict_jump (REG_BR_PROB_BASE * 50 / 100);
17192 else
17193 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17194 return label;
17197 /* Decrease COUNTREG by VALUE. */
17198 static void
17199 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
17201 if (GET_MODE (countreg) == DImode)
17202 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
17203 else
17204 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
17207 /* Zero extend possibly SImode EXP to Pmode register. */
17209 ix86_zero_extend_to_Pmode (rtx exp)
17211 rtx r;
17212 if (GET_MODE (exp) == VOIDmode)
17213 return force_reg (Pmode, exp);
17214 if (GET_MODE (exp) == Pmode)
17215 return copy_to_mode_reg (Pmode, exp);
17216 r = gen_reg_rtx (Pmode);
17217 emit_insn (gen_zero_extendsidi2 (r, exp));
17218 return r;
17221 /* Divide COUNTREG by SCALE. */
17222 static rtx
17223 scale_counter (rtx countreg, int scale)
17225 rtx sc;
17227 if (scale == 1)
17228 return countreg;
17229 if (CONST_INT_P (countreg))
17230 return GEN_INT (INTVAL (countreg) / scale);
17231 gcc_assert (REG_P (countreg));
17233 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
17234 GEN_INT (exact_log2 (scale)),
17235 NULL, 1, OPTAB_DIRECT);
17236 return sc;
17239 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
17240 DImode for constant loop counts. */
17242 static enum machine_mode
17243 counter_mode (rtx count_exp)
17245 if (GET_MODE (count_exp) != VOIDmode)
17246 return GET_MODE (count_exp);
17247 if (!CONST_INT_P (count_exp))
17248 return Pmode;
17249 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
17250 return DImode;
17251 return SImode;
17254 /* When SRCPTR is non-NULL, output a simple loop that moves the memory
17255 pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
17256 the overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
17257 the equivalent loop that sets memory to VALUE (supposed to be in MODE).
17259 The size is rounded down to a whole number of chunks moved at once.
17260 SRCMEM and DESTMEM provide MEM rtxes to feed proper aliasing info. */
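/* For example (illustrative), copying COUNT == 100 bytes with
   MODE == DImode and UNROLL == 4 moves 32 bytes per iteration; the loop
   bound is computed as 100 & ~31 == 96, so three iterations are executed
   and the remaining 4 bytes are left for the epilogue code. */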
17263 static void
17264 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
17265 rtx destptr, rtx srcptr, rtx value,
17266 rtx count, enum machine_mode mode, int unroll,
17267 int expected_size)
17269 rtx out_label, top_label, iter, tmp;
17270 enum machine_mode iter_mode = counter_mode (count);
17271 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
17272 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
17273 rtx size;
17274 rtx x_addr;
17275 rtx y_addr;
17276 int i;
17278 top_label = gen_label_rtx ();
17279 out_label = gen_label_rtx ();
17280 iter = gen_reg_rtx (iter_mode);
17282 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
17283 NULL, 1, OPTAB_DIRECT);
17284 /* Those two should combine. */
17285 if (piece_size == const1_rtx)
17287 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
17288 true, out_label);
17289 predict_jump (REG_BR_PROB_BASE * 10 / 100);
17291 emit_move_insn (iter, const0_rtx);
17293 emit_label (top_label);
17295 tmp = convert_modes (Pmode, iter_mode, iter, true);
17296 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17297 destmem = change_address (destmem, mode, x_addr);
17299 if (srcmem)
17301 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17302 srcmem = change_address (srcmem, mode, y_addr);
17304 /* When unrolling for chips that reorder memory reads and writes,
17305 we can save registers by using a single temporary.
17306 Also, using 4 temporaries is overkill in 32bit mode. */
17307 if (!TARGET_64BIT && 0)
17309 for (i = 0; i < unroll; i++)
17311 if (i)
17313 destmem =
17314 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17315 srcmem =
17316 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17318 emit_move_insn (destmem, srcmem);
17321 else
17323 rtx tmpreg[4];
17324 gcc_assert (unroll <= 4);
17325 for (i = 0; i < unroll; i++)
17327 tmpreg[i] = gen_reg_rtx (mode);
17328 if (i)
17330 srcmem =
17331 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17333 emit_move_insn (tmpreg[i], srcmem);
17335 for (i = 0; i < unroll; i++)
17337 if (i)
17339 destmem =
17340 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17342 emit_move_insn (destmem, tmpreg[i]);
17346 else
17347 for (i = 0; i < unroll; i++)
17349 if (i)
17350 destmem =
17351 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17352 emit_move_insn (destmem, value);
17355 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17356 true, OPTAB_LIB_WIDEN);
17357 if (tmp != iter)
17358 emit_move_insn (iter, tmp);
17360 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17361 true, top_label);
17362 if (expected_size != -1)
17364 expected_size /= GET_MODE_SIZE (mode) * unroll;
17365 if (expected_size == 0)
17366 predict_jump (0);
17367 else if (expected_size > REG_BR_PROB_BASE)
17368 predict_jump (REG_BR_PROB_BASE - 1);
17369 else
17370 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17372 else
17373 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17374 iter = ix86_zero_extend_to_Pmode (iter);
17375 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17376 true, OPTAB_LIB_WIDEN);
17377 if (tmp != destptr)
17378 emit_move_insn (destptr, tmp);
17379 if (srcptr)
17381 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17382 true, OPTAB_LIB_WIDEN);
17383 if (tmp != srcptr)
17384 emit_move_insn (srcptr, tmp);
17386 emit_label (out_label);
17389 /* Output a "rep; mov" instruction.
17390 Arguments have the same meaning as for the previous function. */
17391 static void
17392 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17393 rtx destptr, rtx srcptr,
17394 rtx count,
17395 enum machine_mode mode)
17397 rtx destexp;
17398 rtx srcexp;
17399 rtx countreg;
17401 /* If the size is known and divisible by 4, it is shorter to use SImode rep movs. */
17402 if (mode == QImode && CONST_INT_P (count)
17403 && !(INTVAL (count) & 3))
17404 mode = SImode;
17406 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17407 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17408 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17409 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17410 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17411 if (mode != QImode)
17413 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17414 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17415 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17416 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17417 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17418 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17420 else
17422 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17423 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
17425 if (CONST_INT_P (count))
17427 count = GEN_INT (INTVAL (count)
17428 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17429 destmem = shallow_copy_rtx (destmem);
17430 srcmem = shallow_copy_rtx (srcmem);
17431 set_mem_size (destmem, count);
17432 set_mem_size (srcmem, count);
17434 else
17436 if (MEM_SIZE (destmem))
17437 set_mem_size (destmem, NULL_RTX);
17438 if (MEM_SIZE (srcmem))
17439 set_mem_size (srcmem, NULL_RTX);
17441 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17442 destexp, srcexp));
17445 /* Output a "rep; stos" instruction.
17446 Arguments have the same meaning as for the previous function. */
17447 static void
17448 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17449 rtx count, enum machine_mode mode,
17450 rtx orig_value)
17452 rtx destexp;
17453 rtx countreg;
17455 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17456 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17457 value = force_reg (mode, gen_lowpart (mode, value));
17458 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17459 if (mode != QImode)
17461 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17462 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17463 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17465 else
17466 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17467 if (orig_value == const0_rtx && CONST_INT_P (count))
17469 count = GEN_INT (INTVAL (count)
17470 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17471 destmem = shallow_copy_rtx (destmem);
17472 set_mem_size (destmem, count);
17474 else if (MEM_SIZE (destmem))
17475 set_mem_size (destmem, NULL_RTX);
17476 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17479 static void
17480 emit_strmov (rtx destmem, rtx srcmem,
17481 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17483 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17484 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17485 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17488 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
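/* For example (illustrative), a constant COUNT of 13 bytes (binary 1101)
   is peeled bit by bit below: an 8-byte move (one DImode move on 64-bit
   targets, two SImode moves otherwise), then a 4-byte move, then a
   1-byte move, each at the appropriate offset. */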
17489 static void
17490 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17491 rtx destptr, rtx srcptr, rtx count, int max_size)
17493 rtx src, dest;
17494 if (CONST_INT_P (count))
17496 HOST_WIDE_INT countval = INTVAL (count);
17497 int offset = 0;
17499 if ((countval & 0x10) && max_size > 16)
17501 if (TARGET_64BIT)
17503 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17504 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17506 else
17507 gcc_unreachable ();
17508 offset += 16;
17510 if ((countval & 0x08) && max_size > 8)
17512 if (TARGET_64BIT)
17513 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17514 else
17516 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17517 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17519 offset += 8;
17521 if ((countval & 0x04) && max_size > 4)
17523 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17524 offset += 4;
17526 if ((countval & 0x02) && max_size > 2)
17528 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17529 offset += 2;
17531 if ((countval & 0x01) && max_size > 1)
17533 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17534 offset += 1;
17536 return;
17538 if (max_size > 8)
17540 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17541 count, 1, OPTAB_DIRECT);
17542 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17543 count, QImode, 1, 4);
17544 return;
17547 /* When single stringop insns are available, we can cheaply advance the dest and src pointers.
17548 Otherwise we save code size by maintaining an offset (zero is readily
17549 available from the preceding rep operation) and using x86 addressing modes. */
17551 if (TARGET_SINGLE_STRINGOP)
17553 if (max_size > 4)
17555 rtx label = ix86_expand_aligntest (count, 4, true);
17556 src = change_address (srcmem, SImode, srcptr);
17557 dest = change_address (destmem, SImode, destptr);
17558 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17559 emit_label (label);
17560 LABEL_NUSES (label) = 1;
17562 if (max_size > 2)
17564 rtx label = ix86_expand_aligntest (count, 2, true);
17565 src = change_address (srcmem, HImode, srcptr);
17566 dest = change_address (destmem, HImode, destptr);
17567 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17568 emit_label (label);
17569 LABEL_NUSES (label) = 1;
17571 if (max_size > 1)
17573 rtx label = ix86_expand_aligntest (count, 1, true);
17574 src = change_address (srcmem, QImode, srcptr);
17575 dest = change_address (destmem, QImode, destptr);
17576 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17577 emit_label (label);
17578 LABEL_NUSES (label) = 1;
17581 else
17583 rtx offset = force_reg (Pmode, const0_rtx);
17584 rtx tmp;
17586 if (max_size > 4)
17588 rtx label = ix86_expand_aligntest (count, 4, true);
17589 src = change_address (srcmem, SImode, srcptr);
17590 dest = change_address (destmem, SImode, destptr);
17591 emit_move_insn (dest, src);
17592 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17593 true, OPTAB_LIB_WIDEN);
17594 if (tmp != offset)
17595 emit_move_insn (offset, tmp);
17596 emit_label (label);
17597 LABEL_NUSES (label) = 1;
17599 if (max_size > 2)
17601 rtx label = ix86_expand_aligntest (count, 2, true);
17602 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17603 src = change_address (srcmem, HImode, tmp);
17604 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17605 dest = change_address (destmem, HImode, tmp);
17606 emit_move_insn (dest, src);
17607 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17608 true, OPTAB_LIB_WIDEN);
17609 if (tmp != offset)
17610 emit_move_insn (offset, tmp);
17611 emit_label (label);
17612 LABEL_NUSES (label) = 1;
17614 if (max_size > 1)
17616 rtx label = ix86_expand_aligntest (count, 1, true);
17617 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17618 src = change_address (srcmem, QImode, tmp);
17619 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17620 dest = change_address (destmem, QImode, tmp);
17621 emit_move_insn (dest, src);
17622 emit_label (label);
17623 LABEL_NUSES (label) = 1;
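/* Illustrative sketch, not part of the original code: for a constant
   residual count the epilogue above tests the low bits of the count and
   emits one move per set bit (16, 8, 4, 2 and 1 bytes).  A plain-C
   rendering of the same pattern, with a hypothetical helper name and
   memcpy from <string.h>:  */
#if 0
static void
copy_tail (char *dst, const char *src, unsigned int countval)
{
  unsigned int offset = 0;
  if (countval & 8)
    {
      memcpy (dst + offset, src + offset, 8);
      offset += 8;
    }
  if (countval & 4)
    {
      memcpy (dst + offset, src + offset, 4);
      offset += 4;
    }
  if (countval & 2)
    {
      memcpy (dst + offset, src + offset, 2);
      offset += 2;
    }
  if (countval & 1)
    dst[offset] = src[offset];
}
#endif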
17628 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17629 static void
17630 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17631 rtx count, int max_size)
17633 count =
17634 expand_simple_binop (counter_mode (count), AND, count,
17635 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17636 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17637 gen_lowpart (QImode, value), count, QImode,
17638 1, max_size / 2);
17641 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17642 static void
17643 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17645 rtx dest;
17647 if (CONST_INT_P (count))
17649 HOST_WIDE_INT countval = INTVAL (count);
17650 int offset = 0;
17652 if ((countval & 0x10) && max_size > 16)
17654 if (TARGET_64BIT)
17656 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17657 emit_insn (gen_strset (destptr, dest, value));
17658 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17659 emit_insn (gen_strset (destptr, dest, value));
17661 else
17662 gcc_unreachable ();
17663 offset += 16;
17665 if ((countval & 0x08) && max_size > 8)
17667 if (TARGET_64BIT)
17669 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17670 emit_insn (gen_strset (destptr, dest, value));
17672 else
17674 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17675 emit_insn (gen_strset (destptr, dest, value));
17676 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17677 emit_insn (gen_strset (destptr, dest, value));
17679 offset += 8;
17681 if ((countval & 0x04) && max_size > 4)
17683 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17684 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17685 offset += 4;
17687 if ((countval & 0x02) && max_size > 2)
17689 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17690 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17691 offset += 2;
17693 if ((countval & 0x01) && max_size > 1)
17695 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17696 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17697 offset += 1;
17699 return;
17701 if (max_size > 32)
17703 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17704 return;
17706 if (max_size > 16)
17708 rtx label = ix86_expand_aligntest (count, 16, true);
17709 if (TARGET_64BIT)
17711 dest = change_address (destmem, DImode, destptr);
17712 emit_insn (gen_strset (destptr, dest, value));
17713 emit_insn (gen_strset (destptr, dest, value));
17715 else
17717 dest = change_address (destmem, SImode, destptr);
17718 emit_insn (gen_strset (destptr, dest, value));
17719 emit_insn (gen_strset (destptr, dest, value));
17720 emit_insn (gen_strset (destptr, dest, value));
17721 emit_insn (gen_strset (destptr, dest, value));
17723 emit_label (label);
17724 LABEL_NUSES (label) = 1;
17726 if (max_size > 8)
17728 rtx label = ix86_expand_aligntest (count, 8, true);
17729 if (TARGET_64BIT)
17731 dest = change_address (destmem, DImode, destptr);
17732 emit_insn (gen_strset (destptr, dest, value));
17734 else
17736 dest = change_address (destmem, SImode, destptr);
17737 emit_insn (gen_strset (destptr, dest, value));
17738 emit_insn (gen_strset (destptr, dest, value));
17740 emit_label (label);
17741 LABEL_NUSES (label) = 1;
17743 if (max_size > 4)
17745 rtx label = ix86_expand_aligntest (count, 4, true);
17746 dest = change_address (destmem, SImode, destptr);
17747 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17748 emit_label (label);
17749 LABEL_NUSES (label) = 1;
17751 if (max_size > 2)
17753 rtx label = ix86_expand_aligntest (count, 2, true);
17754 dest = change_address (destmem, HImode, destptr);
17755 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17756 emit_label (label);
17757 LABEL_NUSES (label) = 1;
17759 if (max_size > 1)
17761 rtx label = ix86_expand_aligntest (count, 1, true);
17762 dest = change_address (destmem, QImode, destptr);
17763 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17764 emit_label (label);
17765 LABEL_NUSES (label) = 1;
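/* Illustrative sketch, not part of the original code: for a variable
   count the epilogue above emits a chain of "test one bit of COUNT,
   jump around one store" blocks rather than a loop.  The same structure
   in plain C, with a hypothetical helper name and memset from
   <string.h>:  */
#if 0
static void
set_tail (unsigned char *dst, unsigned char value, unsigned int count)
{
  if (count & 4)
    {
      memset (dst, value, 4);
      dst += 4;
    }
  if (count & 2)
    {
      memset (dst, value, 2);
      dst += 2;
    }
  if (count & 1)
    *dst = value;
}
#endif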
17769 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN, to
17770 DESIRED_ALIGNMENT. */
17771 static void
17772 expand_movmem_prologue (rtx destmem, rtx srcmem,
17773 rtx destptr, rtx srcptr, rtx count,
17774 int align, int desired_alignment)
17776 if (align <= 1 && desired_alignment > 1)
17778 rtx label = ix86_expand_aligntest (destptr, 1, false);
17779 srcmem = change_address (srcmem, QImode, srcptr);
17780 destmem = change_address (destmem, QImode, destptr);
17781 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17782 ix86_adjust_counter (count, 1);
17783 emit_label (label);
17784 LABEL_NUSES (label) = 1;
17786 if (align <= 2 && desired_alignment > 2)
17788 rtx label = ix86_expand_aligntest (destptr, 2, false);
17789 srcmem = change_address (srcmem, HImode, srcptr);
17790 destmem = change_address (destmem, HImode, destptr);
17791 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17792 ix86_adjust_counter (count, 2);
17793 emit_label (label);
17794 LABEL_NUSES (label) = 1;
17796 if (align <= 4 && desired_alignment > 4)
17798 rtx label = ix86_expand_aligntest (destptr, 4, false);
17799 srcmem = change_address (srcmem, SImode, srcptr);
17800 destmem = change_address (destmem, SImode, destptr);
17801 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17802 ix86_adjust_counter (count, 4);
17803 emit_label (label);
17804 LABEL_NUSES (label) = 1;
17806 gcc_assert (desired_alignment <= 8);
17809 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17810 ALIGN_BYTES is how many bytes need to be copied. */
17811 static rtx
17812 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17813 int desired_align, int align_bytes)
17815 rtx src = *srcp;
17816 rtx src_size, dst_size;
17817 int off = 0;
17818 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17819 if (src_align_bytes >= 0)
17820 src_align_bytes = desired_align - src_align_bytes;
17821 src_size = MEM_SIZE (src);
17822 dst_size = MEM_SIZE (dst);
17823 if (align_bytes & 1)
17825 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17826 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17827 off = 1;
17828 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17830 if (align_bytes & 2)
17832 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17833 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17834 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17835 set_mem_align (dst, 2 * BITS_PER_UNIT);
17836 if (src_align_bytes >= 0
17837 && (src_align_bytes & 1) == (align_bytes & 1)
17838 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17839 set_mem_align (src, 2 * BITS_PER_UNIT);
17840 off = 2;
17841 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17843 if (align_bytes & 4)
17845 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17846 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17847 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17848 set_mem_align (dst, 4 * BITS_PER_UNIT);
17849 if (src_align_bytes >= 0)
17851 unsigned int src_align = 0;
17852 if ((src_align_bytes & 3) == (align_bytes & 3))
17853 src_align = 4;
17854 else if ((src_align_bytes & 1) == (align_bytes & 1))
17855 src_align = 2;
17856 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17857 set_mem_align (src, src_align * BITS_PER_UNIT);
17859 off = 4;
17860 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17862 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17863 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17864 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17865 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17866 if (src_align_bytes >= 0)
17868 unsigned int src_align = 0;
17869 if ((src_align_bytes & 7) == (align_bytes & 7))
17870 src_align = 8;
17871 else if ((src_align_bytes & 3) == (align_bytes & 3))
17872 src_align = 4;
17873 else if ((src_align_bytes & 1) == (align_bytes & 1))
17874 src_align = 2;
17875 if (src_align > (unsigned int) desired_align)
17876 src_align = desired_align;
17877 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17878 set_mem_align (src, src_align * BITS_PER_UNIT);
17880 if (dst_size)
17881 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17882 if (src_size)
17883 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
17884 *srcp = src;
17885 return dst;
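/* Illustrative sketch, not part of the original code: with a known
   ALIGN_BYTES the prologue above peels off 1-, 2- and 4-byte moves in
   that order, so every later access is naturally aligned on the
   destination.  A plain-C rendering with a hypothetical helper name and
   memcpy from <string.h>:  */
#if 0
static unsigned int
peel_to_alignment (char *dst, const char *src, unsigned int align_bytes)
{
  unsigned int off = 0;
  if (align_bytes & 1)
    {
      dst[0] = src[0];
      off = 1;
    }
  if (align_bytes & 2)
    {
      memcpy (dst + off, src + off, 2);
      off += 2;
    }
  if (align_bytes & 4)
    {
      memcpy (dst + off, src + off, 4);
      off += 4;
    }
  return off;   /* Bytes already copied; the caller reduces the count.  */
}
#endif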
17888 /* Set enough of DEST to align DEST, known to be aligned by ALIGN, to
17889 DESIRED_ALIGNMENT. */
17890 static void
17891 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17892 int align, int desired_alignment)
17894 if (align <= 1 && desired_alignment > 1)
17896 rtx label = ix86_expand_aligntest (destptr, 1, false);
17897 destmem = change_address (destmem, QImode, destptr);
17898 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17899 ix86_adjust_counter (count, 1);
17900 emit_label (label);
17901 LABEL_NUSES (label) = 1;
17903 if (align <= 2 && desired_alignment > 2)
17905 rtx label = ix86_expand_aligntest (destptr, 2, false);
17906 destmem = change_address (destmem, HImode, destptr);
17907 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17908 ix86_adjust_counter (count, 2);
17909 emit_label (label);
17910 LABEL_NUSES (label) = 1;
17912 if (align <= 4 && desired_alignment > 4)
17914 rtx label = ix86_expand_aligntest (destptr, 4, false);
17915 destmem = change_address (destmem, SImode, destptr);
17916 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
17917 ix86_adjust_counter (count, 4);
17918 emit_label (label);
17919 LABEL_NUSES (label) = 1;
17921 gcc_assert (desired_alignment <= 8);
17924 /* Set enough of DST to align DST, known to be aligned by ALIGN, to
17925 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
17926 static rtx
17927 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
17928 int desired_align, int align_bytes)
17930 int off = 0;
17931 rtx dst_size = MEM_SIZE (dst);
17932 if (align_bytes & 1)
17934 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17935 off = 1;
17936 emit_insn (gen_strset (destreg, dst,
17937 gen_lowpart (QImode, value)));
17939 if (align_bytes & 2)
17941 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17942 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17943 set_mem_align (dst, 2 * BITS_PER_UNIT);
17944 off = 2;
17945 emit_insn (gen_strset (destreg, dst,
17946 gen_lowpart (HImode, value)));
17948 if (align_bytes & 4)
17950 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17951 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17952 set_mem_align (dst, 4 * BITS_PER_UNIT);
17953 off = 4;
17954 emit_insn (gen_strset (destreg, dst,
17955 gen_lowpart (SImode, value)));
17957 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17958 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17959 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17960 if (dst_size)
17961 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17962 return dst;
17965 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
17966 static enum stringop_alg
17967 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
17968 int *dynamic_check)
17970 const struct stringop_algs * algs;
17971 bool optimize_for_speed;
17972 /* Algorithms using the rep prefix want at least edi and ecx;
17973 additionally, memset wants eax and memcpy wants esi. Don't
17974 consider such algorithms if the user has appropriated those
17975 registers for their own purposes. */
17976 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
17977 || (memset
17978 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
17980 #define ALG_USABLE_P(alg) (rep_prefix_usable \
17981 || (alg != rep_prefix_1_byte \
17982 && alg != rep_prefix_4_byte \
17983 && alg != rep_prefix_8_byte))
17984 const struct processor_costs *cost;
17986 /* Even if the string operation call is cold, we still might spend a lot
17987 of time processing large blocks. */
17988 if (optimize_function_for_size_p (cfun)
17989 || (optimize_insn_for_size_p ()
17990 && expected_size != -1 && expected_size < 256))
17991 optimize_for_speed = false;
17992 else
17993 optimize_for_speed = true;
17995 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
17997 *dynamic_check = -1;
17998 if (memset)
17999 algs = &cost->memset[TARGET_64BIT != 0];
18000 else
18001 algs = &cost->memcpy[TARGET_64BIT != 0];
18002 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
18003 return stringop_alg;
18004 /* rep; movq or rep; movl is the smallest variant. */
18005 else if (!optimize_for_speed)
18007 if (!count || (count & 3))
18008 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
18009 else
18010 return rep_prefix_usable ? rep_prefix_4_byte : loop;
18012 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
18014 else if (expected_size != -1 && expected_size < 4)
18015 return loop_1_byte;
18016 else if (expected_size != -1)
18018 unsigned int i;
18019 enum stringop_alg alg = libcall;
18020 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18022 /* We get here if the algorithms that were not libcall-based
18023 were rep-prefix based and we are unable to use rep prefixes
18024 based on global register usage. Break out of the loop and
18025 use the heuristic below. */
18026 if (algs->size[i].max == 0)
18027 break;
18028 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
18030 enum stringop_alg candidate = algs->size[i].alg;
18032 if (candidate != libcall && ALG_USABLE_P (candidate))
18033 alg = candidate;
18034 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking the
18035 last non-libcall inline algorithm. */
18036 if (TARGET_INLINE_ALL_STRINGOPS)
18038 /* When the current size is best to be copied by a libcall,
18039 but we are still forced to inline, run the heuristic below
18040 that will pick code for medium sized blocks. */
18041 if (alg != libcall)
18042 return alg;
18043 break;
18045 else if (ALG_USABLE_P (candidate))
18046 return candidate;
18049 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
18051 /* When asked to inline the call anyway, try to pick a meaningful choice.
18052 We look for the maximal size of block that is faster to copy by hand and
18053 take blocks of at most that size, guessing that the average size will
18054 be roughly half of the block.
18056 If this turns out to be bad, we might simply specify the preferred
18057 choice in ix86_costs. */
18058 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18059 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
18061 int max = -1;
18062 enum stringop_alg alg;
18063 int i;
18064 bool any_alg_usable_p = true;
18066 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
18068 enum stringop_alg candidate = algs->size[i].alg;
18069 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
18071 if (candidate != libcall && candidate
18072 && ALG_USABLE_P (candidate))
18073 max = algs->size[i].max;
18075 /* If there aren't any usable algorithms, then recursing on
18076 smaller sizes isn't going to find anything. Just return the
18077 simple byte-at-a-time copy loop. */
18078 if (!any_alg_usable_p)
18080 /* Pick something reasonable. */
18081 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18082 *dynamic_check = 128;
18083 return loop_1_byte;
18085 if (max == -1)
18086 max = 4096;
18087 alg = decide_alg (count, max / 2, memset, dynamic_check);
18088 gcc_assert (*dynamic_check == -1);
18089 gcc_assert (alg != libcall);
18090 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
18091 *dynamic_check = max;
18092 return alg;
18094 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
18095 #undef ALG_USABLE_P
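/* Illustrative sketch, not part of the original code: the stringop cost
   tables are (max-size, algorithm) pairs, and the known-size path above
   essentially walks them, taking the first usable algorithm whose
   threshold covers the expected size (-1 meaning "no upper bound").
   Roughly, with hypothetical types and names:  */
#if 0
struct alg_entry { long max_size; int alg; };

static int
pick_alg (const struct alg_entry *table, int n, long expected_size,
          int fallback_alg)
{
  int i;
  for (i = 0; i < n; i++)
    if (table[i].max_size == -1 || table[i].max_size >= expected_size)
      return table[i].alg;
  return fallback_alg;
}
#endif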
18098 /* Decide on alignment. We know that the operand is already aligned to ALIGN
18099 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
18100 static int
18101 decide_alignment (int align,
18102 enum stringop_alg alg,
18103 int expected_size)
18105 int desired_align = 0;
18106 switch (alg)
18108 case no_stringop:
18109 gcc_unreachable ();
18110 case loop:
18111 case unrolled_loop:
18112 desired_align = GET_MODE_SIZE (Pmode);
18113 break;
18114 case rep_prefix_8_byte:
18115 desired_align = 8;
18116 break;
18117 case rep_prefix_4_byte:
18118 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18119 copying a whole cache line at once. */
18120 if (TARGET_PENTIUMPRO)
18121 desired_align = 8;
18122 else
18123 desired_align = 4;
18124 break;
18125 case rep_prefix_1_byte:
18126 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
18127 copying a whole cache line at once. */
18128 if (TARGET_PENTIUMPRO)
18129 desired_align = 8;
18130 else
18131 desired_align = 1;
18132 break;
18133 case loop_1_byte:
18134 desired_align = 1;
18135 break;
18136 case libcall:
18137 return 0;
18140 if (optimize_size)
18141 desired_align = 1;
18142 if (desired_align < align)
18143 desired_align = align;
18144 if (expected_size != -1 && expected_size < 4)
18145 desired_align = align;
18146 return desired_align;
18149 /* Return the smallest power of 2 greater than VAL. */
18150 static int
18151 smallest_pow2_greater_than (int val)
18153 int ret = 1;
18154 while (ret <= val)
18155 ret <<= 1;
18156 return ret;
18159 /* Expand string move (memcpy) operation. Use i386 string operations when
18160 profitable. expand_setmem contains similar code. The code depends upon
18161 architecture, block size and alignment, but always has the same
18162 overall structure:
18164 1) Prologue guard: Conditional that jumps up to epilogues for small
18165 blocks that can be handled by the epilogue alone. This is faster but
18166 also needed for correctness, since the prologue assumes the block is larger
18167 than the desired alignment.
18169 Optional dynamic check for size and libcall for large
18170 blocks is emitted here too, with -minline-stringops-dynamically.
18172 2) Prologue: copy the first few bytes in order to get the destination
18173 aligned to DESIRED_ALIGN. It is emitted only when ALIGN is less than
18174 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
18175 We emit either a jump tree on power-of-two sized blocks, or a byte loop.
18177 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
18178 with specified algorithm.
18180 4) Epilogue: code copying tail of the block that is too small to be
18181 handled by main body (or up to size guarded by prologue guard). */
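/* Illustrative sketch, not part of the original code: for a concrete
   feel of the four steps above, suppose a 77-byte copy, a destination
   that needs 7 bytes to reach 8-byte alignment, and a main loop working
   in 16-byte chunks.  All numbers below are hypothetical:  */
#if 0
static void
movmem_split_example (void)
{
  unsigned int count = 77;        /* total byte count */
  unsigned int prologue = 7;      /* bytes copied to reach 8-byte alignment */
  unsigned int size_needed = 16;  /* chunk size of the main loop */
  unsigned int main_bytes = ((count - prologue) / size_needed) * size_needed;
  unsigned int epilogue = count - prologue - main_bytes;
  /* prologue == 7, main_bytes == 64, epilogue == 6; together 77 bytes.  */
  (void) epilogue;
}
#endif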
18184 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
18185 rtx expected_align_exp, rtx expected_size_exp)
18187 rtx destreg;
18188 rtx srcreg;
18189 rtx label = NULL;
18190 rtx tmp;
18191 rtx jump_around_label = NULL;
18192 HOST_WIDE_INT align = 1;
18193 unsigned HOST_WIDE_INT count = 0;
18194 HOST_WIDE_INT expected_size = -1;
18195 int size_needed = 0, epilogue_size_needed;
18196 int desired_align = 0, align_bytes = 0;
18197 enum stringop_alg alg;
18198 int dynamic_check;
18199 bool need_zero_guard = false;
18201 if (CONST_INT_P (align_exp))
18202 align = INTVAL (align_exp);
18203 /* i386 can do misaligned access at a reasonably increased cost. */
18204 if (CONST_INT_P (expected_align_exp)
18205 && INTVAL (expected_align_exp) > align)
18206 align = INTVAL (expected_align_exp);
18207 /* ALIGN is the minimum of destination and source alignment, but we care here
18208 just about destination alignment. */
18209 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
18210 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
18212 if (CONST_INT_P (count_exp))
18213 count = expected_size = INTVAL (count_exp);
18214 if (CONST_INT_P (expected_size_exp) && count == 0)
18215 expected_size = INTVAL (expected_size_exp);
18217 /* Make sure we don't need to care about overflow later on. */
18218 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18219 return 0;
18221 /* Step 0: Decide on preferred algorithm, desired alignment and
18222 size of chunks to be copied by main loop. */
18224 alg = decide_alg (count, expected_size, false, &dynamic_check);
18225 desired_align = decide_alignment (align, alg, expected_size);
18227 if (!TARGET_ALIGN_STRINGOPS)
18228 align = desired_align;
18230 if (alg == libcall)
18231 return 0;
18232 gcc_assert (alg != no_stringop);
18233 if (!count)
18234 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
18235 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18236 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
18237 switch (alg)
18239 case libcall:
18240 case no_stringop:
18241 gcc_unreachable ();
18242 case loop:
18243 need_zero_guard = true;
18244 size_needed = GET_MODE_SIZE (Pmode);
18245 break;
18246 case unrolled_loop:
18247 need_zero_guard = true;
18248 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
18249 break;
18250 case rep_prefix_8_byte:
18251 size_needed = 8;
18252 break;
18253 case rep_prefix_4_byte:
18254 size_needed = 4;
18255 break;
18256 case rep_prefix_1_byte:
18257 size_needed = 1;
18258 break;
18259 case loop_1_byte:
18260 need_zero_guard = true;
18261 size_needed = 1;
18262 break;
18265 epilogue_size_needed = size_needed;
18267 /* Step 1: Prologue guard. */
18269 /* Alignment code needs count to be in register. */
18270 if (CONST_INT_P (count_exp) && desired_align > align)
18272 if (INTVAL (count_exp) > desired_align
18273 && INTVAL (count_exp) > size_needed)
18275 align_bytes
18276 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18277 if (align_bytes <= 0)
18278 align_bytes = 0;
18279 else
18280 align_bytes = desired_align - align_bytes;
18282 if (align_bytes == 0)
18283 count_exp = force_reg (counter_mode (count_exp), count_exp);
18285 gcc_assert (desired_align >= 1 && align >= 1);
18287 /* Ensure that alignment prologue won't copy past end of block. */
18288 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18290 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18291 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18292 Make sure it is a power of 2. */
18293 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18295 if (count)
18297 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18299 /* If main algorithm works on QImode, no epilogue is needed.
18300 For small sizes just don't align anything. */
18301 if (size_needed == 1)
18302 desired_align = align;
18303 else
18304 goto epilogue;
18307 else
18309 label = gen_label_rtx ();
18310 emit_cmp_and_jump_insns (count_exp,
18311 GEN_INT (epilogue_size_needed),
18312 LTU, 0, counter_mode (count_exp), 1, label);
18313 if (expected_size == -1 || expected_size < epilogue_size_needed)
18314 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18315 else
18316 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18320 /* Emit code to decide at run time whether a library call or inline code
18321 should be used. */
18322 if (dynamic_check != -1)
18324 if (CONST_INT_P (count_exp))
18326 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18328 emit_block_move_via_libcall (dst, src, count_exp, false);
18329 count_exp = const0_rtx;
18330 goto epilogue;
18333 else
18335 rtx hot_label = gen_label_rtx ();
18336 jump_around_label = gen_label_rtx ();
18337 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18338 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18339 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18340 emit_block_move_via_libcall (dst, src, count_exp, false);
18341 emit_jump (jump_around_label);
18342 emit_label (hot_label);
18346 /* Step 2: Alignment prologue. */
18348 if (desired_align > align)
18350 if (align_bytes == 0)
18352 /* Except for the first move in the epilogue, we no longer know
18353 the constant offset in the aliasing info. It doesn't seem worth
18354 the pain to maintain it for the first move, so throw away
18355 the info early. */
18356 src = change_address (src, BLKmode, srcreg);
18357 dst = change_address (dst, BLKmode, destreg);
18358 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18359 desired_align);
18361 else
18363 /* If we know how many bytes need to be stored before dst is
18364 sufficiently aligned, maintain aliasing info accurately. */
18365 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18366 desired_align, align_bytes);
18367 count_exp = plus_constant (count_exp, -align_bytes);
18368 count -= align_bytes;
18370 if (need_zero_guard
18371 && (count < (unsigned HOST_WIDE_INT) size_needed
18372 || (align_bytes == 0
18373 && count < ((unsigned HOST_WIDE_INT) size_needed
18374 + desired_align - align))))
18376 /* It is possible that we copied enough so the main loop will not
18377 execute. */
18378 gcc_assert (size_needed > 1);
18379 if (label == NULL_RTX)
18380 label = gen_label_rtx ();
18381 emit_cmp_and_jump_insns (count_exp,
18382 GEN_INT (size_needed),
18383 LTU, 0, counter_mode (count_exp), 1, label);
18384 if (expected_size == -1
18385 || expected_size < (desired_align - align) / 2 + size_needed)
18386 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18387 else
18388 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18391 if (label && size_needed == 1)
18393 emit_label (label);
18394 LABEL_NUSES (label) = 1;
18395 label = NULL;
18396 epilogue_size_needed = 1;
18398 else if (label == NULL_RTX)
18399 epilogue_size_needed = size_needed;
18401 /* Step 3: Main loop. */
18403 switch (alg)
18405 case libcall:
18406 case no_stringop:
18407 gcc_unreachable ();
18408 case loop_1_byte:
18409 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18410 count_exp, QImode, 1, expected_size);
18411 break;
18412 case loop:
18413 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18414 count_exp, Pmode, 1, expected_size);
18415 break;
18416 case unrolled_loop:
18417 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
18418 registers for 4 temporaries anyway. */
18419 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18420 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18421 expected_size);
18422 break;
18423 case rep_prefix_8_byte:
18424 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18425 DImode);
18426 break;
18427 case rep_prefix_4_byte:
18428 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18429 SImode);
18430 break;
18431 case rep_prefix_1_byte:
18432 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18433 QImode);
18434 break;
18436 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18437 if (CONST_INT_P (count_exp))
18439 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18440 (count / size_needed) * size_needed);
18441 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18442 (count / size_needed) * size_needed);
18444 else
18446 src = change_address (src, BLKmode, srcreg);
18447 dst = change_address (dst, BLKmode, destreg);
18450 /* Step 4: Epilogue to copy the remaining bytes. */
18451 epilogue:
18452 if (label)
18454 /* When the main loop is done, COUNT_EXP might hold the original count,
18455 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18456 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18457 bytes. Compensate if needed. */
18459 if (size_needed < epilogue_size_needed)
18461 tmp =
18462 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18463 GEN_INT (size_needed - 1), count_exp, 1,
18464 OPTAB_DIRECT);
18465 if (tmp != count_exp)
18466 emit_move_insn (count_exp, tmp);
18468 emit_label (label);
18469 LABEL_NUSES (label) = 1;
18472 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18473 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18474 epilogue_size_needed);
18475 if (jump_around_label)
18476 emit_label (jump_around_label);
18477 return 1;
18480 /* Helper function for memset. For QImode value 0xXY produce
18481 0xXYXYXYXY of the width specified by MODE. This is essentially
18482 VAL * 0x01010101, but we can do slightly better than
18483 synth_mult by unwinding the sequence by hand on CPUs with
18484 slow multiply. */
18485 static rtx
18486 promote_duplicated_reg (enum machine_mode mode, rtx val)
18488 enum machine_mode valmode = GET_MODE (val);
18489 rtx tmp;
18490 int nops = mode == DImode ? 3 : 2;
18492 gcc_assert (mode == SImode || mode == DImode);
18493 if (val == const0_rtx)
18494 return copy_to_mode_reg (mode, const0_rtx);
18495 if (CONST_INT_P (val))
18497 HOST_WIDE_INT v = INTVAL (val) & 255;
18499 v |= v << 8;
18500 v |= v << 16;
18501 if (mode == DImode)
18502 v |= (v << 16) << 16;
18503 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18506 if (valmode == VOIDmode)
18507 valmode = QImode;
18508 if (valmode != QImode)
18509 val = gen_lowpart (QImode, val);
18510 if (mode == QImode)
18511 return val;
18512 if (!TARGET_PARTIAL_REG_STALL)
18513 nops--;
18514 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18515 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18516 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18517 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18519 rtx reg = convert_modes (mode, QImode, val, true);
18520 tmp = promote_duplicated_reg (mode, const1_rtx);
18521 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18522 OPTAB_DIRECT);
18524 else
18526 rtx reg = convert_modes (mode, QImode, val, true);
18528 if (!TARGET_PARTIAL_REG_STALL)
18529 if (mode == SImode)
18530 emit_insn (gen_movsi_insv_1 (reg, reg));
18531 else
18532 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18533 else
18535 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18536 NULL, 1, OPTAB_DIRECT);
18537 reg =
18538 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18540 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18541 NULL, 1, OPTAB_DIRECT);
18542 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18543 if (mode == SImode)
18544 return reg;
18545 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18546 NULL, 1, OPTAB_DIRECT);
18547 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18548 return reg;
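/* Illustrative sketch, not part of the original code: the shift/or
   sequence above just replicates the low byte across the register,
   which is the same value as multiplying by 0x01010101 (or by
   0x0101010101010101 for DImode).  In plain C, with a hypothetical
   helper name:  */
#if 0
static unsigned int
replicate_byte_32 (unsigned char b)
{
  unsigned int v = b;
  v |= v << 8;      /* 0x0000XYXY */
  v |= v << 16;     /* 0xXYXYXYXY */
  /* The same value as (unsigned int) b * 0x01010101u.  */
  return v;
}
#endif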
18552 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
18553 will be needed by the main loop copying SIZE_NEEDED chunks and by the prologue
18554 getting alignment from ALIGN to DESIRED_ALIGN. */
18555 static rtx
18556 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18558 rtx promoted_val;
18560 if (TARGET_64BIT
18561 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18562 promoted_val = promote_duplicated_reg (DImode, val);
18563 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18564 promoted_val = promote_duplicated_reg (SImode, val);
18565 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18566 promoted_val = promote_duplicated_reg (HImode, val);
18567 else
18568 promoted_val = val;
18570 return promoted_val;
18573 /* Expand string set operation (memset/bzero). Use i386 string operations
18574 when profitable. See the expand_movmem comment for an explanation of the
18575 individual steps performed. */
18577 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18578 rtx expected_align_exp, rtx expected_size_exp)
18580 rtx destreg;
18581 rtx label = NULL;
18582 rtx tmp;
18583 rtx jump_around_label = NULL;
18584 HOST_WIDE_INT align = 1;
18585 unsigned HOST_WIDE_INT count = 0;
18586 HOST_WIDE_INT expected_size = -1;
18587 int size_needed = 0, epilogue_size_needed;
18588 int desired_align = 0, align_bytes = 0;
18589 enum stringop_alg alg;
18590 rtx promoted_val = NULL;
18591 bool force_loopy_epilogue = false;
18592 int dynamic_check;
18593 bool need_zero_guard = false;
18595 if (CONST_INT_P (align_exp))
18596 align = INTVAL (align_exp);
18597 /* i386 can do misaligned access at a reasonably increased cost. */
18598 if (CONST_INT_P (expected_align_exp)
18599 && INTVAL (expected_align_exp) > align)
18600 align = INTVAL (expected_align_exp);
18601 if (CONST_INT_P (count_exp))
18602 count = expected_size = INTVAL (count_exp);
18603 if (CONST_INT_P (expected_size_exp) && count == 0)
18604 expected_size = INTVAL (expected_size_exp);
18606 /* Make sure we don't need to care about overflow later on. */
18607 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18608 return 0;
18610 /* Step 0: Decide on preferred algorithm, desired alignment and
18611 size of chunks to be copied by main loop. */
18613 alg = decide_alg (count, expected_size, true, &dynamic_check);
18614 desired_align = decide_alignment (align, alg, expected_size);
18616 if (!TARGET_ALIGN_STRINGOPS)
18617 align = desired_align;
18619 if (alg == libcall)
18620 return 0;
18621 gcc_assert (alg != no_stringop);
18622 if (!count)
18623 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18624 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18625 switch (alg)
18627 case libcall:
18628 case no_stringop:
18629 gcc_unreachable ();
18630 case loop:
18631 need_zero_guard = true;
18632 size_needed = GET_MODE_SIZE (Pmode);
18633 break;
18634 case unrolled_loop:
18635 need_zero_guard = true;
18636 size_needed = GET_MODE_SIZE (Pmode) * 4;
18637 break;
18638 case rep_prefix_8_byte:
18639 size_needed = 8;
18640 break;
18641 case rep_prefix_4_byte:
18642 size_needed = 4;
18643 break;
18644 case rep_prefix_1_byte:
18645 size_needed = 1;
18646 break;
18647 case loop_1_byte:
18648 need_zero_guard = true;
18649 size_needed = 1;
18650 break;
18652 epilogue_size_needed = size_needed;
18654 /* Step 1: Prologue guard. */
18656 /* Alignment code needs count to be in register. */
18657 if (CONST_INT_P (count_exp) && desired_align > align)
18659 if (INTVAL (count_exp) > desired_align
18660 && INTVAL (count_exp) > size_needed)
18662 align_bytes
18663 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18664 if (align_bytes <= 0)
18665 align_bytes = 0;
18666 else
18667 align_bytes = desired_align - align_bytes;
18669 if (align_bytes == 0)
18671 enum machine_mode mode = SImode;
18672 if (TARGET_64BIT && (count & ~0xffffffff))
18673 mode = DImode;
18674 count_exp = force_reg (mode, count_exp);
18677 /* Do the cheap promotion to allow better CSE across the
18678 main loop and epilogue (i.e. one load of the big constant in
18679 front of all code). */
18680 if (CONST_INT_P (val_exp))
18681 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18682 desired_align, align);
18683 /* Ensure that alignment prologue won't copy past end of block. */
18684 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18686 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18687 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18688 Make sure it is power of 2. */
18689 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18691 /* To improve performance of small blocks, we jump around the VAL
18692 promoting code. This means that if the promoted VAL is not constant,
18693 we might not use it in the epilogue and have to use the byte
18694 loop variant. */
18695 if (epilogue_size_needed > 2 && !promoted_val)
18696 force_loopy_epilogue = true;
18697 if (count)
18699 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18701 /* If main algorithm works on QImode, no epilogue is needed.
18702 For small sizes just don't align anything. */
18703 if (size_needed == 1)
18704 desired_align = align;
18705 else
18706 goto epilogue;
18709 else
18711 label = gen_label_rtx ();
18712 emit_cmp_and_jump_insns (count_exp,
18713 GEN_INT (epilogue_size_needed),
18714 LTU, 0, counter_mode (count_exp), 1, label);
18715 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18716 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18717 else
18718 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18721 if (dynamic_check != -1)
18723 rtx hot_label = gen_label_rtx ();
18724 jump_around_label = gen_label_rtx ();
18725 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18726 LEU, 0, counter_mode (count_exp), 1, hot_label);
18727 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18728 set_storage_via_libcall (dst, count_exp, val_exp, false);
18729 emit_jump (jump_around_label);
18730 emit_label (hot_label);
18733 /* Step 2: Alignment prologue. */
18735 /* Do the expensive promotion once we have branched off the small blocks. */
18736 if (!promoted_val)
18737 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18738 desired_align, align);
18739 gcc_assert (desired_align >= 1 && align >= 1);
18741 if (desired_align > align)
18743 if (align_bytes == 0)
18745 /* Except for the first move in the epilogue, we no longer know
18746 the constant offset in the aliasing info. It doesn't seem worth
18747 the pain to maintain it for the first move, so throw away
18748 the info early. */
18749 dst = change_address (dst, BLKmode, destreg);
18750 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18751 desired_align);
18753 else
18755 /* If we know how many bytes need to be stored before dst is
18756 sufficiently aligned, maintain aliasing info accurately. */
18757 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18758 desired_align, align_bytes);
18759 count_exp = plus_constant (count_exp, -align_bytes);
18760 count -= align_bytes;
18762 if (need_zero_guard
18763 && (count < (unsigned HOST_WIDE_INT) size_needed
18764 || (align_bytes == 0
18765 && count < ((unsigned HOST_WIDE_INT) size_needed
18766 + desired_align - align))))
18768 /* It is possible that we copied enough so the main loop will not
18769 execute. */
18770 gcc_assert (size_needed > 1);
18771 if (label == NULL_RTX)
18772 label = gen_label_rtx ();
18773 emit_cmp_and_jump_insns (count_exp,
18774 GEN_INT (size_needed),
18775 LTU, 0, counter_mode (count_exp), 1, label);
18776 if (expected_size == -1
18777 || expected_size < (desired_align - align) / 2 + size_needed)
18778 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18779 else
18780 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18783 if (label && size_needed == 1)
18785 emit_label (label);
18786 LABEL_NUSES (label) = 1;
18787 label = NULL;
18788 promoted_val = val_exp;
18789 epilogue_size_needed = 1;
18791 else if (label == NULL_RTX)
18792 epilogue_size_needed = size_needed;
18794 /* Step 3: Main loop. */
18796 switch (alg)
18798 case libcall:
18799 case no_stringop:
18800 gcc_unreachable ();
18801 case loop_1_byte:
18802 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18803 count_exp, QImode, 1, expected_size);
18804 break;
18805 case loop:
18806 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18807 count_exp, Pmode, 1, expected_size);
18808 break;
18809 case unrolled_loop:
18810 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18811 count_exp, Pmode, 4, expected_size);
18812 break;
18813 case rep_prefix_8_byte:
18814 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18815 DImode, val_exp);
18816 break;
18817 case rep_prefix_4_byte:
18818 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18819 SImode, val_exp);
18820 break;
18821 case rep_prefix_1_byte:
18822 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18823 QImode, val_exp);
18824 break;
18826 /* Properly adjust the offset of the dest memory for aliasing. */
18827 if (CONST_INT_P (count_exp))
18828 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18829 (count / size_needed) * size_needed);
18830 else
18831 dst = change_address (dst, BLKmode, destreg);
18833 /* Step 4: Epilogue to copy the remaining bytes. */
18835 if (label)
18837 /* When the main loop is done, COUNT_EXP might hold the original count,
18838 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18839 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18840 bytes. Compensate if needed. */
18842 if (size_needed < epilogue_size_needed)
18844 tmp =
18845 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18846 GEN_INT (size_needed - 1), count_exp, 1,
18847 OPTAB_DIRECT);
18848 if (tmp != count_exp)
18849 emit_move_insn (count_exp, tmp);
18851 emit_label (label);
18852 LABEL_NUSES (label) = 1;
18854 epilogue:
18855 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18857 if (force_loopy_epilogue)
18858 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18859 epilogue_size_needed);
18860 else
18861 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18862 epilogue_size_needed);
18864 if (jump_around_label)
18865 emit_label (jump_around_label);
18866 return 1;
18869 /* Expand the appropriate insns for doing strlen if not just doing
18870 repnz; scasb
18872 out = result, initialized with the start address
18873 align_rtx = alignment of the address.
18874 scratch = scratch register, initialized with the start address when
18875 not aligned, otherwise undefined
18877 This is just the body. It needs the initializations mentioned above and
18878 some address computing at the end. These things are done in i386.md. */
18880 static void
18881 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
18883 int align;
18884 rtx tmp;
18885 rtx align_2_label = NULL_RTX;
18886 rtx align_3_label = NULL_RTX;
18887 rtx align_4_label = gen_label_rtx ();
18888 rtx end_0_label = gen_label_rtx ();
18889 rtx mem;
18890 rtx tmpreg = gen_reg_rtx (SImode);
18891 rtx scratch = gen_reg_rtx (SImode);
18892 rtx cmp;
18894 align = 0;
18895 if (CONST_INT_P (align_rtx))
18896 align = INTVAL (align_rtx);
18898 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
18900 /* Is there a known alignment and is it less than 4? */
18901 if (align < 4)
18903 rtx scratch1 = gen_reg_rtx (Pmode);
18904 emit_move_insn (scratch1, out);
18905 /* Is there a known alignment and is it not 2? */
18906 if (align != 2)
18908 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
18909 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
18911 /* Leave just the 3 lower bits. */
18912 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
18913 NULL_RTX, 0, OPTAB_WIDEN);
18915 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18916 Pmode, 1, align_4_label);
18917 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
18918 Pmode, 1, align_2_label);
18919 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
18920 Pmode, 1, align_3_label);
18922 else
18924 /* Since the alignment is 2, we have to check 2 or 0 bytes;
18925 check whether it is aligned to a 4-byte boundary. */
18927 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
18928 NULL_RTX, 0, OPTAB_WIDEN);
18930 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18931 Pmode, 1, align_4_label);
18934 mem = change_address (src, QImode, out);
18936 /* Now compare the bytes. */
18938 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
18939 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
18940 QImode, 1, end_0_label);
18942 /* Increment the address. */
18943 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18945 /* Not needed with an alignment of 2 */
18946 if (align != 2)
18948 emit_label (align_2_label);
18950 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18951 end_0_label);
18953 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18955 emit_label (align_3_label);
18958 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18959 end_0_label);
18961 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18964 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
18965 align this loop; it only makes programs bigger and does not help
18966 speed. */
18967 emit_label (align_4_label);
18969 mem = change_address (src, SImode, out);
18970 emit_move_insn (scratch, mem);
18971 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
18973 /* This formula yields a nonzero result iff one of the bytes is zero.
18974 This saves three branches inside the loop and many cycles. (A plain-C illustration of this test follows the function.) */
18976 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
18977 emit_insn (gen_one_cmplsi2 (scratch, scratch));
18978 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
18979 emit_insn (gen_andsi3 (tmpreg, tmpreg,
18980 gen_int_mode (0x80808080, SImode)));
18981 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
18982 align_4_label);
18984 if (TARGET_CMOVE)
18986 rtx reg = gen_reg_rtx (SImode);
18987 rtx reg2 = gen_reg_rtx (Pmode);
18988 emit_move_insn (reg, tmpreg);
18989 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
18991 /* If zero is not in the first two bytes, move two bytes forward. */
18992 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
18993 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18994 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
18995 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
18996 gen_rtx_IF_THEN_ELSE (SImode, tmp,
18997 reg,
18998 tmpreg)));
18999 /* Emit lea manually to avoid clobbering of flags. */
19000 emit_insn (gen_rtx_SET (SImode, reg2,
19001 gen_rtx_PLUS (Pmode, out, const2_rtx)));
19003 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19004 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
19005 emit_insn (gen_rtx_SET (VOIDmode, out,
19006 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
19007 reg2,
19008 out)));
19010 else
19012 rtx end_2_label = gen_label_rtx ();
19013 /* Is zero in the first two bytes? */
19015 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
19016 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
19017 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
19018 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
19019 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
19020 pc_rtx);
19021 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
19022 JUMP_LABEL (tmp) = end_2_label;
19024 /* Not in the first two. Move two bytes forward. */
19025 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
19026 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
19028 emit_label (end_2_label);
19032 /* Avoid branch in fixing the byte. */
19033 tmpreg = gen_lowpart (QImode, tmpreg);
19034 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
19035 tmp = gen_rtx_REG (CCmode, FLAGS_REG);
19036 cmp = gen_rtx_LTU (VOIDmode, tmp, const0_rtx);
19037 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), tmp, cmp));
19039 emit_label (end_0_label);
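/* Illustrative sketch, not part of the original code: the formula used
   in the aligned loop above is the classic zero-byte test --
   (x - 0x01010101) & ~x & 0x80808080 is nonzero exactly when some byte
   of the 32-bit word x is zero.  In plain C, with a hypothetical helper
   name:  */
#if 0
static int
word_has_zero_byte (unsigned int x)
{
  return ((x - 0x01010101u) & ~x & 0x80808080u) != 0;
}
#endif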
19042 /* Expand strlen. */
19045 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
19047 rtx addr, scratch1, scratch2, scratch3, scratch4;
19049 /* The generic case of the strlen expander is long. Avoid expanding it
19050 unless TARGET_INLINE_ALL_STRINGOPS. */
19052 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19053 && !TARGET_INLINE_ALL_STRINGOPS
19054 && !optimize_insn_for_size_p ()
19055 && (!CONST_INT_P (align) || INTVAL (align) < 4))
19056 return 0;
19058 addr = force_reg (Pmode, XEXP (src, 0));
19059 scratch1 = gen_reg_rtx (Pmode);
19061 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
19062 && !optimize_insn_for_size_p ())
19064 /* Well, it seems that some optimizer does not combine a call like
19065 foo(strlen(bar), strlen(bar));
19066 when the move and the subtraction are done here. It does calculate
19067 the length just once when these instructions are done inside
19068 output_strlen_unroll(). But I think that since &bar[strlen(bar)] is
19069 often used and I use one fewer register for the lifetime of
19070 output_strlen_unroll(), this is better. */
19072 emit_move_insn (out, addr);
19074 ix86_expand_strlensi_unroll_1 (out, src, align);
19076 /* strlensi_unroll_1 returns the address of the zero at the end of
19077 the string, like memchr(), so compute the length by subtracting
19078 the start address. */
19079 emit_insn ((*ix86_gen_sub3) (out, out, addr));
19081 else
19083 rtx unspec;
19085 /* Can't use this if the user has appropriated eax, ecx, or edi. */
19086 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
19087 return false;
19089 scratch2 = gen_reg_rtx (Pmode);
19090 scratch3 = gen_reg_rtx (Pmode);
19091 scratch4 = force_reg (Pmode, constm1_rtx);
19093 emit_move_insn (scratch3, addr);
19094 eoschar = force_reg (QImode, eoschar);
19096 src = replace_equiv_address_nv (src, scratch3);
19098 /* If .md starts supporting :P, this can be done in .md. */
19099 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
19100 scratch4), UNSPEC_SCAS);
19101 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
19102 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
19103 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
19105 return 1;
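/* Illustrative sketch, not part of the original code: on the
   "repnz scasb" path above, the count register starts at -1 and is
   decremented once per scanned byte, including the terminator, so it
   ends up holding -(len + 2).  The one's-complement and the add of -1
   emitted after the scan recover the length.  In plain C, with a
   hypothetical helper name:  */
#if 0
static unsigned long
strlen_from_scas_count (long final_count)
{
  /* final_count == -(len + 2), so ~final_count == len + 1.  */
  return (unsigned long) (~final_count - 1);
}
#endif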
19108 /* For a given symbol (function), construct code to compute the address of its
19109 PLT entry in the large x86-64 PIC model. */
19111 construct_plt_address (rtx symbol)
19113 rtx tmp = gen_reg_rtx (Pmode);
19114 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
19116 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
19117 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
19119 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
19120 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
19121 return tmp;
19124 void
19125 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
19126 rtx callarg2,
19127 rtx pop, int sibcall)
19129 rtx use = NULL, call;
19131 if (pop == const0_rtx)
19132 pop = NULL;
19133 gcc_assert (!TARGET_64BIT || !pop);
19135 if (TARGET_MACHO && !TARGET_64BIT)
19137 #if TARGET_MACHO
19138 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
19139 fnaddr = machopic_indirect_call_target (fnaddr);
19140 #endif
19142 else
19144 /* Static functions and indirect calls don't need the pic register. */
19145 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
19146 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19147 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
19148 use_reg (&use, pic_offset_table_rtx);
19151 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
19153 rtx al = gen_rtx_REG (QImode, AX_REG);
19154 emit_move_insn (al, callarg2);
19155 use_reg (&use, al);
19158 if (ix86_cmodel == CM_LARGE_PIC
19159 && MEM_P (fnaddr)
19160 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
19161 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
19162 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
19163 else if (sibcall
19164 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
19165 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
19167 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
19168 fnaddr = gen_rtx_MEM (QImode, fnaddr);
19171 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
19172 if (retval)
19173 call = gen_rtx_SET (VOIDmode, retval, call);
19174 if (pop)
19176 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
19177 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
19178 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
19180 if (TARGET_64BIT
19181 && ix86_cfun_abi () == MS_ABI
19182 && (!callarg2 || INTVAL (callarg2) != -2))
19184 /* We need to represent that SI and DI registers are clobbered
19185 by SYSV calls. */
19186 static int clobbered_registers[] = {
19187 XMM6_REG, XMM7_REG, XMM8_REG,
19188 XMM9_REG, XMM10_REG, XMM11_REG,
19189 XMM12_REG, XMM13_REG, XMM14_REG,
19190 XMM15_REG, SI_REG, DI_REG
19192 unsigned int i;
19193 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
19194 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
19195 UNSPEC_MS_TO_SYSV_CALL);
19197 vec[0] = call;
19198 vec[1] = unspec;
19199 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
19200 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
19201 ? TImode : DImode,
19202 gen_rtx_REG
19203 (SSE_REGNO_P (clobbered_registers[i])
19204 ? TImode : DImode,
19205 clobbered_registers[i]));
19207 call = gen_rtx_PARALLEL (VOIDmode,
19208 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
19209 + 2, vec));
19212 call = emit_call_insn (call);
19213 if (use)
19214 CALL_INSN_FUNCTION_USAGE (call) = use;
19218 /* Clear stack slot assignments remembered from previous functions.
19219 This is called from INIT_EXPANDERS once before RTL is emitted for each
19220 function. */
19222 static struct machine_function *
19223 ix86_init_machine_status (void)
19225 struct machine_function *f;
19227 f = GGC_CNEW (struct machine_function);
19228 f->use_fast_prologue_epilogue_nregs = -1;
19229 f->tls_descriptor_call_expanded_p = 0;
19230 f->call_abi = ix86_abi;
19232 return f;
19235 /* Return a MEM corresponding to a stack slot with mode MODE.
19236 Allocate a new slot if necessary.
19238 The RTL for a function can have several slots available: N is
19239 which slot to use. */
19242 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
19244 struct stack_local_entry *s;
19246 gcc_assert (n < MAX_386_STACK_LOCALS);
19248 /* Virtual slot is valid only before vregs are instantiated. */
19249 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
19251 for (s = ix86_stack_locals; s; s = s->next)
19252 if (s->mode == mode && s->n == n)
19253 return copy_rtx (s->rtl);
19255 s = (struct stack_local_entry *)
19256 ggc_alloc (sizeof (struct stack_local_entry));
19257 s->n = n;
19258 s->mode = mode;
19259 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
19261 s->next = ix86_stack_locals;
19262 ix86_stack_locals = s;
19263 return s->rtl;
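/* Illustrative sketch, not part of the original code: the stack-slot
   cache above is a simple linked-list memoization -- look up an
   existing (mode, n) entry, otherwise allocate one and push it on the
   list.  The same pattern in plain C, with hypothetical types and
   malloc from <stdlib.h>:  */
#if 0
struct slot_cache { int mode, n; void *rtl; struct slot_cache *next; };

static void *
get_cached_slot (struct slot_cache **head, int mode, int n,
                 void *(*make_slot) (int mode))
{
  struct slot_cache *s;
  for (s = *head; s; s = s->next)
    if (s->mode == mode && s->n == n)
      return s->rtl;
  s = (struct slot_cache *) malloc (sizeof *s);
  s->mode = mode;
  s->n = n;
  s->rtl = make_slot (mode);
  s->next = *head;
  *head = s;
  return s->rtl;
}
#endif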
19266 /* Construct the SYMBOL_REF for the tls_get_addr function. */
19268 static GTY(()) rtx ix86_tls_symbol;
19270 ix86_tls_get_addr (void)
19273 if (!ix86_tls_symbol)
19275 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
19276 (TARGET_ANY_GNU_TLS
19277 && !TARGET_64BIT)
19278 ? "___tls_get_addr"
19279 : "__tls_get_addr");
19282 return ix86_tls_symbol;
19285 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
19287 static GTY(()) rtx ix86_tls_module_base_symbol;
19289 ix86_tls_module_base (void)
19292 if (!ix86_tls_module_base_symbol)
19294 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19295 "_TLS_MODULE_BASE_");
19296 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19297 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19300 return ix86_tls_module_base_symbol;
19303 /* Calculate the length of the memory address in the instruction
19304 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19307 memory_address_length (rtx addr)
19309 struct ix86_address parts;
19310 rtx base, index, disp;
19311 int len;
19312 int ok;
19314 if (GET_CODE (addr) == PRE_DEC
19315 || GET_CODE (addr) == POST_INC
19316 || GET_CODE (addr) == PRE_MODIFY
19317 || GET_CODE (addr) == POST_MODIFY)
19318 return 0;
19320 ok = ix86_decompose_address (addr, &parts);
19321 gcc_assert (ok);
19323 if (parts.base && GET_CODE (parts.base) == SUBREG)
19324 parts.base = SUBREG_REG (parts.base);
19325 if (parts.index && GET_CODE (parts.index) == SUBREG)
19326 parts.index = SUBREG_REG (parts.index);
19328 base = parts.base;
19329 index = parts.index;
19330 disp = parts.disp;
19331 len = 0;
19333 /* Rule of thumb:
19334 - esp as the base always wants an index,
19335 - ebp as the base always wants a displacement,
19336 - r12 as the base always wants an index,
19337 - r13 as the base always wants a displacement. */
19339 /* Register Indirect. */
19340 if (base && !index && !disp)
19342 /* esp (for its index) and ebp (for its displacement) need
19343 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19344 code. */
19345 if (REG_P (addr)
19346 && (addr == arg_pointer_rtx
19347 || addr == frame_pointer_rtx
19348 || REGNO (addr) == SP_REG
19349 || REGNO (addr) == BP_REG
19350 || REGNO (addr) == R12_REG
19351 || REGNO (addr) == R13_REG))
19352 len = 1;
19355 /* Direct Addressing. In 64-bit mode, mod 00 r/m 5
19356 is not disp32 but disp32(%rip), so for a plain disp32
19357 a SIB byte is needed, unless print_operand_address
19358 optimizes it into disp32(%rip) or (%rip) is implied
19359 by an UNSPEC. */
19360 else if (disp && !base && !index)
19362 len = 4;
19363 if (TARGET_64BIT)
19365 rtx symbol = disp;
19367 if (GET_CODE (disp) == CONST)
19368 symbol = XEXP (disp, 0);
19369 if (GET_CODE (symbol) == PLUS
19370 && CONST_INT_P (XEXP (symbol, 1)))
19371 symbol = XEXP (symbol, 0);
19373 if (GET_CODE (symbol) != LABEL_REF
19374 && (GET_CODE (symbol) != SYMBOL_REF
19375 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19376 && (GET_CODE (symbol) != UNSPEC
19377 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19378 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19379 len += 1;
19383 else
19385 /* Find the length of the displacement constant. */
19386 if (disp)
19388 if (base && satisfies_constraint_K (disp))
19389 len = 1;
19390 else
19391 len = 4;
19393 /* ebp always wants a displacement. Similarly r13. */
19394 else if (base && REG_P (base)
19395 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19396 len = 1;
19398 /* An index requires the two-byte modrm form.... */
19399 if (index
19400 /* ...like esp (or r12), which always wants an index. */
19401 || base == arg_pointer_rtx
19402 || base == frame_pointer_rtx
19403 || (base && REG_P (base)
19404 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19405 len += 1;
19408 switch (parts.seg)
19410 case SEG_FS:
19411 case SEG_GS:
19412 len += 1;
19413 break;
19414 default:
19415 break;
19418 return len;
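/* The rule of thumb implemented by memory_address_length above can be
   condensed into the following stand-alone sketch.  It assumes a simplified
   address made of an optional base register (given by its hardware number),
   an optional index, and a displacement; the helper and its parameters are
   illustrative and are not used by the compiler.  */

static int
sketch_x86_addr_extra_bytes (int has_base, int base_regno,
                             int has_index, long long disp)
{
  int len = 0;

  /* A displacement is encoded as 1 byte when there is a base and the value
     fits in a signed 8-bit field, and as 4 bytes otherwise.  ebp/r13 as the
     base always needs at least a disp8, even when the displacement is 0.  */
  if (disp != 0 || (has_base && base_regno == 5 /* ebp/r13 */))
    len += (has_base && disp >= -128 && disp <= 127) ? 1 : 4;

  /* An index register, or esp/r12 as the base, forces the extra SIB byte.  */
  if (has_index || (has_base && base_regno == 4 /* esp/r12 */))
    len += 1;

  return len;
}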
19421 /* Compute the default value for the "length_immediate" attribute. When SHORTFORM
19422 is set, expect the insn to have an 8-bit immediate alternative. */
19424 ix86_attr_length_immediate_default (rtx insn, int shortform)
19426 int len = 0;
19427 int i;
19428 extract_insn_cached (insn);
19429 for (i = recog_data.n_operands - 1; i >= 0; --i)
19430 if (CONSTANT_P (recog_data.operand[i]))
19432 enum attr_mode mode = get_attr_mode (insn);
19434 gcc_assert (!len);
19435 if (shortform && CONST_INT_P (recog_data.operand[i]))
19437 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19438 switch (mode)
19440 case MODE_QI:
19441 len = 1;
19442 continue;
19443 case MODE_HI:
19444 ival = trunc_int_for_mode (ival, HImode);
19445 break;
19446 case MODE_SI:
19447 ival = trunc_int_for_mode (ival, SImode);
19448 break;
19449 default:
19450 break;
19452 if (IN_RANGE (ival, -128, 127))
19454 len = 1;
19455 continue;
19458 switch (mode)
19460 case MODE_QI:
19461 len = 1;
19462 break;
19463 case MODE_HI:
19464 len = 2;
19465 break;
19466 case MODE_SI:
19467 len = 4;
19468 break;
19469 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19470 case MODE_DI:
19471 len = 4;
19472 break;
19473 default:
19474 fatal_insn ("unknown insn mode", insn);
19477 return len;
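/* A condensed, self-contained sketch of the immediate sizing rule above,
   with the operand mode reduced to a byte width and SHORTFORM to a flag.
   The helper is hypothetical and exists only to document the rule.  */

static int
sketch_imm_length (int op_byte_width, int shortform, long long ival)
{
  /* With a short-form alternative, any value in [-128, 127] is encoded as a
     single sign-extended byte.  */
  if (shortform && ival >= -128 && ival <= 127)
    return 1;

  /* DImode immediates are encoded as 32-bit sign-extended values, so the
     length is capped at 4 bytes.  */
  return op_byte_width > 4 ? 4 : op_byte_width;
}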
19479 /* Compute default value for "length_address" attribute. */
19481 ix86_attr_length_address_default (rtx insn)
19483 int i;
19485 if (get_attr_type (insn) == TYPE_LEA)
19487 rtx set = PATTERN (insn), addr;
19489 if (GET_CODE (set) == PARALLEL)
19490 set = XVECEXP (set, 0, 0);
19492 gcc_assert (GET_CODE (set) == SET);
19494 addr = SET_SRC (set);
19495 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19497 if (GET_CODE (addr) == ZERO_EXTEND)
19498 addr = XEXP (addr, 0);
19499 if (GET_CODE (addr) == SUBREG)
19500 addr = SUBREG_REG (addr);
19503 return memory_address_length (addr);
19506 extract_insn_cached (insn);
19507 for (i = recog_data.n_operands - 1; i >= 0; --i)
19508 if (MEM_P (recog_data.operand[i]))
19510 constrain_operands_cached (reload_completed);
19511 if (which_alternative != -1)
19513 const char *constraints = recog_data.constraints[i];
19514 int alt = which_alternative;
19516 while (*constraints == '=' || *constraints == '+')
19517 constraints++;
19518 while (alt-- > 0)
19519 while (*constraints++ != ',')
19521 /* Skip ignored operands. */
19522 if (*constraints == 'X')
19523 continue;
19525 return memory_address_length (XEXP (recog_data.operand[i], 0));
19527 return 0;
19530 /* Compute default value for "length_vex" attribute. It includes
19531 the 2- or 3-byte VEX prefix and 1 opcode byte. */
19534 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19535 int has_vex_w)
19537 int i;
19539 /* Only the 0f opcode map can use the 2-byte VEX prefix; the VEX W bit
19540 requires the 3-byte VEX prefix. */
19541 if (!has_0f_opcode || has_vex_w)
19542 return 3 + 1;
19544 /* We can always use 2 byte VEX prefix in 32bit. */
19545 if (!TARGET_64BIT)
19546 return 2 + 1;
19548 extract_insn_cached (insn);
19550 for (i = recog_data.n_operands - 1; i >= 0; --i)
19551 if (REG_P (recog_data.operand[i]))
19553 /* REX.W bit uses 3 byte VEX prefix. */
19554 if (GET_MODE (recog_data.operand[i]) == DImode
19555 && GENERAL_REG_P (recog_data.operand[i]))
19556 return 3 + 1;
19558 else
19560 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19561 if (MEM_P (recog_data.operand[i])
19562 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19563 return 3 + 1;
19566 return 2 + 1;
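/* The prefix-length decision above reduces to the following sketch, assuming
   the relevant conditions are passed in as flags; the helper is illustrative
   and returns the prefix length without the opcode byte.  */

static int
sketch_vex_prefix_length (int has_0f_opcode, int needs_vex_w,
                          int uses_extended_reg)
{
  /* Only the 0f opcode map fits in the 2-byte form; VEX.W (including 64-bit
     general register operands) and the REX.X/B bits needed for extended
     base or index registers force the 3-byte form.  */
  if (!has_0f_opcode || needs_vex_w || uses_extended_reg)
    return 3;
  return 2;
}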
19569 /* Return the maximum number of instructions a cpu can issue. */
19571 static int
19572 ix86_issue_rate (void)
19574 switch (ix86_tune)
19576 case PROCESSOR_PENTIUM:
19577 case PROCESSOR_ATOM:
19578 case PROCESSOR_K6:
19579 return 2;
19581 case PROCESSOR_PENTIUMPRO:
19582 case PROCESSOR_PENTIUM4:
19583 case PROCESSOR_ATHLON:
19584 case PROCESSOR_K8:
19585 case PROCESSOR_AMDFAM10:
19586 case PROCESSOR_NOCONA:
19587 case PROCESSOR_GENERIC32:
19588 case PROCESSOR_GENERIC64:
19589 return 3;
19591 case PROCESSOR_CORE2:
19592 return 4;
19594 default:
19595 return 1;
19599 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags set
19600 by DEP_INSN and nothing else set by DEP_INSN. */
19602 static int
19603 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19605 rtx set, set2;
19607 /* Simplify the test for uninteresting insns. */
19608 if (insn_type != TYPE_SETCC
19609 && insn_type != TYPE_ICMOV
19610 && insn_type != TYPE_FCMOV
19611 && insn_type != TYPE_IBR)
19612 return 0;
19614 if ((set = single_set (dep_insn)) != 0)
19616 set = SET_DEST (set);
19617 set2 = NULL_RTX;
19619 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19620 && XVECLEN (PATTERN (dep_insn), 0) == 2
19621 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19622 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19624 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19625 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19627 else
19628 return 0;
19630 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19631 return 0;
19633 /* This test is true if the dependent insn reads the flags but
19634 not any other potentially set register. */
19635 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19636 return 0;
19638 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19639 return 0;
19641 return 1;
19644 /* Return true iff USE_INSN has a memory address with operands set by
19645 SET_INSN. */
19647 bool
19648 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19650 int i;
19651 extract_insn_cached (use_insn);
19652 for (i = recog_data.n_operands - 1; i >= 0; --i)
19653 if (MEM_P (recog_data.operand[i]))
19655 rtx addr = XEXP (recog_data.operand[i], 0);
19656 return modified_in_p (addr, set_insn) != 0;
19658 return false;
19661 static int
19662 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19664 enum attr_type insn_type, dep_insn_type;
19665 enum attr_memory memory;
19666 rtx set, set2;
19667 int dep_insn_code_number;
19669 /* Anti and output dependencies have zero cost on all CPUs. */
19670 if (REG_NOTE_KIND (link) != 0)
19671 return 0;
19673 dep_insn_code_number = recog_memoized (dep_insn);
19675 /* If we can't recognize the insns, we can't really do anything. */
19676 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19677 return cost;
19679 insn_type = get_attr_type (insn);
19680 dep_insn_type = get_attr_type (dep_insn);
19682 switch (ix86_tune)
19684 case PROCESSOR_PENTIUM:
19685 /* Address Generation Interlock adds a cycle of latency. */
19686 if (insn_type == TYPE_LEA)
19688 rtx addr = PATTERN (insn);
19690 if (GET_CODE (addr) == PARALLEL)
19691 addr = XVECEXP (addr, 0, 0);
19693 gcc_assert (GET_CODE (addr) == SET);
19695 addr = SET_SRC (addr);
19696 if (modified_in_p (addr, dep_insn))
19697 cost += 1;
19699 else if (ix86_agi_dependent (dep_insn, insn))
19700 cost += 1;
19702 /* ??? Compares pair with jump/setcc. */
19703 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19704 cost = 0;
19706 /* Floating point stores require value to be ready one cycle earlier. */
19707 if (insn_type == TYPE_FMOV
19708 && get_attr_memory (insn) == MEMORY_STORE
19709 && !ix86_agi_dependent (dep_insn, insn))
19710 cost += 1;
19711 break;
19713 case PROCESSOR_PENTIUMPRO:
19714 memory = get_attr_memory (insn);
19716 /* INT->FP conversion is expensive. */
19717 if (get_attr_fp_int_src (dep_insn))
19718 cost += 5;
19720 /* There is one cycle extra latency between an FP op and a store. */
19721 if (insn_type == TYPE_FMOV
19722 && (set = single_set (dep_insn)) != NULL_RTX
19723 && (set2 = single_set (insn)) != NULL_RTX
19724 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19725 && MEM_P (SET_DEST (set2)))
19726 cost += 1;
19728 /* Account for the reorder buffer's ability to hide the latency of a load
19729 by executing it in parallel with the previous instruction, when the
19730 previous instruction is not needed to compute the address. */
19731 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19732 && !ix86_agi_dependent (dep_insn, insn))
19734 /* Claim that moves take one cycle, as the core can issue one load
19735 at a time and the next load can start a cycle later. */
19736 if (dep_insn_type == TYPE_IMOV
19737 || dep_insn_type == TYPE_FMOV)
19738 cost = 1;
19739 else if (cost > 1)
19740 cost--;
19742 break;
19744 case PROCESSOR_K6:
19745 memory = get_attr_memory (insn);
19747 /* The esp dependency is resolved before the instruction is really
19748 finished. */
19749 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19750 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19751 return 1;
19753 /* INT->FP conversion is expensive. */
19754 if (get_attr_fp_int_src (dep_insn))
19755 cost += 5;
19757 /* Account for the reorder buffer's ability to hide the latency of a load
19758 by executing it in parallel with the previous instruction, when the
19759 previous instruction is not needed to compute the address. */
19760 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19761 && !ix86_agi_dependent (dep_insn, insn))
19763 /* Claim that moves take one cycle, as the core can issue one load
19764 at a time and the next load can start a cycle later. */
19765 if (dep_insn_type == TYPE_IMOV
19766 || dep_insn_type == TYPE_FMOV)
19767 cost = 1;
19768 else if (cost > 2)
19769 cost -= 2;
19770 else
19771 cost = 1;
19773 break;
19775 case PROCESSOR_ATHLON:
19776 case PROCESSOR_K8:
19777 case PROCESSOR_AMDFAM10:
19778 case PROCESSOR_ATOM:
19779 case PROCESSOR_GENERIC32:
19780 case PROCESSOR_GENERIC64:
19781 memory = get_attr_memory (insn);
19783 /* Account for the reorder buffer's ability to hide the latency of a load
19784 by executing it in parallel with the previous instruction, when the
19785 previous instruction is not needed to compute the address. */
19786 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19787 && !ix86_agi_dependent (dep_insn, insn))
19789 enum attr_unit unit = get_attr_unit (insn);
19790 int loadcost = 3;
19792 /* Because of the difference between the length of integer and
19793 floating unit pipeline preparation stages, the memory operands
19794 for floating point are cheaper.
19796 ??? For Athlon the difference is most probably 2. */
19797 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19798 loadcost = 3;
19799 else
19800 loadcost = TARGET_ATHLON ? 2 : 0;
19802 if (cost >= loadcost)
19803 cost -= loadcost;
19804 else
19805 cost = 0;
19808 default:
19809 break;
19812 return cost;
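/* The load-latency adjustment used for the Athlon/K8/AMDFAM10/generic cases
   above boils down to the following sketch, assuming the caller has already
   established that the dependence is not an address-generation one.  The
   3/2/0 load costs mirror the values chosen above.  */

static int
sketch_hide_load_latency (int cost, int is_fp_unit, int is_athlon)
{
  int loadcost = is_fp_unit ? (is_athlon ? 2 : 0) : 3;

  return cost >= loadcost ? cost - loadcost : 0;
}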
19815 /* How many alternative schedules to try. This should be as wide as the
19816 scheduling freedom in the DFA, but no wider. Making this value too
19817 large results in extra work for the scheduler. */
19819 static int
19820 ia32_multipass_dfa_lookahead (void)
19822 switch (ix86_tune)
19824 case PROCESSOR_PENTIUM:
19825 return 2;
19827 case PROCESSOR_PENTIUMPRO:
19828 case PROCESSOR_K6:
19829 return 1;
19831 default:
19832 return 0;
19837 /* Compute the alignment given to a constant that is being placed in memory.
19838 EXP is the constant and ALIGN is the alignment that the object would
19839 ordinarily have.
19840 The value of this function is used instead of that alignment to align
19841 the object. */
19844 ix86_constant_alignment (tree exp, int align)
19846 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19847 || TREE_CODE (exp) == INTEGER_CST)
19849 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19850 return 64;
19851 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19852 return 128;
19854 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19855 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19856 return BITS_PER_WORD;
19858 return align;
19861 /* Compute the alignment for a static variable.
19862 TYPE is the data type, and ALIGN is the alignment that
19863 the object would ordinarily have. The value of this function is used
19864 instead of that alignment to align the object. */
19867 ix86_data_alignment (tree type, int align)
19869 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19871 if (AGGREGATE_TYPE_P (type)
19872 && TYPE_SIZE (type)
19873 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19874 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19875 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19876 && align < max_align)
19877 align = max_align;
19879 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
19880 to a 16-byte boundary. */
19881 if (TARGET_64BIT)
19883 if (AGGREGATE_TYPE_P (type)
19884 && TYPE_SIZE (type)
19885 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19886 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
19887 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19888 return 128;
19891 if (TREE_CODE (type) == ARRAY_TYPE)
19893 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19894 return 64;
19895 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19896 return 128;
19898 else if (TREE_CODE (type) == COMPLEX_TYPE)
19901 if (TYPE_MODE (type) == DCmode && align < 64)
19902 return 64;
19903 if ((TYPE_MODE (type) == XCmode
19904 || TYPE_MODE (type) == TCmode) && align < 128)
19905 return 128;
19907 else if ((TREE_CODE (type) == RECORD_TYPE
19908 || TREE_CODE (type) == UNION_TYPE
19909 || TREE_CODE (type) == QUAL_UNION_TYPE)
19910 && TYPE_FIELDS (type))
19912 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19913 return 64;
19914 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19915 return 128;
19917 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19918 || TREE_CODE (type) == INTEGER_TYPE)
19920 if (TYPE_MODE (type) == DFmode && align < 64)
19921 return 64;
19922 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19923 return 128;
19926 return align;
19929 /* Compute the alignment for a local variable or a stack slot. EXP is
19930 the data type or decl itself, MODE is the widest mode available and
19931 ALIGN is the alignment that the object would ordinarily have. The
19932 value of this macro is used instead of that alignment to align the
19933 object. */
19935 unsigned int
19936 ix86_local_alignment (tree exp, enum machine_mode mode,
19937 unsigned int align)
19939 tree type, decl;
19941 if (exp && DECL_P (exp))
19943 type = TREE_TYPE (exp);
19944 decl = exp;
19946 else
19948 type = exp;
19949 decl = NULL;
19952 /* Don't do dynamic stack realignment for long long objects with
19953 -mpreferred-stack-boundary=2. */
19954 if (!TARGET_64BIT
19955 && align == 64
19956 && ix86_preferred_stack_boundary < 64
19957 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
19958 && (!type || !TYPE_USER_ALIGN (type))
19959 && (!decl || !DECL_USER_ALIGN (decl)))
19960 align = 32;
19962 /* If TYPE is NULL, we are allocating a stack slot for caller-save
19963 register in MODE. We will return the largest alignment of XF
19964 and DF. */
19965 if (!type)
19967 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
19968 align = GET_MODE_ALIGNMENT (DFmode);
19969 return align;
19972 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
19973 to a 16-byte boundary. */
19974 if (TARGET_64BIT)
19976 if (AGGREGATE_TYPE_P (type)
19977 && TYPE_SIZE (type)
19978 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19979 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
19980 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19981 return 128;
19983 if (TREE_CODE (type) == ARRAY_TYPE)
19985 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19986 return 64;
19987 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19988 return 128;
19990 else if (TREE_CODE (type) == COMPLEX_TYPE)
19992 if (TYPE_MODE (type) == DCmode && align < 64)
19993 return 64;
19994 if ((TYPE_MODE (type) == XCmode
19995 || TYPE_MODE (type) == TCmode) && align < 128)
19996 return 128;
19998 else if ((TREE_CODE (type) == RECORD_TYPE
19999 || TREE_CODE (type) == UNION_TYPE
20000 || TREE_CODE (type) == QUAL_UNION_TYPE)
20001 && TYPE_FIELDS (type))
20003 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
20004 return 64;
20005 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
20006 return 128;
20008 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
20009 || TREE_CODE (type) == INTEGER_TYPE)
20012 if (TYPE_MODE (type) == DFmode && align < 64)
20013 return 64;
20014 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
20015 return 128;
20017 return align;
20020 /* Compute the minimum required alignment for dynamic stack realignment
20021 purposes for a local variable, parameter or a stack slot. EXP is
20022 the data type or decl itself, MODE is its mode and ALIGN is the
20023 alignment that the object would ordinarily have. */
20025 unsigned int
20026 ix86_minimum_alignment (tree exp, enum machine_mode mode,
20027 unsigned int align)
20029 tree type, decl;
20031 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
20032 return align;
20034 if (exp && DECL_P (exp))
20036 type = TREE_TYPE (exp);
20037 decl = exp;
20039 else
20041 type = exp;
20042 decl = NULL;
20045 /* Don't do dynamic stack realignment for long long objects with
20046 -mpreferred-stack-boundary=2. */
20047 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
20048 && (!type || !TYPE_USER_ALIGN (type))
20049 && (!decl || !DECL_USER_ALIGN (decl)))
20050 return 32;
20052 return align;
20055 /* Find a location for the static chain incoming to a nested function.
20056 This is a register, unless all free registers are used by arguments. */
20058 static rtx
20059 ix86_static_chain (const_tree fndecl, bool incoming_p)
20061 unsigned regno;
20063 if (!DECL_STATIC_CHAIN (fndecl))
20064 return NULL;
20066 if (TARGET_64BIT)
20068 /* We always use R10 in 64-bit mode. */
20069 regno = R10_REG;
20071 else
20073 tree fntype;
20074 /* By default in 32-bit mode we use ECX to pass the static chain. */
20075 regno = CX_REG;
20077 fntype = TREE_TYPE (fndecl);
20078 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
20080 /* Fastcall functions use ecx/edx for arguments, which leaves
20081 us with EAX for the static chain. */
20082 regno = AX_REG;
20084 else if (ix86_function_regparm (fntype, fndecl) == 3)
20086 /* For regparm 3, we have no free call-clobbered registers in
20087 which to store the static chain. In order to implement this,
20088 we have the trampoline push the static chain to the stack.
20089 However, we can't push a value below the return address when
20090 we call the nested function directly, so we have to use an
20091 alternate entry point. For this we use ESI, and have the
20092 alternate entry point push ESI, so that things appear the
20093 same once we're executing the nested function. */
20094 if (incoming_p)
20096 if (fndecl == current_function_decl)
20097 ix86_static_chain_on_stack = true;
20098 return gen_frame_mem (SImode,
20099 plus_constant (arg_pointer_rtx, -8));
20101 regno = SI_REG;
20105 return gen_rtx_REG (Pmode, regno);
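/* The register selection above can be summarized by this compressed sketch,
   with the calling-convention properties passed in as flags; the enum and
   helper are illustrative only.  */

enum sketch_chain_loc { CHAIN_R10, CHAIN_ECX, CHAIN_EAX, CHAIN_STACK_VIA_ESI };

static enum sketch_chain_loc
sketch_static_chain_location (int is_64bit, int is_fastcall, int regparm)
{
  if (is_64bit)
    return CHAIN_R10;           /* Always R10 in 64-bit mode.  */
  if (is_fastcall)
    return CHAIN_EAX;           /* ECX/EDX carry arguments; EAX is free.  */
  if (regparm == 3)
    return CHAIN_STACK_VIA_ESI; /* No free register; the trampoline pushes
                                   the chain and the alternate entry point
                                   pushes ESI.  */
  return CHAIN_ECX;             /* Default 32-bit case.  */
}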
20108 /* Emit RTL insns to initialize the variable parts of a trampoline.
20109 FNDECL is the decl of the target address; M_TRAMP is a MEM for
20110 the trampoline, and CHAIN_VALUE is an RTX for the static chain
20111 to be passed to the target function. */
20113 static void
20114 ix86_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
20116 rtx mem, fnaddr;
20118 fnaddr = XEXP (DECL_RTL (fndecl), 0);
20120 if (!TARGET_64BIT)
20122 rtx disp, chain;
20123 int opcode;
20125 /* Depending on the static chain location, either load a register
20126 with a constant, or push the constant to the stack. All of the
20127 instructions are the same size. */
20128 chain = ix86_static_chain (fndecl, true);
20129 if (REG_P (chain))
20131 if (REGNO (chain) == CX_REG)
20132 opcode = 0xb9;
20133 else if (REGNO (chain) == AX_REG)
20134 opcode = 0xb8;
20135 else
20136 gcc_unreachable ();
20138 else
20139 opcode = 0x68;
20141 mem = adjust_address (m_tramp, QImode, 0);
20142 emit_move_insn (mem, gen_int_mode (opcode, QImode));
20144 mem = adjust_address (m_tramp, SImode, 1);
20145 emit_move_insn (mem, chain_value);
20147 /* Compute offset from the end of the jmp to the target function.
20148 In the case in which the trampoline stores the static chain on
20149 the stack, we need to skip the first insn which pushes the
20150 (call-saved) register static chain; this push is 1 byte. */
20151 disp = expand_binop (SImode, sub_optab, fnaddr,
20152 plus_constant (XEXP (m_tramp, 0),
20153 MEM_P (chain) ? 9 : 10),
20154 NULL_RTX, 1, OPTAB_DIRECT);
20156 mem = adjust_address (m_tramp, QImode, 5);
20157 emit_move_insn (mem, gen_int_mode (0xe9, QImode));
20159 mem = adjust_address (m_tramp, SImode, 6);
20160 emit_move_insn (mem, disp);
20162 else
20164 int offset = 0;
20166 /* Load the function address into r11. Try to load the address using
20167 the shorter movl instead of movabs. We may want to support
20168 movq for kernel mode, but the kernel does not use trampolines at
20169 the moment. */
20170 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
20172 fnaddr = copy_to_mode_reg (DImode, fnaddr);
20174 mem = adjust_address (m_tramp, HImode, offset);
20175 emit_move_insn (mem, gen_int_mode (0xbb41, HImode));
20177 mem = adjust_address (m_tramp, SImode, offset + 2);
20178 emit_move_insn (mem, gen_lowpart (SImode, fnaddr));
20179 offset += 6;
20181 else
20183 mem = adjust_address (m_tramp, HImode, offset);
20184 emit_move_insn (mem, gen_int_mode (0xbb49, HImode));
20186 mem = adjust_address (m_tramp, DImode, offset + 2);
20187 emit_move_insn (mem, fnaddr);
20188 offset += 10;
20191 /* Load static chain using movabs to r10. */
20192 mem = adjust_address (m_tramp, HImode, offset);
20193 emit_move_insn (mem, gen_int_mode (0xba49, HImode));
20195 mem = adjust_address (m_tramp, DImode, offset + 2);
20196 emit_move_insn (mem, chain_value);
20197 offset += 10;
20199 /* Jump to r11; the last (unused) byte is a nop, only there to
20200 pad the write out to a single 32-bit store. */
20201 mem = adjust_address (m_tramp, SImode, offset);
20202 emit_move_insn (mem, gen_int_mode (0x90e3ff49, SImode));
20203 offset += 4;
20205 gcc_assert (offset <= TRAMPOLINE_SIZE);
20208 #ifdef ENABLE_EXECUTE_STACK
20209 #ifdef CHECK_EXECUTE_STACK_ENABLED
20210 if (CHECK_EXECUTE_STACK_ENABLED)
20211 #endif
20212 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
20213 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
20214 #endif
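/* For reference, the 64-bit trampoline emitted above (in its movabs form,
   used when the function address does not fit a zero-extended 32-bit
   immediate) has the byte layout shown by this self-contained sketch, which
   writes into a plain byte buffer instead of RTL; the helper is illustrative
   only.  */

static unsigned
sketch_write_x86_64_trampoline (unsigned char *buf,
                                unsigned long long fnaddr,
                                unsigned long long chain)
{
  unsigned off = 0;
  int i;

  buf[off++] = 0x49; buf[off++] = 0xbb;   /* movabs $fnaddr, %r11 */
  for (i = 0; i < 8; i++)
    buf[off++] = (unsigned char) (fnaddr >> (8 * i));

  buf[off++] = 0x49; buf[off++] = 0xba;   /* movabs $chain, %r10 */
  for (i = 0; i < 8; i++)
    buf[off++] = (unsigned char) (chain >> (8 * i));

  buf[off++] = 0x49; buf[off++] = 0xff; buf[off++] = 0xe3;  /* jmp *%r11 */
  buf[off++] = 0x90;                      /* nop, padding the jump to 4 bytes */

  return off;                             /* 24 bytes in total.  */
}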
20217 /* The following file contains several enumerations and data structures
20218 built from the definitions in i386-builtin-types.def. */
20220 #include "i386-builtin-types.inc"
20222 /* Table for the ix86 builtin non-function types. */
20223 static GTY(()) tree ix86_builtin_type_tab[(int) IX86_BT_LAST_CPTR + 1];
20225 /* Retrieve an element from the above table, building some of
20226 the types lazily. */
20228 static tree
20229 ix86_get_builtin_type (enum ix86_builtin_type tcode)
20231 unsigned int index;
20232 tree type, itype;
20234 gcc_assert ((unsigned)tcode < ARRAY_SIZE(ix86_builtin_type_tab));
20236 type = ix86_builtin_type_tab[(int) tcode];
20237 if (type != NULL)
20238 return type;
20240 gcc_assert (tcode > IX86_BT_LAST_PRIM);
20241 if (tcode <= IX86_BT_LAST_VECT)
20243 enum machine_mode mode;
20245 index = tcode - IX86_BT_LAST_PRIM - 1;
20246 itype = ix86_get_builtin_type (ix86_builtin_type_vect_base[index]);
20247 mode = ix86_builtin_type_vect_mode[index];
20249 type = build_vector_type_for_mode (itype, mode);
20251 else
20253 int quals;
20255 index = tcode - IX86_BT_LAST_VECT - 1;
20256 if (tcode <= IX86_BT_LAST_PTR)
20257 quals = TYPE_UNQUALIFIED;
20258 else
20259 quals = TYPE_QUAL_CONST;
20261 itype = ix86_get_builtin_type (ix86_builtin_type_ptr_base[index]);
20262 if (quals != TYPE_UNQUALIFIED)
20263 itype = build_qualified_type (itype, quals);
20265 type = build_pointer_type (itype);
20268 ix86_builtin_type_tab[(int) tcode] = type;
20269 return type;
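/* ix86_get_builtin_type above is a straightforward lazy memoization: look
   the code up in the table, build the type on a miss, and cache the result.
   A minimal sketch of the same pattern, assuming an opaque build callback
   and a fixed-size cache:  */

static void *sketch_type_cache[64];

static void *
sketch_get_or_build (unsigned code, void *(*build) (unsigned))
{
  if (code >= 64)
    return 0;           /* Out of range; a real table would assert.  */
  if (!sketch_type_cache[code])
    sketch_type_cache[code] = build (code);
  return sketch_type_cache[code];
}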
20272 /* Table for the ix86 builtin function types. */
20273 static GTY(()) tree ix86_builtin_func_type_tab[(int) IX86_BT_LAST_ALIAS + 1];
20275 /* Retrieve an element from the above table, building some of
20276 the types lazily. */
20278 static tree
20279 ix86_get_builtin_func_type (enum ix86_builtin_func_type tcode)
20281 tree type;
20283 gcc_assert ((unsigned)tcode < ARRAY_SIZE (ix86_builtin_func_type_tab));
20285 type = ix86_builtin_func_type_tab[(int) tcode];
20286 if (type != NULL)
20287 return type;
20289 if (tcode <= IX86_BT_LAST_FUNC)
20291 unsigned start = ix86_builtin_func_start[(int) tcode];
20292 unsigned after = ix86_builtin_func_start[(int) tcode + 1];
20293 tree rtype, atype, args = void_list_node;
20294 unsigned i;
20296 rtype = ix86_get_builtin_type (ix86_builtin_func_args[start]);
20297 for (i = after - 1; i > start; --i)
20299 atype = ix86_get_builtin_type (ix86_builtin_func_args[i]);
20300 args = tree_cons (NULL, atype, args);
20303 type = build_function_type (rtype, args);
20305 else
20307 unsigned index = tcode - IX86_BT_LAST_FUNC - 1;
20308 enum ix86_builtin_func_type icode;
20310 icode = ix86_builtin_func_alias_base[index];
20311 type = ix86_get_builtin_func_type (icode);
20314 ix86_builtin_func_type_tab[(int) tcode] = type;
20315 return type;
20319 /* Codes for all the SSE/MMX builtins. */
20320 enum ix86_builtins
20322 IX86_BUILTIN_ADDPS,
20323 IX86_BUILTIN_ADDSS,
20324 IX86_BUILTIN_DIVPS,
20325 IX86_BUILTIN_DIVSS,
20326 IX86_BUILTIN_MULPS,
20327 IX86_BUILTIN_MULSS,
20328 IX86_BUILTIN_SUBPS,
20329 IX86_BUILTIN_SUBSS,
20331 IX86_BUILTIN_CMPEQPS,
20332 IX86_BUILTIN_CMPLTPS,
20333 IX86_BUILTIN_CMPLEPS,
20334 IX86_BUILTIN_CMPGTPS,
20335 IX86_BUILTIN_CMPGEPS,
20336 IX86_BUILTIN_CMPNEQPS,
20337 IX86_BUILTIN_CMPNLTPS,
20338 IX86_BUILTIN_CMPNLEPS,
20339 IX86_BUILTIN_CMPNGTPS,
20340 IX86_BUILTIN_CMPNGEPS,
20341 IX86_BUILTIN_CMPORDPS,
20342 IX86_BUILTIN_CMPUNORDPS,
20343 IX86_BUILTIN_CMPEQSS,
20344 IX86_BUILTIN_CMPLTSS,
20345 IX86_BUILTIN_CMPLESS,
20346 IX86_BUILTIN_CMPNEQSS,
20347 IX86_BUILTIN_CMPNLTSS,
20348 IX86_BUILTIN_CMPNLESS,
20349 IX86_BUILTIN_CMPNGTSS,
20350 IX86_BUILTIN_CMPNGESS,
20351 IX86_BUILTIN_CMPORDSS,
20352 IX86_BUILTIN_CMPUNORDSS,
20354 IX86_BUILTIN_COMIEQSS,
20355 IX86_BUILTIN_COMILTSS,
20356 IX86_BUILTIN_COMILESS,
20357 IX86_BUILTIN_COMIGTSS,
20358 IX86_BUILTIN_COMIGESS,
20359 IX86_BUILTIN_COMINEQSS,
20360 IX86_BUILTIN_UCOMIEQSS,
20361 IX86_BUILTIN_UCOMILTSS,
20362 IX86_BUILTIN_UCOMILESS,
20363 IX86_BUILTIN_UCOMIGTSS,
20364 IX86_BUILTIN_UCOMIGESS,
20365 IX86_BUILTIN_UCOMINEQSS,
20367 IX86_BUILTIN_CVTPI2PS,
20368 IX86_BUILTIN_CVTPS2PI,
20369 IX86_BUILTIN_CVTSI2SS,
20370 IX86_BUILTIN_CVTSI642SS,
20371 IX86_BUILTIN_CVTSS2SI,
20372 IX86_BUILTIN_CVTSS2SI64,
20373 IX86_BUILTIN_CVTTPS2PI,
20374 IX86_BUILTIN_CVTTSS2SI,
20375 IX86_BUILTIN_CVTTSS2SI64,
20377 IX86_BUILTIN_MAXPS,
20378 IX86_BUILTIN_MAXSS,
20379 IX86_BUILTIN_MINPS,
20380 IX86_BUILTIN_MINSS,
20382 IX86_BUILTIN_LOADUPS,
20383 IX86_BUILTIN_STOREUPS,
20384 IX86_BUILTIN_MOVSS,
20386 IX86_BUILTIN_MOVHLPS,
20387 IX86_BUILTIN_MOVLHPS,
20388 IX86_BUILTIN_LOADHPS,
20389 IX86_BUILTIN_LOADLPS,
20390 IX86_BUILTIN_STOREHPS,
20391 IX86_BUILTIN_STORELPS,
20393 IX86_BUILTIN_MASKMOVQ,
20394 IX86_BUILTIN_MOVMSKPS,
20395 IX86_BUILTIN_PMOVMSKB,
20397 IX86_BUILTIN_MOVNTPS,
20398 IX86_BUILTIN_MOVNTQ,
20400 IX86_BUILTIN_LOADDQU,
20401 IX86_BUILTIN_STOREDQU,
20403 IX86_BUILTIN_PACKSSWB,
20404 IX86_BUILTIN_PACKSSDW,
20405 IX86_BUILTIN_PACKUSWB,
20407 IX86_BUILTIN_PADDB,
20408 IX86_BUILTIN_PADDW,
20409 IX86_BUILTIN_PADDD,
20410 IX86_BUILTIN_PADDQ,
20411 IX86_BUILTIN_PADDSB,
20412 IX86_BUILTIN_PADDSW,
20413 IX86_BUILTIN_PADDUSB,
20414 IX86_BUILTIN_PADDUSW,
20415 IX86_BUILTIN_PSUBB,
20416 IX86_BUILTIN_PSUBW,
20417 IX86_BUILTIN_PSUBD,
20418 IX86_BUILTIN_PSUBQ,
20419 IX86_BUILTIN_PSUBSB,
20420 IX86_BUILTIN_PSUBSW,
20421 IX86_BUILTIN_PSUBUSB,
20422 IX86_BUILTIN_PSUBUSW,
20424 IX86_BUILTIN_PAND,
20425 IX86_BUILTIN_PANDN,
20426 IX86_BUILTIN_POR,
20427 IX86_BUILTIN_PXOR,
20429 IX86_BUILTIN_PAVGB,
20430 IX86_BUILTIN_PAVGW,
20432 IX86_BUILTIN_PCMPEQB,
20433 IX86_BUILTIN_PCMPEQW,
20434 IX86_BUILTIN_PCMPEQD,
20435 IX86_BUILTIN_PCMPGTB,
20436 IX86_BUILTIN_PCMPGTW,
20437 IX86_BUILTIN_PCMPGTD,
20439 IX86_BUILTIN_PMADDWD,
20441 IX86_BUILTIN_PMAXSW,
20442 IX86_BUILTIN_PMAXUB,
20443 IX86_BUILTIN_PMINSW,
20444 IX86_BUILTIN_PMINUB,
20446 IX86_BUILTIN_PMULHUW,
20447 IX86_BUILTIN_PMULHW,
20448 IX86_BUILTIN_PMULLW,
20450 IX86_BUILTIN_PSADBW,
20451 IX86_BUILTIN_PSHUFW,
20453 IX86_BUILTIN_PSLLW,
20454 IX86_BUILTIN_PSLLD,
20455 IX86_BUILTIN_PSLLQ,
20456 IX86_BUILTIN_PSRAW,
20457 IX86_BUILTIN_PSRAD,
20458 IX86_BUILTIN_PSRLW,
20459 IX86_BUILTIN_PSRLD,
20460 IX86_BUILTIN_PSRLQ,
20461 IX86_BUILTIN_PSLLWI,
20462 IX86_BUILTIN_PSLLDI,
20463 IX86_BUILTIN_PSLLQI,
20464 IX86_BUILTIN_PSRAWI,
20465 IX86_BUILTIN_PSRADI,
20466 IX86_BUILTIN_PSRLWI,
20467 IX86_BUILTIN_PSRLDI,
20468 IX86_BUILTIN_PSRLQI,
20470 IX86_BUILTIN_PUNPCKHBW,
20471 IX86_BUILTIN_PUNPCKHWD,
20472 IX86_BUILTIN_PUNPCKHDQ,
20473 IX86_BUILTIN_PUNPCKLBW,
20474 IX86_BUILTIN_PUNPCKLWD,
20475 IX86_BUILTIN_PUNPCKLDQ,
20477 IX86_BUILTIN_SHUFPS,
20479 IX86_BUILTIN_RCPPS,
20480 IX86_BUILTIN_RCPSS,
20481 IX86_BUILTIN_RSQRTPS,
20482 IX86_BUILTIN_RSQRTPS_NR,
20483 IX86_BUILTIN_RSQRTSS,
20484 IX86_BUILTIN_RSQRTF,
20485 IX86_BUILTIN_SQRTPS,
20486 IX86_BUILTIN_SQRTPS_NR,
20487 IX86_BUILTIN_SQRTSS,
20489 IX86_BUILTIN_UNPCKHPS,
20490 IX86_BUILTIN_UNPCKLPS,
20492 IX86_BUILTIN_ANDPS,
20493 IX86_BUILTIN_ANDNPS,
20494 IX86_BUILTIN_ORPS,
20495 IX86_BUILTIN_XORPS,
20497 IX86_BUILTIN_EMMS,
20498 IX86_BUILTIN_LDMXCSR,
20499 IX86_BUILTIN_STMXCSR,
20500 IX86_BUILTIN_SFENCE,
20502 /* 3DNow! Original */
20503 IX86_BUILTIN_FEMMS,
20504 IX86_BUILTIN_PAVGUSB,
20505 IX86_BUILTIN_PF2ID,
20506 IX86_BUILTIN_PFACC,
20507 IX86_BUILTIN_PFADD,
20508 IX86_BUILTIN_PFCMPEQ,
20509 IX86_BUILTIN_PFCMPGE,
20510 IX86_BUILTIN_PFCMPGT,
20511 IX86_BUILTIN_PFMAX,
20512 IX86_BUILTIN_PFMIN,
20513 IX86_BUILTIN_PFMUL,
20514 IX86_BUILTIN_PFRCP,
20515 IX86_BUILTIN_PFRCPIT1,
20516 IX86_BUILTIN_PFRCPIT2,
20517 IX86_BUILTIN_PFRSQIT1,
20518 IX86_BUILTIN_PFRSQRT,
20519 IX86_BUILTIN_PFSUB,
20520 IX86_BUILTIN_PFSUBR,
20521 IX86_BUILTIN_PI2FD,
20522 IX86_BUILTIN_PMULHRW,
20524 /* 3DNow! Athlon Extensions */
20525 IX86_BUILTIN_PF2IW,
20526 IX86_BUILTIN_PFNACC,
20527 IX86_BUILTIN_PFPNACC,
20528 IX86_BUILTIN_PI2FW,
20529 IX86_BUILTIN_PSWAPDSI,
20530 IX86_BUILTIN_PSWAPDSF,
20532 /* SSE2 */
20533 IX86_BUILTIN_ADDPD,
20534 IX86_BUILTIN_ADDSD,
20535 IX86_BUILTIN_DIVPD,
20536 IX86_BUILTIN_DIVSD,
20537 IX86_BUILTIN_MULPD,
20538 IX86_BUILTIN_MULSD,
20539 IX86_BUILTIN_SUBPD,
20540 IX86_BUILTIN_SUBSD,
20542 IX86_BUILTIN_CMPEQPD,
20543 IX86_BUILTIN_CMPLTPD,
20544 IX86_BUILTIN_CMPLEPD,
20545 IX86_BUILTIN_CMPGTPD,
20546 IX86_BUILTIN_CMPGEPD,
20547 IX86_BUILTIN_CMPNEQPD,
20548 IX86_BUILTIN_CMPNLTPD,
20549 IX86_BUILTIN_CMPNLEPD,
20550 IX86_BUILTIN_CMPNGTPD,
20551 IX86_BUILTIN_CMPNGEPD,
20552 IX86_BUILTIN_CMPORDPD,
20553 IX86_BUILTIN_CMPUNORDPD,
20554 IX86_BUILTIN_CMPEQSD,
20555 IX86_BUILTIN_CMPLTSD,
20556 IX86_BUILTIN_CMPLESD,
20557 IX86_BUILTIN_CMPNEQSD,
20558 IX86_BUILTIN_CMPNLTSD,
20559 IX86_BUILTIN_CMPNLESD,
20560 IX86_BUILTIN_CMPORDSD,
20561 IX86_BUILTIN_CMPUNORDSD,
20563 IX86_BUILTIN_COMIEQSD,
20564 IX86_BUILTIN_COMILTSD,
20565 IX86_BUILTIN_COMILESD,
20566 IX86_BUILTIN_COMIGTSD,
20567 IX86_BUILTIN_COMIGESD,
20568 IX86_BUILTIN_COMINEQSD,
20569 IX86_BUILTIN_UCOMIEQSD,
20570 IX86_BUILTIN_UCOMILTSD,
20571 IX86_BUILTIN_UCOMILESD,
20572 IX86_BUILTIN_UCOMIGTSD,
20573 IX86_BUILTIN_UCOMIGESD,
20574 IX86_BUILTIN_UCOMINEQSD,
20576 IX86_BUILTIN_MAXPD,
20577 IX86_BUILTIN_MAXSD,
20578 IX86_BUILTIN_MINPD,
20579 IX86_BUILTIN_MINSD,
20581 IX86_BUILTIN_ANDPD,
20582 IX86_BUILTIN_ANDNPD,
20583 IX86_BUILTIN_ORPD,
20584 IX86_BUILTIN_XORPD,
20586 IX86_BUILTIN_SQRTPD,
20587 IX86_BUILTIN_SQRTSD,
20589 IX86_BUILTIN_UNPCKHPD,
20590 IX86_BUILTIN_UNPCKLPD,
20592 IX86_BUILTIN_SHUFPD,
20594 IX86_BUILTIN_LOADUPD,
20595 IX86_BUILTIN_STOREUPD,
20596 IX86_BUILTIN_MOVSD,
20598 IX86_BUILTIN_LOADHPD,
20599 IX86_BUILTIN_LOADLPD,
20601 IX86_BUILTIN_CVTDQ2PD,
20602 IX86_BUILTIN_CVTDQ2PS,
20604 IX86_BUILTIN_CVTPD2DQ,
20605 IX86_BUILTIN_CVTPD2PI,
20606 IX86_BUILTIN_CVTPD2PS,
20607 IX86_BUILTIN_CVTTPD2DQ,
20608 IX86_BUILTIN_CVTTPD2PI,
20610 IX86_BUILTIN_CVTPI2PD,
20611 IX86_BUILTIN_CVTSI2SD,
20612 IX86_BUILTIN_CVTSI642SD,
20614 IX86_BUILTIN_CVTSD2SI,
20615 IX86_BUILTIN_CVTSD2SI64,
20616 IX86_BUILTIN_CVTSD2SS,
20617 IX86_BUILTIN_CVTSS2SD,
20618 IX86_BUILTIN_CVTTSD2SI,
20619 IX86_BUILTIN_CVTTSD2SI64,
20621 IX86_BUILTIN_CVTPS2DQ,
20622 IX86_BUILTIN_CVTPS2PD,
20623 IX86_BUILTIN_CVTTPS2DQ,
20625 IX86_BUILTIN_MOVNTI,
20626 IX86_BUILTIN_MOVNTPD,
20627 IX86_BUILTIN_MOVNTDQ,
20629 IX86_BUILTIN_MOVQ128,
20631 /* SSE2 MMX */
20632 IX86_BUILTIN_MASKMOVDQU,
20633 IX86_BUILTIN_MOVMSKPD,
20634 IX86_BUILTIN_PMOVMSKB128,
20636 IX86_BUILTIN_PACKSSWB128,
20637 IX86_BUILTIN_PACKSSDW128,
20638 IX86_BUILTIN_PACKUSWB128,
20640 IX86_BUILTIN_PADDB128,
20641 IX86_BUILTIN_PADDW128,
20642 IX86_BUILTIN_PADDD128,
20643 IX86_BUILTIN_PADDQ128,
20644 IX86_BUILTIN_PADDSB128,
20645 IX86_BUILTIN_PADDSW128,
20646 IX86_BUILTIN_PADDUSB128,
20647 IX86_BUILTIN_PADDUSW128,
20648 IX86_BUILTIN_PSUBB128,
20649 IX86_BUILTIN_PSUBW128,
20650 IX86_BUILTIN_PSUBD128,
20651 IX86_BUILTIN_PSUBQ128,
20652 IX86_BUILTIN_PSUBSB128,
20653 IX86_BUILTIN_PSUBSW128,
20654 IX86_BUILTIN_PSUBUSB128,
20655 IX86_BUILTIN_PSUBUSW128,
20657 IX86_BUILTIN_PAND128,
20658 IX86_BUILTIN_PANDN128,
20659 IX86_BUILTIN_POR128,
20660 IX86_BUILTIN_PXOR128,
20662 IX86_BUILTIN_PAVGB128,
20663 IX86_BUILTIN_PAVGW128,
20665 IX86_BUILTIN_PCMPEQB128,
20666 IX86_BUILTIN_PCMPEQW128,
20667 IX86_BUILTIN_PCMPEQD128,
20668 IX86_BUILTIN_PCMPGTB128,
20669 IX86_BUILTIN_PCMPGTW128,
20670 IX86_BUILTIN_PCMPGTD128,
20672 IX86_BUILTIN_PMADDWD128,
20674 IX86_BUILTIN_PMAXSW128,
20675 IX86_BUILTIN_PMAXUB128,
20676 IX86_BUILTIN_PMINSW128,
20677 IX86_BUILTIN_PMINUB128,
20679 IX86_BUILTIN_PMULUDQ,
20680 IX86_BUILTIN_PMULUDQ128,
20681 IX86_BUILTIN_PMULHUW128,
20682 IX86_BUILTIN_PMULHW128,
20683 IX86_BUILTIN_PMULLW128,
20685 IX86_BUILTIN_PSADBW128,
20686 IX86_BUILTIN_PSHUFHW,
20687 IX86_BUILTIN_PSHUFLW,
20688 IX86_BUILTIN_PSHUFD,
20690 IX86_BUILTIN_PSLLDQI128,
20691 IX86_BUILTIN_PSLLWI128,
20692 IX86_BUILTIN_PSLLDI128,
20693 IX86_BUILTIN_PSLLQI128,
20694 IX86_BUILTIN_PSRAWI128,
20695 IX86_BUILTIN_PSRADI128,
20696 IX86_BUILTIN_PSRLDQI128,
20697 IX86_BUILTIN_PSRLWI128,
20698 IX86_BUILTIN_PSRLDI128,
20699 IX86_BUILTIN_PSRLQI128,
20701 IX86_BUILTIN_PSLLDQ128,
20702 IX86_BUILTIN_PSLLW128,
20703 IX86_BUILTIN_PSLLD128,
20704 IX86_BUILTIN_PSLLQ128,
20705 IX86_BUILTIN_PSRAW128,
20706 IX86_BUILTIN_PSRAD128,
20707 IX86_BUILTIN_PSRLW128,
20708 IX86_BUILTIN_PSRLD128,
20709 IX86_BUILTIN_PSRLQ128,
20711 IX86_BUILTIN_PUNPCKHBW128,
20712 IX86_BUILTIN_PUNPCKHWD128,
20713 IX86_BUILTIN_PUNPCKHDQ128,
20714 IX86_BUILTIN_PUNPCKHQDQ128,
20715 IX86_BUILTIN_PUNPCKLBW128,
20716 IX86_BUILTIN_PUNPCKLWD128,
20717 IX86_BUILTIN_PUNPCKLDQ128,
20718 IX86_BUILTIN_PUNPCKLQDQ128,
20720 IX86_BUILTIN_CLFLUSH,
20721 IX86_BUILTIN_MFENCE,
20722 IX86_BUILTIN_LFENCE,
20724 IX86_BUILTIN_BSRSI,
20725 IX86_BUILTIN_BSRDI,
20726 IX86_BUILTIN_RDPMC,
20727 IX86_BUILTIN_RDTSC,
20728 IX86_BUILTIN_RDTSCP,
20729 IX86_BUILTIN_ROLQI,
20730 IX86_BUILTIN_ROLHI,
20731 IX86_BUILTIN_RORQI,
20732 IX86_BUILTIN_RORHI,
20734 /* SSE3. */
20735 IX86_BUILTIN_ADDSUBPS,
20736 IX86_BUILTIN_HADDPS,
20737 IX86_BUILTIN_HSUBPS,
20738 IX86_BUILTIN_MOVSHDUP,
20739 IX86_BUILTIN_MOVSLDUP,
20740 IX86_BUILTIN_ADDSUBPD,
20741 IX86_BUILTIN_HADDPD,
20742 IX86_BUILTIN_HSUBPD,
20743 IX86_BUILTIN_LDDQU,
20745 IX86_BUILTIN_MONITOR,
20746 IX86_BUILTIN_MWAIT,
20748 /* SSSE3. */
20749 IX86_BUILTIN_PHADDW,
20750 IX86_BUILTIN_PHADDD,
20751 IX86_BUILTIN_PHADDSW,
20752 IX86_BUILTIN_PHSUBW,
20753 IX86_BUILTIN_PHSUBD,
20754 IX86_BUILTIN_PHSUBSW,
20755 IX86_BUILTIN_PMADDUBSW,
20756 IX86_BUILTIN_PMULHRSW,
20757 IX86_BUILTIN_PSHUFB,
20758 IX86_BUILTIN_PSIGNB,
20759 IX86_BUILTIN_PSIGNW,
20760 IX86_BUILTIN_PSIGND,
20761 IX86_BUILTIN_PALIGNR,
20762 IX86_BUILTIN_PABSB,
20763 IX86_BUILTIN_PABSW,
20764 IX86_BUILTIN_PABSD,
20766 IX86_BUILTIN_PHADDW128,
20767 IX86_BUILTIN_PHADDD128,
20768 IX86_BUILTIN_PHADDSW128,
20769 IX86_BUILTIN_PHSUBW128,
20770 IX86_BUILTIN_PHSUBD128,
20771 IX86_BUILTIN_PHSUBSW128,
20772 IX86_BUILTIN_PMADDUBSW128,
20773 IX86_BUILTIN_PMULHRSW128,
20774 IX86_BUILTIN_PSHUFB128,
20775 IX86_BUILTIN_PSIGNB128,
20776 IX86_BUILTIN_PSIGNW128,
20777 IX86_BUILTIN_PSIGND128,
20778 IX86_BUILTIN_PALIGNR128,
20779 IX86_BUILTIN_PABSB128,
20780 IX86_BUILTIN_PABSW128,
20781 IX86_BUILTIN_PABSD128,
20783 /* AMDFAM10 - SSE4A New Instructions. */
20784 IX86_BUILTIN_MOVNTSD,
20785 IX86_BUILTIN_MOVNTSS,
20786 IX86_BUILTIN_EXTRQI,
20787 IX86_BUILTIN_EXTRQ,
20788 IX86_BUILTIN_INSERTQI,
20789 IX86_BUILTIN_INSERTQ,
20791 /* SSE4.1. */
20792 IX86_BUILTIN_BLENDPD,
20793 IX86_BUILTIN_BLENDPS,
20794 IX86_BUILTIN_BLENDVPD,
20795 IX86_BUILTIN_BLENDVPS,
20796 IX86_BUILTIN_PBLENDVB128,
20797 IX86_BUILTIN_PBLENDW128,
20799 IX86_BUILTIN_DPPD,
20800 IX86_BUILTIN_DPPS,
20802 IX86_BUILTIN_INSERTPS128,
20804 IX86_BUILTIN_MOVNTDQA,
20805 IX86_BUILTIN_MPSADBW128,
20806 IX86_BUILTIN_PACKUSDW128,
20807 IX86_BUILTIN_PCMPEQQ,
20808 IX86_BUILTIN_PHMINPOSUW128,
20810 IX86_BUILTIN_PMAXSB128,
20811 IX86_BUILTIN_PMAXSD128,
20812 IX86_BUILTIN_PMAXUD128,
20813 IX86_BUILTIN_PMAXUW128,
20815 IX86_BUILTIN_PMINSB128,
20816 IX86_BUILTIN_PMINSD128,
20817 IX86_BUILTIN_PMINUD128,
20818 IX86_BUILTIN_PMINUW128,
20820 IX86_BUILTIN_PMOVSXBW128,
20821 IX86_BUILTIN_PMOVSXBD128,
20822 IX86_BUILTIN_PMOVSXBQ128,
20823 IX86_BUILTIN_PMOVSXWD128,
20824 IX86_BUILTIN_PMOVSXWQ128,
20825 IX86_BUILTIN_PMOVSXDQ128,
20827 IX86_BUILTIN_PMOVZXBW128,
20828 IX86_BUILTIN_PMOVZXBD128,
20829 IX86_BUILTIN_PMOVZXBQ128,
20830 IX86_BUILTIN_PMOVZXWD128,
20831 IX86_BUILTIN_PMOVZXWQ128,
20832 IX86_BUILTIN_PMOVZXDQ128,
20834 IX86_BUILTIN_PMULDQ128,
20835 IX86_BUILTIN_PMULLD128,
20837 IX86_BUILTIN_ROUNDPD,
20838 IX86_BUILTIN_ROUNDPS,
20839 IX86_BUILTIN_ROUNDSD,
20840 IX86_BUILTIN_ROUNDSS,
20842 IX86_BUILTIN_PTESTZ,
20843 IX86_BUILTIN_PTESTC,
20844 IX86_BUILTIN_PTESTNZC,
20846 IX86_BUILTIN_VEC_INIT_V2SI,
20847 IX86_BUILTIN_VEC_INIT_V4HI,
20848 IX86_BUILTIN_VEC_INIT_V8QI,
20849 IX86_BUILTIN_VEC_EXT_V2DF,
20850 IX86_BUILTIN_VEC_EXT_V2DI,
20851 IX86_BUILTIN_VEC_EXT_V4SF,
20852 IX86_BUILTIN_VEC_EXT_V4SI,
20853 IX86_BUILTIN_VEC_EXT_V8HI,
20854 IX86_BUILTIN_VEC_EXT_V2SI,
20855 IX86_BUILTIN_VEC_EXT_V4HI,
20856 IX86_BUILTIN_VEC_EXT_V16QI,
20857 IX86_BUILTIN_VEC_SET_V2DI,
20858 IX86_BUILTIN_VEC_SET_V4SF,
20859 IX86_BUILTIN_VEC_SET_V4SI,
20860 IX86_BUILTIN_VEC_SET_V8HI,
20861 IX86_BUILTIN_VEC_SET_V4HI,
20862 IX86_BUILTIN_VEC_SET_V16QI,
20864 IX86_BUILTIN_VEC_PACK_SFIX,
20866 /* SSE4.2. */
20867 IX86_BUILTIN_CRC32QI,
20868 IX86_BUILTIN_CRC32HI,
20869 IX86_BUILTIN_CRC32SI,
20870 IX86_BUILTIN_CRC32DI,
20872 IX86_BUILTIN_PCMPESTRI128,
20873 IX86_BUILTIN_PCMPESTRM128,
20874 IX86_BUILTIN_PCMPESTRA128,
20875 IX86_BUILTIN_PCMPESTRC128,
20876 IX86_BUILTIN_PCMPESTRO128,
20877 IX86_BUILTIN_PCMPESTRS128,
20878 IX86_BUILTIN_PCMPESTRZ128,
20879 IX86_BUILTIN_PCMPISTRI128,
20880 IX86_BUILTIN_PCMPISTRM128,
20881 IX86_BUILTIN_PCMPISTRA128,
20882 IX86_BUILTIN_PCMPISTRC128,
20883 IX86_BUILTIN_PCMPISTRO128,
20884 IX86_BUILTIN_PCMPISTRS128,
20885 IX86_BUILTIN_PCMPISTRZ128,
20887 IX86_BUILTIN_PCMPGTQ,
20889 /* AES instructions */
20890 IX86_BUILTIN_AESENC128,
20891 IX86_BUILTIN_AESENCLAST128,
20892 IX86_BUILTIN_AESDEC128,
20893 IX86_BUILTIN_AESDECLAST128,
20894 IX86_BUILTIN_AESIMC128,
20895 IX86_BUILTIN_AESKEYGENASSIST128,
20897 /* PCLMUL instruction */
20898 IX86_BUILTIN_PCLMULQDQ128,
20900 /* AVX */
20901 IX86_BUILTIN_ADDPD256,
20902 IX86_BUILTIN_ADDPS256,
20903 IX86_BUILTIN_ADDSUBPD256,
20904 IX86_BUILTIN_ADDSUBPS256,
20905 IX86_BUILTIN_ANDPD256,
20906 IX86_BUILTIN_ANDPS256,
20907 IX86_BUILTIN_ANDNPD256,
20908 IX86_BUILTIN_ANDNPS256,
20909 IX86_BUILTIN_BLENDPD256,
20910 IX86_BUILTIN_BLENDPS256,
20911 IX86_BUILTIN_BLENDVPD256,
20912 IX86_BUILTIN_BLENDVPS256,
20913 IX86_BUILTIN_DIVPD256,
20914 IX86_BUILTIN_DIVPS256,
20915 IX86_BUILTIN_DPPS256,
20916 IX86_BUILTIN_HADDPD256,
20917 IX86_BUILTIN_HADDPS256,
20918 IX86_BUILTIN_HSUBPD256,
20919 IX86_BUILTIN_HSUBPS256,
20920 IX86_BUILTIN_MAXPD256,
20921 IX86_BUILTIN_MAXPS256,
20922 IX86_BUILTIN_MINPD256,
20923 IX86_BUILTIN_MINPS256,
20924 IX86_BUILTIN_MULPD256,
20925 IX86_BUILTIN_MULPS256,
20926 IX86_BUILTIN_ORPD256,
20927 IX86_BUILTIN_ORPS256,
20928 IX86_BUILTIN_SHUFPD256,
20929 IX86_BUILTIN_SHUFPS256,
20930 IX86_BUILTIN_SUBPD256,
20931 IX86_BUILTIN_SUBPS256,
20932 IX86_BUILTIN_XORPD256,
20933 IX86_BUILTIN_XORPS256,
20934 IX86_BUILTIN_CMPSD,
20935 IX86_BUILTIN_CMPSS,
20936 IX86_BUILTIN_CMPPD,
20937 IX86_BUILTIN_CMPPS,
20938 IX86_BUILTIN_CMPPD256,
20939 IX86_BUILTIN_CMPPS256,
20940 IX86_BUILTIN_CVTDQ2PD256,
20941 IX86_BUILTIN_CVTDQ2PS256,
20942 IX86_BUILTIN_CVTPD2PS256,
20943 IX86_BUILTIN_CVTPS2DQ256,
20944 IX86_BUILTIN_CVTPS2PD256,
20945 IX86_BUILTIN_CVTTPD2DQ256,
20946 IX86_BUILTIN_CVTPD2DQ256,
20947 IX86_BUILTIN_CVTTPS2DQ256,
20948 IX86_BUILTIN_EXTRACTF128PD256,
20949 IX86_BUILTIN_EXTRACTF128PS256,
20950 IX86_BUILTIN_EXTRACTF128SI256,
20951 IX86_BUILTIN_VZEROALL,
20952 IX86_BUILTIN_VZEROUPPER,
20953 IX86_BUILTIN_VPERMILVARPD,
20954 IX86_BUILTIN_VPERMILVARPS,
20955 IX86_BUILTIN_VPERMILVARPD256,
20956 IX86_BUILTIN_VPERMILVARPS256,
20957 IX86_BUILTIN_VPERMILPD,
20958 IX86_BUILTIN_VPERMILPS,
20959 IX86_BUILTIN_VPERMILPD256,
20960 IX86_BUILTIN_VPERMILPS256,
20961 IX86_BUILTIN_VPERMIL2PD,
20962 IX86_BUILTIN_VPERMIL2PS,
20963 IX86_BUILTIN_VPERMIL2PD256,
20964 IX86_BUILTIN_VPERMIL2PS256,
20965 IX86_BUILTIN_VPERM2F128PD256,
20966 IX86_BUILTIN_VPERM2F128PS256,
20967 IX86_BUILTIN_VPERM2F128SI256,
20968 IX86_BUILTIN_VBROADCASTSS,
20969 IX86_BUILTIN_VBROADCASTSD256,
20970 IX86_BUILTIN_VBROADCASTSS256,
20971 IX86_BUILTIN_VBROADCASTPD256,
20972 IX86_BUILTIN_VBROADCASTPS256,
20973 IX86_BUILTIN_VINSERTF128PD256,
20974 IX86_BUILTIN_VINSERTF128PS256,
20975 IX86_BUILTIN_VINSERTF128SI256,
20976 IX86_BUILTIN_LOADUPD256,
20977 IX86_BUILTIN_LOADUPS256,
20978 IX86_BUILTIN_STOREUPD256,
20979 IX86_BUILTIN_STOREUPS256,
20980 IX86_BUILTIN_LDDQU256,
20981 IX86_BUILTIN_MOVNTDQ256,
20982 IX86_BUILTIN_MOVNTPD256,
20983 IX86_BUILTIN_MOVNTPS256,
20984 IX86_BUILTIN_LOADDQU256,
20985 IX86_BUILTIN_STOREDQU256,
20986 IX86_BUILTIN_MASKLOADPD,
20987 IX86_BUILTIN_MASKLOADPS,
20988 IX86_BUILTIN_MASKSTOREPD,
20989 IX86_BUILTIN_MASKSTOREPS,
20990 IX86_BUILTIN_MASKLOADPD256,
20991 IX86_BUILTIN_MASKLOADPS256,
20992 IX86_BUILTIN_MASKSTOREPD256,
20993 IX86_BUILTIN_MASKSTOREPS256,
20994 IX86_BUILTIN_MOVSHDUP256,
20995 IX86_BUILTIN_MOVSLDUP256,
20996 IX86_BUILTIN_MOVDDUP256,
20998 IX86_BUILTIN_SQRTPD256,
20999 IX86_BUILTIN_SQRTPS256,
21000 IX86_BUILTIN_SQRTPS_NR256,
21001 IX86_BUILTIN_RSQRTPS256,
21002 IX86_BUILTIN_RSQRTPS_NR256,
21004 IX86_BUILTIN_RCPPS256,
21006 IX86_BUILTIN_ROUNDPD256,
21007 IX86_BUILTIN_ROUNDPS256,
21009 IX86_BUILTIN_UNPCKHPD256,
21010 IX86_BUILTIN_UNPCKLPD256,
21011 IX86_BUILTIN_UNPCKHPS256,
21012 IX86_BUILTIN_UNPCKLPS256,
21014 IX86_BUILTIN_SI256_SI,
21015 IX86_BUILTIN_PS256_PS,
21016 IX86_BUILTIN_PD256_PD,
21017 IX86_BUILTIN_SI_SI256,
21018 IX86_BUILTIN_PS_PS256,
21019 IX86_BUILTIN_PD_PD256,
21021 IX86_BUILTIN_VTESTZPD,
21022 IX86_BUILTIN_VTESTCPD,
21023 IX86_BUILTIN_VTESTNZCPD,
21024 IX86_BUILTIN_VTESTZPS,
21025 IX86_BUILTIN_VTESTCPS,
21026 IX86_BUILTIN_VTESTNZCPS,
21027 IX86_BUILTIN_VTESTZPD256,
21028 IX86_BUILTIN_VTESTCPD256,
21029 IX86_BUILTIN_VTESTNZCPD256,
21030 IX86_BUILTIN_VTESTZPS256,
21031 IX86_BUILTIN_VTESTCPS256,
21032 IX86_BUILTIN_VTESTNZCPS256,
21033 IX86_BUILTIN_PTESTZ256,
21034 IX86_BUILTIN_PTESTC256,
21035 IX86_BUILTIN_PTESTNZC256,
21037 IX86_BUILTIN_MOVMSKPD256,
21038 IX86_BUILTIN_MOVMSKPS256,
21040 /* TFmode support builtins. */
21041 IX86_BUILTIN_INFQ,
21042 IX86_BUILTIN_HUGE_VALQ,
21043 IX86_BUILTIN_FABSQ,
21044 IX86_BUILTIN_COPYSIGNQ,
21046 /* Vectorizer support builtins. */
21047 IX86_BUILTIN_CPYSGNPS,
21048 IX86_BUILTIN_CPYSGNPD,
21050 IX86_BUILTIN_CVTUDQ2PS,
21052 IX86_BUILTIN_VEC_PERM_V2DF,
21053 IX86_BUILTIN_VEC_PERM_V4SF,
21054 IX86_BUILTIN_VEC_PERM_V2DI,
21055 IX86_BUILTIN_VEC_PERM_V4SI,
21056 IX86_BUILTIN_VEC_PERM_V8HI,
21057 IX86_BUILTIN_VEC_PERM_V16QI,
21058 IX86_BUILTIN_VEC_PERM_V2DI_U,
21059 IX86_BUILTIN_VEC_PERM_V4SI_U,
21060 IX86_BUILTIN_VEC_PERM_V8HI_U,
21061 IX86_BUILTIN_VEC_PERM_V16QI_U,
21062 IX86_BUILTIN_VEC_PERM_V4DF,
21063 IX86_BUILTIN_VEC_PERM_V8SF,
21065 /* FMA4 and XOP instructions. */
21066 IX86_BUILTIN_VFMADDSS,
21067 IX86_BUILTIN_VFMADDSD,
21068 IX86_BUILTIN_VFMADDPS,
21069 IX86_BUILTIN_VFMADDPD,
21070 IX86_BUILTIN_VFMSUBSS,
21071 IX86_BUILTIN_VFMSUBSD,
21072 IX86_BUILTIN_VFMSUBPS,
21073 IX86_BUILTIN_VFMSUBPD,
21074 IX86_BUILTIN_VFMADDSUBPS,
21075 IX86_BUILTIN_VFMADDSUBPD,
21076 IX86_BUILTIN_VFMSUBADDPS,
21077 IX86_BUILTIN_VFMSUBADDPD,
21078 IX86_BUILTIN_VFNMADDSS,
21079 IX86_BUILTIN_VFNMADDSD,
21080 IX86_BUILTIN_VFNMADDPS,
21081 IX86_BUILTIN_VFNMADDPD,
21082 IX86_BUILTIN_VFNMSUBSS,
21083 IX86_BUILTIN_VFNMSUBSD,
21084 IX86_BUILTIN_VFNMSUBPS,
21085 IX86_BUILTIN_VFNMSUBPD,
21086 IX86_BUILTIN_VFMADDPS256,
21087 IX86_BUILTIN_VFMADDPD256,
21088 IX86_BUILTIN_VFMSUBPS256,
21089 IX86_BUILTIN_VFMSUBPD256,
21090 IX86_BUILTIN_VFMADDSUBPS256,
21091 IX86_BUILTIN_VFMADDSUBPD256,
21092 IX86_BUILTIN_VFMSUBADDPS256,
21093 IX86_BUILTIN_VFMSUBADDPD256,
21094 IX86_BUILTIN_VFNMADDPS256,
21095 IX86_BUILTIN_VFNMADDPD256,
21096 IX86_BUILTIN_VFNMSUBPS256,
21097 IX86_BUILTIN_VFNMSUBPD256,
21099 IX86_BUILTIN_VPCMOV,
21100 IX86_BUILTIN_VPCMOV_V2DI,
21101 IX86_BUILTIN_VPCMOV_V4SI,
21102 IX86_BUILTIN_VPCMOV_V8HI,
21103 IX86_BUILTIN_VPCMOV_V16QI,
21104 IX86_BUILTIN_VPCMOV_V4SF,
21105 IX86_BUILTIN_VPCMOV_V2DF,
21106 IX86_BUILTIN_VPCMOV256,
21107 IX86_BUILTIN_VPCMOV_V4DI256,
21108 IX86_BUILTIN_VPCMOV_V8SI256,
21109 IX86_BUILTIN_VPCMOV_V16HI256,
21110 IX86_BUILTIN_VPCMOV_V32QI256,
21111 IX86_BUILTIN_VPCMOV_V8SF256,
21112 IX86_BUILTIN_VPCMOV_V4DF256,
21114 IX86_BUILTIN_VPPERM,
21116 IX86_BUILTIN_VPMACSSWW,
21117 IX86_BUILTIN_VPMACSWW,
21118 IX86_BUILTIN_VPMACSSWD,
21119 IX86_BUILTIN_VPMACSWD,
21120 IX86_BUILTIN_VPMACSSDD,
21121 IX86_BUILTIN_VPMACSDD,
21122 IX86_BUILTIN_VPMACSSDQL,
21123 IX86_BUILTIN_VPMACSSDQH,
21124 IX86_BUILTIN_VPMACSDQL,
21125 IX86_BUILTIN_VPMACSDQH,
21126 IX86_BUILTIN_VPMADCSSWD,
21127 IX86_BUILTIN_VPMADCSWD,
21129 IX86_BUILTIN_VPHADDBW,
21130 IX86_BUILTIN_VPHADDBD,
21131 IX86_BUILTIN_VPHADDBQ,
21132 IX86_BUILTIN_VPHADDWD,
21133 IX86_BUILTIN_VPHADDWQ,
21134 IX86_BUILTIN_VPHADDDQ,
21135 IX86_BUILTIN_VPHADDUBW,
21136 IX86_BUILTIN_VPHADDUBD,
21137 IX86_BUILTIN_VPHADDUBQ,
21138 IX86_BUILTIN_VPHADDUWD,
21139 IX86_BUILTIN_VPHADDUWQ,
21140 IX86_BUILTIN_VPHADDUDQ,
21141 IX86_BUILTIN_VPHSUBBW,
21142 IX86_BUILTIN_VPHSUBWD,
21143 IX86_BUILTIN_VPHSUBDQ,
21145 IX86_BUILTIN_VPROTB,
21146 IX86_BUILTIN_VPROTW,
21147 IX86_BUILTIN_VPROTD,
21148 IX86_BUILTIN_VPROTQ,
21149 IX86_BUILTIN_VPROTB_IMM,
21150 IX86_BUILTIN_VPROTW_IMM,
21151 IX86_BUILTIN_VPROTD_IMM,
21152 IX86_BUILTIN_VPROTQ_IMM,
21154 IX86_BUILTIN_VPSHLB,
21155 IX86_BUILTIN_VPSHLW,
21156 IX86_BUILTIN_VPSHLD,
21157 IX86_BUILTIN_VPSHLQ,
21158 IX86_BUILTIN_VPSHAB,
21159 IX86_BUILTIN_VPSHAW,
21160 IX86_BUILTIN_VPSHAD,
21161 IX86_BUILTIN_VPSHAQ,
21163 IX86_BUILTIN_VFRCZSS,
21164 IX86_BUILTIN_VFRCZSD,
21165 IX86_BUILTIN_VFRCZPS,
21166 IX86_BUILTIN_VFRCZPD,
21167 IX86_BUILTIN_VFRCZPS256,
21168 IX86_BUILTIN_VFRCZPD256,
21170 IX86_BUILTIN_VPCOMEQUB,
21171 IX86_BUILTIN_VPCOMNEUB,
21172 IX86_BUILTIN_VPCOMLTUB,
21173 IX86_BUILTIN_VPCOMLEUB,
21174 IX86_BUILTIN_VPCOMGTUB,
21175 IX86_BUILTIN_VPCOMGEUB,
21176 IX86_BUILTIN_VPCOMFALSEUB,
21177 IX86_BUILTIN_VPCOMTRUEUB,
21179 IX86_BUILTIN_VPCOMEQUW,
21180 IX86_BUILTIN_VPCOMNEUW,
21181 IX86_BUILTIN_VPCOMLTUW,
21182 IX86_BUILTIN_VPCOMLEUW,
21183 IX86_BUILTIN_VPCOMGTUW,
21184 IX86_BUILTIN_VPCOMGEUW,
21185 IX86_BUILTIN_VPCOMFALSEUW,
21186 IX86_BUILTIN_VPCOMTRUEUW,
21188 IX86_BUILTIN_VPCOMEQUD,
21189 IX86_BUILTIN_VPCOMNEUD,
21190 IX86_BUILTIN_VPCOMLTUD,
21191 IX86_BUILTIN_VPCOMLEUD,
21192 IX86_BUILTIN_VPCOMGTUD,
21193 IX86_BUILTIN_VPCOMGEUD,
21194 IX86_BUILTIN_VPCOMFALSEUD,
21195 IX86_BUILTIN_VPCOMTRUEUD,
21197 IX86_BUILTIN_VPCOMEQUQ,
21198 IX86_BUILTIN_VPCOMNEUQ,
21199 IX86_BUILTIN_VPCOMLTUQ,
21200 IX86_BUILTIN_VPCOMLEUQ,
21201 IX86_BUILTIN_VPCOMGTUQ,
21202 IX86_BUILTIN_VPCOMGEUQ,
21203 IX86_BUILTIN_VPCOMFALSEUQ,
21204 IX86_BUILTIN_VPCOMTRUEUQ,
21206 IX86_BUILTIN_VPCOMEQB,
21207 IX86_BUILTIN_VPCOMNEB,
21208 IX86_BUILTIN_VPCOMLTB,
21209 IX86_BUILTIN_VPCOMLEB,
21210 IX86_BUILTIN_VPCOMGTB,
21211 IX86_BUILTIN_VPCOMGEB,
21212 IX86_BUILTIN_VPCOMFALSEB,
21213 IX86_BUILTIN_VPCOMTRUEB,
21215 IX86_BUILTIN_VPCOMEQW,
21216 IX86_BUILTIN_VPCOMNEW,
21217 IX86_BUILTIN_VPCOMLTW,
21218 IX86_BUILTIN_VPCOMLEW,
21219 IX86_BUILTIN_VPCOMGTW,
21220 IX86_BUILTIN_VPCOMGEW,
21221 IX86_BUILTIN_VPCOMFALSEW,
21222 IX86_BUILTIN_VPCOMTRUEW,
21224 IX86_BUILTIN_VPCOMEQD,
21225 IX86_BUILTIN_VPCOMNED,
21226 IX86_BUILTIN_VPCOMLTD,
21227 IX86_BUILTIN_VPCOMLED,
21228 IX86_BUILTIN_VPCOMGTD,
21229 IX86_BUILTIN_VPCOMGED,
21230 IX86_BUILTIN_VPCOMFALSED,
21231 IX86_BUILTIN_VPCOMTRUED,
21233 IX86_BUILTIN_VPCOMEQQ,
21234 IX86_BUILTIN_VPCOMNEQ,
21235 IX86_BUILTIN_VPCOMLTQ,
21236 IX86_BUILTIN_VPCOMLEQ,
21237 IX86_BUILTIN_VPCOMGTQ,
21238 IX86_BUILTIN_VPCOMGEQ,
21239 IX86_BUILTIN_VPCOMFALSEQ,
21240 IX86_BUILTIN_VPCOMTRUEQ,
21242 /* LWP instructions. */
21243 IX86_BUILTIN_LLWPCB,
21244 IX86_BUILTIN_SLWPCB,
21245 IX86_BUILTIN_LWPVAL32,
21246 IX86_BUILTIN_LWPVAL64,
21247 IX86_BUILTIN_LWPINS32,
21248 IX86_BUILTIN_LWPINS64,
21250 IX86_BUILTIN_CLZS,
21252 IX86_BUILTIN_MAX
21255 /* Table for the ix86 builtin decls. */
21256 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
21258 /* Table of all the builtin functions that are possible with different ISAs,
21259 but are not built until a function is declared that uses that
21260 ISA. */
21261 struct builtin_isa {
21262 const char *name; /* function name */
21263 enum ix86_builtin_func_type tcode; /* type to use in the declaration */
21264 int isa; /* isa_flags this builtin is defined for */
21265 bool const_p; /* true if the declaration is constant */
21266 bool set_and_not_built_p; /* true if the declaration was deferred and has not been built yet */
21269 static struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
21272 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
21273 of which isa_flags to use in the ix86_builtins_isa array. Stores the
21274 function decl in the ix86_builtins array. Returns the function decl or
21275 NULL_TREE, if the builtin was not added.
21277 If the front end has a special hook for builtin functions, delay adding
21278 builtin functions that aren't in the current ISA until the ISA is changed
21279 with function specific optimization. Doing so can save about 300K for the
21280 default compiler. When the builtin is expanded, check at that time whether
21281 it is valid.
21283 If the front end doesn't have a special hook, record all builtins, even if
21284 they aren't in the current ISA, in case the user uses function specific
21285 options for a different ISA, so that we don't get scope
21286 errors if a builtin is added in the middle of a function scope. */
21288 static inline tree
21289 def_builtin (int mask, const char *name, enum ix86_builtin_func_type tcode,
21290 enum ix86_builtins code)
21292 tree decl = NULL_TREE;
21294 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
21296 ix86_builtins_isa[(int) code].isa = mask;
21298 if (mask == 0
21299 || (mask & ix86_isa_flags) != 0
21300 || (lang_hooks.builtin_function
21301 == lang_hooks.builtin_function_ext_scope))
21304 tree type = ix86_get_builtin_func_type (tcode);
21305 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
21306 NULL, NULL_TREE);
21307 ix86_builtins[(int) code] = decl;
21308 ix86_builtins_isa[(int) code].set_and_not_built_p = false;
21310 else
21312 ix86_builtins[(int) code] = NULL_TREE;
21313 ix86_builtins_isa[(int) code].tcode = tcode;
21314 ix86_builtins_isa[(int) code].name = name;
21315 ix86_builtins_isa[(int) code].const_p = false;
21316 ix86_builtins_isa[(int) code].set_and_not_built_p = true;
21320 return decl;
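/* The deferral policy implemented by def_builtin above is, schematically:
   build the decl now only if the builtin's ISA mask is empty, overlaps the
   currently enabled ISAs, or the front end supports extended-scope builtin
   registration; otherwise record it for ix86_add_new_builtins.  A minimal
   sketch of that predicate, with the masks passed as plain integers:  */

static int
sketch_should_build_now (int builtin_isa_mask, int enabled_isa_mask,
                         int frontend_has_ext_scope_hook)
{
  return builtin_isa_mask == 0
         || (builtin_isa_mask & enabled_isa_mask) != 0
         || frontend_has_ext_scope_hook;
}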
21323 /* Like def_builtin, but also marks the function decl "const". */
21325 static inline tree
21326 def_builtin_const (int mask, const char *name,
21327 enum ix86_builtin_func_type tcode, enum ix86_builtins code)
21329 tree decl = def_builtin (mask, name, tcode, code);
21330 if (decl)
21331 TREE_READONLY (decl) = 1;
21332 else
21333 ix86_builtins_isa[(int) code].const_p = true;
21335 return decl;
21338 /* Add any new builtin functions for a given ISA that may not have been
21339 declared. This saves a bit of space compared to adding all of the
21340 declarations to the tree, even if we didn't use them. */
21342 static void
21343 ix86_add_new_builtins (int isa)
21345 int i;
21347 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
21349 if ((ix86_builtins_isa[i].isa & isa) != 0
21350 && ix86_builtins_isa[i].set_and_not_built_p)
21352 tree decl, type;
21354 /* Don't define the builtin again. */
21355 ix86_builtins_isa[i].set_and_not_built_p = false;
21357 type = ix86_get_builtin_func_type (ix86_builtins_isa[i].tcode);
21358 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
21359 type, i, BUILT_IN_MD, NULL,
21360 NULL_TREE);
21362 ix86_builtins[i] = decl;
21363 if (ix86_builtins_isa[i].const_p)
21364 TREE_READONLY (decl) = 1;
21369 /* Bits for builtin_description.flag. */
21371 /* Set when we don't support the comparison natively, and should
21372 swap the comparison operands in order to support it. */
21373 #define BUILTIN_DESC_SWAP_OPERANDS 1
21375 struct builtin_description
21377 const unsigned int mask;
21378 const enum insn_code icode;
21379 const char *const name;
21380 const enum ix86_builtins code;
21381 const enum rtx_code comparison;
21382 const int flag;
21385 static const struct builtin_description bdesc_comi[] =
21387 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
21388 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
21389 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
21390 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
21391 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
21392 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
21393 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
21394 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
21395 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
21396 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
21397 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
21398 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
21399 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
21400 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
21401 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
21402 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
21403 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
21404 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
21405 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
21406 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
21407 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
21408 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
21409 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
21410 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
21411 };
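/* SSE4.2 string compare builtins follow.  For the entries whose FLAG field is
   a CC mode, the builtin returns the corresponding condition (e.g. the carry
   flag for CCCmode, the zero flag for CCZmode) produced by the
   pcmpestri/pcmpistri instruction.  */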
21413 static const struct builtin_description bdesc_pcmpestr[] =
21414 {
21415 /* SSE4.2 */
21416 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
21417 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
21418 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
21419 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
21420 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
21421 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
21422 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
21423 };
21425 static const struct builtin_description bdesc_pcmpistr[] =
21426 {
21427 /* SSE4.2 */
21428 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
21429 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
21430 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
21431 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
21432 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
21433 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
21434 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
21435 };
21437 /* Special builtins with variable number of arguments. */
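/* Judging from the entries, "special" builtins are the ones whose operands
   involve memory or other side effects (unaligned loads and stores,
   non-temporal stores, fences, LWP); the FLAG field holds the
   ix86_builtin_func_type giving each builtin's signature.  */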
21438 static const struct builtin_description bdesc_special_args[] =
21439 {
21440 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
21441 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
21443 /* MMX */
21444 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21446 /* 3DNow! */
21447 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
21449 /* SSE */
21450 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21451 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21452 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21454 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21455 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
21456 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21457 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
21459 /* SSE or 3DNow!A */
21460 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21461 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PULONGLONG_ULONGLONG },
21463 /* SSE2 */
21464 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21465 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
21466 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21467 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
21468 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21469 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
21470 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
21471 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
21472 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21474 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21475 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
21477 /* SSE3 */
21478 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
21480 /* SSE4.1 */
21481 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
21483 /* SSE4A */
21484 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
21485 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
21487 /* AVX */
21488 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
21489 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, "__builtin_ia32_vzeroupper", IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
21491 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4sf, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
21492 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv4df, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21493 { OPTION_MASK_ISA_AVX, CODE_FOR_vec_dupv8sf, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21494 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v4df, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
21495 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_v8sf, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
21497 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
21498 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
21499 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21500 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21501 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21502 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21503 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21505 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21506 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21507 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21509 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21510 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21511 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21512 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21513 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21514 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21515 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21516 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21518 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_llwpcb, "__builtin_ia32_llwpcb", IX86_BUILTIN_LLWPCB, UNKNOWN, (int) VOID_FTYPE_PVOID },
21519 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_slwpcb, "__builtin_ia32_slwpcb", IX86_BUILTIN_SLWPCB, UNKNOWN, (int) PVOID_FTYPE_VOID },
21520 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvalsi3, "__builtin_ia32_lwpval32", IX86_BUILTIN_LWPVAL32, UNKNOWN, (int) VOID_FTYPE_UINT_UINT_UINT },
21521 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpvaldi3, "__builtin_ia32_lwpval64", IX86_BUILTIN_LWPVAL64, UNKNOWN, (int) VOID_FTYPE_UINT64_UINT_UINT },
21522 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinssi3, "__builtin_ia32_lwpins32", IX86_BUILTIN_LWPINS32, UNKNOWN, (int) UCHAR_FTYPE_UINT_UINT_UINT },
21523 { OPTION_MASK_ISA_LWP, CODE_FOR_lwp_lwpinsdi3, "__builtin_ia32_lwpins64", IX86_BUILTIN_LWPINS64, UNKNOWN, (int) UCHAR_FTYPE_UINT64_UINT_UINT },
21525 };
21527 /* Builtins with variable number of arguments. */
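/* For these entries the FLAG field selects the ix86_builtin_func_type.  The
   suffixes on those type codes hint at special handling during expansion:
   _COUNT marks a shift-count operand, _SWAP marks comparisons implemented by
   swapping the operands (e.g. cmpgtps via LT), and _CONVERT marks operands
   that must be reinterpreted in the insn's native mode (e.g. pslldqi128,
   whose insn pattern works on V1TI).  */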
21528 static const struct builtin_description bdesc_args[] =
21529 {
21530 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21531 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21532 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21533 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21534 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21535 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21536 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21538 /* MMX */
21539 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21540 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21541 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21542 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21543 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21544 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21546 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21547 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21548 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21549 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21550 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21551 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21552 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21553 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21555 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21556 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21558 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21559 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21560 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21561 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21563 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21564 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21565 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21566 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21567 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21568 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21570 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21571 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21572 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21573 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21574 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21575 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21577 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21578 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21579 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21581 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21583 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21584 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21585 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21586 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21587 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21588 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21590 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21591 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21592 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21593 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21594 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21595 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21597 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21598 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21599 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21600 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21602 /* 3DNow! */
21603 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21604 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21605 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21606 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21608 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21609 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21610 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21611 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21612 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21613 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21614 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21615 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21616 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21617 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21618 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21619 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21620 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21621 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21622 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21624 /* 3DNow!A */
21625 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21626 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21627 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21628 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21629 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21630 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21632 /* SSE */
21633 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21634 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21635 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21636 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21637 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21638 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21639 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21640 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21641 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21642 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21643 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21644 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21646 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21648 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21649 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21650 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21651 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21652 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21653 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21654 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21655 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21657 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21658 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21659 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21660 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21661 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21662 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21663 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21664 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21665 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21666 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21667 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21668 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21669 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21670 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21671 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21672 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21673 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21674 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21675 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21676 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21677 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21678 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21680 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21681 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21682 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21683 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21685 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21686 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21687 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21688 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21690 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21692 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21693 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21694 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21695 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21696 { OPTION_MASK_ISA_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21698 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21699 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21700 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
21702 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
21704 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21705 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21706 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21708 /* SSE MMX or 3DNow!A */
21709 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21710 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21711 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21713 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21714 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21715 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21716 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21718 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
21719 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
21721 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
21723 /* SSE2 */
21724 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21726 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2df", IX86_BUILTIN_VEC_PERM_V2DF, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DI },
21727 { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4sf", IX86_BUILTIN_VEC_PERM_V4SF, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SI },
21728 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di", IX86_BUILTIN_VEC_PERM_V2DI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_V2DI },
21729 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si", IX86_BUILTIN_VEC_PERM_V4SI, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_V4SI },
21730 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi", IX86_BUILTIN_VEC_PERM_V8HI, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_V8HI },
21731 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi", IX86_BUILTIN_VEC_PERM_V16QI, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21732 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v2di_u", IX86_BUILTIN_VEC_PERM_V2DI_U, UNKNOWN, (int) V2UDI_FTYPE_V2UDI_V2UDI_V2UDI },
21733 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4si_u", IX86_BUILTIN_VEC_PERM_V4SI_U, UNKNOWN, (int) V4USI_FTYPE_V4USI_V4USI_V4USI },
21734 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8hi_u", IX86_BUILTIN_VEC_PERM_V8HI_U, UNKNOWN, (int) V8UHI_FTYPE_V8UHI_V8UHI_V8UHI },
21735 { OPTION_MASK_ISA_SSE2, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v16qi_u", IX86_BUILTIN_VEC_PERM_V16QI_U, UNKNOWN, (int) V16UQI_FTYPE_V16UQI_V16UQI_V16UQI },
21736 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v4df", IX86_BUILTIN_VEC_PERM_V4DF, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DI },
21737 { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin_ia32_vec_perm_v8sf", IX86_BUILTIN_VEC_PERM_V8SF, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SI },
21739 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
21740 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
21741 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
21742 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
21743 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21744 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21746 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21747 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21748 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
21749 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21750 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21752 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
21754 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21755 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21756 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21757 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21759 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21760 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
21761 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21763 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21764 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21765 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21766 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21767 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21768 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21769 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21770 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21772 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21773 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21774 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21775 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21776 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21777 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21778 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21779 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21780 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21781 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21782 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21783 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21784 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21785 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21786 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21787 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21788 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21789 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21790 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21791 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21793 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21794 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21795 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21796 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21798 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21799 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21800 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21801 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21803 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21805 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21806 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21807 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21809 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
21811 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21812 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21813 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21814 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21815 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21816 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21817 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21818 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21820 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21821 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21822 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21823 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21824 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21825 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21826 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21827 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21829 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21830 { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21832 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21833 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21834 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21835 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21837 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21838 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21840 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21841 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21842 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21843 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21844 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21845 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21847 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21848 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21849 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21850 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21852 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21853 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21854 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21855 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21856 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21857 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21858 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21859 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21861 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21862 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21863 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21865 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21866 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
21868 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
21869 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21871 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
21873 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
21874 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
21875 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
21876 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
21878 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlv1ti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
21879 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21880 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21881 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21882 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21883 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21884 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21886 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrv1ti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT_CONVERT },
21887 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21888 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21889 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21890 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21891 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21892 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21894 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21895 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21896 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21897 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21899 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
21900 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21901 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21903 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
21905 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
21906 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
21908 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21910 /* SSE2 MMX */
21911 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21912 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21914 /* SSE3 */
21915 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF},
21916 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21918 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21919 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21920 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21921 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21922 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21923 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21925 /* SSSE3 */
21926 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
21927 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
21928 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21929 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
21930 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
21931 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21933 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21934 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21935 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21936 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21937 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21938 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21939 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21940 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21941 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21942 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21943 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21944 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21945 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
21946 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
21947 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21948 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21949 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21950 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21951 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21952 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21953 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21954 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21955 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21956 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21958 /* SSSE3. */
21959 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT_CONVERT },
21960 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_INT_CONVERT },
21962 /* SSE4.1 */
21963 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21964 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21965 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
21966 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
21967 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21968 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21969 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21970 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
21971 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21972 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
21974 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21975 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21976 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21977 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21978 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21979 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21980 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21981 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21982 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21983 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21984 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21985 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21986 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21988 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21989 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21990 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21991 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21992 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21993 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21994 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21995 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21996 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21997 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21998 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21999 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
22001 /* SSE4.1 */
22002 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22003 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22004 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22005 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22007 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22008 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22009 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
22011 /* SSE4.2 */
22012 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22013 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
22014 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
22015 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
22016 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
22018 /* SSE4A */
22019 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
22020 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
22021 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
22022 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22024 /* AES */
22025 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
22026 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
22028 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22029 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22030 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22031 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
22033 /* PCLMUL */
22034 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
22036 /* AVX */
22037 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22038 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22039 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22040 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22041 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22042 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22043 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22044 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22045 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22046 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22047 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22048 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22049 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22050 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22051 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22052 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22053 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22054 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22055 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22056 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22057 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22058 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22059 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22060 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22061 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22062 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22064 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
22065 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
22066 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
22067 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
22069 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22070 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22071 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
22072 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
22073 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22074 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22075 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22076 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22077 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22078 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
22079 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
22080 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22081 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22082 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
22083 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
22084 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
22085 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
22086 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
22087 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
22088 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22089 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
22090 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22091 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
22092 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
22093 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
22094 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
22095 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
22096 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
22097 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
22098 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22099 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22100 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
22101 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
22102 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
22104 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22105 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22106 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22108 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
22109 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22110 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22111 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22112 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22114 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
22116 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
22117 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
22119 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22120 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
22121 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22122 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
22124 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
22125 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
22126 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
22127 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
22128 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
22129 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
22131 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22132 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22133 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
22134 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22135 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22136 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
22137 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22138 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22139 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
22140 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22141 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22142 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
22143 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22144 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22145 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
22147 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
22148 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
22150 { OPTION_MASK_ISA_ABM, CODE_FOR_clzhi2_abm, "__builtin_clzs", IX86_BUILTIN_CLZS, UNKNOWN, (int) UINT16_FTYPE_UINT16 },
22151 };
22153 /* FMA4 and XOP. */
22154 #define MULTI_ARG_4_DF2_DI_I V2DF_FTYPE_V2DF_V2DF_V2DI_INT
22155 #define MULTI_ARG_4_DF2_DI_I1 V4DF_FTYPE_V4DF_V4DF_V4DI_INT
22156 #define MULTI_ARG_4_SF2_SI_I V4SF_FTYPE_V4SF_V4SF_V4SI_INT
22157 #define MULTI_ARG_4_SF2_SI_I1 V8SF_FTYPE_V8SF_V8SF_V8SI_INT
22158 #define MULTI_ARG_3_SF V4SF_FTYPE_V4SF_V4SF_V4SF
22159 #define MULTI_ARG_3_DF V2DF_FTYPE_V2DF_V2DF_V2DF
22160 #define MULTI_ARG_3_SF2 V8SF_FTYPE_V8SF_V8SF_V8SF
22161 #define MULTI_ARG_3_DF2 V4DF_FTYPE_V4DF_V4DF_V4DF
22162 #define MULTI_ARG_3_DI V2DI_FTYPE_V2DI_V2DI_V2DI
22163 #define MULTI_ARG_3_SI V4SI_FTYPE_V4SI_V4SI_V4SI
22164 #define MULTI_ARG_3_SI_DI V4SI_FTYPE_V4SI_V4SI_V2DI
22165 #define MULTI_ARG_3_HI V8HI_FTYPE_V8HI_V8HI_V8HI
22166 #define MULTI_ARG_3_HI_SI V8HI_FTYPE_V8HI_V8HI_V4SI
22167 #define MULTI_ARG_3_QI V16QI_FTYPE_V16QI_V16QI_V16QI
22168 #define MULTI_ARG_3_DI2 V4DI_FTYPE_V4DI_V4DI_V4DI
22169 #define MULTI_ARG_3_SI2 V8SI_FTYPE_V8SI_V8SI_V8SI
22170 #define MULTI_ARG_3_HI2 V16HI_FTYPE_V16HI_V16HI_V16HI
22171 #define MULTI_ARG_3_QI2 V32QI_FTYPE_V32QI_V32QI_V32QI
22172 #define MULTI_ARG_2_SF V4SF_FTYPE_V4SF_V4SF
22173 #define MULTI_ARG_2_DF V2DF_FTYPE_V2DF_V2DF
22174 #define MULTI_ARG_2_DI V2DI_FTYPE_V2DI_V2DI
22175 #define MULTI_ARG_2_SI V4SI_FTYPE_V4SI_V4SI
22176 #define MULTI_ARG_2_HI V8HI_FTYPE_V8HI_V8HI
22177 #define MULTI_ARG_2_QI V16QI_FTYPE_V16QI_V16QI
22178 #define MULTI_ARG_2_DI_IMM V2DI_FTYPE_V2DI_SI
22179 #define MULTI_ARG_2_SI_IMM V4SI_FTYPE_V4SI_SI
22180 #define MULTI_ARG_2_HI_IMM V8HI_FTYPE_V8HI_SI
22181 #define MULTI_ARG_2_QI_IMM V16QI_FTYPE_V16QI_SI
22182 #define MULTI_ARG_2_DI_CMP V2DI_FTYPE_V2DI_V2DI_CMP
22183 #define MULTI_ARG_2_SI_CMP V4SI_FTYPE_V4SI_V4SI_CMP
22184 #define MULTI_ARG_2_HI_CMP V8HI_FTYPE_V8HI_V8HI_CMP
22185 #define MULTI_ARG_2_QI_CMP V16QI_FTYPE_V16QI_V16QI_CMP
22186 #define MULTI_ARG_2_SF_TF V4SF_FTYPE_V4SF_V4SF_TF
22187 #define MULTI_ARG_2_DF_TF V2DF_FTYPE_V2DF_V2DF_TF
22188 #define MULTI_ARG_2_DI_TF V2DI_FTYPE_V2DI_V2DI_TF
22189 #define MULTI_ARG_2_SI_TF V4SI_FTYPE_V4SI_V4SI_TF
22190 #define MULTI_ARG_2_HI_TF V8HI_FTYPE_V8HI_V8HI_TF
22191 #define MULTI_ARG_2_QI_TF V16QI_FTYPE_V16QI_V16QI_TF
22192 #define MULTI_ARG_1_SF V4SF_FTYPE_V4SF
22193 #define MULTI_ARG_1_DF V2DF_FTYPE_V2DF
22194 #define MULTI_ARG_1_SF2 V8SF_FTYPE_V8SF
22195 #define MULTI_ARG_1_DF2 V4DF_FTYPE_V4DF
22196 #define MULTI_ARG_1_DI V2DI_FTYPE_V2DI
22197 #define MULTI_ARG_1_SI V4SI_FTYPE_V4SI
22198 #define MULTI_ARG_1_HI V8HI_FTYPE_V8HI
22199 #define MULTI_ARG_1_QI V16QI_FTYPE_V16QI
22200 #define MULTI_ARG_1_SI_DI V2DI_FTYPE_V4SI
22201 #define MULTI_ARG_1_HI_DI V2DI_FTYPE_V8HI
22202 #define MULTI_ARG_1_HI_SI V4SI_FTYPE_V8HI
22203 #define MULTI_ARG_1_QI_DI V2DI_FTYPE_V16QI
22204 #define MULTI_ARG_1_QI_SI V4SI_FTYPE_V16QI
22205 #define MULTI_ARG_1_QI_HI V8HI_FTYPE_V16QI
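/* The MULTI_ARG_* names above are only shorthand aliases for
   ix86_builtin_func_type values; the bdesc_multi_arg entries below
   store them (cast to int) in the same flag field that the other
   builtin description tables use.  */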
22207 static const struct builtin_description bdesc_multi_arg[] =
22208 {
22209 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv4sf4, "__builtin_ia32_vfmaddss", IX86_BUILTIN_VFMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22210 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmaddv2df4, "__builtin_ia32_vfmaddsd", IX86_BUILTIN_VFMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22211 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4sf4, "__builtin_ia32_vfmaddps", IX86_BUILTIN_VFMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22212 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv2df4, "__builtin_ia32_vfmaddpd", IX86_BUILTIN_VFMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22213 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv4sf4, "__builtin_ia32_vfmsubss", IX86_BUILTIN_VFMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22214 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfmsubv2df4, "__builtin_ia32_vfmsubsd", IX86_BUILTIN_VFMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22215 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4sf4, "__builtin_ia32_vfmsubps", IX86_BUILTIN_VFMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22216 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv2df4, "__builtin_ia32_vfmsubpd", IX86_BUILTIN_VFMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22218 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv4sf4, "__builtin_ia32_vfnmaddss", IX86_BUILTIN_VFNMADDSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22219 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmaddv2df4, "__builtin_ia32_vfnmaddsd", IX86_BUILTIN_VFNMADDSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22220 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4sf4, "__builtin_ia32_vfnmaddps", IX86_BUILTIN_VFNMADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22221 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv2df4, "__builtin_ia32_vfnmaddpd", IX86_BUILTIN_VFNMADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22222 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv4sf4, "__builtin_ia32_vfnmsubss", IX86_BUILTIN_VFNMSUBSS, UNKNOWN, (int)MULTI_ARG_3_SF },
22223 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_vmfnmsubv2df4, "__builtin_ia32_vfnmsubsd", IX86_BUILTIN_VFNMSUBSD, UNKNOWN, (int)MULTI_ARG_3_DF },
22224 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4sf4, "__builtin_ia32_vfnmsubps", IX86_BUILTIN_VFNMSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22225 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv2df4, "__builtin_ia32_vfnmsubpd", IX86_BUILTIN_VFNMSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22227 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4sf4, "__builtin_ia32_vfmaddsubps", IX86_BUILTIN_VFMADDSUBPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22228 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv2df4, "__builtin_ia32_vfmaddsubpd", IX86_BUILTIN_VFMADDSUBPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22229 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4sf4, "__builtin_ia32_vfmsubaddps", IX86_BUILTIN_VFMSUBADDPS, UNKNOWN, (int)MULTI_ARG_3_SF },
22230 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv2df4, "__builtin_ia32_vfmsubaddpd", IX86_BUILTIN_VFMSUBADDPD, UNKNOWN, (int)MULTI_ARG_3_DF },
22232 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv8sf4256, "__builtin_ia32_vfmaddps256", IX86_BUILTIN_VFMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22233 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddv4df4256, "__builtin_ia32_vfmaddpd256", IX86_BUILTIN_VFMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22234 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv8sf4256, "__builtin_ia32_vfmsubps256", IX86_BUILTIN_VFMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22235 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubv4df4256, "__builtin_ia32_vfmsubpd256", IX86_BUILTIN_VFMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22237 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv8sf4256, "__builtin_ia32_vfnmaddps256", IX86_BUILTIN_VFNMADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22238 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmaddv4df4256, "__builtin_ia32_vfnmaddpd256", IX86_BUILTIN_VFNMADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22239 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv8sf4256, "__builtin_ia32_vfnmsubps256", IX86_BUILTIN_VFNMSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22240 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fnmsubv4df4256, "__builtin_ia32_vfnmsubpd256", IX86_BUILTIN_VFNMSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22242 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv8sf4, "__builtin_ia32_vfmaddsubps256", IX86_BUILTIN_VFMADDSUBPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22243 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmaddsubv4df4, "__builtin_ia32_vfmaddsubpd256", IX86_BUILTIN_VFMADDSUBPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22244 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv8sf4, "__builtin_ia32_vfmsubaddps256", IX86_BUILTIN_VFMSUBADDPS256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22245 { OPTION_MASK_ISA_FMA4, CODE_FOR_fma4i_fmsubaddv4df4, "__builtin_ia32_vfmsubaddpd256", IX86_BUILTIN_VFMSUBADDPD256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22247 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov", IX86_BUILTIN_VPCMOV, UNKNOWN, (int)MULTI_ARG_3_DI },
22248 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2di, "__builtin_ia32_vpcmov_v2di", IX86_BUILTIN_VPCMOV_V2DI, UNKNOWN, (int)MULTI_ARG_3_DI },
22249 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4si, "__builtin_ia32_vpcmov_v4si", IX86_BUILTIN_VPCMOV_V4SI, UNKNOWN, (int)MULTI_ARG_3_SI },
22250 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8hi, "__builtin_ia32_vpcmov_v8hi", IX86_BUILTIN_VPCMOV_V8HI, UNKNOWN, (int)MULTI_ARG_3_HI },
22251 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16qi, "__builtin_ia32_vpcmov_v16qi",IX86_BUILTIN_VPCMOV_V16QI,UNKNOWN, (int)MULTI_ARG_3_QI },
22252 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v2df, "__builtin_ia32_vpcmov_v2df", IX86_BUILTIN_VPCMOV_V2DF, UNKNOWN, (int)MULTI_ARG_3_DF },
22253 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4sf, "__builtin_ia32_vpcmov_v4sf", IX86_BUILTIN_VPCMOV_V4SF, UNKNOWN, (int)MULTI_ARG_3_SF },
22255 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov256", IX86_BUILTIN_VPCMOV256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22256 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4di256, "__builtin_ia32_vpcmov_v4di256", IX86_BUILTIN_VPCMOV_V4DI256, UNKNOWN, (int)MULTI_ARG_3_DI2 },
22257 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8si256, "__builtin_ia32_vpcmov_v8si256", IX86_BUILTIN_VPCMOV_V8SI256, UNKNOWN, (int)MULTI_ARG_3_SI2 },
22258 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v16hi256, "__builtin_ia32_vpcmov_v16hi256", IX86_BUILTIN_VPCMOV_V16HI256, UNKNOWN, (int)MULTI_ARG_3_HI2 },
22259 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v32qi256, "__builtin_ia32_vpcmov_v32qi256", IX86_BUILTIN_VPCMOV_V32QI256, UNKNOWN, (int)MULTI_ARG_3_QI2 },
22260 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v4df256, "__builtin_ia32_vpcmov_v4df256", IX86_BUILTIN_VPCMOV_V4DF256, UNKNOWN, (int)MULTI_ARG_3_DF2 },
22261 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcmov_v8sf256, "__builtin_ia32_vpcmov_v8sf256", IX86_BUILTIN_VPCMOV_V8SF256, UNKNOWN, (int)MULTI_ARG_3_SF2 },
22263 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pperm, "__builtin_ia32_vpperm", IX86_BUILTIN_VPPERM, UNKNOWN, (int)MULTI_ARG_3_QI },
22265 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssww, "__builtin_ia32_vpmacssww", IX86_BUILTIN_VPMACSSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22266 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsww, "__builtin_ia32_vpmacsww", IX86_BUILTIN_VPMACSWW, UNKNOWN, (int)MULTI_ARG_3_HI },
22267 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsswd, "__builtin_ia32_vpmacsswd", IX86_BUILTIN_VPMACSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22268 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacswd, "__builtin_ia32_vpmacswd", IX86_BUILTIN_VPMACSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22269 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdd, "__builtin_ia32_vpmacssdd", IX86_BUILTIN_VPMACSSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22270 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdd, "__builtin_ia32_vpmacsdd", IX86_BUILTIN_VPMACSDD, UNKNOWN, (int)MULTI_ARG_3_SI },
22271 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdql, "__builtin_ia32_vpmacssdql", IX86_BUILTIN_VPMACSSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22272 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacssdqh, "__builtin_ia32_vpmacssdqh", IX86_BUILTIN_VPMACSSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22273 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdql, "__builtin_ia32_vpmacsdql", IX86_BUILTIN_VPMACSDQL, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22274 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmacsdqh, "__builtin_ia32_vpmacsdqh", IX86_BUILTIN_VPMACSDQH, UNKNOWN, (int)MULTI_ARG_3_SI_DI },
22275 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcsswd, "__builtin_ia32_vpmadcsswd", IX86_BUILTIN_VPMADCSSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22276 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pmadcswd, "__builtin_ia32_vpmadcswd", IX86_BUILTIN_VPMADCSWD, UNKNOWN, (int)MULTI_ARG_3_HI_SI },
22278 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv2di3, "__builtin_ia32_vprotq", IX86_BUILTIN_VPROTQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22279 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv4si3, "__builtin_ia32_vprotd", IX86_BUILTIN_VPROTD, UNKNOWN, (int)MULTI_ARG_2_SI },
22280 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv8hi3, "__builtin_ia32_vprotw", IX86_BUILTIN_VPROTW, UNKNOWN, (int)MULTI_ARG_2_HI },
22281 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vrotlv16qi3, "__builtin_ia32_vprotb", IX86_BUILTIN_VPROTB, UNKNOWN, (int)MULTI_ARG_2_QI },
22282 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv2di3, "__builtin_ia32_vprotqi", IX86_BUILTIN_VPROTQ_IMM, UNKNOWN, (int)MULTI_ARG_2_DI_IMM },
22283 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv4si3, "__builtin_ia32_vprotdi", IX86_BUILTIN_VPROTD_IMM, UNKNOWN, (int)MULTI_ARG_2_SI_IMM },
22284 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv8hi3, "__builtin_ia32_vprotwi", IX86_BUILTIN_VPROTW_IMM, UNKNOWN, (int)MULTI_ARG_2_HI_IMM },
22285 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_rotlv16qi3, "__builtin_ia32_vprotbi", IX86_BUILTIN_VPROTB_IMM, UNKNOWN, (int)MULTI_ARG_2_QI_IMM },
22286 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv2di3, "__builtin_ia32_vpshaq", IX86_BUILTIN_VPSHAQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22287 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv4si3, "__builtin_ia32_vpshad", IX86_BUILTIN_VPSHAD, UNKNOWN, (int)MULTI_ARG_2_SI },
22288 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv8hi3, "__builtin_ia32_vpshaw", IX86_BUILTIN_VPSHAW, UNKNOWN, (int)MULTI_ARG_2_HI },
22289 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_ashlv16qi3, "__builtin_ia32_vpshab", IX86_BUILTIN_VPSHAB, UNKNOWN, (int)MULTI_ARG_2_QI },
22290 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv2di3, "__builtin_ia32_vpshlq", IX86_BUILTIN_VPSHLQ, UNKNOWN, (int)MULTI_ARG_2_DI },
22291 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv4si3, "__builtin_ia32_vpshld", IX86_BUILTIN_VPSHLD, UNKNOWN, (int)MULTI_ARG_2_SI },
22292 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv8hi3, "__builtin_ia32_vpshlw", IX86_BUILTIN_VPSHLW, UNKNOWN, (int)MULTI_ARG_2_HI },
22293 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_lshlv16qi3, "__builtin_ia32_vpshlb", IX86_BUILTIN_VPSHLB, UNKNOWN, (int)MULTI_ARG_2_QI },
22295 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv4sf2, "__builtin_ia32_vfrczss", IX86_BUILTIN_VFRCZSS, UNKNOWN, (int)MULTI_ARG_2_SF },
22296 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_vmfrczv2df2, "__builtin_ia32_vfrczsd", IX86_BUILTIN_VFRCZSD, UNKNOWN, (int)MULTI_ARG_2_DF },
22297 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4sf2, "__builtin_ia32_vfrczps", IX86_BUILTIN_VFRCZPS, UNKNOWN, (int)MULTI_ARG_1_SF },
22298 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv2df2, "__builtin_ia32_vfrczpd", IX86_BUILTIN_VFRCZPD, UNKNOWN, (int)MULTI_ARG_1_DF },
22299 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv8sf2256, "__builtin_ia32_vfrczps256", IX86_BUILTIN_VFRCZPS256, UNKNOWN, (int)MULTI_ARG_1_SF2 },
22300 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_frczv4df2256, "__builtin_ia32_vfrczpd256", IX86_BUILTIN_VFRCZPD256, UNKNOWN, (int)MULTI_ARG_1_DF2 },
22302 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbw, "__builtin_ia32_vphaddbw", IX86_BUILTIN_VPHADDBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22303 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbd, "__builtin_ia32_vphaddbd", IX86_BUILTIN_VPHADDBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22304 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddbq, "__builtin_ia32_vphaddbq", IX86_BUILTIN_VPHADDBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22305 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwd, "__builtin_ia32_vphaddwd", IX86_BUILTIN_VPHADDWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22306 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddwq, "__builtin_ia32_vphaddwq", IX86_BUILTIN_VPHADDWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22307 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadddq, "__builtin_ia32_vphadddq", IX86_BUILTIN_VPHADDDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22308 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubw, "__builtin_ia32_vphaddubw", IX86_BUILTIN_VPHADDUBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22309 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubd, "__builtin_ia32_vphaddubd", IX86_BUILTIN_VPHADDUBD, UNKNOWN, (int)MULTI_ARG_1_QI_SI },
22310 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddubq, "__builtin_ia32_vphaddubq", IX86_BUILTIN_VPHADDUBQ, UNKNOWN, (int)MULTI_ARG_1_QI_DI },
22311 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwd, "__builtin_ia32_vphadduwd", IX86_BUILTIN_VPHADDUWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22312 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phadduwq, "__builtin_ia32_vphadduwq", IX86_BUILTIN_VPHADDUWQ, UNKNOWN, (int)MULTI_ARG_1_HI_DI },
22313 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phaddudq, "__builtin_ia32_vphaddudq", IX86_BUILTIN_VPHADDUDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22314 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubbw, "__builtin_ia32_vphsubbw", IX86_BUILTIN_VPHSUBBW, UNKNOWN, (int)MULTI_ARG_1_QI_HI },
22315 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubwd, "__builtin_ia32_vphsubwd", IX86_BUILTIN_VPHSUBWD, UNKNOWN, (int)MULTI_ARG_1_HI_SI },
22316 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_phsubdq, "__builtin_ia32_vphsubdq", IX86_BUILTIN_VPHSUBDQ, UNKNOWN, (int)MULTI_ARG_1_SI_DI },
22318 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomeqb", IX86_BUILTIN_VPCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
22319 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22320 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomneqb", IX86_BUILTIN_VPCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
22321 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomltb", IX86_BUILTIN_VPCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
22322 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomleb", IX86_BUILTIN_VPCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
22323 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgtb", IX86_BUILTIN_VPCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
22324 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv16qi3, "__builtin_ia32_vpcomgeb", IX86_BUILTIN_VPCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
22326 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomeqw", IX86_BUILTIN_VPCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
22327 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomnew", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22328 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomneqw", IX86_BUILTIN_VPCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
22329 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomltw", IX86_BUILTIN_VPCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
22330 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomlew", IX86_BUILTIN_VPCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
22331 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgtw", IX86_BUILTIN_VPCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
22332 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv8hi3, "__builtin_ia32_vpcomgew", IX86_BUILTIN_VPCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
22334 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomeqd", IX86_BUILTIN_VPCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
22335 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomned", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22336 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomneqd", IX86_BUILTIN_VPCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
22337 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomltd", IX86_BUILTIN_VPCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
22338 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomled", IX86_BUILTIN_VPCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
22339 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomgtd", IX86_BUILTIN_VPCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
22340 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv4si3, "__builtin_ia32_vpcomged", IX86_BUILTIN_VPCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
22342 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomeqq", IX86_BUILTIN_VPCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22343 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22344 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomneqq", IX86_BUILTIN_VPCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
22345 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomltq", IX86_BUILTIN_VPCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
22346 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomleq", IX86_BUILTIN_VPCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
22347 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgtq", IX86_BUILTIN_VPCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
22348 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmpv2di3, "__builtin_ia32_vpcomgeq", IX86_BUILTIN_VPCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
22350 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomequb", IX86_BUILTIN_VPCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
22351 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomneub", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22352 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v16qi3,"__builtin_ia32_vpcomnequb", IX86_BUILTIN_VPCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
22353 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomltub", IX86_BUILTIN_VPCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
22354 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomleub", IX86_BUILTIN_VPCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
22355 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgtub", IX86_BUILTIN_VPCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
22356 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv16qi3, "__builtin_ia32_vpcomgeub", IX86_BUILTIN_VPCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
22358 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomequw", IX86_BUILTIN_VPCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
22359 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomneuw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22360 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v8hi3, "__builtin_ia32_vpcomnequw", IX86_BUILTIN_VPCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
22361 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomltuw", IX86_BUILTIN_VPCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
22362 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomleuw", IX86_BUILTIN_VPCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
22363 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgtuw", IX86_BUILTIN_VPCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
22364 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv8hi3, "__builtin_ia32_vpcomgeuw", IX86_BUILTIN_VPCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
22366 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomequd", IX86_BUILTIN_VPCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
22367 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomneud", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22368 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v4si3, "__builtin_ia32_vpcomnequd", IX86_BUILTIN_VPCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
22369 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomltud", IX86_BUILTIN_VPCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
22370 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomleud", IX86_BUILTIN_VPCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
22371 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgtud", IX86_BUILTIN_VPCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
22372 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv4si3, "__builtin_ia32_vpcomgeud", IX86_BUILTIN_VPCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
22374 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomequq", IX86_BUILTIN_VPCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
22375 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomneuq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22376 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_uns2v2di3, "__builtin_ia32_vpcomnequq", IX86_BUILTIN_VPCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
22377 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomltuq", IX86_BUILTIN_VPCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
22378 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomleuq", IX86_BUILTIN_VPCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
22379 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgtuq", IX86_BUILTIN_VPCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
22380 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_maskcmp_unsv2di3, "__builtin_ia32_vpcomgeuq", IX86_BUILTIN_VPCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
22382 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseb", IX86_BUILTIN_VPCOMFALSEB, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22383 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalsew", IX86_BUILTIN_VPCOMFALSEW, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22384 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalsed", IX86_BUILTIN_VPCOMFALSED, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22385 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseq", IX86_BUILTIN_VPCOMFALSEQ, (enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22386 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomfalseub",IX86_BUILTIN_VPCOMFALSEUB,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
22387 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomfalseuw",IX86_BUILTIN_VPCOMFALSEUW,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
22388 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomfalseud",IX86_BUILTIN_VPCOMFALSEUD,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
22389 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomfalseuq",IX86_BUILTIN_VPCOMFALSEUQ,(enum rtx_code) PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
22391 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueb", IX86_BUILTIN_VPCOMTRUEB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22392 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtruew", IX86_BUILTIN_VPCOMTRUEW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22393 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrued", IX86_BUILTIN_VPCOMTRUED, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22394 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueq", IX86_BUILTIN_VPCOMTRUEQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22395 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv16qi3, "__builtin_ia32_vpcomtrueub", IX86_BUILTIN_VPCOMTRUEUB, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
22396 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv8hi3, "__builtin_ia32_vpcomtrueuw", IX86_BUILTIN_VPCOMTRUEUW, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
22397 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv4si3, "__builtin_ia32_vpcomtrueud", IX86_BUILTIN_VPCOMTRUEUD, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
22398 { OPTION_MASK_ISA_XOP, CODE_FOR_xop_pcom_tfv2di3, "__builtin_ia32_vpcomtrueuq", IX86_BUILTIN_VPCOMTRUEUQ, (enum rtx_code) PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
22400 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v2df3, "__builtin_ia32_vpermil2pd", IX86_BUILTIN_VPERMIL2PD, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I },
22401 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v4sf3, "__builtin_ia32_vpermil2ps", IX86_BUILTIN_VPERMIL2PS, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I },
22402 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v4df3, "__builtin_ia32_vpermil2pd256", IX86_BUILTIN_VPERMIL2PD256, UNKNOWN, (int)MULTI_ARG_4_DF2_DI_I1 },
22403 { OPTION_MASK_ISA_AVX, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
22407 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
22408 not in the current target ISA, so that the user can compile particular
22409 modules with target-specific options that differ from the command-line
22410 options. */
22411 static void
22412 ix86_init_mmx_sse_builtins (void)
22414 const struct builtin_description * d;
22415 enum ix86_builtin_func_type ftype;
22416 size_t i;
22418 /* Add all special builtins with a variable number of operands. */
22419 for (i = 0, d = bdesc_special_args;
22420 i < ARRAY_SIZE (bdesc_special_args);
22421 i++, d++)
22423 if (d->name == 0)
22424 continue;
22426 ftype = (enum ix86_builtin_func_type) d->flag;
22427 def_builtin (d->mask, d->name, ftype, d->code);
22430 /* Add all builtins with a variable number of operands. */
22431 for (i = 0, d = bdesc_args;
22432 i < ARRAY_SIZE (bdesc_args);
22433 i++, d++)
22435 if (d->name == 0)
22436 continue;
22438 ftype = (enum ix86_builtin_func_type) d->flag;
22439 def_builtin_const (d->mask, d->name, ftype, d->code);
22442 /* pcmpestr[im] insns. */
22443 for (i = 0, d = bdesc_pcmpestr;
22444 i < ARRAY_SIZE (bdesc_pcmpestr);
22445 i++, d++)
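/* The pcmpestrm form returns the byte/word mask in an XMM register;
   the index and flag-testing forms return an int. */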
22447 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22448 ftype = V16QI_FTYPE_V16QI_INT_V16QI_INT_INT;
22449 else
22450 ftype = INT_FTYPE_V16QI_INT_V16QI_INT_INT;
22451 def_builtin_const (d->mask, d->name, ftype, d->code);
22454 /* pcmpistr[im] insns. */
22455 for (i = 0, d = bdesc_pcmpistr;
22456 i < ARRAY_SIZE (bdesc_pcmpistr);
22457 i++, d++)
22459 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22460 ftype = V16QI_FTYPE_V16QI_V16QI_INT;
22461 else
22462 ftype = INT_FTYPE_V16QI_V16QI_INT;
22463 def_builtin_const (d->mask, d->name, ftype, d->code);
22466 /* comi/ucomi insns. */
22467 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
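/* SSE2 comi/ucomi builtins compare scalar doubles (V2DF arguments);
   the plain SSE ones compare scalar floats (V4SF). */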
22469 if (d->mask == OPTION_MASK_ISA_SSE2)
22470 ftype = INT_FTYPE_V2DF_V2DF;
22471 else
22472 ftype = INT_FTYPE_V4SF_V4SF;
22473 def_builtin_const (d->mask, d->name, ftype, d->code);
22476 /* SSE */
22477 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr",
22478 VOID_FTYPE_UNSIGNED, IX86_BUILTIN_LDMXCSR);
22479 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr",
22480 UNSIGNED_FTYPE_VOID, IX86_BUILTIN_STMXCSR);
22482 /* SSE or 3DNow!A */
22483 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22484 "__builtin_ia32_maskmovq", VOID_FTYPE_V8QI_V8QI_PCHAR,
22485 IX86_BUILTIN_MASKMOVQ);
22487 /* SSE2 */
22488 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu",
22489 VOID_FTYPE_V16QI_V16QI_PCHAR, IX86_BUILTIN_MASKMOVDQU);
22491 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush",
22492 VOID_FTYPE_PCVOID, IX86_BUILTIN_CLFLUSH);
22493 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence",
22494 VOID_FTYPE_VOID, IX86_BUILTIN_MFENCE);
22496 /* SSE3. */
22497 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor",
22498 VOID_FTYPE_PCVOID_UNSIGNED_UNSIGNED, IX86_BUILTIN_MONITOR);
22499 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait",
22500 VOID_FTYPE_UNSIGNED_UNSIGNED, IX86_BUILTIN_MWAIT);
22502 /* AES */
22503 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128",
22504 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENC128);
22505 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128",
22506 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESENCLAST128);
22507 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128",
22508 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDEC128);
22509 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128",
22510 V2DI_FTYPE_V2DI_V2DI, IX86_BUILTIN_AESDECLAST128);
22511 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128",
22512 V2DI_FTYPE_V2DI, IX86_BUILTIN_AESIMC128);
22513 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128",
22514 V2DI_FTYPE_V2DI_INT, IX86_BUILTIN_AESKEYGENASSIST128);
22516 /* PCLMUL */
22517 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128",
22518 V2DI_FTYPE_V2DI_V2DI_INT, IX86_BUILTIN_PCLMULQDQ128);
22520 /* MMX access to the vec_init patterns. */
22521 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si",
22522 V2SI_FTYPE_INT_INT, IX86_BUILTIN_VEC_INIT_V2SI);
22524 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi",
22525 V4HI_FTYPE_HI_HI_HI_HI,
22526 IX86_BUILTIN_VEC_INIT_V4HI);
22528 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi",
22529 V8QI_FTYPE_QI_QI_QI_QI_QI_QI_QI_QI,
22530 IX86_BUILTIN_VEC_INIT_V8QI);
22532 /* Access to the vec_extract patterns. */
22533 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df",
22534 DOUBLE_FTYPE_V2DF_INT, IX86_BUILTIN_VEC_EXT_V2DF);
22535 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di",
22536 DI_FTYPE_V2DI_INT, IX86_BUILTIN_VEC_EXT_V2DI);
22537 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf",
22538 FLOAT_FTYPE_V4SF_INT, IX86_BUILTIN_VEC_EXT_V4SF);
22539 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si",
22540 SI_FTYPE_V4SI_INT, IX86_BUILTIN_VEC_EXT_V4SI);
22541 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi",
22542 HI_FTYPE_V8HI_INT, IX86_BUILTIN_VEC_EXT_V8HI);
22544 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22545 "__builtin_ia32_vec_ext_v4hi",
22546 HI_FTYPE_V4HI_INT, IX86_BUILTIN_VEC_EXT_V4HI);
22548 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si",
22549 SI_FTYPE_V2SI_INT, IX86_BUILTIN_VEC_EXT_V2SI);
22551 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi",
22552 QI_FTYPE_V16QI_INT, IX86_BUILTIN_VEC_EXT_V16QI);
22554 /* Access to the vec_set patterns. */
22555 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT,
22556 "__builtin_ia32_vec_set_v2di",
22557 V2DI_FTYPE_V2DI_DI_INT, IX86_BUILTIN_VEC_SET_V2DI);
22559 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf",
22560 V4SF_FTYPE_V4SF_FLOAT_INT, IX86_BUILTIN_VEC_SET_V4SF);
22562 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si",
22563 V4SI_FTYPE_V4SI_SI_INT, IX86_BUILTIN_VEC_SET_V4SI);
22565 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi",
22566 V8HI_FTYPE_V8HI_HI_INT, IX86_BUILTIN_VEC_SET_V8HI);
22568 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A,
22569 "__builtin_ia32_vec_set_v4hi",
22570 V4HI_FTYPE_V4HI_HI_INT, IX86_BUILTIN_VEC_SET_V4HI);
22572 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi",
22573 V16QI_FTYPE_V16QI_QI_INT, IX86_BUILTIN_VEC_SET_V16QI);
22575 /* Add FMA4 multi-argument instructions. */
22576 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22578 if (d->name == 0)
22579 continue;
22581 ftype = (enum ix86_builtin_func_type) d->flag;
22582 def_builtin_const (d->mask, d->name, ftype, d->code);
22586 /* Internal helper for ix86_init_builtins: register the ms_abi and sysv_abi va_start/va_end/va_copy builtins (64-bit targets only). */
22588 static void
22589 ix86_init_builtins_va_builtins_abi (void)
22591 tree ms_va_ref, sysv_va_ref;
22592 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
22593 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
22594 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
22595 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
22597 if (!TARGET_64BIT)
22598 return;
22599 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
22600 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
22601 ms_va_ref = build_reference_type (ms_va_list_type_node);
22602 sysv_va_ref =
22603 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
22605 fnvoid_va_end_ms =
22606 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22607 fnvoid_va_start_ms =
22608 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
22609 fnvoid_va_end_sysv =
22610 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
22611 fnvoid_va_start_sysv =
22612 build_varargs_function_type_list (void_type_node, sysv_va_ref,
22613 NULL_TREE);
22614 fnvoid_va_copy_ms =
22615 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
22616 NULL_TREE);
22617 fnvoid_va_copy_sysv =
22618 build_function_type_list (void_type_node, sysv_va_ref,
22619 sysv_va_ref, NULL_TREE);
22621 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
22622 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
22623 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
22624 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
22625 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
22626 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
22627 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
22628 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22629 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
22630 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22631 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
22632 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
22635 static void
22636 ix86_init_builtin_types (void)
22638 tree float128_type_node, float80_type_node;
22640 /* The __float80 type. */
22641 float80_type_node = long_double_type_node;
22642 if (TYPE_MODE (float80_type_node) != XFmode)
22644 /* long double is not the 80-bit extended type here, so build a distinct 80-bit REAL_TYPE for __float80. */
22645 float80_type_node = make_node (REAL_TYPE);
22647 TYPE_PRECISION (float80_type_node) = 80;
22648 layout_type (float80_type_node);
22650 (*lang_hooks.types.register_builtin_type) (float80_type_node, "__float80");
22652 /* The __float128 type. */
22653 float128_type_node = make_node (REAL_TYPE);
22654 TYPE_PRECISION (float128_type_node) = 128;
22655 layout_type (float128_type_node);
22656 (*lang_hooks.types.register_builtin_type) (float128_type_node, "__float128");
22658 /* This macro is built by i386-builtin-types.awk. */
22659 DEFINE_BUILTIN_PRIMITIVE_TYPES;
22662 static void
22663 ix86_init_builtins (void)
22665 tree t;
22667 ix86_init_builtin_types ();
22669 /* TFmode support builtins. */
22670 def_builtin_const (0, "__builtin_infq",
22671 FLOAT128_FTYPE_VOID, IX86_BUILTIN_INFQ);
22672 def_builtin_const (0, "__builtin_huge_valq",
22673 FLOAT128_FTYPE_VOID, IX86_BUILTIN_HUGE_VALQ);
22675 /* We will expand them to normal call if SSE2 isn't available since
22676 they are used by libgcc. */
22677 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128);
22678 t = add_builtin_function ("__builtin_fabsq", t, IX86_BUILTIN_FABSQ,
22679 BUILT_IN_MD, "__fabstf2", NULL_TREE);
22680 TREE_READONLY (t) = 1;
22681 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = t;
22683 t = ix86_get_builtin_func_type (FLOAT128_FTYPE_FLOAT128_FLOAT128);
22684 t = add_builtin_function ("__builtin_copysignq", t, IX86_BUILTIN_COPYSIGNQ,
22685 BUILT_IN_MD, "__copysigntf3", NULL_TREE);
22686 TREE_READONLY (t) = 1;
22687 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
22689 ix86_init_mmx_sse_builtins ();
22691 if (TARGET_64BIT)
22692 ix86_init_builtins_va_builtins_abi ();
22695 /* Return the ix86 builtin for CODE. */
22697 static tree
22698 ix86_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
22700 if (code >= IX86_BUILTIN_MAX)
22701 return error_mark_node;
22703 return ix86_builtins[code];
22706 /* Errors in the source file can cause expand_expr to return const0_rtx
22707 where we expect a vector. To avoid crashing, use one of the vector
22708 clear instructions. */
22709 static rtx
22710 safe_vector_operand (rtx x, enum machine_mode mode)
22712 if (x == const0_rtx)
22713 x = CONST0_RTX (mode);
22714 return x;
22717 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
22719 static rtx
22720 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
22722 rtx pat;
22723 tree arg0 = CALL_EXPR_ARG (exp, 0);
22724 tree arg1 = CALL_EXPR_ARG (exp, 1);
22725 rtx op0 = expand_normal (arg0);
22726 rtx op1 = expand_normal (arg1);
22727 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22728 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22729 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
22731 if (VECTOR_MODE_P (mode0))
22732 op0 = safe_vector_operand (op0, mode0);
22733 if (VECTOR_MODE_P (mode1))
22734 op1 = safe_vector_operand (op1, mode1);
22736 if (optimize || !target
22737 || GET_MODE (target) != tmode
22738 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22739 target = gen_reg_rtx (tmode);
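/* The insn wants a TImode operand but the argument expanded to an SImode
   value: load it into the low element of a V4SImode register (zeroing the
   rest) and view that register as TImode. */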
22741 if (GET_MODE (op1) == SImode && mode1 == TImode)
22743 rtx x = gen_reg_rtx (V4SImode);
22744 emit_insn (gen_sse2_loadd (x, op1));
22745 op1 = gen_lowpart (TImode, x);
22748 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
22749 op0 = copy_to_mode_reg (mode0, op0);
22750 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
22751 op1 = copy_to_mode_reg (mode1, op1);
22753 pat = GEN_FCN (icode) (target, op0, op1);
22754 if (! pat)
22755 return 0;
22757 emit_insn (pat);
22759 return target;
22762 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
22764 static rtx
22765 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
22766 enum ix86_builtin_func_type m_type,
22767 enum rtx_code sub_code)
22769 rtx pat;
22770 int i;
22771 int nargs;
22772 bool comparison_p = false;
22773 bool tf_p = false;
22774 bool last_arg_constant = false;
22775 int num_memory = 0;
22776 struct {
22777 rtx op;
22778 enum machine_mode mode;
22779 } args[4];
22781 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22783 switch (m_type)
22785 case MULTI_ARG_4_DF2_DI_I:
22786 case MULTI_ARG_4_DF2_DI_I1:
22787 case MULTI_ARG_4_SF2_SI_I:
22788 case MULTI_ARG_4_SF2_SI_I1:
22789 nargs = 4;
22790 last_arg_constant = true;
22791 break;
22793 case MULTI_ARG_3_SF:
22794 case MULTI_ARG_3_DF:
22795 case MULTI_ARG_3_SF2:
22796 case MULTI_ARG_3_DF2:
22797 case MULTI_ARG_3_DI:
22798 case MULTI_ARG_3_SI:
22799 case MULTI_ARG_3_SI_DI:
22800 case MULTI_ARG_3_HI:
22801 case MULTI_ARG_3_HI_SI:
22802 case MULTI_ARG_3_QI:
22803 case MULTI_ARG_3_DI2:
22804 case MULTI_ARG_3_SI2:
22805 case MULTI_ARG_3_HI2:
22806 case MULTI_ARG_3_QI2:
22807 nargs = 3;
22808 break;
22810 case MULTI_ARG_2_SF:
22811 case MULTI_ARG_2_DF:
22812 case MULTI_ARG_2_DI:
22813 case MULTI_ARG_2_SI:
22814 case MULTI_ARG_2_HI:
22815 case MULTI_ARG_2_QI:
22816 nargs = 2;
22817 break;
22819 case MULTI_ARG_2_DI_IMM:
22820 case MULTI_ARG_2_SI_IMM:
22821 case MULTI_ARG_2_HI_IMM:
22822 case MULTI_ARG_2_QI_IMM:
22823 nargs = 2;
22824 last_arg_constant = true;
22825 break;
22827 case MULTI_ARG_1_SF:
22828 case MULTI_ARG_1_DF:
22829 case MULTI_ARG_1_SF2:
22830 case MULTI_ARG_1_DF2:
22831 case MULTI_ARG_1_DI:
22832 case MULTI_ARG_1_SI:
22833 case MULTI_ARG_1_HI:
22834 case MULTI_ARG_1_QI:
22835 case MULTI_ARG_1_SI_DI:
22836 case MULTI_ARG_1_HI_DI:
22837 case MULTI_ARG_1_HI_SI:
22838 case MULTI_ARG_1_QI_DI:
22839 case MULTI_ARG_1_QI_SI:
22840 case MULTI_ARG_1_QI_HI:
22841 nargs = 1;
22842 break;
22844 case MULTI_ARG_2_DI_CMP:
22845 case MULTI_ARG_2_SI_CMP:
22846 case MULTI_ARG_2_HI_CMP:
22847 case MULTI_ARG_2_QI_CMP:
22848 nargs = 2;
22849 comparison_p = true;
22850 break;
22852 case MULTI_ARG_2_SF_TF:
22853 case MULTI_ARG_2_DF_TF:
22854 case MULTI_ARG_2_DI_TF:
22855 case MULTI_ARG_2_SI_TF:
22856 case MULTI_ARG_2_HI_TF:
22857 case MULTI_ARG_2_QI_TF:
22858 nargs = 2;
22859 tf_p = true;
22860 break;
22862 default:
22863 gcc_unreachable ();
22866 if (optimize || !target
22867 || GET_MODE (target) != tmode
22868 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22869 target = gen_reg_rtx (tmode);
22871 gcc_assert (nargs <= 4);
22873 for (i = 0; i < nargs; i++)
22875 tree arg = CALL_EXPR_ARG (exp, i);
22876 rtx op = expand_normal (arg);
22877 int adjust = (comparison_p) ? 1 : 0;
22878 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
22880 if (last_arg_constant && i == nargs-1)
22882 if (!CONST_INT_P (op))
22884 error ("last argument must be an immediate");
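/* Return a dummy register so expansion can continue after the diagnostic. */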
22885 return gen_reg_rtx (tmode);
22888 else
22890 if (VECTOR_MODE_P (mode))
22891 op = safe_vector_operand (op, mode);
22893 /* If we aren't optimizing, only allow one memory operand to be
22894 generated. */
22895 if (memory_operand (op, mode))
22896 num_memory++;
22898 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
22900 if (optimize
22901 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
22902 || num_memory > 1)
22903 op = force_reg (mode, op);
22906 args[i].op = op;
22907 args[i].mode = mode;
22910 switch (nargs)
22912 case 1:
22913 pat = GEN_FCN (icode) (target, args[0].op);
22914 break;
22916 case 2:
22917 if (tf_p)
22918 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
22919 GEN_INT ((int)sub_code));
22920 else if (! comparison_p)
22921 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
22922 else
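/* The comparison patterns take the comparison operator itself as an extra
   operand ahead of the two inputs, so build it from SUB_CODE first. */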
22924 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
22925 args[0].op,
22926 args[1].op);
22928 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
22930 break;
22932 case 3:
22933 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
22934 break;
22936 case 4:
22937 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op, args[3].op);
22938 break;
22940 default:
22941 gcc_unreachable ();
22944 if (! pat)
22945 return 0;
22947 emit_insn (pat);
22948 return target;
22951 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
22952 insns with vec_merge. */
22954 static rtx
22955 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
22956 rtx target)
22958 rtx pat;
22959 tree arg0 = CALL_EXPR_ARG (exp, 0);
22960 rtx op1, op0 = expand_normal (arg0);
22961 enum machine_mode tmode = insn_data[icode].operand[0].mode;
22962 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
22964 if (optimize || !target
22965 || GET_MODE (target) != tmode
22966 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
22967 target = gen_reg_rtx (tmode);
22969 if (VECTOR_MODE_P (mode0))
22970 op0 = safe_vector_operand (op0, mode0);
22972 if ((optimize && !register_operand (op0, mode0))
22973 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
22974 op0 = copy_to_mode_reg (mode0, op0);
22976 op1 = op0;
22977 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
22978 op1 = copy_to_mode_reg (mode0, op1);
22980 pat = GEN_FCN (icode) (target, op0, op1);
22981 if (! pat)
22982 return 0;
22983 emit_insn (pat);
22984 return target;
22987 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
22989 static rtx
22990 ix86_expand_sse_compare (const struct builtin_description *d,
22991 tree exp, rtx target, bool swap)
22993 rtx pat;
22994 tree arg0 = CALL_EXPR_ARG (exp, 0);
22995 tree arg1 = CALL_EXPR_ARG (exp, 1);
22996 rtx op0 = expand_normal (arg0);
22997 rtx op1 = expand_normal (arg1);
22998 rtx op2;
22999 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23000 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23001 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23002 enum rtx_code comparison = d->comparison;
23004 if (VECTOR_MODE_P (mode0))
23005 op0 = safe_vector_operand (op0, mode0);
23006 if (VECTOR_MODE_P (mode1))
23007 op1 = safe_vector_operand (op1, mode1);
23009 /* Swap operands if we have a comparison that isn't available in
23010 hardware. */
23011 if (swap)
23013 rtx tmp = gen_reg_rtx (mode1);
23014 emit_move_insn (tmp, op1);
23015 op1 = op0;
23016 op0 = tmp;
23019 if (optimize || !target
23020 || GET_MODE (target) != tmode
23021 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23022 target = gen_reg_rtx (tmode);
23024 if ((optimize && !register_operand (op0, mode0))
23025 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23026 op0 = copy_to_mode_reg (mode0, op0);
23027 if ((optimize && !register_operand (op1, mode1))
23028 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23029 op1 = copy_to_mode_reg (mode1, op1);
23031 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23032 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23033 if (! pat)
23034 return 0;
23035 emit_insn (pat);
23036 return target;
23039 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23041 static rtx
23042 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23043 rtx target)
23045 rtx pat;
23046 tree arg0 = CALL_EXPR_ARG (exp, 0);
23047 tree arg1 = CALL_EXPR_ARG (exp, 1);
23048 rtx op0 = expand_normal (arg0);
23049 rtx op1 = expand_normal (arg1);
23050 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23051 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23052 enum rtx_code comparison = d->comparison;
23054 if (VECTOR_MODE_P (mode0))
23055 op0 = safe_vector_operand (op0, mode0);
23056 if (VECTOR_MODE_P (mode1))
23057 op1 = safe_vector_operand (op1, mode1);
23059 /* Swap operands if we have a comparison that isn't available in
23060 hardware. */
23061 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23063 rtx tmp = op1;
23064 op1 = op0;
23065 op0 = tmp;
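/* Materialize the comparison result as 0/1: clear an SImode register, then
   set its low byte from the flags produced by the comi/ucomi insn. */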
23068 target = gen_reg_rtx (SImode);
23069 emit_move_insn (target, const0_rtx);
23070 target = gen_rtx_SUBREG (QImode, target, 0);
23072 if ((optimize && !register_operand (op0, mode0))
23073 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23074 op0 = copy_to_mode_reg (mode0, op0);
23075 if ((optimize && !register_operand (op1, mode1))
23076 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23077 op1 = copy_to_mode_reg (mode1, op1);
23079 pat = GEN_FCN (d->icode) (op0, op1);
23080 if (! pat)
23081 return 0;
23082 emit_insn (pat);
23083 emit_insn (gen_rtx_SET (VOIDmode,
23084 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23085 gen_rtx_fmt_ee (comparison, QImode,
23086 SET_DEST (pat),
23087 const0_rtx)));
23089 return SUBREG_REG (target);
23092 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23094 static rtx
23095 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23096 rtx target)
23098 rtx pat;
23099 tree arg0 = CALL_EXPR_ARG (exp, 0);
23100 tree arg1 = CALL_EXPR_ARG (exp, 1);
23101 rtx op0 = expand_normal (arg0);
23102 rtx op1 = expand_normal (arg1);
23103 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23104 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23105 enum rtx_code comparison = d->comparison;
23107 if (VECTOR_MODE_P (mode0))
23108 op0 = safe_vector_operand (op0, mode0);
23109 if (VECTOR_MODE_P (mode1))
23110 op1 = safe_vector_operand (op1, mode1);
23112 target = gen_reg_rtx (SImode);
23113 emit_move_insn (target, const0_rtx);
23114 target = gen_rtx_SUBREG (QImode, target, 0);
23116 if ((optimize && !register_operand (op0, mode0))
23117 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23118 op0 = copy_to_mode_reg (mode0, op0);
23119 if ((optimize && !register_operand (op1, mode1))
23120 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23121 op1 = copy_to_mode_reg (mode1, op1);
23123 pat = GEN_FCN (d->icode) (op0, op1);
23124 if (! pat)
23125 return 0;
23126 emit_insn (pat);
23127 emit_insn (gen_rtx_SET (VOIDmode,
23128 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23129 gen_rtx_fmt_ee (comparison, QImode,
23130 SET_DEST (pat),
23131 const0_rtx)));
23133 return SUBREG_REG (target);
23136 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23138 static rtx
23139 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23140 tree exp, rtx target)
23142 rtx pat;
23143 tree arg0 = CALL_EXPR_ARG (exp, 0);
23144 tree arg1 = CALL_EXPR_ARG (exp, 1);
23145 tree arg2 = CALL_EXPR_ARG (exp, 2);
23146 tree arg3 = CALL_EXPR_ARG (exp, 3);
23147 tree arg4 = CALL_EXPR_ARG (exp, 4);
23148 rtx scratch0, scratch1;
23149 rtx op0 = expand_normal (arg0);
23150 rtx op1 = expand_normal (arg1);
23151 rtx op2 = expand_normal (arg2);
23152 rtx op3 = expand_normal (arg3);
23153 rtx op4 = expand_normal (arg4);
23154 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23156 tmode0 = insn_data[d->icode].operand[0].mode;
23157 tmode1 = insn_data[d->icode].operand[1].mode;
23158 modev2 = insn_data[d->icode].operand[2].mode;
23159 modei3 = insn_data[d->icode].operand[3].mode;
23160 modev4 = insn_data[d->icode].operand[4].mode;
23161 modei5 = insn_data[d->icode].operand[5].mode;
23162 modeimm = insn_data[d->icode].operand[6].mode;
23164 if (VECTOR_MODE_P (modev2))
23165 op0 = safe_vector_operand (op0, modev2);
23166 if (VECTOR_MODE_P (modev4))
23167 op2 = safe_vector_operand (op2, modev4);
23169 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23170 op0 = copy_to_mode_reg (modev2, op0);
23171 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23172 op1 = copy_to_mode_reg (modei3, op1);
23173 if ((optimize && !register_operand (op2, modev4))
23174 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23175 op2 = copy_to_mode_reg (modev4, op2);
23176 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23177 op3 = copy_to_mode_reg (modei5, op3);
23179 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23181 error ("the fifth argument must be an 8-bit immediate");
23182 return const0_rtx;
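/* pcmpestri wants the index result (operand 0), pcmpestrm wants the mask
   (operand 1); the flag-testing variants discard both into scratch
   registers and read an EFLAGS bit below. */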
23185 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23187 if (optimize || !target
23188 || GET_MODE (target) != tmode0
23189 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23190 target = gen_reg_rtx (tmode0);
23192 scratch1 = gen_reg_rtx (tmode1);
23194 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23196 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23198 if (optimize || !target
23199 || GET_MODE (target) != tmode1
23200 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23201 target = gen_reg_rtx (tmode1);
23203 scratch0 = gen_reg_rtx (tmode0);
23205 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23207 else
23209 gcc_assert (d->flag);
23211 scratch0 = gen_reg_rtx (tmode0);
23212 scratch1 = gen_reg_rtx (tmode1);
23214 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23217 if (! pat)
23218 return 0;
23220 emit_insn (pat);
23222 if (d->flag)
23224 target = gen_reg_rtx (SImode);
23225 emit_move_insn (target, const0_rtx);
23226 target = gen_rtx_SUBREG (QImode, target, 0);
23228 emit_insn
23229 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23230 gen_rtx_fmt_ee (EQ, QImode,
23231 gen_rtx_REG ((enum machine_mode) d->flag,
23232 FLAGS_REG),
23233 const0_rtx)));
23234 return SUBREG_REG (target);
23236 else
23237 return target;
23241 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23243 static rtx
23244 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23245 tree exp, rtx target)
23247 rtx pat;
23248 tree arg0 = CALL_EXPR_ARG (exp, 0);
23249 tree arg1 = CALL_EXPR_ARG (exp, 1);
23250 tree arg2 = CALL_EXPR_ARG (exp, 2);
23251 rtx scratch0, scratch1;
23252 rtx op0 = expand_normal (arg0);
23253 rtx op1 = expand_normal (arg1);
23254 rtx op2 = expand_normal (arg2);
23255 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23257 tmode0 = insn_data[d->icode].operand[0].mode;
23258 tmode1 = insn_data[d->icode].operand[1].mode;
23259 modev2 = insn_data[d->icode].operand[2].mode;
23260 modev3 = insn_data[d->icode].operand[3].mode;
23261 modeimm = insn_data[d->icode].operand[4].mode;
23263 if (VECTOR_MODE_P (modev2))
23264 op0 = safe_vector_operand (op0, modev2);
23265 if (VECTOR_MODE_P (modev3))
23266 op1 = safe_vector_operand (op1, modev3);
23268 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23269 op0 = copy_to_mode_reg (modev2, op0);
23270 if ((optimize && !register_operand (op1, modev3))
23271 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23272 op1 = copy_to_mode_reg (modev3, op1);
23274 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23276 error ("the third argument must be an 8-bit immediate");
23277 return const0_rtx;
23280 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23282 if (optimize || !target
23283 || GET_MODE (target) != tmode0
23284 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23285 target = gen_reg_rtx (tmode0);
23287 scratch1 = gen_reg_rtx (tmode1);
23289 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23291 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23293 if (optimize || !target
23294 || GET_MODE (target) != tmode1
23295 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23296 target = gen_reg_rtx (tmode1);
23298 scratch0 = gen_reg_rtx (tmode0);
23300 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23302 else
23304 gcc_assert (d->flag);
23306 scratch0 = gen_reg_rtx (tmode0);
23307 scratch1 = gen_reg_rtx (tmode1);
23309 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23312 if (! pat)
23313 return 0;
23315 emit_insn (pat);
23317 if (d->flag)
23319 target = gen_reg_rtx (SImode);
23320 emit_move_insn (target, const0_rtx);
23321 target = gen_rtx_SUBREG (QImode, target, 0);
23323 emit_insn
23324 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23325 gen_rtx_fmt_ee (EQ, QImode,
23326 gen_rtx_REG ((enum machine_mode) d->flag,
23327 FLAGS_REG),
23328 const0_rtx)));
23329 return SUBREG_REG (target);
23331 else
23332 return target;
23335 /* Subroutine of ix86_expand_builtin to take care of insns with a
23336 variable number of operands. */
23338 static rtx
23339 ix86_expand_args_builtin (const struct builtin_description *d,
23340 tree exp, rtx target)
23342 rtx pat, real_target;
23343 unsigned int i, nargs;
23344 unsigned int nargs_constant = 0;
23345 int num_memory = 0;
23346 struct
23348 rtx op;
23349 enum machine_mode mode;
23350 } args[4];
23351 bool last_arg_count = false;
23352 enum insn_code icode = d->icode;
23353 const struct insn_data *insn_p = &insn_data[icode];
23354 enum machine_mode tmode = insn_p->operand[0].mode;
23355 enum machine_mode rmode = VOIDmode;
23356 bool swap = false;
23357 enum rtx_code comparison = d->comparison;
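/* Decode the builtin's function type: how many operands it takes, which
   trailing operands must be immediates (nargs_constant), and whether the
   result must be generated in a different mode (rmode) via a subreg. */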
23359 switch ((enum ix86_builtin_func_type) d->flag)
23361 case INT_FTYPE_V8SF_V8SF_PTEST:
23362 case INT_FTYPE_V4DI_V4DI_PTEST:
23363 case INT_FTYPE_V4DF_V4DF_PTEST:
23364 case INT_FTYPE_V4SF_V4SF_PTEST:
23365 case INT_FTYPE_V2DI_V2DI_PTEST:
23366 case INT_FTYPE_V2DF_V2DF_PTEST:
23367 return ix86_expand_sse_ptest (d, exp, target);
23368 case FLOAT128_FTYPE_FLOAT128:
23369 case FLOAT_FTYPE_FLOAT:
23370 case INT_FTYPE_INT:
23371 case UINT64_FTYPE_INT:
23372 case UINT16_FTYPE_UINT16:
23373 case INT64_FTYPE_INT64:
23374 case INT64_FTYPE_V4SF:
23375 case INT64_FTYPE_V2DF:
23376 case INT_FTYPE_V16QI:
23377 case INT_FTYPE_V8QI:
23378 case INT_FTYPE_V8SF:
23379 case INT_FTYPE_V4DF:
23380 case INT_FTYPE_V4SF:
23381 case INT_FTYPE_V2DF:
23382 case V16QI_FTYPE_V16QI:
23383 case V8SI_FTYPE_V8SF:
23384 case V8SI_FTYPE_V4SI:
23385 case V8HI_FTYPE_V8HI:
23386 case V8HI_FTYPE_V16QI:
23387 case V8QI_FTYPE_V8QI:
23388 case V8SF_FTYPE_V8SF:
23389 case V8SF_FTYPE_V8SI:
23390 case V8SF_FTYPE_V4SF:
23391 case V4SI_FTYPE_V4SI:
23392 case V4SI_FTYPE_V16QI:
23393 case V4SI_FTYPE_V4SF:
23394 case V4SI_FTYPE_V8SI:
23395 case V4SI_FTYPE_V8HI:
23396 case V4SI_FTYPE_V4DF:
23397 case V4SI_FTYPE_V2DF:
23398 case V4HI_FTYPE_V4HI:
23399 case V4DF_FTYPE_V4DF:
23400 case V4DF_FTYPE_V4SI:
23401 case V4DF_FTYPE_V4SF:
23402 case V4DF_FTYPE_V2DF:
23403 case V4SF_FTYPE_V4SF:
23404 case V4SF_FTYPE_V4SI:
23405 case V4SF_FTYPE_V8SF:
23406 case V4SF_FTYPE_V4DF:
23407 case V4SF_FTYPE_V2DF:
23408 case V2DI_FTYPE_V2DI:
23409 case V2DI_FTYPE_V16QI:
23410 case V2DI_FTYPE_V8HI:
23411 case V2DI_FTYPE_V4SI:
23412 case V2DF_FTYPE_V2DF:
23413 case V2DF_FTYPE_V4SI:
23414 case V2DF_FTYPE_V4DF:
23415 case V2DF_FTYPE_V4SF:
23416 case V2DF_FTYPE_V2SI:
23417 case V2SI_FTYPE_V2SI:
23418 case V2SI_FTYPE_V4SF:
23419 case V2SI_FTYPE_V2SF:
23420 case V2SI_FTYPE_V2DF:
23421 case V2SF_FTYPE_V2SF:
23422 case V2SF_FTYPE_V2SI:
23423 nargs = 1;
23424 break;
23425 case V4SF_FTYPE_V4SF_VEC_MERGE:
23426 case V2DF_FTYPE_V2DF_VEC_MERGE:
23427 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23428 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23429 case V16QI_FTYPE_V16QI_V16QI:
23430 case V16QI_FTYPE_V8HI_V8HI:
23431 case V8QI_FTYPE_V8QI_V8QI:
23432 case V8QI_FTYPE_V4HI_V4HI:
23433 case V8HI_FTYPE_V8HI_V8HI:
23434 case V8HI_FTYPE_V16QI_V16QI:
23435 case V8HI_FTYPE_V4SI_V4SI:
23436 case V8SF_FTYPE_V8SF_V8SF:
23437 case V8SF_FTYPE_V8SF_V8SI:
23438 case V4SI_FTYPE_V4SI_V4SI:
23439 case V4SI_FTYPE_V8HI_V8HI:
23440 case V4SI_FTYPE_V4SF_V4SF:
23441 case V4SI_FTYPE_V2DF_V2DF:
23442 case V4HI_FTYPE_V4HI_V4HI:
23443 case V4HI_FTYPE_V8QI_V8QI:
23444 case V4HI_FTYPE_V2SI_V2SI:
23445 case V4DF_FTYPE_V4DF_V4DF:
23446 case V4DF_FTYPE_V4DF_V4DI:
23447 case V4SF_FTYPE_V4SF_V4SF:
23448 case V4SF_FTYPE_V4SF_V4SI:
23449 case V4SF_FTYPE_V4SF_V2SI:
23450 case V4SF_FTYPE_V4SF_V2DF:
23451 case V4SF_FTYPE_V4SF_DI:
23452 case V4SF_FTYPE_V4SF_SI:
23453 case V2DI_FTYPE_V2DI_V2DI:
23454 case V2DI_FTYPE_V16QI_V16QI:
23455 case V2DI_FTYPE_V4SI_V4SI:
23456 case V2DI_FTYPE_V2DI_V16QI:
23457 case V2DI_FTYPE_V2DF_V2DF:
23458 case V2SI_FTYPE_V2SI_V2SI:
23459 case V2SI_FTYPE_V4HI_V4HI:
23460 case V2SI_FTYPE_V2SF_V2SF:
23461 case V2DF_FTYPE_V2DF_V2DF:
23462 case V2DF_FTYPE_V2DF_V4SF:
23463 case V2DF_FTYPE_V2DF_V2DI:
23464 case V2DF_FTYPE_V2DF_DI:
23465 case V2DF_FTYPE_V2DF_SI:
23466 case V2SF_FTYPE_V2SF_V2SF:
23467 case V1DI_FTYPE_V1DI_V1DI:
23468 case V1DI_FTYPE_V8QI_V8QI:
23469 case V1DI_FTYPE_V2SI_V2SI:
23470 if (comparison == UNKNOWN)
23471 return ix86_expand_binop_builtin (icode, exp, target);
23472 nargs = 2;
23473 break;
23474 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23475 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23476 gcc_assert (comparison != UNKNOWN);
23477 nargs = 2;
23478 swap = true;
23479 break;
23480 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23481 case V8HI_FTYPE_V8HI_SI_COUNT:
23482 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23483 case V4SI_FTYPE_V4SI_SI_COUNT:
23484 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23485 case V4HI_FTYPE_V4HI_SI_COUNT:
23486 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23487 case V2DI_FTYPE_V2DI_SI_COUNT:
23488 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23489 case V2SI_FTYPE_V2SI_SI_COUNT:
23490 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23491 case V1DI_FTYPE_V1DI_SI_COUNT:
23492 nargs = 2;
23493 last_arg_count = true;
23494 break;
23495 case UINT64_FTYPE_UINT64_UINT64:
23496 case UINT_FTYPE_UINT_UINT:
23497 case UINT_FTYPE_UINT_USHORT:
23498 case UINT_FTYPE_UINT_UCHAR:
23499 case UINT16_FTYPE_UINT16_INT:
23500 case UINT8_FTYPE_UINT8_INT:
23501 nargs = 2;
23502 break;
23503 case V2DI_FTYPE_V2DI_INT_CONVERT:
23504 nargs = 2;
23505 rmode = V1TImode;
23506 nargs_constant = 1;
23507 break;
23508 case V8HI_FTYPE_V8HI_INT:
23509 case V8SF_FTYPE_V8SF_INT:
23510 case V4SI_FTYPE_V4SI_INT:
23511 case V4SI_FTYPE_V8SI_INT:
23512 case V4HI_FTYPE_V4HI_INT:
23513 case V4DF_FTYPE_V4DF_INT:
23514 case V4SF_FTYPE_V4SF_INT:
23515 case V4SF_FTYPE_V8SF_INT:
23516 case V2DI_FTYPE_V2DI_INT:
23517 case V2DF_FTYPE_V2DF_INT:
23518 case V2DF_FTYPE_V4DF_INT:
23519 nargs = 2;
23520 nargs_constant = 1;
23521 break;
23522 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23523 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23524 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23525 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23526 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23527 nargs = 3;
23528 break;
23529 case V16QI_FTYPE_V16QI_V16QI_INT:
23530 case V8HI_FTYPE_V8HI_V8HI_INT:
23531 case V8SI_FTYPE_V8SI_V8SI_INT:
23532 case V8SI_FTYPE_V8SI_V4SI_INT:
23533 case V8SF_FTYPE_V8SF_V8SF_INT:
23534 case V8SF_FTYPE_V8SF_V4SF_INT:
23535 case V4SI_FTYPE_V4SI_V4SI_INT:
23536 case V4DF_FTYPE_V4DF_V4DF_INT:
23537 case V4DF_FTYPE_V4DF_V2DF_INT:
23538 case V4SF_FTYPE_V4SF_V4SF_INT:
23539 case V2DI_FTYPE_V2DI_V2DI_INT:
23540 case V2DF_FTYPE_V2DF_V2DF_INT:
23541 nargs = 3;
23542 nargs_constant = 1;
23543 break;
23544 case V2DI_FTYPE_V2DI_V2DI_INT_CONVERT:
23545 nargs = 3;
23546 rmode = V2DImode;
23547 nargs_constant = 1;
23548 break;
23549 case V1DI_FTYPE_V1DI_V1DI_INT_CONVERT:
23550 nargs = 3;
23551 rmode = DImode;
23552 nargs_constant = 1;
23553 break;
23554 case V2DI_FTYPE_V2DI_UINT_UINT:
23555 nargs = 3;
23556 nargs_constant = 2;
23557 break;
23558 case MULTI_ARG_4_DF2_DI_I:
23559 case MULTI_ARG_4_DF2_DI_I1:
23560 case MULTI_ARG_4_SF2_SI_I:
23561 case MULTI_ARG_4_SF2_SI_I1:
23562 nargs = 4;
23563 nargs_constant = 1;
23564 break;
23565 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23566 nargs = 4;
23567 nargs_constant = 2;
23568 break;
23569 default:
23570 gcc_unreachable ();
23573 gcc_assert (nargs <= ARRAY_SIZE (args));
23575 if (comparison != UNKNOWN)
23577 gcc_assert (nargs == 2);
23578 return ix86_expand_sse_compare (d, exp, target, swap);
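/* When rmode differs from the insn's output mode, allocate the result in
   rmode and let the insn write it through a tmode subreg of that register. */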
23581 if (rmode == VOIDmode || rmode == tmode)
23583 if (optimize
23584 || target == 0
23585 || GET_MODE (target) != tmode
23586 || ! (*insn_p->operand[0].predicate) (target, tmode))
23587 target = gen_reg_rtx (tmode);
23588 real_target = target;
23590 else
23592 target = gen_reg_rtx (rmode);
23593 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23596 for (i = 0; i < nargs; i++)
23598 tree arg = CALL_EXPR_ARG (exp, i);
23599 rtx op = expand_normal (arg);
23600 enum machine_mode mode = insn_p->operand[i + 1].mode;
23601 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23603 if (last_arg_count && (i + 1) == nargs)
23605 /* SIMD shift insns take either an 8-bit immediate or a register
23606 as the count, but the builtin functions take an int as the count.
23607 If the count does not satisfy the predicate, put it in a register. */
23608 if (!match)
23610 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23611 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23612 op = copy_to_reg (op);
23615 else if ((nargs - i) <= nargs_constant)
23617 if (!match)
23618 switch (icode)
23620 case CODE_FOR_sse4_1_roundpd:
23621 case CODE_FOR_sse4_1_roundps:
23622 case CODE_FOR_sse4_1_roundsd:
23623 case CODE_FOR_sse4_1_roundss:
23624 case CODE_FOR_sse4_1_blendps:
23625 case CODE_FOR_avx_blendpd256:
23626 case CODE_FOR_avx_vpermilv4df:
23627 case CODE_FOR_avx_roundpd256:
23628 case CODE_FOR_avx_roundps256:
23629 error ("the last argument must be a 4-bit immediate");
23630 return const0_rtx;
23632 case CODE_FOR_sse4_1_blendpd:
23633 case CODE_FOR_avx_vpermilv2df:
23634 case CODE_FOR_xop_vpermil2v2df3:
23635 case CODE_FOR_xop_vpermil2v4sf3:
23636 case CODE_FOR_xop_vpermil2v4df3:
23637 case CODE_FOR_xop_vpermil2v8sf3:
23638 error ("the last argument must be a 2-bit immediate");
23639 return const0_rtx;
23641 case CODE_FOR_avx_vextractf128v4df:
23642 case CODE_FOR_avx_vextractf128v8sf:
23643 case CODE_FOR_avx_vextractf128v8si:
23644 case CODE_FOR_avx_vinsertf128v4df:
23645 case CODE_FOR_avx_vinsertf128v8sf:
23646 case CODE_FOR_avx_vinsertf128v8si:
23647 error ("the last argument must be a 1-bit immediate");
23648 return const0_rtx;
23650 case CODE_FOR_avx_cmpsdv2df3:
23651 case CODE_FOR_avx_cmpssv4sf3:
23652 case CODE_FOR_avx_cmppdv2df3:
23653 case CODE_FOR_avx_cmppsv4sf3:
23654 case CODE_FOR_avx_cmppdv4df3:
23655 case CODE_FOR_avx_cmppsv8sf3:
23656 error ("the last argument must be a 5-bit immediate");
23657 return const0_rtx;
23659 default:
23660 switch (nargs_constant)
23662 case 2:
23663 if ((nargs - i) == nargs_constant)
23665 error ("the next to last argument must be an 8-bit immediate");
23666 break;
23668 case 1:
23669 error ("the last argument must be an 8-bit immediate");
23670 break;
23671 default:
23672 gcc_unreachable ();
23674 return const0_rtx;
23677 else
23679 if (VECTOR_MODE_P (mode))
23680 op = safe_vector_operand (op, mode);
23682 /* If we aren't optimizing, only allow one memory operand to
23683 be generated. */
23684 if (memory_operand (op, mode))
23685 num_memory++;
23687 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23689 if (optimize || !match || num_memory > 1)
23690 op = copy_to_mode_reg (mode, op);
23692 else
23694 op = copy_to_reg (op);
23695 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23699 args[i].op = op;
23700 args[i].mode = mode;
23703 switch (nargs)
23705 case 1:
23706 pat = GEN_FCN (icode) (real_target, args[0].op);
23707 break;
23708 case 2:
23709 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23710 break;
23711 case 3:
23712 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23713 args[2].op);
23714 break;
23715 case 4:
23716 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23717 args[2].op, args[3].op);
23718 break;
23719 default:
23720 gcc_unreachable ();
23723 if (! pat)
23724 return 0;
23726 emit_insn (pat);
23727 return target;
23730 /* Subroutine of ix86_expand_builtin to take care of special insns
23731 with a variable number of operands. */
23733 static rtx
23734 ix86_expand_special_args_builtin (const struct builtin_description *d,
23735 tree exp, rtx target)
23737 tree arg;
23738 rtx pat, op;
23739 unsigned int i, nargs, arg_adjust, memory;
23740 struct
23742 rtx op;
23743 enum machine_mode mode;
23744 } args[3];
23745 enum insn_code icode = d->icode;
23746 bool last_arg_constant = false;
23747 const struct insn_data *insn_p = &insn_data[icode];
23748 enum machine_mode tmode = insn_p->operand[0].mode;
23749 enum { load, store } klass;
23751 switch ((enum ix86_builtin_func_type) d->flag)
23753 case VOID_FTYPE_VOID:
23754 emit_insn (GEN_FCN (icode) (target));
23755 return 0;
23756 case UINT64_FTYPE_VOID:
23757 nargs = 0;
23758 klass = load;
23759 memory = 0;
23760 break;
23761 case UINT64_FTYPE_PUNSIGNED:
23762 case V2DI_FTYPE_PV2DI:
23763 case V32QI_FTYPE_PCCHAR:
23764 case V16QI_FTYPE_PCCHAR:
23765 case V8SF_FTYPE_PCV4SF:
23766 case V8SF_FTYPE_PCFLOAT:
23767 case V4SF_FTYPE_PCFLOAT:
23768 case V4DF_FTYPE_PCV2DF:
23769 case V4DF_FTYPE_PCDOUBLE:
23770 case V2DF_FTYPE_PCDOUBLE:
23771 case VOID_FTYPE_PVOID:
23772 nargs = 1;
23773 klass = load;
23774 memory = 0;
23775 break;
23776 case VOID_FTYPE_PV2SF_V4SF:
23777 case VOID_FTYPE_PV4DI_V4DI:
23778 case VOID_FTYPE_PV2DI_V2DI:
23779 case VOID_FTYPE_PCHAR_V32QI:
23780 case VOID_FTYPE_PCHAR_V16QI:
23781 case VOID_FTYPE_PFLOAT_V8SF:
23782 case VOID_FTYPE_PFLOAT_V4SF:
23783 case VOID_FTYPE_PDOUBLE_V4DF:
23784 case VOID_FTYPE_PDOUBLE_V2DF:
23785 case VOID_FTYPE_PULONGLONG_ULONGLONG:
23786 case VOID_FTYPE_PINT_INT:
23787 nargs = 1;
23788 klass = store;
23789 /* Reserve memory operand for target. */
23790 memory = ARRAY_SIZE (args);
23791 break;
23792 case V4SF_FTYPE_V4SF_PCV2SF:
23793 case V2DF_FTYPE_V2DF_PCDOUBLE:
23794 nargs = 2;
23795 klass = load;
23796 memory = 1;
23797 break;
23798 case V8SF_FTYPE_PCV8SF_V8SF:
23799 case V4DF_FTYPE_PCV4DF_V4DF:
23800 case V4SF_FTYPE_PCV4SF_V4SF:
23801 case V2DF_FTYPE_PCV2DF_V2DF:
23802 nargs = 2;
23803 klass = load;
23804 memory = 0;
23805 break;
23806 case VOID_FTYPE_PV8SF_V8SF_V8SF:
23807 case VOID_FTYPE_PV4DF_V4DF_V4DF:
23808 case VOID_FTYPE_PV4SF_V4SF_V4SF:
23809 case VOID_FTYPE_PV2DF_V2DF_V2DF:
23810 nargs = 2;
23811 klass = store;
23812 /* Reserve memory operand for target. */
23813 memory = ARRAY_SIZE (args);
23814 break;
23815 case VOID_FTYPE_UINT_UINT_UINT:
23816 case VOID_FTYPE_UINT64_UINT_UINT:
23817 case UCHAR_FTYPE_UINT_UINT_UINT:
23818 case UCHAR_FTYPE_UINT64_UINT_UINT:
23819 nargs = 3;
23820 klass = load;
23821 memory = ARRAY_SIZE (args);
23822 last_arg_constant = true;
23823 break;
23824 default:
23825 gcc_unreachable ();
23828 gcc_assert (nargs <= ARRAY_SIZE (args));
23830 if (klass == store)
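/* For store forms, the first argument is the destination address: wrap it
   in a MEM of the insn's output mode and use that as the insn's target. */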
23832 arg = CALL_EXPR_ARG (exp, 0);
23833 op = expand_normal (arg);
23834 gcc_assert (target == 0);
23835 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
23836 arg_adjust = 1;
23838 else
23840 arg_adjust = 0;
23841 if (optimize
23842 || target == 0
23843 || GET_MODE (target) != tmode
23844 || ! (*insn_p->operand[0].predicate) (target, tmode))
23845 target = gen_reg_rtx (tmode);
23848 for (i = 0; i < nargs; i++)
23850 enum machine_mode mode = insn_p->operand[i + 1].mode;
23851 bool match;
23853 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
23854 op = expand_normal (arg);
23855 match = (*insn_p->operand[i + 1].predicate) (op, mode);
23857 if (last_arg_constant && (i + 1) == nargs)
23859 if (!match)
23861 if (icode == CODE_FOR_lwp_lwpvalsi3
23862 || icode == CODE_FOR_lwp_lwpinssi3
23863 || icode == CODE_FOR_lwp_lwpvaldi3
23864 || icode == CODE_FOR_lwp_lwpinsdi3)
23865 error ("the last argument must be a 32-bit immediate");
23866 else
23867 error ("the last argument must be an 8-bit immediate");
23868 return const0_rtx;
23871 else
23873 if (i == memory)
23875 /* This must be the memory operand. */
23876 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
23877 gcc_assert (GET_MODE (op) == mode
23878 || GET_MODE (op) == VOIDmode);
23880 else
23882 /* This must be a register. */
23883 if (VECTOR_MODE_P (mode))
23884 op = safe_vector_operand (op, mode);
23886 gcc_assert (GET_MODE (op) == mode
23887 || GET_MODE (op) == VOIDmode);
23888 op = copy_to_mode_reg (mode, op);
23892 args[i].op = op;
23893 args[i].mode = mode;
23896 switch (nargs)
23898 case 0:
23899 pat = GEN_FCN (icode) (target);
23900 break;
23901 case 1:
23902 pat = GEN_FCN (icode) (target, args[0].op);
23903 break;
23904 case 2:
23905 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
23906 break;
23907 case 3:
23908 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
23909 break;
23910 default:
23911 gcc_unreachable ();
23914 if (! pat)
23915 return 0;
23916 emit_insn (pat);
23917 return klass == store ? 0 : target;
23920 /* Return the integer constant in ARG. Constrain it to be in the range
23921 of the subparts of VEC_TYPE; issue an error if not. */
23923 static int
23924 get_element_number (tree vec_type, tree arg)
23926 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
23928 if (!host_integerp (arg, 1)
23929 || (elt = tree_low_cst (arg, 1), elt > max))
23931 error ("selector must be an integer constant in the range 0..%wi", max);
23932 return 0;
23935 return elt;
23938 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
23939 ix86_expand_vector_init. We DO have language-level syntax for this, in
23940 the form of (type){ init-list }. Except that since we can't place emms
23941 instructions from inside the compiler, we can't allow the use of MMX
23942 registers unless the user explicitly asks for it. So we do *not* define
23943 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
23944 we have builtins invoked by mmintrin.h that give us license to emit
23945 these sorts of instructions. */
23947 static rtx
23948 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
23950 enum machine_mode tmode = TYPE_MODE (type);
23951 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
23952 int i, n_elt = GET_MODE_NUNITS (tmode);
23953 rtvec v = rtvec_alloc (n_elt);
23955 gcc_assert (VECTOR_MODE_P (tmode));
23956 gcc_assert (call_expr_nargs (exp) == n_elt);
23958 for (i = 0; i < n_elt; ++i)
23960 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
23961 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
23964 if (!target || !register_operand (target, tmode))
23965 target = gen_reg_rtx (tmode);
23967 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
23968 return target;
23971 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
23972 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
23973 had a language-level syntax for referencing vector elements. */
23975 static rtx
23976 ix86_expand_vec_ext_builtin (tree exp, rtx target)
23978 enum machine_mode tmode, mode0;
23979 tree arg0, arg1;
23980 int elt;
23981 rtx op0;
23983 arg0 = CALL_EXPR_ARG (exp, 0);
23984 arg1 = CALL_EXPR_ARG (exp, 1);
23986 op0 = expand_normal (arg0);
23987 elt = get_element_number (TREE_TYPE (arg0), arg1);
23989 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
23990 mode0 = TYPE_MODE (TREE_TYPE (arg0));
23991 gcc_assert (VECTOR_MODE_P (mode0));
23993 op0 = force_reg (mode0, op0);
23995 if (optimize || !target || !register_operand (target, tmode))
23996 target = gen_reg_rtx (tmode);
23998 ix86_expand_vector_extract (true, target, op0, elt);
24000 return target;
24003 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24004 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24005 a language-level syntax for referencing vector elements. */
24007 static rtx
24008 ix86_expand_vec_set_builtin (tree exp)
24010 enum machine_mode tmode, mode1;
24011 tree arg0, arg1, arg2;
24012 int elt;
24013 rtx op0, op1, target;
24015 arg0 = CALL_EXPR_ARG (exp, 0);
24016 arg1 = CALL_EXPR_ARG (exp, 1);
24017 arg2 = CALL_EXPR_ARG (exp, 2);
24019 tmode = TYPE_MODE (TREE_TYPE (arg0));
24020 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24021 gcc_assert (VECTOR_MODE_P (tmode));
24023 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24024 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24025 elt = get_element_number (TREE_TYPE (arg0), arg2);
24027 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24028 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24030 op0 = force_reg (tmode, op0);
24031 op1 = force_reg (mode1, op1);
24033 /* OP0 is the source of these builtin functions and shouldn't be
24034 modified. Create a copy, use it and return it as target. */
24035 target = gen_reg_rtx (tmode);
24036 emit_move_insn (target, op0);
24037 ix86_expand_vector_set (true, target, op1, elt);
24039 return target;
24042 /* Expand an expression EXP that calls a built-in function,
24043 with result going to TARGET if that's convenient
24044 (and in mode MODE if that's convenient).
24045 SUBTARGET may be used as the target for computing one of EXP's operands.
24046 IGNORE is nonzero if the value is to be ignored. */
24048 static rtx
24049 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24050 enum machine_mode mode ATTRIBUTE_UNUSED,
24051 int ignore ATTRIBUTE_UNUSED)
24053 const struct builtin_description *d;
24054 size_t i;
24055 enum insn_code icode;
24056 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24057 tree arg0, arg1, arg2;
24058 rtx op0, op1, op2, pat;
24059 enum machine_mode mode0, mode1, mode2;
24060 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24062 /* Determine whether the builtin function is available under the current ISA.
24063 Originally the builtin was not created at all if it wasn't applicable to
24064 the ISA selected by the command-line switches. With function-specific
24065 options, we need to check, in the context of the function making the call,
24066 whether it is supported. */
24067 if (ix86_builtins_isa[fcode].isa
24068 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
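/* Build a human-readable option string (e.g. "-msse4.1") naming the
   missing ISA for the diagnostic below. */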
24070 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24071 NULL, NULL, false);
24073 if (!opts)
24074 error ("%qE needs unknown isa option", fndecl);
24075 else
24077 gcc_assert (opts != NULL);
24078 error ("%qE needs isa option %s", fndecl, opts);
24079 free (opts);
24081 return const0_rtx;
24084 switch (fcode)
24086 case IX86_BUILTIN_MASKMOVQ:
24087 case IX86_BUILTIN_MASKMOVDQU:
24088 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24089 ? CODE_FOR_mmx_maskmovq
24090 : CODE_FOR_sse2_maskmovdqu);
24091 /* Note the arg order is different from the operand order. */
24092 arg1 = CALL_EXPR_ARG (exp, 0);
24093 arg2 = CALL_EXPR_ARG (exp, 1);
24094 arg0 = CALL_EXPR_ARG (exp, 2);
24095 op0 = expand_normal (arg0);
24096 op1 = expand_normal (arg1);
24097 op2 = expand_normal (arg2);
24098 mode0 = insn_data[icode].operand[0].mode;
24099 mode1 = insn_data[icode].operand[1].mode;
24100 mode2 = insn_data[icode].operand[2].mode;
24102 op0 = force_reg (Pmode, op0);
24103 op0 = gen_rtx_MEM (mode1, op0);
24105 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24106 op0 = copy_to_mode_reg (mode0, op0);
24107 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24108 op1 = copy_to_mode_reg (mode1, op1);
24109 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24110 op2 = copy_to_mode_reg (mode2, op2);
24111 pat = GEN_FCN (icode) (op0, op1, op2);
24112 if (! pat)
24113 return 0;
24114 emit_insn (pat);
24115 return 0;
24117 case IX86_BUILTIN_LDMXCSR:
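/* ldmxcsr only takes a memory operand, so spill the value to a stack
   slot first. */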
24118 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24119 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24120 emit_move_insn (target, op0);
24121 emit_insn (gen_sse_ldmxcsr (target));
24122 return 0;
24124 case IX86_BUILTIN_STMXCSR:
24125 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24126 emit_insn (gen_sse_stmxcsr (target));
24127 return copy_to_mode_reg (SImode, target);
24129 case IX86_BUILTIN_CLFLUSH:
24130 arg0 = CALL_EXPR_ARG (exp, 0);
24131 op0 = expand_normal (arg0);
24132 icode = CODE_FOR_sse2_clflush;
24133 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24134 op0 = copy_to_mode_reg (Pmode, op0);
24136 emit_insn (gen_sse2_clflush (op0));
24137 return 0;
24139 case IX86_BUILTIN_MONITOR:
24140 arg0 = CALL_EXPR_ARG (exp, 0);
24141 arg1 = CALL_EXPR_ARG (exp, 1);
24142 arg2 = CALL_EXPR_ARG (exp, 2);
24143 op0 = expand_normal (arg0);
24144 op1 = expand_normal (arg1);
24145 op2 = expand_normal (arg2);
24146 if (!REG_P (op0))
24147 op0 = copy_to_mode_reg (Pmode, op0);
24148 if (!REG_P (op1))
24149 op1 = copy_to_mode_reg (SImode, op1);
24150 if (!REG_P (op2))
24151 op2 = copy_to_mode_reg (SImode, op2);
24152 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24153 return 0;
24155 case IX86_BUILTIN_MWAIT:
24156 arg0 = CALL_EXPR_ARG (exp, 0);
24157 arg1 = CALL_EXPR_ARG (exp, 1);
24158 op0 = expand_normal (arg0);
24159 op1 = expand_normal (arg1);
24160 if (!REG_P (op0))
24161 op0 = copy_to_mode_reg (SImode, op0);
24162 if (!REG_P (op1))
24163 op1 = copy_to_mode_reg (SImode, op1);
24164 emit_insn (gen_sse3_mwait (op0, op1));
24165 return 0;
24167 case IX86_BUILTIN_VEC_INIT_V2SI:
24168 case IX86_BUILTIN_VEC_INIT_V4HI:
24169 case IX86_BUILTIN_VEC_INIT_V8QI:
24170 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24172 case IX86_BUILTIN_VEC_EXT_V2DF:
24173 case IX86_BUILTIN_VEC_EXT_V2DI:
24174 case IX86_BUILTIN_VEC_EXT_V4SF:
24175 case IX86_BUILTIN_VEC_EXT_V4SI:
24176 case IX86_BUILTIN_VEC_EXT_V8HI:
24177 case IX86_BUILTIN_VEC_EXT_V2SI:
24178 case IX86_BUILTIN_VEC_EXT_V4HI:
24179 case IX86_BUILTIN_VEC_EXT_V16QI:
24180 return ix86_expand_vec_ext_builtin (exp, target);
24182 case IX86_BUILTIN_VEC_SET_V2DI:
24183 case IX86_BUILTIN_VEC_SET_V4SF:
24184 case IX86_BUILTIN_VEC_SET_V4SI:
24185 case IX86_BUILTIN_VEC_SET_V8HI:
24186 case IX86_BUILTIN_VEC_SET_V4HI:
24187 case IX86_BUILTIN_VEC_SET_V16QI:
24188 return ix86_expand_vec_set_builtin (exp);
24190 case IX86_BUILTIN_VEC_PERM_V2DF:
24191 case IX86_BUILTIN_VEC_PERM_V4SF:
24192 case IX86_BUILTIN_VEC_PERM_V2DI:
24193 case IX86_BUILTIN_VEC_PERM_V4SI:
24194 case IX86_BUILTIN_VEC_PERM_V8HI:
24195 case IX86_BUILTIN_VEC_PERM_V16QI:
24196 case IX86_BUILTIN_VEC_PERM_V2DI_U:
24197 case IX86_BUILTIN_VEC_PERM_V4SI_U:
24198 case IX86_BUILTIN_VEC_PERM_V8HI_U:
24199 case IX86_BUILTIN_VEC_PERM_V16QI_U:
24200 case IX86_BUILTIN_VEC_PERM_V4DF:
24201 case IX86_BUILTIN_VEC_PERM_V8SF:
24202 return ix86_expand_vec_perm_builtin (exp);
24204 case IX86_BUILTIN_INFQ:
24205 case IX86_BUILTIN_HUGE_VALQ:
24207 REAL_VALUE_TYPE inf;
24208 rtx tmp;
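/* Materialize +Inf in the result mode through the constant pool;
   __builtin_infq and __builtin_huge_valq both expand to it. */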
24210 real_inf (&inf);
24211 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24213 tmp = validize_mem (force_const_mem (mode, tmp));
24215 if (target == 0)
24216 target = gen_reg_rtx (mode);
24218 emit_move_insn (target, tmp);
24219 return target;
24222 case IX86_BUILTIN_LLWPCB:
24223 arg0 = CALL_EXPR_ARG (exp, 0);
24224 op0 = expand_normal (arg0);
24225 icode = CODE_FOR_lwp_llwpcb;
24226 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24227 op0 = copy_to_mode_reg (Pmode, op0);
24228 emit_insn (gen_lwp_llwpcb (op0));
24229 return 0;
24231 case IX86_BUILTIN_SLWPCB:
24232 icode = CODE_FOR_lwp_slwpcb;
24233 if (!target
24234 || ! (*insn_data[icode].operand[0].predicate) (target, Pmode))
24235 target = gen_reg_rtx (Pmode);
24236 emit_insn (gen_lwp_slwpcb (target));
24237 return target;
24239 default:
24240 break;
24243 for (i = 0, d = bdesc_special_args;
24244 i < ARRAY_SIZE (bdesc_special_args);
24245 i++, d++)
24246 if (d->code == fcode)
24247 return ix86_expand_special_args_builtin (d, exp, target);
24249 for (i = 0, d = bdesc_args;
24250 i < ARRAY_SIZE (bdesc_args);
24251 i++, d++)
24252 if (d->code == fcode)
24253 switch (fcode)
24255 case IX86_BUILTIN_FABSQ:
24256 case IX86_BUILTIN_COPYSIGNQ:
24257 if (!TARGET_SSE2)
24258 /* Emit a normal call if SSE2 isn't available. */
24259 return expand_call (exp, target, ignore);
24260 default:
24261 return ix86_expand_args_builtin (d, exp, target);
24264 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24265 if (d->code == fcode)
24266 return ix86_expand_sse_comi (d, exp, target);
24268 for (i = 0, d = bdesc_pcmpestr;
24269 i < ARRAY_SIZE (bdesc_pcmpestr);
24270 i++, d++)
24271 if (d->code == fcode)
24272 return ix86_expand_sse_pcmpestr (d, exp, target);
24274 for (i = 0, d = bdesc_pcmpistr;
24275 i < ARRAY_SIZE (bdesc_pcmpistr);
24276 i++, d++)
24277 if (d->code == fcode)
24278 return ix86_expand_sse_pcmpistr (d, exp, target);
24280 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
24281 if (d->code == fcode)
24282 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
24283 (enum ix86_builtin_func_type)
24284 d->flag, d->comparison);
24286 gcc_unreachable ();
24289 /* Returns a function decl for a vectorized version of the builtin function
24290 with builtin function code FN and the result vector type TYPE, or NULL_TREE
24291 if it is not available. */
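/* As a concrete illustration of the mapping below: a V2DF sqrt is
   vectorized using IX86_BUILTIN_SQRTPD and a V4SF sqrtf using
   IX86_BUILTIN_SQRTPS_NR, provided the element modes and vector widths
   of TYPE_OUT and TYPE_IN match the entries in the switch.  */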
24293 static tree
24294 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
24295 tree type_in)
24297 enum machine_mode in_mode, out_mode;
24298 int in_n, out_n;
24300 if (TREE_CODE (type_out) != VECTOR_TYPE
24301 || TREE_CODE (type_in) != VECTOR_TYPE)
24302 return NULL_TREE;
24304 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24305 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24306 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24307 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24309 switch (fn)
24311 case BUILT_IN_SQRT:
24312 if (out_mode == DFmode && out_n == 2
24313 && in_mode == DFmode && in_n == 2)
24314 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24315 break;
24317 case BUILT_IN_SQRTF:
24318 if (out_mode == SFmode && out_n == 4
24319 && in_mode == SFmode && in_n == 4)
24320 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24321 break;
24323 case BUILT_IN_LRINT:
24324 if (out_mode == SImode && out_n == 4
24325 && in_mode == DFmode && in_n == 2)
24326 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24327 break;
24329 case BUILT_IN_LRINTF:
24330 if (out_mode == SImode && out_n == 4
24331 && in_mode == SFmode && in_n == 4)
24332 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24333 break;
24335 case BUILT_IN_COPYSIGN:
24336 if (out_mode == DFmode && out_n == 2
24337 && in_mode == DFmode && in_n == 2)
24338 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24339 break;
24341 case BUILT_IN_COPYSIGNF:
24342 if (out_mode == SFmode && out_n == 4
24343 && in_mode == SFmode && in_n == 4)
24344 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24345 break;
24347 default:
24351 /* Dispatch to a handler for a vectorization library. */
24352 if (ix86_veclib_handler)
24353 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24354 type_in);
24356 return NULL_TREE;
24359 /* Handler for an SVML-style interface to
24360 a library with vectorized intrinsics. */
24362 static tree
24363 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24365 char name[20];
24366 tree fntype, new_fndecl, args;
24367 unsigned arity;
24368 const char *bname;
24369 enum machine_mode el_mode, in_mode;
24370 int n, in_n;
24372 /* The SVML is suitable for unsafe math only. */
24373 if (!flag_unsafe_math_optimizations)
24374 return NULL_TREE;
24376 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24377 n = TYPE_VECTOR_SUBPARTS (type_out);
24378 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24379 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24380 if (el_mode != in_mode
24381 || n != in_n)
24382 return NULL_TREE;
24384 switch (fn)
24386 case BUILT_IN_EXP:
24387 case BUILT_IN_LOG:
24388 case BUILT_IN_LOG10:
24389 case BUILT_IN_POW:
24390 case BUILT_IN_TANH:
24391 case BUILT_IN_TAN:
24392 case BUILT_IN_ATAN:
24393 case BUILT_IN_ATAN2:
24394 case BUILT_IN_ATANH:
24395 case BUILT_IN_CBRT:
24396 case BUILT_IN_SINH:
24397 case BUILT_IN_SIN:
24398 case BUILT_IN_ASINH:
24399 case BUILT_IN_ASIN:
24400 case BUILT_IN_COSH:
24401 case BUILT_IN_COS:
24402 case BUILT_IN_ACOSH:
24403 case BUILT_IN_ACOS:
24404 if (el_mode != DFmode || n != 2)
24405 return NULL_TREE;
24406 break;
24408 case BUILT_IN_EXPF:
24409 case BUILT_IN_LOGF:
24410 case BUILT_IN_LOG10F:
24411 case BUILT_IN_POWF:
24412 case BUILT_IN_TANHF:
24413 case BUILT_IN_TANF:
24414 case BUILT_IN_ATANF:
24415 case BUILT_IN_ATAN2F:
24416 case BUILT_IN_ATANHF:
24417 case BUILT_IN_CBRTF:
24418 case BUILT_IN_SINHF:
24419 case BUILT_IN_SINF:
24420 case BUILT_IN_ASINHF:
24421 case BUILT_IN_ASINF:
24422 case BUILT_IN_COSHF:
24423 case BUILT_IN_COSF:
24424 case BUILT_IN_ACOSHF:
24425 case BUILT_IN_ACOSF:
24426 if (el_mode != SFmode || n != 4)
24427 return NULL_TREE;
24428 break;
24430 default:
24431 return NULL_TREE;
24434 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24436 if (fn == BUILT_IN_LOGF)
24437 strcpy (name, "vmlsLn4");
24438 else if (fn == BUILT_IN_LOG)
24439 strcpy (name, "vmldLn2");
24440 else if (n == 4)
24442 sprintf (name, "vmls%s", bname+10);
24443 name[strlen (name)-1] = '4';
24445 else
24446 sprintf (name, "vmld%s2", bname+10);
24448 /* Convert to uppercase. */
24449 name[4] &= ~0x20;
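/* For instance, BUILT_IN_SINF ("__builtin_sinf", SFmode x 4) yields
   "vmlsSin4", while BUILT_IN_SIN (DFmode x 2) yields "vmldSin2";
   BUILT_IN_LOGF and BUILT_IN_LOG are special-cased above as "vmlsLn4"
   and "vmldLn2".  */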
24451 arity = 0;
24452 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24453 args = TREE_CHAIN (args))
24454 arity++;
24456 if (arity == 1)
24457 fntype = build_function_type_list (type_out, type_in, NULL);
24458 else
24459 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24461 /* Build a function declaration for the vectorized function. */
24462 new_fndecl = build_decl (BUILTINS_LOCATION,
24463 FUNCTION_DECL, get_identifier (name), fntype);
24464 TREE_PUBLIC (new_fndecl) = 1;
24465 DECL_EXTERNAL (new_fndecl) = 1;
24466 DECL_IS_NOVOPS (new_fndecl) = 1;
24467 TREE_READONLY (new_fndecl) = 1;
24469 return new_fndecl;
24472 /* Handler for an ACML-style interface to
24473 a library with vectorized intrinsics. */
24475 static tree
24476 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24478 char name[20] = "__vr.._";
24479 tree fntype, new_fndecl, args;
24480 unsigned arity;
24481 const char *bname;
24482 enum machine_mode el_mode, in_mode;
24483 int n, in_n;
24485 /* The ACML is 64-bit only and suitable for unsafe math only, as
24486 it does not correctly support parts of IEEE with the required
24487 precision, such as denormals. */
24488 if (!TARGET_64BIT
24489 || !flag_unsafe_math_optimizations)
24490 return NULL_TREE;
24492 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24493 n = TYPE_VECTOR_SUBPARTS (type_out);
24494 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24495 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24496 if (el_mode != in_mode
24497 || n != in_n)
24498 return NULL_TREE;
24500 switch (fn)
24502 case BUILT_IN_SIN:
24503 case BUILT_IN_COS:
24504 case BUILT_IN_EXP:
24505 case BUILT_IN_LOG:
24506 case BUILT_IN_LOG2:
24507 case BUILT_IN_LOG10:
24508 name[4] = 'd';
24509 name[5] = '2';
24510 if (el_mode != DFmode
24511 || n != 2)
24512 return NULL_TREE;
24513 break;
24515 case BUILT_IN_SINF:
24516 case BUILT_IN_COSF:
24517 case BUILT_IN_EXPF:
24518 case BUILT_IN_POWF:
24519 case BUILT_IN_LOGF:
24520 case BUILT_IN_LOG2F:
24521 case BUILT_IN_LOG10F:
24522 name[4] = 's';
24523 name[5] = '4';
24524 if (el_mode != SFmode
24525 || n != 4)
24526 return NULL_TREE;
24527 break;
24529 default:
24530 return NULL_TREE;
24533 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24534 sprintf (name + 7, "%s", bname+10);
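/* For instance, BUILT_IN_LOG2 (DFmode x 2) yields "__vrd2_log2" and
   BUILT_IN_SINF (SFmode x 4) yields "__vrs4_sinf".  */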
24536 arity = 0;
24537 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24538 args = TREE_CHAIN (args))
24539 arity++;
24541 if (arity == 1)
24542 fntype = build_function_type_list (type_out, type_in, NULL);
24543 else
24544 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24546 /* Build a function declaration for the vectorized function. */
24547 new_fndecl = build_decl (BUILTINS_LOCATION,
24548 FUNCTION_DECL, get_identifier (name), fntype);
24549 TREE_PUBLIC (new_fndecl) = 1;
24550 DECL_EXTERNAL (new_fndecl) = 1;
24551 DECL_IS_NOVOPS (new_fndecl) = 1;
24552 TREE_READONLY (new_fndecl) = 1;
24554 return new_fndecl;
24558 /* Returns a decl of a function that implements conversion of an integer vector
24559 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24560 side of the conversion.
24561 Return NULL_TREE if it is not available. */
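/* For example, a signed V4SI FLOAT_EXPR is handled by
   IX86_BUILTIN_CVTDQ2PS (IX86_BUILTIN_CVTUDQ2PS for unsigned), and a
   signed V4SI FIX_TRUNC_EXPR by IX86_BUILTIN_CVTTPS2DQ; the unsigned
   truncating conversion has no direct builtin here and returns NULL_TREE.  */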
24563 static tree
24564 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24566 if (! (TARGET_SSE2 && TREE_CODE (type) == VECTOR_TYPE))
24567 return NULL_TREE;
24569 switch (code)
24571 case FLOAT_EXPR:
24572 switch (TYPE_MODE (type))
24574 case V4SImode:
24575 return TYPE_UNSIGNED (type)
24576 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24577 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24578 default:
24579 return NULL_TREE;
24582 case FIX_TRUNC_EXPR:
24583 switch (TYPE_MODE (type))
24585 case V4SImode:
24586 return TYPE_UNSIGNED (type)
24587 ? NULL_TREE
24588 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24589 default:
24590 return NULL_TREE;
24592 default:
24593 return NULL_TREE;
24598 /* Returns a decl for a target-specific builtin that implements the
24599 reciprocal of the function, or NULL_TREE if not available. */
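/* For example, under fast-math style flags (finite-only, non-trapping,
   unsafe math) and when optimizing for speed with SSE math, BUILT_IN_SQRTF
   maps to IX86_BUILTIN_RSQRTF, and the vectorized IX86_BUILTIN_SQRTPS_NR
   maps to IX86_BUILTIN_RSQRTPS_NR.  */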
24601 static tree
24602 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24603 bool sqrt ATTRIBUTE_UNUSED)
24605 if (! (TARGET_SSE_MATH && !optimize_insn_for_size_p ()
24606 && flag_finite_math_only && !flag_trapping_math
24607 && flag_unsafe_math_optimizations))
24608 return NULL_TREE;
24610 if (md_fn)
24611 /* Machine dependent builtins. */
24612 switch (fn)
24614 /* Vectorized version of sqrt to rsqrt conversion. */
24615 case IX86_BUILTIN_SQRTPS_NR:
24616 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24618 default:
24619 return NULL_TREE;
24621 else
24622 /* Normal builtins. */
24623 switch (fn)
24625 /* Sqrt to rsqrt conversion. */
24626 case BUILT_IN_SQRTF:
24627 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24629 default:
24630 return NULL_TREE;
24634 /* Helper for avx_vpermilps256_operand et al. This is also used by
24635 the expansion functions to turn the parallel back into a mask.
24636 The return value is 0 for no match and the imm8+1 for a match. */
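/* A worked example for the V4SFmode case below: the parallel
   [1 0 3 2] packs as mask = 1<<0 | 0<<2 | 3<<4 | 2<<6 = 0xb1,
   so the function returns 0xb2 (imm8 0xb1 plus one).  */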
24639 avx_vpermilp_parallel (rtx par, enum machine_mode mode)
24641 unsigned i, nelt = GET_MODE_NUNITS (mode);
24642 unsigned mask = 0;
24643 unsigned char ipar[8];
24645 if (XVECLEN (par, 0) != (int) nelt)
24646 return 0;
24648 /* Validate that all of the elements are constants, and not totally
24649 out of range. Copy the data into an integral array to make the
24650 subsequent checks easier. */
24651 for (i = 0; i < nelt; ++i)
24653 rtx er = XVECEXP (par, 0, i);
24654 unsigned HOST_WIDE_INT ei;
24656 if (!CONST_INT_P (er))
24657 return 0;
24658 ei = INTVAL (er);
24659 if (ei >= 2 * nelt)
24660 return 0;
24661 ipar[i] = ei;
24664 switch (mode)
24666 case V4DFmode:
24667 /* In the 256-bit DFmode case, we can only move elements within
24668 a 128-bit lane. */
24669 for (i = 0; i < 2; ++i)
24671 if (ipar[i] >= 2)
24672 return 0;
24673 mask |= ipar[i] << i;
24675 for (i = 2; i < 4; ++i)
24677 if (ipar[i] < 2)
24678 return 0;
24679 mask |= (ipar[i] - 2) << i;
24681 break;
24683 case V8SFmode:
24684 /* In the 256-bit SFmode case, we have full freedom of movement
24685 within the low 128-bit lane, but the high 128-bit lane must
24686 mirror the exact same pattern. */
24687 for (i = 0; i < 4; ++i)
24688 if (ipar[i] + 4 != ipar[i + 4])
24689 return 0;
24690 nelt = 4;
24691 /* FALLTHRU */
24693 case V2DFmode:
24694 case V4SFmode:
24695 /* In the 128-bit case, we have full freedom in the placement of
24696 the elements from the source operand. */
24697 for (i = 0; i < nelt; ++i)
24698 mask |= ipar[i] << (i * (nelt / 2));
24699 break;
24701 default:
24702 gcc_unreachable ();
24705 /* Make sure success has a non-zero value by adding one. */
24706 return mask + 1;
24709 /* Helper for avx_vperm2f128_v4df_operand et al. This is also used by
24710 the expansion functions to turn the parallel back into a mask.
24711 The return value is 0 for no match and the imm8+1 for a match. */
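/* A worked example for V8SFmode: the parallel [4 5 6 7 0 1 2 3]
   selects the high lane for the low half (4 / nelt2 = 1) and the low
   lane for the high half (0 / nelt2 = 0), giving mask = 0x01 and a
   return value of 0x02.  */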
24714 avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
24716 unsigned i, nelt = GET_MODE_NUNITS (mode), nelt2 = nelt / 2;
24717 unsigned mask = 0;
24718 unsigned char ipar[8];
24720 if (XVECLEN (par, 0) != (int) nelt)
24721 return 0;
24723 /* Validate that all of the elements are constants, and not totally
24724 out of range. Copy the data into an integral array to make the
24725 subsequent checks easier. */
24726 for (i = 0; i < nelt; ++i)
24728 rtx er = XVECEXP (par, 0, i);
24729 unsigned HOST_WIDE_INT ei;
24731 if (!CONST_INT_P (er))
24732 return 0;
24733 ei = INTVAL (er);
24734 if (ei >= 2 * nelt)
24735 return 0;
24736 ipar[i] = ei;
24739 /* Validate that each half of the permute selects consecutive elements. */
24740 for (i = 0; i < nelt2 - 1; ++i)
24741 if (ipar[i] + 1 != ipar[i + 1])
24742 return 0;
24743 for (i = nelt2; i < nelt - 1; ++i)
24744 if (ipar[i] + 1 != ipar[i + 1])
24745 return 0;
24747 /* Reconstruct the mask. */
24748 for (i = 0; i < 2; ++i)
24750 unsigned e = ipar[i * nelt2];
24751 if (e % nelt2)
24752 return 0;
24753 e /= nelt2;
24754 mask |= e << (i * 4);
24757 /* Make sure success has a non-zero value by adding one. */
24758 return mask + 1;
24762 /* Store OPERAND to memory after reload is completed. This means
24763 that we can't easily use assign_stack_local. */
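/* Roughly, three strategies are used below: with a usable red zone the
   operand is stored just below the stack pointer without adjusting it;
   on other 64-bit targets it is pushed with a DImode pre-decrement; and
   on 32-bit targets it is pushed as one or two SImode words (DImode is
   split).  */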
24765 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24767 rtx result;
24769 gcc_assert (reload_completed);
24770 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24772 result = gen_rtx_MEM (mode,
24773 gen_rtx_PLUS (Pmode,
24774 stack_pointer_rtx,
24775 GEN_INT (-RED_ZONE_SIZE)));
24776 emit_move_insn (result, operand);
24778 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24780 switch (mode)
24782 case HImode:
24783 case SImode:
24784 operand = gen_lowpart (DImode, operand);
24785 /* FALLTHRU */
24786 case DImode:
24787 emit_insn (
24788 gen_rtx_SET (VOIDmode,
24789 gen_rtx_MEM (DImode,
24790 gen_rtx_PRE_DEC (DImode,
24791 stack_pointer_rtx)),
24792 operand));
24793 break;
24794 default:
24795 gcc_unreachable ();
24797 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24799 else
24801 switch (mode)
24803 case DImode:
24805 rtx operands[2];
24806 split_di (&operand, 1, operands, operands + 1);
24807 emit_insn (
24808 gen_rtx_SET (VOIDmode,
24809 gen_rtx_MEM (SImode,
24810 gen_rtx_PRE_DEC (Pmode,
24811 stack_pointer_rtx)),
24812 operands[1]));
24813 emit_insn (
24814 gen_rtx_SET (VOIDmode,
24815 gen_rtx_MEM (SImode,
24816 gen_rtx_PRE_DEC (Pmode,
24817 stack_pointer_rtx)),
24818 operands[0]));
24820 break;
24821 case HImode:
24822 /* Store HImodes as SImodes. */
24823 operand = gen_lowpart (SImode, operand);
24824 /* FALLTHRU */
24825 case SImode:
24826 emit_insn (
24827 gen_rtx_SET (VOIDmode,
24828 gen_rtx_MEM (GET_MODE (operand),
24829 gen_rtx_PRE_DEC (SImode,
24830 stack_pointer_rtx)),
24831 operand));
24832 break;
24833 default:
24834 gcc_unreachable ();
24836 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24838 return result;
24841 /* Free the operand from memory. */
24842 void
24843 ix86_free_from_memory (enum machine_mode mode)
24845 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24847 int size;
24849 if (mode == DImode || TARGET_64BIT)
24850 size = 8;
24851 else
24852 size = 4;
24853 /* Use LEA to deallocate stack space. In peephole2 it will be converted
24854 to a pop or add instruction if registers are available. */
24855 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24856 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24857 GEN_INT (size))));
24861 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
24862 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
24863 same. */
24864 static const enum reg_class *
24865 i386_ira_cover_classes (void)
24867 static const enum reg_class sse_fpmath_classes[] = {
24868 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
24870 static const enum reg_class no_sse_fpmath_classes[] = {
24871 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
24874 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
24877 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
24878 QImode must go into class Q_REGS.
24879 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
24880 movdf to do mem-to-mem moves through integer regs. */
24881 enum reg_class
24882 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
24884 enum machine_mode mode = GET_MODE (x);
24886 /* We're only allowed to return a subclass of CLASS. Many of the
24887 following checks fail for NO_REGS, so eliminate that early. */
24888 if (regclass == NO_REGS)
24889 return NO_REGS;
24891 /* All classes can load zeros. */
24892 if (x == CONST0_RTX (mode))
24893 return regclass;
24895 /* Force constants into memory if we are loading a (nonzero) constant into
24896 an MMX or SSE register. This is because there are no MMX/SSE instructions
24897 to load from a constant. */
24898 if (CONSTANT_P (x)
24899 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
24900 return NO_REGS;
24902 /* Prefer SSE regs only, if we can use them for math. */
24903 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
24904 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
24906 /* Floating-point constants need more complex checks. */
24907 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
24909 /* General regs can load everything. */
24910 if (reg_class_subset_p (regclass, GENERAL_REGS))
24911 return regclass;
24913 /* Floats can load 0 and 1 plus some others. Note that we eliminated
24914 zero above. We only want to wind up preferring 80387 registers if
24915 we plan on doing computation with them. */
24916 if (TARGET_80387
24917 && standard_80387_constant_p (x))
24919 /* Limit class to non-sse. */
24920 if (regclass == FLOAT_SSE_REGS)
24921 return FLOAT_REGS;
24922 if (regclass == FP_TOP_SSE_REGS)
24923 return FP_TOP_REG;
24924 if (regclass == FP_SECOND_SSE_REGS)
24925 return FP_SECOND_REG;
24926 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
24927 return regclass;
24930 return NO_REGS;
24933 /* Generally when we see PLUS here, it's the function invariant
24934 (plus soft-fp const_int), which can only be computed into general
24935 regs. */
24936 if (GET_CODE (x) == PLUS)
24937 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
24939 /* QImode constants are easy to load, but non-constant QImode data
24940 must go into Q_REGS. */
24941 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
24943 if (reg_class_subset_p (regclass, Q_REGS))
24944 return regclass;
24945 if (reg_class_subset_p (Q_REGS, regclass))
24946 return Q_REGS;
24947 return NO_REGS;
24950 return regclass;
24953 /* Discourage putting floating-point values in SSE registers unless
24954 SSE math is being used, and likewise for the 387 registers. */
24955 enum reg_class
24956 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
24958 enum machine_mode mode = GET_MODE (x);
24960 /* Restrict the output reload class to the register bank that we are doing
24961 math on. If we would like not to return a subset of CLASS, reject this
24962 alternative: if reload cannot do this, it will still use its choice. */
24963 mode = GET_MODE (x);
24964 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
24965 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
24967 if (X87_FLOAT_MODE_P (mode))
24969 if (regclass == FP_TOP_SSE_REGS)
24970 return FP_TOP_REG;
24971 else if (regclass == FP_SECOND_SSE_REGS)
24972 return FP_SECOND_REG;
24973 else
24974 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
24977 return regclass;
24980 static enum reg_class
24981 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
24982 enum machine_mode mode,
24983 secondary_reload_info *sri ATTRIBUTE_UNUSED)
24985 /* QImode spills from non-QI registers require
24986 an intermediate register on 32-bit targets. */
24987 if (!in_p && mode == QImode && !TARGET_64BIT
24988 && (rclass == GENERAL_REGS
24989 || rclass == LEGACY_REGS
24990 || rclass == INDEX_REGS))
24992 int regno;
24994 if (REG_P (x))
24995 regno = REGNO (x);
24996 else
24997 regno = -1;
24999 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25000 regno = true_regnum (x);
25002 /* Return Q_REGS if the operand is in memory. */
25003 if (regno == -1)
25004 return Q_REGS;
25007 return NO_REGS;
25010 /* If we are copying between general and FP registers, we need a memory
25011 location. The same is true for SSE and MMX registers.
25013 To optimize register_move_cost performance, allow an inline variant.
25015 The macro can't work reliably when one of the CLASSES is a class containing
25016 registers from multiple units (SSE, MMX, integer). We avoid this by never
25017 combining those units in a single alternative in the machine description.
25018 Ensure that this constraint holds to avoid unexpected surprises.
25020 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25021 enforce these sanity checks. */
25023 static inline int
25024 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25025 enum machine_mode mode, int strict)
25027 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25028 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25029 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25030 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25031 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25032 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25034 gcc_assert (!strict);
25035 return true;
25038 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25039 return true;
25041 /* ??? This is a lie. We do have moves between mmx/general, and for
25042 mmx/sse2. But by saying we need secondary memory we discourage the
25043 register allocator from using the mmx registers unless needed. */
25044 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25045 return true;
25047 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25049 /* SSE1 doesn't have any direct moves from other classes. */
25050 if (!TARGET_SSE2)
25051 return true;
25053 /* If the target says that inter-unit moves are more expensive
25054 than moving through memory, then don't generate them. */
25055 if (!TARGET_INTER_UNIT_MOVES)
25056 return true;
25058 /* Between SSE and general, we have moves no larger than word size. */
25059 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25060 return true;
25063 return false;
25067 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25068 enum machine_mode mode, int strict)
25070 return inline_secondary_memory_needed (class1, class2, mode, strict);
25073 /* Return true if the registers in CLASS cannot represent the change from
25074 modes FROM to TO. */
25076 bool
25077 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25078 enum reg_class regclass)
25080 if (from == to)
25081 return false;
25083 /* x87 registers can't do subreg at all, as all values are reformatted
25084 to extended precision. */
25085 if (MAYBE_FLOAT_CLASS_P (regclass))
25086 return true;
25088 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25090 /* Vector registers do not support QI or HImode loads. If we don't
25091 disallow a change to these modes, reload will assume it's ok to
25092 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25093 the vec_dupv4hi pattern. */
25094 if (GET_MODE_SIZE (from) < 4)
25095 return true;
25097 /* Vector registers do not support subreg with nonzero offsets, which
25098 are otherwise valid for integer registers. Since we can't see
25099 whether we have a nonzero offset from here, prohibit all
25100 nonparadoxical subregs changing size. */
25101 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25102 return true;
25105 return false;
25108 /* Return the cost of moving data of mode M between a
25109 register and memory. A value of 2 is the default; this cost is
25110 relative to those in `REGISTER_MOVE_COST'.
25112 This function is used extensively by register_move_cost, which is used to
25113 build tables at startup. Make it inline in this case.
25114 When IN is 2, return the maximum of the in and out move costs.
25116 If moving between registers and memory is more expensive than
25117 between two registers, you should define this macro to express the
25118 relative cost.
25120 Also model the increased cost of moving QImode registers in non
25121 Q_REGS classes.
25123 static inline int
25124 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25125 int in)
25127 int cost;
25128 if (FLOAT_CLASS_P (regclass))
25130 int index;
25131 switch (mode)
25133 case SFmode:
25134 index = 0;
25135 break;
25136 case DFmode:
25137 index = 1;
25138 break;
25139 case XFmode:
25140 index = 2;
25141 break;
25142 default:
25143 return 100;
25145 if (in == 2)
25146 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25147 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25149 if (SSE_CLASS_P (regclass))
25151 int index;
25152 switch (GET_MODE_SIZE (mode))
25154 case 4:
25155 index = 0;
25156 break;
25157 case 8:
25158 index = 1;
25159 break;
25160 case 16:
25161 index = 2;
25162 break;
25163 default:
25164 return 100;
25166 if (in == 2)
25167 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25168 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25170 if (MMX_CLASS_P (regclass))
25172 int index;
25173 switch (GET_MODE_SIZE (mode))
25175 case 4:
25176 index = 0;
25177 break;
25178 case 8:
25179 index = 1;
25180 break;
25181 default:
25182 return 100;
25184 if (in == 2)
25185 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25186 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25188 switch (GET_MODE_SIZE (mode))
25190 case 1:
25191 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25193 if (!in)
25194 return ix86_cost->int_store[0];
25195 if (TARGET_PARTIAL_REG_DEPENDENCY
25196 && optimize_function_for_speed_p (cfun))
25197 cost = ix86_cost->movzbl_load;
25198 else
25199 cost = ix86_cost->int_load[0];
25200 if (in == 2)
25201 return MAX (cost, ix86_cost->int_store[0]);
25202 return cost;
25204 else
25206 if (in == 2)
25207 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25208 if (in)
25209 return ix86_cost->movzbl_load;
25210 else
25211 return ix86_cost->int_store[0] + 4;
25213 break;
25214 case 2:
25215 if (in == 2)
25216 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25217 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25218 default:
25219 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
25220 if (mode == TFmode)
25221 mode = XFmode;
25222 if (in == 2)
25223 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25224 else if (in)
25225 cost = ix86_cost->int_load[2];
25226 else
25227 cost = ix86_cost->int_store[2];
25228 return (cost * (((int) GET_MODE_SIZE (mode)
25229 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25234 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25236 return inline_memory_move_cost (mode, regclass, in);
25240 /* Return the cost of moving data from a register in class CLASS1 to
25241 one in class CLASS2.
25243 It is not required that the cost always equal 2 when FROM is the same as TO;
25244 on some machines it is expensive to move between registers if they are not
25245 general registers. */
25248 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25249 enum reg_class class2)
25251 /* In case we require secondary memory, compute the cost of the store followed
25252 by the load. In order to avoid bad register allocation choices, we need
25253 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25255 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25257 int cost = 1;
25259 cost += inline_memory_move_cost (mode, class1, 2);
25260 cost += inline_memory_move_cost (mode, class2, 2);
25262 /* In the case of copying from a general purpose register we may emit multiple
25263 stores followed by a single load, causing a memory size mismatch stall.
25264 Count this as an arbitrarily high cost of 20. */
25265 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25266 cost += 20;
25268 /* In the case of FP/MMX moves, the registers actually overlap, and we
25269 have to switch modes in order to treat them differently. */
25270 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25271 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25272 cost += 20;
25274 return cost;
25277 /* Moves between SSE/MMX and integer unit are expensive. */
25278 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25279 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25281 /* ??? By keeping the returned value relatively high, we limit the number
25282 of moves between integer and MMX/SSE registers for all targets.
25283 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
25284 where integer modes in MMX/SSE registers are not tieable
25285 because of missing QImode and HImode moves to, from or between
25286 MMX/SSE registers. */
25287 return MAX (8, ix86_cost->mmxsse_to_integer);
25289 if (MAYBE_FLOAT_CLASS_P (class1))
25290 return ix86_cost->fp_move;
25291 if (MAYBE_SSE_CLASS_P (class1))
25292 return ix86_cost->sse_move;
25293 if (MAYBE_MMX_CLASS_P (class1))
25294 return ix86_cost->mmx_move;
25295 return 2;
25298 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25300 bool
25301 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25303 /* The flags register can hold only CCmode values, and only the flags register can hold them. */
25304 if (CC_REGNO_P (regno))
25305 return GET_MODE_CLASS (mode) == MODE_CC;
25306 if (GET_MODE_CLASS (mode) == MODE_CC
25307 || GET_MODE_CLASS (mode) == MODE_RANDOM
25308 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25309 return 0;
25310 if (FP_REGNO_P (regno))
25311 return VALID_FP_MODE_P (mode);
25312 if (SSE_REGNO_P (regno))
25314 /* We implement the move patterns for all vector modes into and
25315 out of SSE registers, even when no operation instructions
25316 are available. OImode move is available only when AVX is
25317 enabled. */
25318 return ((TARGET_AVX && mode == OImode)
25319 || VALID_AVX256_REG_MODE (mode)
25320 || VALID_SSE_REG_MODE (mode)
25321 || VALID_SSE2_REG_MODE (mode)
25322 || VALID_MMX_REG_MODE (mode)
25323 || VALID_MMX_REG_MODE_3DNOW (mode));
25325 if (MMX_REGNO_P (regno))
25327 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25328 so if the register is available at all, then we can move data of
25329 the given mode into or out of it. */
25330 return (VALID_MMX_REG_MODE (mode)
25331 || VALID_MMX_REG_MODE_3DNOW (mode));
25334 if (mode == QImode)
25336 /* Take care with QImode values - they can live in non-QI regs,
25337 but then they can cause partial register stalls. */
25338 if (regno <= BX_REG || TARGET_64BIT)
25339 return 1;
25340 if (!TARGET_PARTIAL_REG_STALL)
25341 return 1;
25342 return reload_in_progress || reload_completed;
25344 /* We handle both integers and floats in the general purpose registers. */
25345 else if (VALID_INT_MODE_P (mode))
25346 return 1;
25347 else if (VALID_FP_MODE_P (mode))
25348 return 1;
25349 else if (VALID_DFP_MODE_P (mode))
25350 return 1;
25351 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25352 on to use that value in smaller contexts, this can easily force a
25353 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25354 supporting DImode, allow it. */
25355 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25356 return 1;
25358 return 0;
25361 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25362 tieable integer mode. */
25364 static bool
25365 ix86_tieable_integer_mode_p (enum machine_mode mode)
25367 switch (mode)
25369 case HImode:
25370 case SImode:
25371 return true;
25373 case QImode:
25374 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25376 case DImode:
25377 return TARGET_64BIT;
25379 default:
25380 return false;
25384 /* Return true if MODE1 is accessible in a register that can hold MODE2
25385 without copying. That is, all register classes that can hold MODE2
25386 can also hold MODE1. */
25388 bool
25389 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25391 if (mode1 == mode2)
25392 return true;
25394 if (ix86_tieable_integer_mode_p (mode1)
25395 && ix86_tieable_integer_mode_p (mode2))
25396 return true;
25398 /* MODE2 being XFmode implies fp stack or general regs, which means we
25399 can tie any smaller floating point modes to it. Note that we do not
25400 tie this with TFmode. */
25401 if (mode2 == XFmode)
25402 return mode1 == SFmode || mode1 == DFmode;
25404 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25405 that we can tie it with SFmode. */
25406 if (mode2 == DFmode)
25407 return mode1 == SFmode;
25409 /* If MODE2 is only appropriate for an SSE register, then tie with
25410 any other mode acceptable to SSE registers. */
25411 if (GET_MODE_SIZE (mode2) == 16
25412 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25413 return (GET_MODE_SIZE (mode1) == 16
25414 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25416 /* If MODE2 is appropriate for an MMX register, then tie
25417 with any other mode acceptable to MMX registers. */
25418 if (GET_MODE_SIZE (mode2) == 8
25419 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25420 return (GET_MODE_SIZE (mode1) == 8
25421 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25423 return false;
25426 /* Compute a (partial) cost for rtx X. Return true if the complete
25427 cost has been computed, and false if subexpressions should be
25428 scanned. In either case, *TOTAL contains the cost result. */
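/* For instance, in the ASHIFT case below a shift by 2 or 3 may be
   accounted as an LEA when cost->lea <= cost->shift_const, and on
   x86_64 a SImode-to-DImode ZERO_EXTEND is given a nominal cost of 1
   because it is usually free.  */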
25430 static bool
25431 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25433 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25434 enum machine_mode mode = GET_MODE (x);
25435 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25437 switch (code)
25439 case CONST_INT:
25440 case CONST:
25441 case LABEL_REF:
25442 case SYMBOL_REF:
25443 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25444 *total = 3;
25445 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25446 *total = 2;
25447 else if (flag_pic && SYMBOLIC_CONST (x)
25448 && (!TARGET_64BIT
25449 || (GET_CODE (x) != LABEL_REF
25450 && (GET_CODE (x) != SYMBOL_REF
25451 || !SYMBOL_REF_LOCAL_P (x)))))
25452 *total = 1;
25453 else
25454 *total = 0;
25455 return true;
25457 case CONST_DOUBLE:
25458 if (mode == VOIDmode)
25459 *total = 0;
25460 else
25461 switch (standard_80387_constant_p (x))
25463 case 1: /* 0.0 */
25464 *total = 1;
25465 break;
25466 default: /* Other constants */
25467 *total = 2;
25468 break;
25469 case 0:
25470 case -1:
25471 /* Start with (MEM (SYMBOL_REF)), since that's where
25472 it'll probably end up. Add a penalty for size. */
25473 *total = (COSTS_N_INSNS (1)
25474 + (flag_pic != 0 && !TARGET_64BIT)
25475 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25476 break;
25478 return true;
25480 case ZERO_EXTEND:
25481 /* Zero extension is often completely free on x86_64, so make
25482 it as cheap as possible. */
25483 if (TARGET_64BIT && mode == DImode
25484 && GET_MODE (XEXP (x, 0)) == SImode)
25485 *total = 1;
25486 else if (TARGET_ZERO_EXTEND_WITH_AND)
25487 *total = cost->add;
25488 else
25489 *total = cost->movzx;
25490 return false;
25492 case SIGN_EXTEND:
25493 *total = cost->movsx;
25494 return false;
25496 case ASHIFT:
25497 if (CONST_INT_P (XEXP (x, 1))
25498 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25500 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25501 if (value == 1)
25503 *total = cost->add;
25504 return false;
25506 if ((value == 2 || value == 3)
25507 && cost->lea <= cost->shift_const)
25509 *total = cost->lea;
25510 return false;
25513 /* FALLTHRU */
25515 case ROTATE:
25516 case ASHIFTRT:
25517 case LSHIFTRT:
25518 case ROTATERT:
25519 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25521 if (CONST_INT_P (XEXP (x, 1)))
25523 if (INTVAL (XEXP (x, 1)) > 32)
25524 *total = cost->shift_const + COSTS_N_INSNS (2);
25525 else
25526 *total = cost->shift_const * 2;
25528 else
25530 if (GET_CODE (XEXP (x, 1)) == AND)
25531 *total = cost->shift_var * 2;
25532 else
25533 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25536 else
25538 if (CONST_INT_P (XEXP (x, 1)))
25539 *total = cost->shift_const;
25540 else
25541 *total = cost->shift_var;
25543 return false;
25545 case MULT:
25546 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25548 /* ??? SSE scalar cost should be used here. */
25549 *total = cost->fmul;
25550 return false;
25552 else if (X87_FLOAT_MODE_P (mode))
25554 *total = cost->fmul;
25555 return false;
25557 else if (FLOAT_MODE_P (mode))
25559 /* ??? SSE vector cost should be used here. */
25560 *total = cost->fmul;
25561 return false;
25563 else
25565 rtx op0 = XEXP (x, 0);
25566 rtx op1 = XEXP (x, 1);
25567 int nbits;
25568 if (CONST_INT_P (XEXP (x, 1)))
25570 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25571 for (nbits = 0; value != 0; value &= value - 1)
25572 nbits++;
25574 else
25575 /* This is arbitrary. */
25576 nbits = 7;
25578 /* Compute costs correctly for widening multiplication. */
25579 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25580 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25581 == GET_MODE_SIZE (mode))
25583 int is_mulwiden = 0;
25584 enum machine_mode inner_mode = GET_MODE (op0);
25586 if (GET_CODE (op0) == GET_CODE (op1))
25587 is_mulwiden = 1, op1 = XEXP (op1, 0);
25588 else if (CONST_INT_P (op1))
25590 if (GET_CODE (op0) == SIGN_EXTEND)
25591 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25592 == INTVAL (op1);
25593 else
25594 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25597 if (is_mulwiden)
25598 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25601 *total = (cost->mult_init[MODE_INDEX (mode)]
25602 + nbits * cost->mult_bit
25603 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25605 return true;
25608 case DIV:
25609 case UDIV:
25610 case MOD:
25611 case UMOD:
25612 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25613 /* ??? SSE cost should be used here. */
25614 *total = cost->fdiv;
25615 else if (X87_FLOAT_MODE_P (mode))
25616 *total = cost->fdiv;
25617 else if (FLOAT_MODE_P (mode))
25618 /* ??? SSE vector cost should be used here. */
25619 *total = cost->fdiv;
25620 else
25621 *total = cost->divide[MODE_INDEX (mode)];
25622 return false;
25624 case PLUS:
25625 if (GET_MODE_CLASS (mode) == MODE_INT
25626 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25628 if (GET_CODE (XEXP (x, 0)) == PLUS
25629 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25630 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25631 && CONSTANT_P (XEXP (x, 1)))
25633 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25634 if (val == 2 || val == 4 || val == 8)
25636 *total = cost->lea;
25637 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25638 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25639 outer_code, speed);
25640 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25641 return true;
25644 else if (GET_CODE (XEXP (x, 0)) == MULT
25645 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25647 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25648 if (val == 2 || val == 4 || val == 8)
25650 *total = cost->lea;
25651 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25652 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25653 return true;
25656 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25658 *total = cost->lea;
25659 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25660 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25661 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25662 return true;
25665 /* FALLTHRU */
25667 case MINUS:
25668 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25670 /* ??? SSE cost should be used here. */
25671 *total = cost->fadd;
25672 return false;
25674 else if (X87_FLOAT_MODE_P (mode))
25676 *total = cost->fadd;
25677 return false;
25679 else if (FLOAT_MODE_P (mode))
25681 /* ??? SSE vector cost should be used here. */
25682 *total = cost->fadd;
25683 return false;
25685 /* FALLTHRU */
25687 case AND:
25688 case IOR:
25689 case XOR:
25690 if (!TARGET_64BIT && mode == DImode)
25692 *total = (cost->add * 2
25693 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25694 << (GET_MODE (XEXP (x, 0)) != DImode))
25695 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25696 << (GET_MODE (XEXP (x, 1)) != DImode)));
25697 return true;
25699 /* FALLTHRU */
25701 case NEG:
25702 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25704 /* ??? SSE cost should be used here. */
25705 *total = cost->fchs;
25706 return false;
25708 else if (X87_FLOAT_MODE_P (mode))
25710 *total = cost->fchs;
25711 return false;
25713 else if (FLOAT_MODE_P (mode))
25715 /* ??? SSE vector cost should be used here. */
25716 *total = cost->fchs;
25717 return false;
25719 /* FALLTHRU */
25721 case NOT:
25722 if (!TARGET_64BIT && mode == DImode)
25723 *total = cost->add * 2;
25724 else
25725 *total = cost->add;
25726 return false;
25728 case COMPARE:
25729 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25730 && XEXP (XEXP (x, 0), 1) == const1_rtx
25731 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25732 && XEXP (x, 1) == const0_rtx)
25734 /* This kind of construct is implemented using test[bwl].
25735 Treat it as if we had an AND. */
25736 *total = (cost->add
25737 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25738 + rtx_cost (const1_rtx, outer_code, speed));
25739 return true;
25741 return false;
25743 case FLOAT_EXTEND:
25744 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25745 *total = 0;
25746 return false;
25748 case ABS:
25749 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25750 /* ??? SSE cost should be used here. */
25751 *total = cost->fabs;
25752 else if (X87_FLOAT_MODE_P (mode))
25753 *total = cost->fabs;
25754 else if (FLOAT_MODE_P (mode))
25755 /* ??? SSE vector cost should be used here. */
25756 *total = cost->fabs;
25757 return false;
25759 case SQRT:
25760 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25761 /* ??? SSE cost should be used here. */
25762 *total = cost->fsqrt;
25763 else if (X87_FLOAT_MODE_P (mode))
25764 *total = cost->fsqrt;
25765 else if (FLOAT_MODE_P (mode))
25766 /* ??? SSE vector cost should be used here. */
25767 *total = cost->fsqrt;
25768 return false;
25770 case UNSPEC:
25771 if (XINT (x, 1) == UNSPEC_TP)
25772 *total = 0;
25773 return false;
25775 case VEC_SELECT:
25776 case VEC_CONCAT:
25777 case VEC_MERGE:
25778 case VEC_DUPLICATE:
25779 /* ??? Assume all of these vector manipulation patterns are
25780 recognizable, in which case they all have pretty much the
25781 same cost. */
25782 *total = COSTS_N_INSNS (1);
25783 return true;
25785 default:
25786 return false;
25790 #if TARGET_MACHO
25792 static int current_machopic_label_num;
25794 /* Given a symbol name and its associated stub, write out the
25795 definition of the stub. */
25797 void
25798 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25800 unsigned int length;
25801 char *binder_name, *symbol_name, lazy_ptr_name[32];
25802 int label = ++current_machopic_label_num;
25804 /* For 64-bit we shouldn't get here. */
25805 gcc_assert (!TARGET_64BIT);
25807 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25808 symb = (*targetm.strip_name_encoding) (symb);
25810 length = strlen (stub);
25811 binder_name = XALLOCAVEC (char, length + 32);
25812 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
25814 length = strlen (symb);
25815 symbol_name = XALLOCAVEC (char, length + 32);
25816 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25818 sprintf (lazy_ptr_name, "L%d$lz", label);
25820 if (MACHOPIC_PURE)
25821 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
25822 else
25823 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
25825 fprintf (file, "%s:\n", stub);
25826 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25828 if (MACHOPIC_PURE)
25830 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
25831 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
25832 fprintf (file, "\tjmp\t*%%edx\n");
25834 else
25835 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
25837 fprintf (file, "%s:\n", binder_name);
25839 if (MACHOPIC_PURE)
25841 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
25842 fputs ("\tpushl\t%eax\n", file);
25844 else
25845 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
25847 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
25849 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25850 fprintf (file, "%s:\n", lazy_ptr_name);
25851 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25852 fprintf (file, ASM_LONG "%s\n", binder_name);
25855 void
25856 darwin_x86_file_end (void)
25858 darwin_file_end ();
25859 ix86_file_end ();
25861 #endif /* TARGET_MACHO */
25863 /* Order the registers for the register allocator. */
25865 void
25866 x86_order_regs_for_local_alloc (void)
25868 int pos = 0;
25869 int i;
25871 /* First allocate the local general purpose registers. */
25872 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25873 if (GENERAL_REGNO_P (i) && call_used_regs[i])
25874 reg_alloc_order [pos++] = i;
25876 /* Global general purpose registers. */
25877 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25878 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
25879 reg_alloc_order [pos++] = i;
25881 /* x87 registers come first in case we are doing FP math
25882 using them. */
25883 if (!TARGET_SSE_MATH)
25884 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25885 reg_alloc_order [pos++] = i;
25887 /* SSE registers. */
25888 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
25889 reg_alloc_order [pos++] = i;
25890 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
25891 reg_alloc_order [pos++] = i;
25893 /* x87 registers. */
25894 if (TARGET_SSE_MATH)
25895 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25896 reg_alloc_order [pos++] = i;
25898 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
25899 reg_alloc_order [pos++] = i;
25901 /* Initialize the rest of the array, as we do not allocate some registers
25902 at all. */
25903 while (pos < FIRST_PSEUDO_REGISTER)
25904 reg_alloc_order [pos++] = 0;
25907 /* Handle a "ms_abi" or "sysv" attribute; arguments as in
25908 struct attribute_spec.handler. */
25909 static tree
25910 ix86_handle_abi_attribute (tree *node, tree name,
25911 tree args ATTRIBUTE_UNUSED,
25912 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25914 if (TREE_CODE (*node) != FUNCTION_TYPE
25915 && TREE_CODE (*node) != METHOD_TYPE
25916 && TREE_CODE (*node) != FIELD_DECL
25917 && TREE_CODE (*node) != TYPE_DECL)
25919 warning (OPT_Wattributes, "%qE attribute only applies to functions",
25920 name);
25921 *no_add_attrs = true;
25922 return NULL_TREE;
25924 if (!TARGET_64BIT)
25926 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
25927 name);
25928 *no_add_attrs = true;
25929 return NULL_TREE;
25932 /* Can combine regparm with all attributes but fastcall. */
25933 if (is_attribute_p ("ms_abi", name))
25935 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
25937 error ("ms_abi and sysv_abi attributes are not compatible");
25940 return NULL_TREE;
25942 else if (is_attribute_p ("sysv_abi", name))
25944 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
25946 error ("ms_abi and sysv_abi attributes are not compatible");
25949 return NULL_TREE;
25952 return NULL_TREE;
25955 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
25956 struct attribute_spec.handler. */
25957 static tree
25958 ix86_handle_struct_attribute (tree *node, tree name,
25959 tree args ATTRIBUTE_UNUSED,
25960 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25962 tree *type = NULL;
25963 if (DECL_P (*node))
25965 if (TREE_CODE (*node) == TYPE_DECL)
25966 type = &TREE_TYPE (*node);
25968 else
25969 type = node;
25971 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
25972 || TREE_CODE (*type) == UNION_TYPE)))
25974 warning (OPT_Wattributes, "%qE attribute ignored",
25975 name);
25976 *no_add_attrs = true;
25979 else if ((is_attribute_p ("ms_struct", name)
25980 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
25981 || ((is_attribute_p ("gcc_struct", name)
25982 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
25984 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
25985 name);
25986 *no_add_attrs = true;
25989 return NULL_TREE;
25992 static tree
25993 ix86_handle_fndecl_attribute (tree *node, tree name,
25994 tree args ATTRIBUTE_UNUSED,
25995 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25997 if (TREE_CODE (*node) != FUNCTION_DECL)
25999 warning (OPT_Wattributes, "%qE attribute only applies to functions",
26000 name);
26001 *no_add_attrs = true;
26002 return NULL_TREE;
26005 if (TARGET_64BIT)
26007 warning (OPT_Wattributes, "%qE attribute only available for 32-bit",
26008 name);
26009 return NULL_TREE;
26012 #ifndef HAVE_AS_IX86_SWAP
26013 sorry ("ms_hook_prologue attribute needs assembler swap suffix support");
26014 #endif
26016 return NULL_TREE;
26019 static bool
26020 ix86_ms_bitfield_layout_p (const_tree record_type)
26022 return (TARGET_MS_BITFIELD_LAYOUT &&
26023 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26024 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26027 /* Returns an expression indicating where the this parameter is
26028 located on entry to the FUNCTION. */
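/* Roughly: on 64-bit targets this is the first integer argument
   register of the selected ABI (or the second one when a hidden
   aggregate-return pointer occupies the first); on 32-bit targets it
   defaults to a stack slot at 4(%esp) (8(%esp) with an aggregate
   return), unless regparm or fastcall put it in a register.  */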
26030 static rtx
26031 x86_this_parameter (tree function)
26033 tree type = TREE_TYPE (function);
26034 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26035 int nregs;
26037 if (TARGET_64BIT)
26039 const int *parm_regs;
26041 if (ix86_function_type_abi (type) == MS_ABI)
26042 parm_regs = x86_64_ms_abi_int_parameter_registers;
26043 else
26044 parm_regs = x86_64_int_parameter_registers;
26045 return gen_rtx_REG (DImode, parm_regs[aggr]);
26048 nregs = ix86_function_regparm (type, function);
26050 if (nregs > 0 && !stdarg_p (type))
26052 int regno;
26054 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26055 regno = aggr ? DX_REG : CX_REG;
26056 else
26058 regno = AX_REG;
26059 if (aggr)
26061 regno = DX_REG;
26062 if (nregs == 1)
26063 return gen_rtx_MEM (SImode,
26064 plus_constant (stack_pointer_rtx, 4));
26067 return gen_rtx_REG (SImode, regno);
26070 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26073 /* Determine whether x86_output_mi_thunk can succeed. */
26075 static bool
26076 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26077 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26078 HOST_WIDE_INT vcall_offset, const_tree function)
26080 /* 64-bit can handle anything. */
26081 if (TARGET_64BIT)
26082 return true;
26084 /* For 32-bit, everything's fine if we have one free register. */
26085 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26086 return true;
26088 /* Need a free register for vcall_offset. */
26089 if (vcall_offset)
26090 return false;
26092 /* Need a free register for GOT references. */
26093 if (flag_pic && !(*targetm.binds_local_p) (function))
26094 return false;
26096 /* Otherwise ok. */
26097 return true;
26100 /* Output the assembler code for a thunk function. THUNK_DECL is the
26101 declaration for the thunk function itself, FUNCTION is the decl for
26102 the target function. DELTA is an immediate constant offset to be
26103 added to THIS. If VCALL_OFFSET is nonzero, the word at
26104 *(*this + vcall_offset) should be added to THIS. */
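/* In effect the emitted thunk performs, before tail-calling FUNCTION:
     this += DELTA;
     if (VCALL_OFFSET)
       this += *(*(this) + VCALL_OFFSET);  */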
26106 static void
26107 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
26108 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26109 HOST_WIDE_INT vcall_offset, tree function)
26111 rtx xops[3];
26112 rtx this_param = x86_this_parameter (function);
26113 rtx this_reg, tmp;
26115 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26116 pull it in now and let DELTA benefit. */
26117 if (REG_P (this_param))
26118 this_reg = this_param;
26119 else if (vcall_offset)
26121 /* Put the this parameter into %eax. */
26122 xops[0] = this_param;
26123 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26124 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26126 else
26127 this_reg = NULL_RTX;
26129 /* Adjust the this parameter by a fixed constant. */
26130 if (delta)
26132 /* Make things pretty and `subl $4,%eax' rather than `addl $-4,%eax'.
26133 Exceptions: -128 encodes smaller than 128, so swap sign and op. */
26134 bool sub = delta < 0 || delta == 128;
26135 xops[0] = GEN_INT (sub ? -delta : delta);
26136 xops[1] = this_reg ? this_reg : this_param;
26137 if (TARGET_64BIT)
26139 if (!x86_64_general_operand (xops[0], DImode))
26141 tmp = gen_rtx_REG (DImode, R10_REG);
26142 xops[1] = tmp;
26143 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26144 xops[0] = tmp;
26145 xops[1] = this_param;
26147 if (sub)
26148 output_asm_insn ("sub{q}\t{%0, %1|%1, %0}", xops);
26149 else
26150 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26152 else if (sub)
26153 output_asm_insn ("sub{l}\t{%0, %1|%1, %0}", xops);
26154 else
26155 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26158 /* Adjust the this parameter by a value stored in the vtable. */
26159 if (vcall_offset)
26161 if (TARGET_64BIT)
26162 tmp = gen_rtx_REG (DImode, R10_REG);
26163 else
26165 int tmp_regno = CX_REG;
26166 if (lookup_attribute ("fastcall",
26167 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26168 tmp_regno = AX_REG;
26169 tmp = gen_rtx_REG (SImode, tmp_regno);
26172 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26173 xops[1] = tmp;
26174 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26176 /* Adjust the this parameter. */
26177 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26178 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26180 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26181 xops[0] = GEN_INT (vcall_offset);
26182 xops[1] = tmp2;
26183 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26184 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26186 xops[1] = this_reg;
26187 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26190 /* If necessary, drop THIS back to its stack slot. */
26191 if (this_reg && this_reg != this_param)
26193 xops[0] = this_reg;
26194 xops[1] = this_param;
26195 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26198 xops[0] = XEXP (DECL_RTL (function), 0);
26199 if (TARGET_64BIT)
26201 if (!flag_pic || (*targetm.binds_local_p) (function))
26202 output_asm_insn ("jmp\t%P0", xops);
26203 /* All thunks should be in the same object as their target,
26204 and thus binds_local_p should be true. */
26205 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26206 gcc_unreachable ();
26207 else
26209 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26210 tmp = gen_rtx_CONST (Pmode, tmp);
26211 tmp = gen_rtx_MEM (QImode, tmp);
26212 xops[0] = tmp;
26213 output_asm_insn ("jmp\t%A0", xops);
26216 else
26218 if (!flag_pic || (*targetm.binds_local_p) (function))
26219 output_asm_insn ("jmp\t%P0", xops);
26220 else
26221 #if TARGET_MACHO
26222 if (TARGET_MACHO)
26224 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26225 tmp = (gen_rtx_SYMBOL_REF
26226 (Pmode,
26227 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26228 tmp = gen_rtx_MEM (QImode, tmp);
26229 xops[0] = tmp;
26230 output_asm_insn ("jmp\t%0", xops);
26232 else
26233 #endif /* TARGET_MACHO */
26235 tmp = gen_rtx_REG (SImode, CX_REG);
26236 output_set_got (tmp, NULL_RTX);
26238 xops[1] = tmp;
26239 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26240 output_asm_insn ("jmp\t{*}%1", xops);
26245 static void
26246 x86_file_start (void)
26248 default_file_start ();
26249 #if TARGET_MACHO
26250 darwin_file_start ();
26251 #endif
26252 if (X86_FILE_START_VERSION_DIRECTIVE)
26253 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26254 if (X86_FILE_START_FLTUSED)
26255 fputs ("\t.global\t__fltused\n", asm_out_file);
26256 if (ix86_asm_dialect == ASM_INTEL)
26257 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26261 x86_field_alignment (tree field, int computed)
26263 enum machine_mode mode;
26264 tree type = TREE_TYPE (field);
26266 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26267 return computed;
26268 mode = TYPE_MODE (strip_array_types (type));
26269 if (mode == DFmode || mode == DCmode
26270 || GET_MODE_CLASS (mode) == MODE_INT
26271 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26272 return MIN (32, computed);
26273 return computed;
26276 /* Output assembler code to FILE to increment profiler label # LABELNO
26277 for profiling a function entry. */
26278 void
26279 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26281 if (TARGET_64BIT)
26283 #ifndef NO_PROFILE_COUNTERS
26284 fprintf (file, "\tleaq\t" LPREFIX "P%d(%%rip),%%r11\n", labelno);
26285 #endif
26287 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26288 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26289 else
26290 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26292 else if (flag_pic)
26294 #ifndef NO_PROFILE_COUNTERS
26295 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26296 labelno);
26297 #endif
26298 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26300 else
26302 #ifndef NO_PROFILE_COUNTERS
26303 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26304 labelno);
26305 #endif
26306 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26310 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26311 /* We don't have exact information about the insn sizes, but we may assume
26312 quite safely that we are informed about all 1 byte insns and memory
26313 address sizes. This is enough to eliminate unnecessary padding in
26314 99% of cases. */
26316 static int
26317 min_insn_size (rtx insn)
26319 int l = 0, len;
26321 if (!INSN_P (insn) || !active_insn_p (insn))
26322 return 0;
26324 /* Discard alignments we've emitted and jump instructions. */
26325 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26326 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26327 return 0;
26328 if (JUMP_TABLE_DATA_P (insn))
26329 return 0;
26331 /* Important case - calls are always 5 bytes.
26332 It is common to have many calls in a row. */
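/* A direct near CALL is encoded as opcode E8 followed by a 32-bit relative
   displacement, i.e. 5 bytes in total, which is the fixed size used for
   symbolic, non-sibling calls below.  */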
26333 if (CALL_P (insn)
26334 && symbolic_reference_mentioned_p (PATTERN (insn))
26335 && !SIBLING_CALL_P (insn))
26336 return 5;
26337 len = get_attr_length (insn);
26338 if (len <= 1)
26339 return 1;
26341 /* For normal instructions we rely on get_attr_length being exact,
26342 with a few exceptions. */
26343 if (!JUMP_P (insn))
26345 enum attr_type type = get_attr_type (insn);
26347 switch (type)
26349 case TYPE_MULTI:
26350 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26351 || asm_noperands (PATTERN (insn)) >= 0)
26352 return 0;
26353 break;
26354 case TYPE_OTHER:
26355 case TYPE_FCMP:
26356 break;
26357 default:
26358 /* Otherwise trust get_attr_length. */
26359 return len;
26362 l = get_attr_length_address (insn);
26363 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26364 l = 4;
26366 if (l)
26367 return 1+l;
26368 else
26369 return 2;
26372 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte
26373 window. */
26375 static void
26376 ix86_avoid_jump_mispredicts (void)
26378 rtx insn, start = get_insns ();
26379 int nbytes = 0, njumps = 0;
26380 int isjump = 0;
26382 /* Look for all minimal intervals of instructions containing 4 jumps.
26383 The intervals are bounded by START and INSN. NBYTES is the total
26384 size of instructions in the interval including INSN and not including
26385 START.  When NBYTES is smaller than 16 bytes, it is possible
26386 that the ends of START and INSN fall in the same 16-byte page.
26388 The smallest offset at which INSN can start in that page is the case where
26389 START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
26390 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN).
26392 for (insn = start; insn; insn = NEXT_INSN (insn))
26394 int min_size;
26396 if (LABEL_P (insn))
26398 int align = label_to_alignment (insn);
26399 int max_skip = label_to_max_skip (insn);
26401 if (max_skip > 15)
26402 max_skip = 15;
26403 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26404 already in the current 16 byte page, because otherwise
26405 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26406 bytes to reach 16 byte boundary. */
26407 if (align <= 0
26408 || (align <= 3 && max_skip != (1 << align) - 1))
26409 max_skip = 0;
26410 if (dump_file)
26411 fprintf (dump_file, "Label %i with max_skip %i\n",
26412 INSN_UID (insn), max_skip);
26413 if (max_skip)
26415 while (nbytes + max_skip >= 16)
26417 start = NEXT_INSN (start);
26418 if ((JUMP_P (start)
26419 && GET_CODE (PATTERN (start)) != ADDR_VEC
26420 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26421 || CALL_P (start))
26422 njumps--, isjump = 1;
26423 else
26424 isjump = 0;
26425 nbytes -= min_insn_size (start);
26428 continue;
26431 min_size = min_insn_size (insn);
26432 nbytes += min_size;
26433 if (dump_file)
26434 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26435 INSN_UID (insn), min_size);
26436 if ((JUMP_P (insn)
26437 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26438 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26439 || CALL_P (insn))
26440 njumps++;
26441 else
26442 continue;
26444 while (njumps > 3)
26446 start = NEXT_INSN (start);
26447 if ((JUMP_P (start)
26448 && GET_CODE (PATTERN (start)) != ADDR_VEC
26449 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26450 || CALL_P (start))
26451 njumps--, isjump = 1;
26452 else
26453 isjump = 0;
26454 nbytes -= min_insn_size (start);
26456 gcc_assert (njumps >= 0);
26457 if (dump_file)
26458 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26459 INSN_UID (start), INSN_UID (insn), nbytes);
26461 if (njumps == 3 && isjump && nbytes < 16)
26463 int padsize = 15 - nbytes + min_insn_size (insn);
26465 if (dump_file)
26466 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26467 INSN_UID (insn), padsize);
26468 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26472 #endif
26474 /* AMD Athlon works faster
26475 when RET is not the destination of a conditional jump or directly preceded
26476 by another jump instruction.  We avoid the penalty by inserting a NOP just
26477 before the RET instruction in such cases. */
26478 static void
26479 ix86_pad_returns (void)
26481 edge e;
26482 edge_iterator ei;
26484 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26486 basic_block bb = e->src;
26487 rtx ret = BB_END (bb);
26488 rtx prev;
26489 bool replace = false;
26491 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26492 || optimize_bb_for_size_p (bb))
26493 continue;
26494 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26495 if (active_insn_p (prev) || LABEL_P (prev))
26496 break;
26497 if (prev && LABEL_P (prev))
26499 edge e;
26500 edge_iterator ei;
26502 FOR_EACH_EDGE (e, ei, bb->preds)
26503 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26504 && !(e->flags & EDGE_FALLTHRU))
26505 replace = true;
26507 if (!replace)
26509 prev = prev_active_insn (ret);
26510 if (prev
26511 && ((JUMP_P (prev) && any_condjump_p (prev))
26512 || CALL_P (prev)))
26513 replace = true;
26514 /* Empty functions get a branch mispredict even when the jump destination
26515 is not visible to us. */
26516 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26517 replace = true;
26519 if (replace)
26521 emit_jump_insn_before (gen_return_internal_long (), ret);
26522 delete_insn (ret);
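/* Note: gen_return_internal_long expands to a longer form of the return
   instruction (the i386.md pattern adds a prefix byte to the bare RET), so
   the replacement above trades one byte of code size for avoiding the
   branch-prediction penalty described before this function.  */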
26527 /* Implement machine specific optimizations. We implement padding of returns
26528 for K8 CPUs and pass to avoid 4 jumps in the single 16 byte window. */
26529 static void
26530 ix86_reorg (void)
26532 if (optimize && optimize_function_for_speed_p (cfun))
26534 if (TARGET_PAD_RETURNS)
26535 ix86_pad_returns ();
26536 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26537 if (TARGET_FOUR_JUMP_LIMIT)
26538 ix86_avoid_jump_mispredicts ();
26539 #endif
26543 /* Return nonzero when QImode register that must be represented via REX prefix
26544 is used. */
26545 bool
26546 x86_extended_QIreg_mentioned_p (rtx insn)
26548 int i;
26549 extract_insn_cached (insn);
26550 for (i = 0; i < recog_data.n_operands; i++)
26551 if (REG_P (recog_data.operand[i])
26552 && REGNO (recog_data.operand[i]) > BX_REG)
26553 return true;
26554 return false;
26557 /* Return nonzero when P points to register encoded via REX prefix.
26558 Called via for_each_rtx. */
26559 static int
26560 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26562 unsigned int regno;
26563 if (!REG_P (*p))
26564 return 0;
26565 regno = REGNO (*p);
26566 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26569 /* Return true when INSN mentions register that must be encoded using REX
26570 prefix. */
26571 bool
26572 x86_extended_reg_mentioned_p (rtx insn)
26574 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26575 extended_reg_mentioned_1, NULL);
26578 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26579 optabs would emit if we didn't have TFmode patterns. */
26581 void
26582 x86_emit_floatuns (rtx operands[2])
26584 rtx neglab, donelab, i0, i1, f0, in, out;
26585 enum machine_mode mode, inmode;
26587 inmode = GET_MODE (operands[1]);
26588 gcc_assert (inmode == SImode || inmode == DImode);
26590 out = operands[0];
26591 in = force_reg (inmode, operands[1]);
26592 mode = GET_MODE (out);
26593 neglab = gen_label_rtx ();
26594 donelab = gen_label_rtx ();
26595 f0 = gen_reg_rtx (mode);
26597 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26599 expand_float (out, in, 0);
26601 emit_jump_insn (gen_jump (donelab));
26602 emit_barrier ();
26604 emit_label (neglab);
26606 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26607 1, OPTAB_DIRECT);
26608 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26609 1, OPTAB_DIRECT);
26610 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26612 expand_float (f0, i0, 0);
26614 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26616 emit_label (donelab);
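/* For illustration (a sketch, not part of the compiler), the emitted
   sequence is roughly equivalent to this scalar C for a 64-bit unsigned
   input converted to double (hypothetical helper name):

       double u64_to_fp (uint64_t x)
       {
         if ((int64_t) x >= 0)
           return (double) (int64_t) x;
         uint64_t h = (x >> 1) | (x & 1);
         return 2.0 * (double) (int64_t) h;
       }

   Halving the value lets the signed conversion be used; ORing the low bit
   back in makes the final doubling round the same way a direct conversion
   of X would.  */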
26619 /* AVX does not support 32-byte integer vector operations,
26620 thus the longest vector we are faced with is V16QImode. */
26621 #define MAX_VECT_LEN 16
26623 struct expand_vec_perm_d
26625 rtx target, op0, op1;
26626 unsigned char perm[MAX_VECT_LEN];
26627 enum machine_mode vmode;
26628 unsigned char nelt;
26629 bool testing_p;
26632 static bool expand_vec_perm_1 (struct expand_vec_perm_d *d);
26633 static bool expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d);
26635 /* Get a vector mode of the same size as the original but with elements
26636 twice as wide. This is only guaranteed to apply to integral vectors. */
26638 static inline enum machine_mode
26639 get_mode_wider_vector (enum machine_mode o)
26641 /* ??? Rely on the ordering that genmodes.c gives to vectors. */
26642 enum machine_mode n = GET_MODE_WIDER_MODE (o);
26643 gcc_assert (GET_MODE_NUNITS (o) == GET_MODE_NUNITS (n) * 2);
26644 gcc_assert (GET_MODE_SIZE (o) == GET_MODE_SIZE (n));
26645 return n;
26648 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26649 with all elements equal to VAR. Return true if successful. */
26651 static bool
26652 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26653 rtx target, rtx val)
26655 bool ok;
26657 switch (mode)
26659 case V2SImode:
26660 case V2SFmode:
26661 if (!mmx_ok)
26662 return false;
26663 /* FALLTHRU */
26665 case V4DFmode:
26666 case V4DImode:
26667 case V8SFmode:
26668 case V8SImode:
26669 case V2DFmode:
26670 case V2DImode:
26671 case V4SFmode:
26672 case V4SImode:
26674 rtx insn, dup;
26676 /* First attempt to recognize VAL as-is. */
26677 dup = gen_rtx_VEC_DUPLICATE (mode, val);
26678 insn = emit_insn (gen_rtx_SET (VOIDmode, target, dup));
26679 if (recog_memoized (insn) < 0)
26681 rtx seq;
26682 /* If that fails, force VAL into a register. */
26684 start_sequence ();
26685 XEXP (dup, 0) = force_reg (GET_MODE_INNER (mode), val);
26686 seq = get_insns ();
26687 end_sequence ();
26688 if (seq)
26689 emit_insn_before (seq, insn);
26691 ok = recog_memoized (insn) >= 0;
26692 gcc_assert (ok);
26695 return true;
26697 case V4HImode:
26698 if (!mmx_ok)
26699 return false;
26700 if (TARGET_SSE || TARGET_3DNOW_A)
26702 rtx x;
26704 val = gen_lowpart (SImode, val);
26705 x = gen_rtx_TRUNCATE (HImode, val);
26706 x = gen_rtx_VEC_DUPLICATE (mode, x);
26707 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26708 return true;
26710 goto widen;
26712 case V8QImode:
26713 if (!mmx_ok)
26714 return false;
26715 goto widen;
26717 case V8HImode:
26718 if (TARGET_SSE2)
26720 struct expand_vec_perm_d dperm;
26721 rtx tmp1, tmp2;
26723 permute:
26724 memset (&dperm, 0, sizeof (dperm));
26725 dperm.target = target;
26726 dperm.vmode = mode;
26727 dperm.nelt = GET_MODE_NUNITS (mode);
26728 dperm.op0 = dperm.op1 = gen_reg_rtx (mode);
26730 /* Extend to SImode using a paradoxical SUBREG. */
26731 tmp1 = gen_reg_rtx (SImode);
26732 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26734 /* Insert the SImode value as low element of a V4SImode vector. */
26735 tmp2 = gen_lowpart (V4SImode, dperm.op0);
26736 emit_insn (gen_vec_setv4si_0 (tmp2, CONST0_RTX (V4SImode), tmp1));
26738 ok = (expand_vec_perm_1 (&dperm)
26739 || expand_vec_perm_broadcast_1 (&dperm));
26740 gcc_assert (ok);
26741 return ok;
26743 goto widen;
26745 case V16QImode:
26746 if (TARGET_SSE2)
26747 goto permute;
26748 goto widen;
26750 widen:
26751 /* Replicate the value once into the next wider mode and recurse. */
26753 enum machine_mode smode, wsmode, wvmode;
26754 rtx x;
26756 smode = GET_MODE_INNER (mode);
26757 wvmode = get_mode_wider_vector (mode);
26758 wsmode = GET_MODE_INNER (wvmode);
26760 val = convert_modes (wsmode, smode, val, true);
26761 x = expand_simple_binop (wsmode, ASHIFT, val,
26762 GEN_INT (GET_MODE_BITSIZE (smode)),
26763 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26764 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
26766 x = gen_lowpart (wvmode, target);
26767 ok = ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val);
26768 gcc_assert (ok);
26769 return ok;
26772 case V16HImode:
26773 case V32QImode:
26775 enum machine_mode hvmode = (mode == V16HImode ? V8HImode : V16QImode);
26776 rtx x = gen_reg_rtx (hvmode);
26778 ok = ix86_expand_vector_init_duplicate (false, hvmode, x, val);
26779 gcc_assert (ok);
26781 x = gen_rtx_VEC_CONCAT (mode, x, x);
26782 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26784 return true;
26786 default:
26787 return false;
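/* For the narrow integer modes handled via the "widen" path above, the
   broadcast is built bottom-up.  For example (illustrative only), splatting
   the QImode value 0x5A into V8QImode first forms the HImode value
   (0x5A << 8) | 0x5A == 0x5A5A, broadcasts that into V4HImode, and then
   reinterprets the result as V8QImode via gen_lowpart.  */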
26791 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26792 whose ONE_VAR element is VAR, and other elements are zero. Return true
26793 if successful. */
26795 static bool
26796 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
26797 rtx target, rtx var, int one_var)
26799 enum machine_mode vsimode;
26800 rtx new_target;
26801 rtx x, tmp;
26802 bool use_vector_set = false;
26804 switch (mode)
26806 case V2DImode:
26807 /* For SSE4.1, we normally use vector set. But if the second
26808 element is zero and inter-unit moves are OK, we use movq
26809 instead. */
26810 use_vector_set = (TARGET_64BIT
26811 && TARGET_SSE4_1
26812 && !(TARGET_INTER_UNIT_MOVES
26813 && one_var == 0));
26814 break;
26815 case V16QImode:
26816 case V4SImode:
26817 case V4SFmode:
26818 use_vector_set = TARGET_SSE4_1;
26819 break;
26820 case V8HImode:
26821 use_vector_set = TARGET_SSE2;
26822 break;
26823 case V4HImode:
26824 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
26825 break;
26826 case V32QImode:
26827 case V16HImode:
26828 case V8SImode:
26829 case V8SFmode:
26830 case V4DFmode:
26831 use_vector_set = TARGET_AVX;
26832 break;
26833 case V4DImode:
26834 /* Use ix86_expand_vector_set in 64bit mode only. */
26835 use_vector_set = TARGET_AVX && TARGET_64BIT;
26836 break;
26837 default:
26838 break;
26841 if (use_vector_set)
26843 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
26844 var = force_reg (GET_MODE_INNER (mode), var);
26845 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26846 return true;
26849 switch (mode)
26851 case V2SFmode:
26852 case V2SImode:
26853 if (!mmx_ok)
26854 return false;
26855 /* FALLTHRU */
26857 case V2DFmode:
26858 case V2DImode:
26859 if (one_var != 0)
26860 return false;
26861 var = force_reg (GET_MODE_INNER (mode), var);
26862 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
26863 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26864 return true;
26866 case V4SFmode:
26867 case V4SImode:
26868 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
26869 new_target = gen_reg_rtx (mode);
26870 else
26871 new_target = target;
26872 var = force_reg (GET_MODE_INNER (mode), var);
26873 x = gen_rtx_VEC_DUPLICATE (mode, var);
26874 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
26875 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
26876 if (one_var != 0)
26878 /* We need to shuffle the value to the correct position, so
26879 create a new pseudo to store the intermediate result. */
26881 /* With SSE2, we can use the integer shuffle insns. */
26882 if (mode != V4SFmode && TARGET_SSE2)
26884 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
26885 const1_rtx,
26886 GEN_INT (one_var == 1 ? 0 : 1),
26887 GEN_INT (one_var == 2 ? 0 : 1),
26888 GEN_INT (one_var == 3 ? 0 : 1)));
26889 if (target != new_target)
26890 emit_move_insn (target, new_target);
26891 return true;
26894 /* Otherwise convert the intermediate result to V4SFmode and
26895 use the SSE1 shuffle instructions. */
26896 if (mode != V4SFmode)
26898 tmp = gen_reg_rtx (V4SFmode);
26899 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
26901 else
26902 tmp = new_target;
26904 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
26905 const1_rtx,
26906 GEN_INT (one_var == 1 ? 0 : 1),
26907 GEN_INT (one_var == 2 ? 0+4 : 1+4),
26908 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
26910 if (mode != V4SFmode)
26911 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
26912 else if (tmp != target)
26913 emit_move_insn (target, tmp);
26915 else if (target != new_target)
26916 emit_move_insn (target, new_target);
26917 return true;
26919 case V8HImode:
26920 case V16QImode:
26921 vsimode = V4SImode;
26922 goto widen;
26923 case V4HImode:
26924 case V8QImode:
26925 if (!mmx_ok)
26926 return false;
26927 vsimode = V2SImode;
26928 goto widen;
26929 widen:
26930 if (one_var != 0)
26931 return false;
26933 /* Zero extend the variable element to SImode and recurse. */
26934 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
26936 x = gen_reg_rtx (vsimode);
26937 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
26938 var, one_var))
26939 gcc_unreachable ();
26941 emit_move_insn (target, gen_lowpart (mode, x));
26942 return true;
26944 default:
26945 return false;
26949 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26950 consisting of the values in VALS. It is known that all elements
26951 except ONE_VAR are constants. Return true if successful. */
26953 static bool
26954 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
26955 rtx target, rtx vals, int one_var)
26957 rtx var = XVECEXP (vals, 0, one_var);
26958 enum machine_mode wmode;
26959 rtx const_vec, x;
26961 const_vec = copy_rtx (vals);
26962 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
26963 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
26965 switch (mode)
26967 case V2DFmode:
26968 case V2DImode:
26969 case V2SFmode:
26970 case V2SImode:
26971 /* For the two element vectors, it's just as easy to use
26972 the general case. */
26973 return false;
26975 case V4DImode:
26976 /* Use ix86_expand_vector_set in 64bit mode only. */
26977 if (!TARGET_64BIT)
26978 return false;
26979 case V4DFmode:
26980 case V8SFmode:
26981 case V8SImode:
26982 case V16HImode:
26983 case V32QImode:
26984 case V4SFmode:
26985 case V4SImode:
26986 case V8HImode:
26987 case V4HImode:
26988 break;
26990 case V16QImode:
26991 if (TARGET_SSE4_1)
26992 break;
26993 wmode = V8HImode;
26994 goto widen;
26995 case V8QImode:
26996 wmode = V4HImode;
26997 goto widen;
26998 widen:
26999 /* There's no way to set one QImode entry easily. Combine
27000 the variable value with its adjacent constant value, and
27001 promote to an HImode set. */
27002 x = XVECEXP (vals, 0, one_var ^ 1);
27003 if (one_var & 1)
27005 var = convert_modes (HImode, QImode, var, true);
27006 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27007 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27008 x = GEN_INT (INTVAL (x) & 0xff);
27010 else
27012 var = convert_modes (HImode, QImode, var, true);
27013 x = gen_int_mode (INTVAL (x) << 8, HImode);
27015 if (x != const0_rtx)
27016 var = expand_simple_binop (HImode, IOR, var, x, var,
27017 1, OPTAB_LIB_WIDEN);
27019 x = gen_reg_rtx (wmode);
27020 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27021 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27023 emit_move_insn (target, gen_lowpart (mode, x));
27024 return true;
27026 default:
27027 return false;
27030 emit_move_insn (target, const_vec);
27031 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27032 return true;
27035 /* A subroutine of ix86_expand_vector_init_general. Use vector
27036 concatenate to handle the most general case: all values variable,
27037 and none identical. */
27039 static void
27040 ix86_expand_vector_init_concat (enum machine_mode mode,
27041 rtx target, rtx *ops, int n)
27043 enum machine_mode cmode, hmode = VOIDmode;
27044 rtx first[8], second[4];
27045 rtvec v;
27046 int i, j;
27048 switch (n)
27050 case 2:
27051 switch (mode)
27053 case V8SImode:
27054 cmode = V4SImode;
27055 break;
27056 case V8SFmode:
27057 cmode = V4SFmode;
27058 break;
27059 case V4DImode:
27060 cmode = V2DImode;
27061 break;
27062 case V4DFmode:
27063 cmode = V2DFmode;
27064 break;
27065 case V4SImode:
27066 cmode = V2SImode;
27067 break;
27068 case V4SFmode:
27069 cmode = V2SFmode;
27070 break;
27071 case V2DImode:
27072 cmode = DImode;
27073 break;
27074 case V2SImode:
27075 cmode = SImode;
27076 break;
27077 case V2DFmode:
27078 cmode = DFmode;
27079 break;
27080 case V2SFmode:
27081 cmode = SFmode;
27082 break;
27083 default:
27084 gcc_unreachable ();
27087 if (!register_operand (ops[1], cmode))
27088 ops[1] = force_reg (cmode, ops[1]);
27089 if (!register_operand (ops[0], cmode))
27090 ops[0] = force_reg (cmode, ops[0]);
27091 emit_insn (gen_rtx_SET (VOIDmode, target,
27092 gen_rtx_VEC_CONCAT (mode, ops[0],
27093 ops[1])));
27094 break;
27096 case 4:
27097 switch (mode)
27099 case V4DImode:
27100 cmode = V2DImode;
27101 break;
27102 case V4DFmode:
27103 cmode = V2DFmode;
27104 break;
27105 case V4SImode:
27106 cmode = V2SImode;
27107 break;
27108 case V4SFmode:
27109 cmode = V2SFmode;
27110 break;
27111 default:
27112 gcc_unreachable ();
27114 goto half;
27116 case 8:
27117 switch (mode)
27119 case V8SImode:
27120 cmode = V2SImode;
27121 hmode = V4SImode;
27122 break;
27123 case V8SFmode:
27124 cmode = V2SFmode;
27125 hmode = V4SFmode;
27126 break;
27127 default:
27128 gcc_unreachable ();
27130 goto half;
27132 half:
27133 /* FIXME: We process inputs backward to help RA. PR 36222. */
27134 i = n - 1;
27135 j = (n >> 1) - 1;
27136 for (; i > 0; i -= 2, j--)
27138 first[j] = gen_reg_rtx (cmode);
27139 v = gen_rtvec (2, ops[i - 1], ops[i]);
27140 ix86_expand_vector_init (false, first[j],
27141 gen_rtx_PARALLEL (cmode, v));
27144 n >>= 1;
27145 if (n > 2)
27147 gcc_assert (hmode != VOIDmode);
27148 for (i = j = 0; i < n; i += 2, j++)
27150 second[j] = gen_reg_rtx (hmode);
27151 ix86_expand_vector_init_concat (hmode, second [j],
27152 &first [i], 2);
27154 n >>= 1;
27155 ix86_expand_vector_init_concat (mode, target, second, n);
27157 else
27158 ix86_expand_vector_init_concat (mode, target, first, n);
27159 break;
27161 default:
27162 gcc_unreachable ();
27166 /* A subroutine of ix86_expand_vector_init_general. Use vector
27167 interleave to handle the most general case: all values variable,
27168 and none identical. */
27170 static void
27171 ix86_expand_vector_init_interleave (enum machine_mode mode,
27172 rtx target, rtx *ops, int n)
27174 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27175 int i, j;
27176 rtx op0, op1;
27177 rtx (*gen_load_even) (rtx, rtx, rtx);
27178 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27179 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
27181 switch (mode)
27183 case V8HImode:
27184 gen_load_even = gen_vec_setv8hi;
27185 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27186 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27187 inner_mode = HImode;
27188 first_imode = V4SImode;
27189 second_imode = V2DImode;
27190 third_imode = VOIDmode;
27191 break;
27192 case V16QImode:
27193 gen_load_even = gen_vec_setv16qi;
27194 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27195 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27196 inner_mode = QImode;
27197 first_imode = V8HImode;
27198 second_imode = V4SImode;
27199 third_imode = V2DImode;
27200 break;
27201 default:
27202 gcc_unreachable ();
27205 for (i = 0; i < n; i++)
27207 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27208 op0 = gen_reg_rtx (SImode);
27209 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27211 /* Insert the SImode value as low element of V4SImode vector. */
27212 op1 = gen_reg_rtx (V4SImode);
27213 op0 = gen_rtx_VEC_MERGE (V4SImode,
27214 gen_rtx_VEC_DUPLICATE (V4SImode,
27215 op0),
27216 CONST0_RTX (V4SImode),
27217 const1_rtx);
27218 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27220 /* Cast the V4SImode vector back to a vector in the original mode. */
27221 op0 = gen_reg_rtx (mode);
27222 emit_move_insn (op0, gen_lowpart (mode, op1));
27224 /* Load even elements into the second position. */
27225 emit_insn ((*gen_load_even) (op0,
27226 force_reg (inner_mode,
27227 ops [i + i + 1]),
27228 const1_rtx));
27230 /* Cast vector to FIRST_IMODE vector. */
27231 ops[i] = gen_reg_rtx (first_imode);
27232 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27235 /* Interleave low FIRST_IMODE vectors. */
27236 for (i = j = 0; i < n; i += 2, j++)
27238 op0 = gen_reg_rtx (first_imode);
27239 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27241 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27242 ops[j] = gen_reg_rtx (second_imode);
27243 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27246 /* Interleave low SECOND_IMODE vectors. */
27247 switch (second_imode)
27249 case V4SImode:
27250 for (i = j = 0; i < n / 2; i += 2, j++)
27252 op0 = gen_reg_rtx (second_imode);
27253 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27254 ops[i + 1]));
27256 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27257 vector. */
27258 ops[j] = gen_reg_rtx (third_imode);
27259 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27261 second_imode = V2DImode;
27262 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27263 /* FALLTHRU */
27265 case V2DImode:
27266 op0 = gen_reg_rtx (second_imode);
27267 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27268 ops[1]));
27270 /* Cast the SECOND_IMODE vector back to a vector in the original
27271 mode. */
27272 emit_insn (gen_rtx_SET (VOIDmode, target,
27273 gen_lowpart (mode, op0)));
27274 break;
27276 default:
27277 gcc_unreachable ();
27281 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27282 all values variable, and none identical. */
27284 static void
27285 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27286 rtx target, rtx vals)
27288 rtx ops[32], op0, op1;
27289 enum machine_mode half_mode = VOIDmode;
27290 int n, i;
27292 switch (mode)
27294 case V2SFmode:
27295 case V2SImode:
27296 if (!mmx_ok && !TARGET_SSE)
27297 break;
27298 /* FALLTHRU */
27300 case V8SFmode:
27301 case V8SImode:
27302 case V4DFmode:
27303 case V4DImode:
27304 case V4SFmode:
27305 case V4SImode:
27306 case V2DFmode:
27307 case V2DImode:
27308 n = GET_MODE_NUNITS (mode);
27309 for (i = 0; i < n; i++)
27310 ops[i] = XVECEXP (vals, 0, i);
27311 ix86_expand_vector_init_concat (mode, target, ops, n);
27312 return;
27314 case V32QImode:
27315 half_mode = V16QImode;
27316 goto half;
27318 case V16HImode:
27319 half_mode = V8HImode;
27320 goto half;
27322 half:
27323 n = GET_MODE_NUNITS (mode);
27324 for (i = 0; i < n; i++)
27325 ops[i] = XVECEXP (vals, 0, i);
27326 op0 = gen_reg_rtx (half_mode);
27327 op1 = gen_reg_rtx (half_mode);
27328 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27329 n >> 2);
27330 ix86_expand_vector_init_interleave (half_mode, op1,
27331 &ops [n >> 1], n >> 2);
27332 emit_insn (gen_rtx_SET (VOIDmode, target,
27333 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27334 return;
27336 case V16QImode:
27337 if (!TARGET_SSE4_1)
27338 break;
27339 /* FALLTHRU */
27341 case V8HImode:
27342 if (!TARGET_SSE2)
27343 break;
27345 /* Don't use ix86_expand_vector_init_interleave if we can't
27346 move from GPR to SSE register directly. */
27347 if (!TARGET_INTER_UNIT_MOVES)
27348 break;
27350 n = GET_MODE_NUNITS (mode);
27351 for (i = 0; i < n; i++)
27352 ops[i] = XVECEXP (vals, 0, i);
27353 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27354 return;
27356 case V4HImode:
27357 case V8QImode:
27358 break;
27360 default:
27361 gcc_unreachable ();
27365 int i, j, n_elts, n_words, n_elt_per_word;
27366 enum machine_mode inner_mode;
27367 rtx words[4], shift;
27369 inner_mode = GET_MODE_INNER (mode);
27370 n_elts = GET_MODE_NUNITS (mode);
27371 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27372 n_elt_per_word = n_elts / n_words;
27373 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
27375 for (i = 0; i < n_words; ++i)
27377 rtx word = NULL_RTX;
27379 for (j = 0; j < n_elt_per_word; ++j)
27381 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27382 elt = convert_modes (word_mode, inner_mode, elt, true);
27384 if (j == 0)
27385 word = elt;
27386 else
27388 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27389 word, 1, OPTAB_LIB_WIDEN);
27390 word = expand_simple_binop (word_mode, IOR, word, elt,
27391 word, 1, OPTAB_LIB_WIDEN);
27395 words[i] = word;
27398 if (n_words == 1)
27399 emit_move_insn (target, gen_lowpart (mode, words[0]));
27400 else if (n_words == 2)
27402 rtx tmp = gen_reg_rtx (mode);
27403 emit_clobber (tmp);
27404 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27405 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27406 emit_move_insn (target, tmp);
27408 else if (n_words == 4)
27410 rtx tmp = gen_reg_rtx (V4SImode);
27411 gcc_assert (word_mode == SImode);
27412 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27413 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27414 emit_move_insn (target, gen_lowpart (mode, tmp));
27416 else
27417 gcc_unreachable ();
27421 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27422 instructions unless MMX_OK is true. */
27424 void
27425 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27427 enum machine_mode mode = GET_MODE (target);
27428 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27429 int n_elts = GET_MODE_NUNITS (mode);
27430 int n_var = 0, one_var = -1;
27431 bool all_same = true, all_const_zero = true;
27432 int i;
27433 rtx x;
27435 for (i = 0; i < n_elts; ++i)
27437 x = XVECEXP (vals, 0, i);
27438 if (!(CONST_INT_P (x)
27439 || GET_CODE (x) == CONST_DOUBLE
27440 || GET_CODE (x) == CONST_FIXED))
27441 n_var++, one_var = i;
27442 else if (x != CONST0_RTX (inner_mode))
27443 all_const_zero = false;
27444 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27445 all_same = false;
27448 /* Constants are best loaded from the constant pool. */
27449 if (n_var == 0)
27451 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27452 return;
27455 /* If all values are identical, broadcast the value. */
27456 if (all_same
27457 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27458 XVECEXP (vals, 0, 0)))
27459 return;
27461 /* Values where only one field is non-constant are best loaded from
27462 the pool and overwritten via move later. */
27463 if (n_var == 1)
27465 if (all_const_zero
27466 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27467 XVECEXP (vals, 0, one_var),
27468 one_var))
27469 return;
27471 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27472 return;
27475 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27478 void
27479 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27481 enum machine_mode mode = GET_MODE (target);
27482 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27483 enum machine_mode half_mode;
27484 bool use_vec_merge = false;
27485 rtx tmp;
27486 static rtx (*gen_extract[6][2]) (rtx, rtx)
27488 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27489 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27490 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27491 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27492 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27493 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27495 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27497 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27498 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27499 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27500 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27501 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27502 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27504 int i, j, n;
27506 switch (mode)
27508 case V2SFmode:
27509 case V2SImode:
27510 if (mmx_ok)
27512 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27513 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27514 if (elt == 0)
27515 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27516 else
27517 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27518 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27519 return;
27521 break;
27523 case V2DImode:
27524 use_vec_merge = TARGET_SSE4_1;
27525 if (use_vec_merge)
27526 break;
27528 case V2DFmode:
27530 rtx op0, op1;
27532 /* For the two element vectors, we implement a VEC_CONCAT with
27533 the extraction of the other element. */
27535 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27536 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27538 if (elt == 0)
27539 op0 = val, op1 = tmp;
27540 else
27541 op0 = tmp, op1 = val;
27543 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27544 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27546 return;
27548 case V4SFmode:
27549 use_vec_merge = TARGET_SSE4_1;
27550 if (use_vec_merge)
27551 break;
27553 switch (elt)
27555 case 0:
27556 use_vec_merge = true;
27557 break;
27559 case 1:
27560 /* tmp = target = A B C D */
27561 tmp = copy_to_reg (target);
27562 /* target = A A B B */
27563 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
27564 /* target = X A B B */
27565 ix86_expand_vector_set (false, target, val, 0);
27566 /* target = A X C D */
27567 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27568 const1_rtx, const0_rtx,
27569 GEN_INT (2+4), GEN_INT (3+4)));
27570 return;
27572 case 2:
27573 /* tmp = target = A B C D */
27574 tmp = copy_to_reg (target);
27575 /* tmp = X B C D */
27576 ix86_expand_vector_set (false, tmp, val, 0);
27577 /* target = A B X D */
27578 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27579 const0_rtx, const1_rtx,
27580 GEN_INT (0+4), GEN_INT (3+4)));
27581 return;
27583 case 3:
27584 /* tmp = target = A B C D */
27585 tmp = copy_to_reg (target);
27586 /* tmp = X B C D */
27587 ix86_expand_vector_set (false, tmp, val, 0);
27588 /* target = A B C X */
27589 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27590 const0_rtx, const1_rtx,
27591 GEN_INT (2+4), GEN_INT (0+4)));
27592 return;
27594 default:
27595 gcc_unreachable ();
27597 break;
27599 case V4SImode:
27600 use_vec_merge = TARGET_SSE4_1;
27601 if (use_vec_merge)
27602 break;
27604 /* Element 0 handled by vec_merge below. */
27605 if (elt == 0)
27607 use_vec_merge = true;
27608 break;
27611 if (TARGET_SSE2)
27613 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27614 store into element 0, then shuffle them back. */
27616 rtx order[4];
27618 order[0] = GEN_INT (elt);
27619 order[1] = const1_rtx;
27620 order[2] = const2_rtx;
27621 order[3] = GEN_INT (3);
27622 order[elt] = const0_rtx;
27624 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27625 order[1], order[2], order[3]));
27627 ix86_expand_vector_set (false, target, val, 0);
27629 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27630 order[1], order[2], order[3]));
27632 else
27634 /* For SSE1, we have to reuse the V4SF code. */
27635 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27636 gen_lowpart (SFmode, val), elt);
27638 return;
27640 case V8HImode:
27641 use_vec_merge = TARGET_SSE2;
27642 break;
27643 case V4HImode:
27644 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27645 break;
27647 case V16QImode:
27648 use_vec_merge = TARGET_SSE4_1;
27649 break;
27651 case V8QImode:
27652 break;
27654 case V32QImode:
27655 half_mode = V16QImode;
27656 j = 0;
27657 n = 16;
27658 goto half;
27660 case V16HImode:
27661 half_mode = V8HImode;
27662 j = 1;
27663 n = 8;
27664 goto half;
27666 case V8SImode:
27667 half_mode = V4SImode;
27668 j = 2;
27669 n = 4;
27670 goto half;
27672 case V4DImode:
27673 half_mode = V2DImode;
27674 j = 3;
27675 n = 2;
27676 goto half;
27678 case V8SFmode:
27679 half_mode = V4SFmode;
27680 j = 4;
27681 n = 4;
27682 goto half;
27684 case V4DFmode:
27685 half_mode = V2DFmode;
27686 j = 5;
27687 n = 2;
27688 goto half;
27690 half:
27691 /* Compute offset. */
27692 i = elt / n;
27693 elt %= n;
27695 gcc_assert (i <= 1);
27697 /* Extract the half. */
27698 tmp = gen_reg_rtx (half_mode);
27699 emit_insn ((*gen_extract[j][i]) (tmp, target));
27701 /* Put val in tmp at elt. */
27702 ix86_expand_vector_set (false, tmp, val, elt);
27704 /* Put it back. */
27705 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27706 return;
27708 default:
27709 break;
27712 if (use_vec_merge)
27714 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27715 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27716 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27718 else
27720 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27722 emit_move_insn (mem, target);
27724 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27725 emit_move_insn (tmp, val);
27727 emit_move_insn (target, mem);
27731 void
27732 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27734 enum machine_mode mode = GET_MODE (vec);
27735 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27736 bool use_vec_extr = false;
27737 rtx tmp;
27739 switch (mode)
27741 case V2SImode:
27742 case V2SFmode:
27743 if (!mmx_ok)
27744 break;
27745 /* FALLTHRU */
27747 case V2DFmode:
27748 case V2DImode:
27749 use_vec_extr = true;
27750 break;
27752 case V4SFmode:
27753 use_vec_extr = TARGET_SSE4_1;
27754 if (use_vec_extr)
27755 break;
27757 switch (elt)
27759 case 0:
27760 tmp = vec;
27761 break;
27763 case 1:
27764 case 3:
27765 tmp = gen_reg_rtx (mode);
27766 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
27767 GEN_INT (elt), GEN_INT (elt),
27768 GEN_INT (elt+4), GEN_INT (elt+4)));
27769 break;
27771 case 2:
27772 tmp = gen_reg_rtx (mode);
27773 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
27774 break;
27776 default:
27777 gcc_unreachable ();
27779 vec = tmp;
27780 use_vec_extr = true;
27781 elt = 0;
27782 break;
27784 case V4SImode:
27785 use_vec_extr = TARGET_SSE4_1;
27786 if (use_vec_extr)
27787 break;
27789 if (TARGET_SSE2)
27791 switch (elt)
27793 case 0:
27794 tmp = vec;
27795 break;
27797 case 1:
27798 case 3:
27799 tmp = gen_reg_rtx (mode);
27800 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
27801 GEN_INT (elt), GEN_INT (elt),
27802 GEN_INT (elt), GEN_INT (elt)));
27803 break;
27805 case 2:
27806 tmp = gen_reg_rtx (mode);
27807 emit_insn (gen_vec_interleave_highv4si (tmp, vec, vec));
27808 break;
27810 default:
27811 gcc_unreachable ();
27813 vec = tmp;
27814 use_vec_extr = true;
27815 elt = 0;
27817 else
27819 /* For SSE1, we have to reuse the V4SF code. */
27820 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
27821 gen_lowpart (V4SFmode, vec), elt);
27822 return;
27824 break;
27826 case V8HImode:
27827 use_vec_extr = TARGET_SSE2;
27828 break;
27829 case V4HImode:
27830 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27831 break;
27833 case V16QImode:
27834 use_vec_extr = TARGET_SSE4_1;
27835 break;
27837 case V8QImode:
27838 /* ??? Could extract the appropriate HImode element and shift. */
27839 default:
27840 break;
27843 if (use_vec_extr)
27845 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
27846 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
27848 /* Let the rtl optimizers know about the zero extension performed. */
27849 if (inner_mode == QImode || inner_mode == HImode)
27851 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
27852 target = gen_lowpart (SImode, target);
27855 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27857 else
27859 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27861 emit_move_insn (mem, vec);
27863 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27864 emit_move_insn (target, tmp);
27868 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
27869 pattern to reduce; DEST is the destination; IN is the input vector. */
27871 void
27872 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
27874 rtx tmp1, tmp2, tmp3;
27876 tmp1 = gen_reg_rtx (V4SFmode);
27877 tmp2 = gen_reg_rtx (V4SFmode);
27878 tmp3 = gen_reg_rtx (V4SFmode);
27880 emit_insn (gen_sse_movhlps (tmp1, in, in));
27881 emit_insn (fn (tmp2, tmp1, in));
27883 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
27884 const1_rtx, const1_rtx,
27885 GEN_INT (1+4), GEN_INT (1+4)));
27886 emit_insn (fn (dest, tmp2, tmp3));
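/* Data flow of the sequence above, written element-wise (a sketch; FN is
   assumed commutative, as it is for the min/max/plus reductions this
   helper serves):

       in   = { a, b, c, d }
       tmp1 = { c, d, ... }                       movhlps
       tmp2 = { fn(a,c), fn(b,d), ... }
       tmp3 = { fn(b,d), fn(b,d), ... }           shufps broadcast of element 1
       dest = { fn(fn(a,c), fn(b,d)), ... }

   so element 0 of DEST holds the full reduction.  */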
27889 /* Target hook for scalar_mode_supported_p. */
27890 static bool
27891 ix86_scalar_mode_supported_p (enum machine_mode mode)
27893 if (DECIMAL_FLOAT_MODE_P (mode))
27894 return default_decimal_float_supported_p ();
27895 else if (mode == TFmode)
27896 return true;
27897 else
27898 return default_scalar_mode_supported_p (mode);
27901 /* Implements target hook vector_mode_supported_p. */
27902 static bool
27903 ix86_vector_mode_supported_p (enum machine_mode mode)
27905 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
27906 return true;
27907 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
27908 return true;
27909 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
27910 return true;
27911 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
27912 return true;
27913 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
27914 return true;
27915 return false;
27918 /* Target hook for c_mode_for_suffix. */
27919 static enum machine_mode
27920 ix86_c_mode_for_suffix (char suffix)
27922 if (suffix == 'q')
27923 return TFmode;
27924 if (suffix == 'w')
27925 return XFmode;
27927 return VOIDmode;
27930 /* Worker function for TARGET_MD_ASM_CLOBBERS.
27932 We do this in the new i386 backend to maintain source compatibility
27933 with the old cc0-based compiler. */
27935 static tree
27936 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
27937 tree inputs ATTRIBUTE_UNUSED,
27938 tree clobbers)
27940 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
27941 clobbers);
27942 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
27943 clobbers);
27944 return clobbers;
27947 /* Implements target vector targetm.asm.encode_section_info. This
27948 is not used by NetWare. */
27950 static void ATTRIBUTE_UNUSED
27951 ix86_encode_section_info (tree decl, rtx rtl, int first)
27953 default_encode_section_info (decl, rtl, first);
27955 if (TREE_CODE (decl) == VAR_DECL
27956 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
27957 && ix86_in_large_data_p (decl))
27958 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
27961 /* Worker function for REVERSE_CONDITION. */
27963 enum rtx_code
27964 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
27966 return (mode != CCFPmode && mode != CCFPUmode
27967 ? reverse_condition (code)
27968 : reverse_condition_maybe_unordered (code));
27971 /* Output code to perform an x87 FP register move, from OPERANDS[1]
27972 to OPERANDS[0]. */
27974 const char *
27975 output_387_reg_move (rtx insn, rtx *operands)
27977 if (REG_P (operands[0]))
27979 if (REG_P (operands[1])
27980 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27982 if (REGNO (operands[0]) == FIRST_STACK_REG)
27983 return output_387_ffreep (operands, 0);
27984 return "fstp\t%y0";
27986 if (STACK_TOP_P (operands[0]))
27987 return "fld%Z1\t%y1";
27988 return "fst\t%y0";
27990 else if (MEM_P (operands[0]))
27992 gcc_assert (REG_P (operands[1]));
27993 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
27994 return "fstp%Z0\t%y0";
27995 else
27997 /* There is no non-popping store to memory for XFmode.
27998 So if we need one, follow the store with a load. */
27999 if (GET_MODE (operands[0]) == XFmode)
28000 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28001 else
28002 return "fst%Z0\t%y0";
28005 else
28006 gcc_unreachable();
28009 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28010 FP status register is set. */
28012 void
28013 ix86_emit_fp_unordered_jump (rtx label)
28015 rtx reg = gen_reg_rtx (HImode);
28016 rtx temp;
28018 emit_insn (gen_x86_fnstsw_1 (reg));
28020 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28022 emit_insn (gen_x86_sahf_1 (reg));
28024 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28025 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28027 else
28029 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28031 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28032 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28035 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28036 gen_rtx_LABEL_REF (VOIDmode, label),
28037 pc_rtx);
28038 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28040 emit_jump_insn (temp);
28041 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28044 /* Output code to perform a log1p XFmode calculation. */
28046 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28048 rtx label1 = gen_label_rtx ();
28049 rtx label2 = gen_label_rtx ();
28051 rtx tmp = gen_reg_rtx (XFmode);
28052 rtx tmp2 = gen_reg_rtx (XFmode);
28053 rtx test;
28055 emit_insn (gen_absxf2 (tmp, op1));
28056 test = gen_rtx_GE (VOIDmode, tmp,
28057 CONST_DOUBLE_FROM_REAL_VALUE (
28058 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28059 XFmode));
28060 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28062 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28063 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28064 emit_jump (label2);
28066 emit_label (label1);
28067 emit_move_insn (tmp, CONST1_RTX (XFmode));
28068 emit_insn (gen_addxf3 (tmp, op1, tmp));
28069 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28070 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28072 emit_label (label2);
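/* In scalar terms the sequence above computes (sketch):

       if (fabs (x) < 0.29289321881345247...)        i.e. 1 - sqrt(2)/2
         result = ln(2) * log2 (x + 1);               via fyl2xp1
       else
         result = ln(2) * log2 (1.0 + x);             via fyl2x

   standard_80387_constant_rtx (4) loads ln(2) (fldln2), so both branches
   yield ln (1 + x).  The x87 fyl2xp1 instruction is only specified for
   |x| below roughly 1 - sqrt(2)/2, which is why the explicit 1.0 + x
   fallback exists for larger arguments.  */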
28075 /* Output code to perform a Newton-Raphson approximation of a single precision
28076 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28078 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28080 rtx x0, x1, e0, e1, two;
28082 x0 = gen_reg_rtx (mode);
28083 e0 = gen_reg_rtx (mode);
28084 e1 = gen_reg_rtx (mode);
28085 x1 = gen_reg_rtx (mode);
28087 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28089 if (VECTOR_MODE_P (mode))
28090 two = ix86_build_const_vector (SFmode, true, two);
28092 two = force_reg (mode, two);
28094 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
28096 /* x0 = rcp(b) estimate */
28097 emit_insn (gen_rtx_SET (VOIDmode, x0,
28098 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28099 UNSPEC_RCP)));
28100 /* e0 = x0 * a */
28101 emit_insn (gen_rtx_SET (VOIDmode, e0,
28102 gen_rtx_MULT (mode, x0, a)));
28103 /* e1 = x0 * b */
28104 emit_insn (gen_rtx_SET (VOIDmode, e1,
28105 gen_rtx_MULT (mode, x0, b)));
28106 /* x1 = 2. - e1 */
28107 emit_insn (gen_rtx_SET (VOIDmode, x1,
28108 gen_rtx_MINUS (mode, two, e1)));
28109 /* res = e0 * x1 */
28110 emit_insn (gen_rtx_SET (VOIDmode, res,
28111 gen_rtx_MULT (mode, e0, x1)));
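/* Step by step, the emitted sequence is (a scalar sketch of the same math):

       x0  = rcp (b);           hardware reciprocal estimate, roughly 12 bits
       e0  = a * x0;
       e1  = b * x0;
       x1  = 2.0 - e1;
       res = e0 * x1;           == a * x0 * (2 - b * x0)

   i.e. one Newton-Raphson refinement of the RCPSS estimate, which roughly
   doubles the number of correct bits.  */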
28114 /* Output code to perform a Newton-Raphson approximation of a
28115 single precision floating point [reciprocal] square root. */
28117 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28118 bool recip)
28120 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28121 REAL_VALUE_TYPE r;
28123 x0 = gen_reg_rtx (mode);
28124 e0 = gen_reg_rtx (mode);
28125 e1 = gen_reg_rtx (mode);
28126 e2 = gen_reg_rtx (mode);
28127 e3 = gen_reg_rtx (mode);
28129 real_from_integer (&r, VOIDmode, -3, -1, 0);
28130 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28132 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28133 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28135 if (VECTOR_MODE_P (mode))
28137 mthree = ix86_build_const_vector (SFmode, true, mthree);
28138 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28141 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28142 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
28144 /* x0 = rsqrt(a) estimate */
28145 emit_insn (gen_rtx_SET (VOIDmode, x0,
28146 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28147 UNSPEC_RSQRT)));
28149 /* If (a == 0.0) Filter out infinity to prevent NaN for sqrt(0.0). */
28150 if (!recip)
28152 rtx zero, mask;
28154 zero = gen_reg_rtx (mode);
28155 mask = gen_reg_rtx (mode);
28157 zero = force_reg (mode, CONST0_RTX(mode));
28158 emit_insn (gen_rtx_SET (VOIDmode, mask,
28159 gen_rtx_NE (mode, zero, a)));
28161 emit_insn (gen_rtx_SET (VOIDmode, x0,
28162 gen_rtx_AND (mode, x0, mask)));
28165 /* e0 = x0 * a */
28166 emit_insn (gen_rtx_SET (VOIDmode, e0,
28167 gen_rtx_MULT (mode, x0, a)));
28168 /* e1 = e0 * x0 */
28169 emit_insn (gen_rtx_SET (VOIDmode, e1,
28170 gen_rtx_MULT (mode, e0, x0)));
28172 /* e2 = e1 - 3. */
28173 mthree = force_reg (mode, mthree);
28174 emit_insn (gen_rtx_SET (VOIDmode, e2,
28175 gen_rtx_PLUS (mode, e1, mthree)));
28177 mhalf = force_reg (mode, mhalf);
28178 if (recip)
28179 /* e3 = -.5 * x0 */
28180 emit_insn (gen_rtx_SET (VOIDmode, e3,
28181 gen_rtx_MULT (mode, x0, mhalf)));
28182 else
28183 /* e3 = -.5 * e0 */
28184 emit_insn (gen_rtx_SET (VOIDmode, e3,
28185 gen_rtx_MULT (mode, e0, mhalf)));
28186 /* ret = e2 * e3 */
28187 emit_insn (gen_rtx_SET (VOIDmode, res,
28188 gen_rtx_MULT (mode, e2, e3)));
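/* Written out as scalar steps (a sketch of the same math):

       x0  = rsqrt (a);                  hardware estimate
       e0  = x0 * a;
       e1  = e0 * x0;                    == a * x0 * x0
       e2  = e1 - 3.0;                   mthree above is -3.0
       e3  = recip ? -0.5 * x0 : -0.5 * e0;
       res = e2 * e3;

   so RES approximates rsqrt(a) or sqrt(a) with one Newton-Raphson step.
   The mask applied to x0 in the !recip case zeroes the estimate when
   a == 0.0, so that sqrt (0.0) comes out as 0.0 instead of the NaN that
   0 * inf would produce.  */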
28191 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28193 static void ATTRIBUTE_UNUSED
28194 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28195 tree decl)
28197 /* With Binutils 2.15, the "@unwind" marker must be specified on
28198 every occurrence of the ".eh_frame" section, not just the first
28199 one. */
28200 if (TARGET_64BIT
28201 && strcmp (name, ".eh_frame") == 0)
28203 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28204 flags & SECTION_WRITE ? "aw" : "a");
28205 return;
28207 default_elf_asm_named_section (name, flags, decl);
28210 /* Return the mangling of TYPE if it is an extended fundamental type. */
28212 static const char *
28213 ix86_mangle_type (const_tree type)
28215 type = TYPE_MAIN_VARIANT (type);
28217 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28218 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28219 return NULL;
28221 switch (TYPE_MODE (type))
28223 case TFmode:
28224 /* __float128 is "g". */
28225 return "g";
28226 case XFmode:
28227 /* "long double" or __float80 is "e". */
28228 return "e";
28229 default:
28230 return NULL;
28234 /* For 32-bit code we can save PIC register setup by using
28235 __stack_chk_fail_local hidden function instead of calling
28236 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
28237 register, so it is better to call __stack_chk_fail directly. */
28239 static tree
28240 ix86_stack_protect_fail (void)
28242 return TARGET_64BIT
28243 ? default_external_stack_protect_fail ()
28244 : default_hidden_stack_protect_fail ();
28247 /* Select a format to encode pointers in exception handling data. CODE
28248 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28249 true if the symbol may be affected by dynamic relocations.
28251 ??? All x86 object file formats are capable of representing this.
28252 After all, the relocation needed is the same as for the call insn.
28253 Whether or not a particular assembler allows us to enter such, I
28254 guess we'll have to see. */
28256 asm_preferred_eh_data_format (int code, int global)
28258 if (flag_pic)
28260 int type = DW_EH_PE_sdata8;
28261 if (!TARGET_64BIT
28262 || ix86_cmodel == CM_SMALL_PIC
28263 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28264 type = DW_EH_PE_sdata4;
28265 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28267 if (ix86_cmodel == CM_SMALL
28268 || (ix86_cmodel == CM_MEDIUM && code))
28269 return DW_EH_PE_udata4;
28270 return DW_EH_PE_absptr;
28273 /* Expand copysign from SIGN to the positive value ABS_VALUE
28274 storing the result in RESULT. If MASK is non-null, it shall be a mask to mask out
28275 the sign-bit. */
28276 static void
28277 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28279 enum machine_mode mode = GET_MODE (sign);
28280 rtx sgn = gen_reg_rtx (mode);
28281 if (mask == NULL_RTX)
28283 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28284 if (!VECTOR_MODE_P (mode))
28286 /* We need to generate a scalar mode mask in this case. */
28287 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28288 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28289 mask = gen_reg_rtx (mode);
28290 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28293 else
28294 mask = gen_rtx_NOT (mode, mask);
28295 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28296 gen_rtx_AND (mode, mask, sign)));
28297 emit_insn (gen_rtx_SET (VOIDmode, result,
28298 gen_rtx_IOR (mode, abs_value, sgn)));
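/* In scalar SFmode terms the default (MASK == NULL) case above computes

       result = abs_value | (sign & 0x80000000)

   i.e. the sign bit of SIGN is ORed onto the already-positive ABS_VALUE;
   DFmode and the vector modes use the correspondingly wider sign-bit
   masks.  When the caller passes MASK (the fabs mask, with the sign bit
   clear), it is complemented first, which selects the same sign bit.  */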
28301 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28302 mask for masking out the sign-bit is stored in *SMASK, if that is
28303 non-null. */
28304 static rtx
28305 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28307 enum machine_mode mode = GET_MODE (op0);
28308 rtx xa, mask;
28310 xa = gen_reg_rtx (mode);
28311 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28312 if (!VECTOR_MODE_P (mode))
28314 /* We need to generate a scalar mode mask in this case. */
28315 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28316 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28317 mask = gen_reg_rtx (mode);
28318 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28320 emit_insn (gen_rtx_SET (VOIDmode, xa,
28321 gen_rtx_AND (mode, op0, mask)));
28323 if (smask)
28324 *smask = mask;
28326 return xa;
28329 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28330 swapping the operands if SWAP_OPERANDS is true. The expanded
28331 code is a forward jump to a newly created label in case the
28332 comparison is true. The generated label rtx is returned. */
28333 static rtx
28334 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28335 bool swap_operands)
28337 rtx label, tmp;
28339 if (swap_operands)
28341 tmp = op0;
28342 op0 = op1;
28343 op1 = tmp;
28346 label = gen_label_rtx ();
28347 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28348 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28349 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28350 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28351 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28352 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28353 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28354 JUMP_LABEL (tmp) = label;
28356 return label;
28359 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28360 using comparison code CODE. Operands are swapped for the comparison if
28361 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28362 static rtx
28363 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28364 bool swap_operands)
28366 enum machine_mode mode = GET_MODE (op0);
28367 rtx mask = gen_reg_rtx (mode);
28369 if (swap_operands)
28371 rtx tmp = op0;
28372 op0 = op1;
28373 op1 = tmp;
28376 if (mode == DFmode)
28377 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28378 gen_rtx_fmt_ee (code, mode, op0, op1)));
28379 else
28380 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28381 gen_rtx_fmt_ee (code, mode, op0, op1)));
28383 return mask;
28386 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28387 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28388 static rtx
28389 ix86_gen_TWO52 (enum machine_mode mode)
28391 REAL_VALUE_TYPE TWO52r;
28392 rtx TWO52;
28394 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28395 TWO52 = const_double_from_real_value (TWO52r, mode);
28396 TWO52 = force_reg (mode, TWO52);
28398 return TWO52;
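/* Illustration only (hypothetical sketch, not emitted by the compiler):
   why 2**52 is the magic constant.  Once a binary64 value reaches 2**52
   its unit in the last place is 1.0, so adding 2**52 drops the fraction
   bits and the addition itself rounds to an integer (round-to-nearest-even
   in the default rounding mode); subtracting 2**52 afterwards is exact.  */
static double
sketch_round_via_two52 (double x)   /* assumes 0 <= x < 2**52 */
{
  const double two52 = 4503599627370496.0;   /* 2**52 */
  double t = x + two52;   /* e.g. 3.7 becomes 4.0 + 2**52 */
  return t - two52;       /* 4.0 */
}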
28401 /* Expand SSE sequence for computing lround from OP1 storing
28402 into OP0. */
28403 void
28404 ix86_expand_lround (rtx op0, rtx op1)
28406 /* C code for the stuff we're doing below:
28407 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28408 return (long)tmp;
28410 enum machine_mode mode = GET_MODE (op1);
28411 const struct real_format *fmt;
28412 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28413 rtx adj;
28415 /* load nextafter (0.5, 0.0) */
28416 fmt = REAL_MODE_FORMAT (mode);
28417 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28418 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28420 /* adj = copysign (0.5, op1) */
28421 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28422 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28424 /* adj = op1 + adj */
28425 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28427 /* op0 = (imode)adj */
28428 expand_fix (op0, adj, 0);
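/* Illustration only (hypothetical scalar sketch of the sequence above):
   why nextafter (0.5, 0.0) is used rather than 0.5.  For the largest
   double below 0.5, adding 0.5 rounds up to exactly 1.0, so truncation
   would return 1 where lround must return 0; adding the predecessor of
   0.5 keeps the sum below 1.0.  */
static long
sketch_lround (double x)
{
  const double pred_half = 0.49999999999999994;   /* nextafter (0.5, 0.0) */
  double adj = x < 0 ? -pred_half : pred_half;    /* copysign (pred_half, x) */
  return (long) (x + adj);                        /* truncate toward zero */
}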
28431 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1
28432 storing into OPERAND0. */
28433 void
28434 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28436 /* C code for the stuff we're doing below (for do_floor):
28437 xi = (long)op1;
28438 xi -= (double)xi > op1 ? 1 : 0;
28439 return xi;
28441 enum machine_mode fmode = GET_MODE (op1);
28442 enum machine_mode imode = GET_MODE (op0);
28443 rtx ireg, freg, label, tmp;
28445 /* reg = (long)op1 */
28446 ireg = gen_reg_rtx (imode);
28447 expand_fix (ireg, op1, 0);
28449 /* freg = (double)reg */
28450 freg = gen_reg_rtx (fmode);
28451 expand_float (freg, ireg, 0);
28453 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28454 label = ix86_expand_sse_compare_and_jump (UNLE,
28455 freg, op1, !do_floor);
28456 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28457 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28458 emit_move_insn (ireg, tmp);
28460 emit_label (label);
28461 LABEL_NUSES (label) = 1;
28463 emit_move_insn (op0, ireg);
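/* Illustration only (hypothetical scalar model of the lfloor path above;
   lceil swaps the comparison and adds 1 instead of subtracting).  */
static long
sketch_lfloor (double x)   /* assumes x fits in a long */
{
  long xi = (long) x;      /* truncates toward zero */
  if ((double) xi > x)     /* only negative non-integers, e.g. -2.5:
                              (long) -2.5 == -2 and -2.0 > -2.5 */
    xi -= 1;               /* ... so floor is -3 */
  return xi;
}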
28466 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28467 result in OPERAND0. */
28468 void
28469 ix86_expand_rint (rtx operand0, rtx operand1)
28471 /* C code for the stuff we're doing below:
28472 xa = fabs (operand1);
28473 if (!isless (xa, 2**52))
28474 return operand1;
28475 xa = xa + 2**52 - 2**52;
28476 return copysign (xa, operand1);
28478 enum machine_mode mode = GET_MODE (operand0);
28479 rtx res, xa, label, TWO52, mask;
28481 res = gen_reg_rtx (mode);
28482 emit_move_insn (res, operand1);
28484 /* xa = abs (operand1) */
28485 xa = ix86_expand_sse_fabs (res, &mask);
28487 /* if (!isless (xa, TWO52)) goto label; */
28488 TWO52 = ix86_gen_TWO52 (mode);
28489 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28491 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28492 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28494 ix86_sse_copysign_to_positive (res, xa, res, mask);
28496 emit_label (label);
28497 LABEL_NUSES (label) = 1;
28499 emit_move_insn (operand0, res);
28502 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28503 into OPERAND0. */
28504 void
28505 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28507 /* C code for the stuff we expand below.
28508 double xa = fabs (x), x2;
28509 if (!isless (xa, TWO52))
28510 return x;
28511 xa = xa + TWO52 - TWO52;
28512 x2 = copysign (xa, x);
28513 Compensate. Floor:
28514 if (x2 > x)
28515 x2 -= 1;
28516 Compensate. Ceil:
28517 if (x2 < x)
28518 x2 -= -1;
28519 return x2;
28521 enum machine_mode mode = GET_MODE (operand0);
28522 rtx xa, TWO52, tmp, label, one, res, mask;
28524 TWO52 = ix86_gen_TWO52 (mode);
28526 /* Temporary for holding the result, initialized to the input
28527 operand to ease control flow. */
28528 res = gen_reg_rtx (mode);
28529 emit_move_insn (res, operand1);
28531 /* xa = abs (operand1) */
28532 xa = ix86_expand_sse_fabs (res, &mask);
28534 /* if (!isless (xa, TWO52)) goto label; */
28535 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28537 /* xa = xa + TWO52 - TWO52; */
28538 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28539 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28541 /* xa = copysign (xa, operand1) */
28542 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28544 /* generate 1.0 or -1.0 */
28545 one = force_reg (mode,
28546 const_double_from_real_value (do_floor
28547 ? dconst1 : dconstm1, mode));
28549 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28550 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28551 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28552 gen_rtx_AND (mode, one, tmp)));
28553 /* We always need to subtract here to preserve signed zero. */
28554 tmp = expand_simple_binop (mode, MINUS,
28555 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28556 emit_move_insn (res, tmp);
28558 emit_label (label);
28559 LABEL_NUSES (label) = 1;
28561 emit_move_insn (operand0, res);
28564 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28565 into OPERAND0. */
28566 void
28567 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28569 /* C code for the stuff we expand below.
28570 double xa = fabs (x), x2;
28571 if (!isless (xa, TWO52))
28572 return x;
28573 x2 = (double)(long)x;
28574 Compensate. Floor:
28575 if (x2 > x)
28576 x2 -= 1;
28577 Compensate. Ceil:
28578 if (x2 < x)
28579 x2 += 1;
28580 if (HONOR_SIGNED_ZEROS (mode))
28581 return copysign (x2, x);
28582 return x2;
28584 enum machine_mode mode = GET_MODE (operand0);
28585 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28587 TWO52 = ix86_gen_TWO52 (mode);
28589 /* Temporary for holding the result, initialized to the input
28590 operand to ease control flow. */
28591 res = gen_reg_rtx (mode);
28592 emit_move_insn (res, operand1);
28594 /* xa = abs (operand1) */
28595 xa = ix86_expand_sse_fabs (res, &mask);
28597 /* if (!isless (xa, TWO52)) goto label; */
28598 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28600 /* xa = (double)(long)x */
28601 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28602 expand_fix (xi, res, 0);
28603 expand_float (xa, xi, 0);
28605 /* generate 1.0 */
28606 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28608 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28609 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28610 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28611 gen_rtx_AND (mode, one, tmp)));
28612 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28613 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28614 emit_move_insn (res, tmp);
28616 if (HONOR_SIGNED_ZEROS (mode))
28617 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28619 emit_label (label);
28620 LABEL_NUSES (label) = 1;
28622 emit_move_insn (operand0, res);
28625 /* Expand SSE sequence for computing round from OPERAND1 storing
28626 into OPERAND0. Sequence that works without relying on DImode truncation
28627 via cvttsd2siq that is only available on 64bit targets. */
28628 void
28629 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28631 /* C code for the stuff we expand below.
28632 double xa = fabs (x), xa2, x2;
28633 if (!isless (xa, TWO52))
28634 return x;
28635 Using the absolute value and copying back sign makes
28636 -0.0 -> -0.0 correct.
28637 xa2 = xa + TWO52 - TWO52;
28638 Compensate.
28639 dxa = xa2 - xa;
28640 if (dxa <= -0.5)
28641 xa2 += 1;
28642 else if (dxa > 0.5)
28643 xa2 -= 1;
28644 x2 = copysign (xa2, x);
28645 return x2;
28647 enum machine_mode mode = GET_MODE (operand0);
28648 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28650 TWO52 = ix86_gen_TWO52 (mode);
28652 /* Temporary for holding the result, initialized to the input
28653 operand to ease control flow. */
28654 res = gen_reg_rtx (mode);
28655 emit_move_insn (res, operand1);
28657 /* xa = abs (operand1) */
28658 xa = ix86_expand_sse_fabs (res, &mask);
28660 /* if (!isless (xa, TWO52)) goto label; */
28661 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28663 /* xa2 = xa + TWO52 - TWO52; */
28664 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28665 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28667 /* dxa = xa2 - xa; */
28668 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28670 /* generate 0.5, 1.0 and -0.5 */
28671 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28672 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28673 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28674 0, OPTAB_DIRECT);
28676 /* Compensate. */
28677 tmp = gen_reg_rtx (mode);
28678 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28679 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28680 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28681 gen_rtx_AND (mode, one, tmp)));
28682 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28683 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28684 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28685 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28686 gen_rtx_AND (mode, one, tmp)));
28687 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28689 /* res = copysign (xa2, operand1) */
28690 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28692 emit_label (label);
28693 LABEL_NUSES (label) = 1;
28695 emit_move_insn (operand0, res);
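/* Illustration only (hypothetical scalar model of the compensation above,
   applied to the absolute value XA, so XA >= 0).  The 2**52 trick rounds
   halfway cases to even, while round() must round them away from zero;
   DXA detects a halfway case that went the wrong way.  */
static double
sketch_round_abs (double xa)   /* assumes 0 <= xa < 2**52 */
{
  const double two52 = 4503599627370496.0;
  double xa2 = (xa + two52) - two52;   /* round-to-nearest-even */
  double dxa = xa2 - xa;               /* always in [-0.5, 0.5] */
  if (dxa <= -0.5)                     /* e.g. xa == 2.5 gave xa2 == 2.0 */
    xa2 += 1.0;                        /* ... fix up to 3.0 */
  return xa2;                          /* caller restores the sign */
}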
28698 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28699 into OPERAND0. */
28700 void
28701 ix86_expand_trunc (rtx operand0, rtx operand1)
28703 /* C code for SSE variant we expand below.
28704 double xa = fabs (x), x2;
28705 if (!isless (xa, TWO52))
28706 return x;
28707 x2 = (double)(long)x;
28708 if (HONOR_SIGNED_ZEROS (mode))
28709 return copysign (x2, x);
28710 return x2;
28712 enum machine_mode mode = GET_MODE (operand0);
28713 rtx xa, xi, TWO52, label, res, mask;
28715 TWO52 = ix86_gen_TWO52 (mode);
28717 /* Temporary for holding the result, initialized to the input
28718 operand to ease control flow. */
28719 res = gen_reg_rtx (mode);
28720 emit_move_insn (res, operand1);
28722 /* xa = abs (operand1) */
28723 xa = ix86_expand_sse_fabs (res, &mask);
28725 /* if (!isless (xa, TWO52)) goto label; */
28726 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28728 /* x = (double)(long)x */
28729 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28730 expand_fix (xi, res, 0);
28731 expand_float (res, xi, 0);
28733 if (HONOR_SIGNED_ZEROS (mode))
28734 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28736 emit_label (label);
28737 LABEL_NUSES (label) = 1;
28739 emit_move_insn (operand0, res);
28742 /* Expand SSE sequence for computing trunc from OPERAND1 storing into
28743 OPERAND0, without relying on DImode truncation (cvttsd2siq, 64bit targets only). */
28744 void
28745 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28747 enum machine_mode mode = GET_MODE (operand0);
28748 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28750 /* C code for SSE variant we expand below.
28751 double xa = fabs (x), x2;
28752 if (!isless (xa, TWO52))
28753 return x;
28754 xa2 = xa + TWO52 - TWO52;
28755 Compensate:
28756 if (xa2 > xa)
28757 xa2 -= 1.0;
28758 x2 = copysign (xa2, x);
28759 return x2;
28762 TWO52 = ix86_gen_TWO52 (mode);
28764 /* Temporary for holding the result, initialized to the input
28765 operand to ease control flow. */
28766 res = gen_reg_rtx (mode);
28767 emit_move_insn (res, operand1);
28769 /* xa = abs (operand1) */
28770 xa = ix86_expand_sse_fabs (res, &smask);
28772 /* if (!isless (xa, TWO52)) goto label; */
28773 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28775 /* res = xa + TWO52 - TWO52; */
28776 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28777 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28778 emit_move_insn (res, tmp);
28780 /* generate 1.0 */
28781 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28783 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
28784 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28785 emit_insn (gen_rtx_SET (VOIDmode, mask,
28786 gen_rtx_AND (mode, mask, one)));
28787 tmp = expand_simple_binop (mode, MINUS,
28788 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28789 emit_move_insn (res, tmp);
28791 /* res = copysign (res, operand1) */
28792 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28794 emit_label (label);
28795 LABEL_NUSES (label) = 1;
28797 emit_move_insn (operand0, res);
28800 /* Expand SSE sequence for computing round from OPERAND1 storing
28801 into OPERAND0. */
28802 void
28803 ix86_expand_round (rtx operand0, rtx operand1)
28805 /* C code for the stuff we're doing below:
28806 double xa = fabs (x);
28807 if (!isless (xa, TWO52))
28808 return x;
28809 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28810 return copysign (xa, x);
28812 enum machine_mode mode = GET_MODE (operand0);
28813 rtx res, TWO52, xa, label, xi, half, mask;
28814 const struct real_format *fmt;
28815 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28817 /* Temporary for holding the result, initialized to the input
28818 operand to ease control flow. */
28819 res = gen_reg_rtx (mode);
28820 emit_move_insn (res, operand1);
28822 TWO52 = ix86_gen_TWO52 (mode);
28823 xa = ix86_expand_sse_fabs (res, &mask);
28824 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28826 /* load nextafter (0.5, 0.0) */
28827 fmt = REAL_MODE_FORMAT (mode);
28828 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28829 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
28831 /* xa = xa + 0.5 */
28832 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
28833 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
28835 /* xa = (double)(int64_t)xa */
28836 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28837 expand_fix (xi, xa, 0);
28838 expand_float (xa, xi, 0);
28840 /* res = copysign (xa, operand1) */
28841 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
28843 emit_label (label);
28844 LABEL_NUSES (label) = 1;
28846 emit_move_insn (operand0, res);
28850 /* Table of valid machine attributes. */
28851 static const struct attribute_spec ix86_attribute_table[] =
28853 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
28854 /* Stdcall attribute says callee is responsible for popping arguments
28855 if they are not variable. */
28856 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28857 /* Fastcall attribute says callee is responsible for popping arguments
28858 if they are not variable. */
28859 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28860 /* Cdecl attribute says the callee is a normal C declaration */
28861 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28862 /* Regparm attribute specifies how many integer arguments are to be
28863 passed in registers. */
28864 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
28865 /* Sseregparm attribute says we are using x86_64 calling conventions
28866 for FP arguments. */
28867 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28868 /* force_align_arg_pointer says this function realigns the stack at entry. */
28869 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
28870 false, true, true, ix86_handle_cconv_attribute },
28871 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
28872 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
28873 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
28874 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
28875 #endif
28876 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28877 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28878 #ifdef SUBTARGET_ATTRIBUTE_TABLE
28879 SUBTARGET_ATTRIBUTE_TABLE,
28880 #endif
28881 /* ms_abi and sysv_abi calling convention function attributes. */
28882 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28883 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28884 { "ms_hook_prologue", 0, 0, true, false, false, ix86_handle_fndecl_attribute },
28885 /* End element. */
28886 { NULL, 0, 0, false, false, false, NULL }
28889 /* Implement targetm.vectorize.builtin_vectorization_cost. */
28890 static int
28891 ix86_builtin_vectorization_cost (bool runtime_test)
28893 /* If the branch of the runtime test is taken (i.e. the vectorized
28894 version is skipped), this incurs a misprediction cost (because the
28895 vectorized version is expected to be the fall-through). So we subtract
28896 the latency of a mispredicted branch from the costs that are incurred
28897 when the vectorized version is executed.
28899 TODO: The values in individual target tables have to be tuned or new
28900 fields may be needed. E.g., on K8, the default branch path is the
28901 not-taken path. If the taken path is predicted correctly, the minimum
28902 penalty of going down the taken-path is 1 cycle. If the taken-path is
28903 not predicted correctly, then the minimum penalty is 10 cycles. */
28905 if (runtime_test)
28907 return (-(ix86_cost->cond_taken_branch_cost));
28909 else
28910 return 0;
28913 /* Implement targetm.vectorize.builtin_vec_perm. */
28915 static tree
28916 ix86_vectorize_builtin_vec_perm (tree vec_type, tree *mask_type)
28918 tree itype = TREE_TYPE (vec_type);
28919 bool u = TYPE_UNSIGNED (itype);
28920 enum machine_mode vmode = TYPE_MODE (vec_type);
28921 enum ix86_builtins fcode = fcode; /* Silence bogus warning. */
28922 bool ok = TARGET_SSE2;
28924 switch (vmode)
28926 case V4DFmode:
28927 ok = TARGET_AVX;
28928 fcode = IX86_BUILTIN_VEC_PERM_V4DF;
28929 goto get_di;
28930 case V2DFmode:
28931 fcode = IX86_BUILTIN_VEC_PERM_V2DF;
28932 get_di:
28933 itype = ix86_get_builtin_type (IX86_BT_DI);
28934 break;
28936 case V8SFmode:
28937 ok = TARGET_AVX;
28938 fcode = IX86_BUILTIN_VEC_PERM_V8SF;
28939 goto get_si;
28940 case V4SFmode:
28941 ok = TARGET_SSE;
28942 fcode = IX86_BUILTIN_VEC_PERM_V4SF;
28943 get_si:
28944 itype = ix86_get_builtin_type (IX86_BT_SI);
28945 break;
28947 case V2DImode:
28948 fcode = u ? IX86_BUILTIN_VEC_PERM_V2DI_U : IX86_BUILTIN_VEC_PERM_V2DI;
28949 break;
28950 case V4SImode:
28951 fcode = u ? IX86_BUILTIN_VEC_PERM_V4SI_U : IX86_BUILTIN_VEC_PERM_V4SI;
28952 break;
28953 case V8HImode:
28954 fcode = u ? IX86_BUILTIN_VEC_PERM_V8HI_U : IX86_BUILTIN_VEC_PERM_V8HI;
28955 break;
28956 case V16QImode:
28957 fcode = u ? IX86_BUILTIN_VEC_PERM_V16QI_U : IX86_BUILTIN_VEC_PERM_V16QI;
28958 break;
28959 default:
28960 ok = false;
28961 break;
28964 if (!ok)
28965 return NULL_TREE;
28967 *mask_type = itype;
28968 return ix86_builtins[(int) fcode];
28971 /* Return a vector mode with twice as many elements as VMODE. */
28972 /* ??? Consider moving this to a table generated by genmodes.c. */
28974 static enum machine_mode
28975 doublesize_vector_mode (enum machine_mode vmode)
28977 switch (vmode)
28979 case V2SFmode: return V4SFmode;
28980 case V1DImode: return V2DImode;
28981 case V2SImode: return V4SImode;
28982 case V4HImode: return V8HImode;
28983 case V8QImode: return V16QImode;
28985 case V2DFmode: return V4DFmode;
28986 case V4SFmode: return V8SFmode;
28987 case V2DImode: return V4DImode;
28988 case V4SImode: return V8SImode;
28989 case V8HImode: return V16HImode;
28990 case V16QImode: return V32QImode;
28992 case V4DFmode: return V8DFmode;
28993 case V8SFmode: return V16SFmode;
28994 case V4DImode: return V8DImode;
28995 case V8SImode: return V16SImode;
28996 case V16HImode: return V32HImode;
28997 case V32QImode: return V64QImode;
28999 default:
29000 gcc_unreachable ();
29004 /* Construct (set target (vec_select op0 (parallel perm))) and
29005 return true if that's a valid instruction in the active ISA. */
29007 static bool
29008 expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
29010 rtx rperm[MAX_VECT_LEN], x;
29011 unsigned i;
29013 for (i = 0; i < nelt; ++i)
29014 rperm[i] = GEN_INT (perm[i]);
29016 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
29017 x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
29018 x = gen_rtx_SET (VOIDmode, target, x);
29020 x = emit_insn (x);
29021 if (recog_memoized (x) < 0)
29023 remove_insn (x);
29024 return false;
29026 return true;
29029 /* Similar, but generate a vec_concat from op0 and op1 as well. */
29031 static bool
29032 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
29033 const unsigned char *perm, unsigned nelt)
29035 enum machine_mode v2mode;
29036 rtx x;
29038 v2mode = doublesize_vector_mode (GET_MODE (op0));
29039 x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
29040 return expand_vselect (target, x, perm, nelt);
29043 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29044 in terms of blendp[sd] / pblendw / pblendvb. */
29046 static bool
29047 expand_vec_perm_blend (struct expand_vec_perm_d *d)
29049 enum machine_mode vmode = d->vmode;
29050 unsigned i, mask, nelt = d->nelt;
29051 rtx target, op0, op1, x;
29053 if (!TARGET_SSE4_1 || d->op0 == d->op1)
29054 return false;
29055 if (!(GET_MODE_SIZE (vmode) == 16 || vmode == V4DFmode || vmode == V8SFmode))
29056 return false;
29058 /* This is a blend, not a permute. Elements must stay in their
29059 respective lanes. */
29060 for (i = 0; i < nelt; ++i)
29062 unsigned e = d->perm[i];
29063 if (!(e == i || e == i + nelt))
29064 return false;
29067 if (d->testing_p)
29068 return true;
29070 /* ??? Without SSE4.1, we could implement this with and/andn/or. This
29071 decision should be extracted elsewhere, so that we only try that
29072 sequence once all budget==3 options have been tried. */
29074 /* For bytes, see if bytes move in pairs so we can use pblendw with
29075 an immediate argument, rather than pblendvb with a vector argument. */
29076 if (vmode == V16QImode)
29078 bool pblendw_ok = true;
29079 for (i = 0; i < 16 && pblendw_ok; i += 2)
29080 pblendw_ok = (d->perm[i] + 1 == d->perm[i + 1]);
29082 if (!pblendw_ok)
29084 rtx rperm[16], vperm;
29086 for (i = 0; i < nelt; ++i)
29087 rperm[i] = (d->perm[i] < nelt ? const0_rtx : constm1_rtx);
29089 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29090 vperm = force_reg (V16QImode, vperm);
29092 emit_insn (gen_sse4_1_pblendvb (d->target, d->op0, d->op1, vperm));
29093 return true;
29097 target = d->target;
29098 op0 = d->op0;
29099 op1 = d->op1;
29100 mask = 0;
29102 switch (vmode)
29104 case V4DFmode:
29105 case V8SFmode:
29106 case V2DFmode:
29107 case V4SFmode:
29108 case V8HImode:
29109 for (i = 0; i < nelt; ++i)
29110 mask |= (d->perm[i] >= nelt) << i;
29111 break;
29113 case V2DImode:
29114 for (i = 0; i < 2; ++i)
29115 mask |= (d->perm[i] >= 2 ? 15 : 0) << (i * 4);
29116 goto do_subreg;
29118 case V4SImode:
29119 for (i = 0; i < 4; ++i)
29120 mask |= (d->perm[i] >= 4 ? 3 : 0) << (i * 2);
29121 goto do_subreg;
29123 case V16QImode:
29124 for (i = 0; i < 8; ++i)
29125 mask |= (d->perm[i * 2] >= 16) << i;
29127 do_subreg:
29128 vmode = V8HImode;
29129 target = gen_lowpart (vmode, target);
29130 op0 = gen_lowpart (vmode, op0);
29131 op1 = gen_lowpart (vmode, op1);
29132 break;
29134 default:
29135 gcc_unreachable ();
29138 /* This matches five different patterns with the different modes. */
29139 x = gen_rtx_VEC_MERGE (vmode, op0, op1, GEN_INT (mask));
29140 x = gen_rtx_SET (VOIDmode, target, x);
29141 emit_insn (x);
29143 return true;
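/* Illustration only (hypothetical helper, not used above): how the
   blendps/blendpd immediate is derived from a permutation.  Bit I of the
   immediate selects OP1 for output element I; a clear bit keeps OP0.
   E.g. for V4SF and perm { 0, 5, 2, 7 } the immediate is 0xa.  */
static unsigned int
sketch_blend_immediate (const unsigned char *perm, unsigned int nelt)
{
  unsigned int i, mask = 0;
  for (i = 0; i < nelt; ++i)
    mask |= (unsigned int) (perm[i] >= nelt) << i;   /* element from OP1?  */
  return mask;
}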
29146 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29147 in terms of the variable form of vpermilps.
29149 Note that we will have already failed the immediate input vpermilps,
29150 which requires that the high and low part shuffle be identical; the
29151 variable form doesn't require that. */
29153 static bool
29154 expand_vec_perm_vpermil (struct expand_vec_perm_d *d)
29156 rtx rperm[8], vperm;
29157 unsigned i;
29159 if (!TARGET_AVX || d->vmode != V8SFmode || d->op0 != d->op1)
29160 return false;
29162 /* We can only permute within the 128-bit lane. */
29163 for (i = 0; i < 8; ++i)
29165 unsigned e = d->perm[i];
29166 if (i < 4 ? e >= 4 : e < 4)
29167 return false;
29170 if (d->testing_p)
29171 return true;
29173 for (i = 0; i < 8; ++i)
29175 unsigned e = d->perm[i];
29177 /* Within each 128-bit lane, the elements of op0 are numbered
29178 from 0 and the elements of op1 are numbered from 4. */
29179 if (e >= 8 + 4)
29180 e -= 8;
29181 else if (e >= 4)
29182 e -= 4;
29184 rperm[i] = GEN_INT (e);
29187 vperm = gen_rtx_CONST_VECTOR (V8SImode, gen_rtvec_v (8, rperm));
29188 vperm = force_reg (V8SImode, vperm);
29189 emit_insn (gen_avx_vpermilvarv8sf3 (d->target, d->op0, vperm));
29191 return true;
29194 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29195 in terms of pshufb or vpperm. */
29197 static bool
29198 expand_vec_perm_pshufb (struct expand_vec_perm_d *d)
29200 unsigned i, nelt, eltsz;
29201 rtx rperm[16], vperm, target, op0, op1;
29203 if (!(d->op0 == d->op1 ? TARGET_SSSE3 : TARGET_XOP))
29204 return false;
29205 if (GET_MODE_SIZE (d->vmode) != 16)
29206 return false;
29208 if (d->testing_p)
29209 return true;
29211 nelt = d->nelt;
29212 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29214 for (i = 0; i < nelt; ++i)
29216 unsigned j, e = d->perm[i];
29217 for (j = 0; j < eltsz; ++j)
29218 rperm[i * eltsz + j] = GEN_INT (e * eltsz + j);
29221 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm));
29222 vperm = force_reg (V16QImode, vperm);
29224 target = gen_lowpart (V16QImode, d->target);
29225 op0 = gen_lowpart (V16QImode, d->op0);
29226 if (d->op0 == d->op1)
29227 emit_insn (gen_ssse3_pshufbv16qi3 (target, op0, vperm));
29228 else
29230 op1 = gen_lowpart (V16QImode, d->op1);
29231 emit_insn (gen_xop_pperm (target, op0, op1, vperm));
29234 return true;
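/* Illustration only (hypothetical helper mirroring the loop above): a
   pshufb/vpperm control vector works on bytes, so each selected element of
   ELTSZ bytes contributes ELTSZ consecutive byte indices.  E.g. a V4SI
   perm { 2, 0, 1, 3 } becomes bytes
   { 8 9 10 11, 0 1 2 3, 4 5 6 7, 12 13 14 15 }.  */
static void
sketch_pshufb_control (const unsigned char *perm, unsigned int nelt,
                       unsigned int eltsz, unsigned char bytes[16])
{
  unsigned int i, j;
  for (i = 0; i < nelt; ++i)
    for (j = 0; j < eltsz; ++j)
      bytes[i * eltsz + j] = (unsigned char) (perm[i] * eltsz + j);
}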
29237 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to instantiate D
29238 in a single instruction. */
29240 static bool
29241 expand_vec_perm_1 (struct expand_vec_perm_d *d)
29243 unsigned i, nelt = d->nelt;
29244 unsigned char perm2[MAX_VECT_LEN];
29246 /* Check plain VEC_SELECT first, because AVX has instructions that could
29247 match both SEL and SEL+CONCAT, but the plain SEL will allow a memory
29248 input where SEL+CONCAT may not. */
29249 if (d->op0 == d->op1)
29251 if (expand_vselect (d->target, d->op0, d->perm, nelt))
29252 return true;
29254 /* There are plenty of patterns in sse.md that are written for
29255 SEL+CONCAT and are not replicated for a single op. Perhaps
29256 that should be changed, to avoid the nastiness here. */
29258 /* Recognize interleave style patterns, which means incrementing
29259 every other permutation operand. */
29260 for (i = 0; i < nelt; i += 2)
29262 perm2[i] = d->perm[i];
29263 perm2[i+1] = d->perm[i+1] + nelt;
29265 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29266 return true;
29268 /* Recognize shufps, which means adding {0, 0, nelt, nelt}. */
29269 if (nelt >= 4)
29271 memcpy (perm2, d->perm, nelt);
29272 for (i = 2; i < nelt; i += 4)
29274 perm2[i+0] += nelt;
29275 perm2[i+1] += nelt;
29278 if (expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, nelt))
29279 return true;
29283 /* Finally, try the fully general two operand permute. */
29284 if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
29285 return true;
29287 /* Recognize interleave style patterns with reversed operands. */
29288 if (d->op0 != d->op1)
29290 for (i = 0; i < nelt; ++i)
29292 unsigned e = d->perm[i];
29293 if (e >= nelt)
29294 e -= nelt;
29295 else
29296 e += nelt;
29297 perm2[i] = e;
29300 if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
29301 return true;
29304 /* Try the SSE4.1 blend variable merge instructions. */
29305 if (expand_vec_perm_blend (d))
29306 return true;
29308 /* Try one of the AVX vpermil variable permutations. */
29309 if (expand_vec_perm_vpermil (d))
29310 return true;
29312 /* Try the SSSE3 pshufb or XOP vpperm variable permutation. */
29313 if (expand_vec_perm_pshufb (d))
29314 return true;
29316 return false;
29319 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to implement D
29320 in terms of a pair of pshuflw + pshufhw instructions. */
29322 static bool
29323 expand_vec_perm_pshuflw_pshufhw (struct expand_vec_perm_d *d)
29325 unsigned char perm2[MAX_VECT_LEN];
29326 unsigned i;
29327 bool ok;
29329 if (d->vmode != V8HImode || d->op0 != d->op1)
29330 return false;
29332 /* The two permutations only operate in 64-bit lanes. */
29333 for (i = 0; i < 4; ++i)
29334 if (d->perm[i] >= 4)
29335 return false;
29336 for (i = 4; i < 8; ++i)
29337 if (d->perm[i] < 4)
29338 return false;
29340 if (d->testing_p)
29341 return true;
29343 /* Emit the pshuflw. */
29344 memcpy (perm2, d->perm, 4);
29345 for (i = 4; i < 8; ++i)
29346 perm2[i] = i;
29347 ok = expand_vselect (d->target, d->op0, perm2, 8);
29348 gcc_assert (ok);
29350 /* Emit the pshufhw. */
29351 memcpy (perm2 + 4, d->perm + 4, 4);
29352 for (i = 0; i < 4; ++i)
29353 perm2[i] = i;
29354 ok = expand_vselect (d->target, d->target, perm2, 8);
29355 gcc_assert (ok);
29357 return true;
29360 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29361 the permutation using the SSSE3 palignr instruction. This succeeds
29362 when all of the elements in PERM fit within one vector and we merely
29363 need to shift them down so that a single vector permutation has a
29364 chance to succeed. */
29366 static bool
29367 expand_vec_perm_palignr (struct expand_vec_perm_d *d)
29369 unsigned i, nelt = d->nelt;
29370 unsigned min, max;
29371 bool in_order, ok;
29372 rtx shift;
29374 /* Even with AVX, palignr only operates on 128-bit vectors. */
29375 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29376 return false;
29378 min = nelt, max = 0;
29379 for (i = 0; i < nelt; ++i)
29381 unsigned e = d->perm[i];
29382 if (e < min)
29383 min = e;
29384 if (e > max)
29385 max = e;
29387 if (min == 0 || max - min >= nelt)
29388 return false;
29390 /* Given that we have SSSE3, we know we'll be able to implement the
29391 single operand permutation after the palignr with pshufb. */
29392 if (d->testing_p)
29393 return true;
29395 shift = GEN_INT (min * GET_MODE_BITSIZE (GET_MODE_INNER (d->vmode)));
29396 emit_insn (gen_ssse3_palignrti (gen_lowpart (TImode, d->target),
29397 gen_lowpart (TImode, d->op1),
29398 gen_lowpart (TImode, d->op0), shift));
29400 d->op0 = d->op1 = d->target;
29402 in_order = true;
29403 for (i = 0; i < nelt; ++i)
29405 unsigned e = d->perm[i] - min;
29406 if (e != i)
29407 in_order = false;
29408 d->perm[i] = e;
29411 /* Test for the degenerate case where the alignment by itself
29412 produces the desired permutation. */
29413 if (in_order)
29414 return true;
29416 ok = expand_vec_perm_1 (d);
29417 gcc_assert (ok);
29419 return ok;
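/* Illustration only (hypothetical helper restating the feasibility test
   above): one palignr plus a single-vector shuffle suffices when every
   index of PERM falls inside a window of NELT consecutive elements of the
   OP1:OP0 pair, and the window does not start at 0 (in which case no
   shift would be needed at all).  */
static int
sketch_fits_palignr_window (const unsigned char *perm, unsigned int nelt)
{
  unsigned int i, min = 2 * nelt, max = 0;
  for (i = 0; i < nelt; ++i)
    {
      if (perm[i] < min)
        min = perm[i];
      if (perm[i] > max)
        max = perm[i];
    }
  return min != 0 && max - min < nelt;
}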
29422 /* A subroutine of ix86_expand_vec_perm_builtin_1. Try to simplify
29423 a two vector permutation into a single vector permutation by using
29424 an interleave operation to merge the vectors. */
29426 static bool
29427 expand_vec_perm_interleave2 (struct expand_vec_perm_d *d)
29429 struct expand_vec_perm_d dremap, dfinal;
29430 unsigned i, nelt = d->nelt, nelt2 = nelt / 2;
29431 unsigned contents, h1, h2, h3, h4;
29432 unsigned char remap[2 * MAX_VECT_LEN];
29433 rtx seq;
29434 bool ok;
29436 if (d->op0 == d->op1)
29437 return false;
29439 /* The 256-bit unpck[lh]p[sd] instructions only operate within the 128-bit
29440 lanes. We can use similar techniques with the vperm2f128 instruction,
29441 but it requires slightly different logic. */
29442 if (GET_MODE_SIZE (d->vmode) != 16)
29443 return false;
29445 /* Examine from whence the elements come. */
29446 contents = 0;
29447 for (i = 0; i < nelt; ++i)
29448 contents |= 1u << d->perm[i];
29450 /* Split the two input vectors into 4 halves. */
29451 h1 = (1u << nelt2) - 1;
29452 h2 = h1 << nelt2;
29453 h3 = h2 << nelt2;
29454 h4 = h3 << nelt2;
29456 memset (remap, 0xff, sizeof (remap));
29457 dremap = *d;
29459 /* If the elements come only from the low halves, use interleave low;
29460 similarly for interleave high. If the elements are from mis-matched halves, we
29461 can use shufps for V4SF/V4SI or do a DImode shuffle. */
29462 if ((contents & (h1 | h3)) == contents)
29464 for (i = 0; i < nelt2; ++i)
29466 remap[i] = i * 2;
29467 remap[i + nelt] = i * 2 + 1;
29468 dremap.perm[i * 2] = i;
29469 dremap.perm[i * 2 + 1] = i + nelt;
29472 else if ((contents & (h2 | h4)) == contents)
29474 for (i = 0; i < nelt2; ++i)
29476 remap[i + nelt2] = i * 2;
29477 remap[i + nelt + nelt2] = i * 2 + 1;
29478 dremap.perm[i * 2] = i + nelt2;
29479 dremap.perm[i * 2 + 1] = i + nelt + nelt2;
29482 else if ((contents & (h1 | h4)) == contents)
29484 for (i = 0; i < nelt2; ++i)
29486 remap[i] = i;
29487 remap[i + nelt + nelt2] = i + nelt2;
29488 dremap.perm[i] = i;
29489 dremap.perm[i + nelt2] = i + nelt + nelt2;
29491 if (nelt != 4)
29493 dremap.vmode = V2DImode;
29494 dremap.nelt = 2;
29495 dremap.perm[0] = 0;
29496 dremap.perm[1] = 3;
29499 else if ((contents & (h2 | h3)) == contents)
29501 for (i = 0; i < nelt2; ++i)
29503 remap[i + nelt2] = i;
29504 remap[i + nelt] = i + nelt2;
29505 dremap.perm[i] = i + nelt2;
29506 dremap.perm[i + nelt2] = i + nelt;
29508 if (nelt != 4)
29510 dremap.vmode = V2DImode;
29511 dremap.nelt = 2;
29512 dremap.perm[0] = 1;
29513 dremap.perm[1] = 2;
29516 else
29517 return false;
29519 /* Use the remapping array set up above to move the elements from their
29520 swizzled locations into their final destinations. */
29521 dfinal = *d;
29522 for (i = 0; i < nelt; ++i)
29524 unsigned e = remap[d->perm[i]];
29525 gcc_assert (e < nelt);
29526 dfinal.perm[i] = e;
29528 dfinal.op0 = gen_reg_rtx (dfinal.vmode);
29529 dfinal.op1 = dfinal.op0;
29530 dremap.target = dfinal.op0;
29532 /* Test if the final remap can be done with a single insn. For V4SFmode or
29533 V4SImode this *will* succeed. For V8HImode or V16QImode it may not. */
29534 start_sequence ();
29535 ok = expand_vec_perm_1 (&dfinal);
29536 seq = get_insns ();
29537 end_sequence ();
29539 if (!ok)
29540 return false;
29542 if (dremap.vmode != dfinal.vmode)
29544 dremap.target = gen_lowpart (dremap.vmode, dremap.target);
29545 dremap.op0 = gen_lowpart (dremap.vmode, dremap.op0);
29546 dremap.op1 = gen_lowpart (dremap.vmode, dremap.op1);
29549 ok = expand_vec_perm_1 (&dremap);
29550 gcc_assert (ok);
29552 emit_insn (seq);
29553 return true;
29556 /* A subroutine of expand_vec_perm_even_odd_1. Implement the double-word
29557 permutation with two pshufb insns and an ior. We should have already
29558 failed all two instruction sequences. */
29560 static bool
29561 expand_vec_perm_pshufb2 (struct expand_vec_perm_d *d)
29563 rtx rperm[2][16], vperm, l, h, op, m128;
29564 unsigned int i, nelt, eltsz;
29566 if (!TARGET_SSSE3 || GET_MODE_SIZE (d->vmode) != 16)
29567 return false;
29568 gcc_assert (d->op0 != d->op1);
29570 nelt = d->nelt;
29571 eltsz = GET_MODE_SIZE (GET_MODE_INNER (d->vmode));
29573 /* Generate two permutation masks. If the required element is within
29574 the given vector it is shuffled into the proper lane. If the required
29575 element is in the other vector, force a zero into the lane by setting
29576 bit 7 in the permutation mask. */
29577 m128 = GEN_INT (-128);
29578 for (i = 0; i < nelt; ++i)
29580 unsigned j, e = d->perm[i];
29581 unsigned which = (e >= nelt);
29582 if (e >= nelt)
29583 e -= nelt;
29585 for (j = 0; j < eltsz; ++j)
29587 rperm[which][i*eltsz + j] = GEN_INT (e*eltsz + j);
29588 rperm[1-which][i*eltsz + j] = m128;
29592 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[0]));
29593 vperm = force_reg (V16QImode, vperm);
29595 l = gen_reg_rtx (V16QImode);
29596 op = gen_lowpart (V16QImode, d->op0);
29597 emit_insn (gen_ssse3_pshufbv16qi3 (l, op, vperm));
29599 vperm = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, rperm[1]));
29600 vperm = force_reg (V16QImode, vperm);
29602 h = gen_reg_rtx (V16QImode);
29603 op = gen_lowpart (V16QImode, d->op1);
29604 emit_insn (gen_ssse3_pshufbv16qi3 (h, op, vperm));
29606 op = gen_lowpart (V16QImode, d->target);
29607 emit_insn (gen_iorv16qi3 (op, l, h));
29609 return true;
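/* Illustration only (hypothetical helper mirroring the mask generation
   above): for each output byte, the mask of the operand that supplies it
   holds the source byte index, and the other mask holds 0x80 so pshufb
   writes a zero there; or-ing the two shuffled results yields the full
   two-operand permutation.  */
static void
sketch_two_pshufb_masks (const unsigned char *perm, unsigned int nelt,
                         unsigned int eltsz,
                         unsigned char mask0[16], unsigned char mask1[16])
{
  unsigned int i, j;
  for (i = 0; i < nelt; ++i)
    {
      unsigned int e = perm[i], which = (e >= nelt);
      if (which)
        e -= nelt;
      for (j = 0; j < eltsz; ++j)
        {
          (which ? mask1 : mask0)[i * eltsz + j]
            = (unsigned char) (e * eltsz + j);
          (which ? mask0 : mask1)[i * eltsz + j] = 0x80;   /* zero this lane */
        }
    }
}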
29612 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement extract-even
29613 and extract-odd permutations. */
29615 static bool
29616 expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
29618 rtx t1, t2, t3, t4;
29620 switch (d->vmode)
29622 case V4DFmode:
29623 t1 = gen_reg_rtx (V4DFmode);
29624 t2 = gen_reg_rtx (V4DFmode);
29626 /* Shuffle the lanes around into { 0 1 4 5 } and { 2 3 6 7 }. */
29627 emit_insn (gen_avx_vperm2f128v4df3 (t1, d->op0, d->op1, GEN_INT (0x20)));
29628 emit_insn (gen_avx_vperm2f128v4df3 (t2, d->op0, d->op1, GEN_INT (0x31)));
29630 /* Now an unpck[lh]pd will produce the result required. */
29631 if (odd)
29632 t3 = gen_avx_unpckhpd256 (d->target, t1, t2);
29633 else
29634 t3 = gen_avx_unpcklpd256 (d->target, t1, t2);
29635 emit_insn (t3);
29636 break;
29638 case V8SFmode:
29640 static const unsigned char perm1[8] = { 0, 2, 1, 3, 4, 6, 5, 7 };
29641 static const unsigned char perme[8] = { 0, 1, 8, 9, 4, 5, 12, 13 };
29642 static const unsigned char permo[8] = { 2, 3, 10, 11, 6, 7, 14, 15 };
29644 t1 = gen_reg_rtx (V8SFmode);
29645 t2 = gen_reg_rtx (V8SFmode);
29646 t3 = gen_reg_rtx (V8SFmode);
29647 t4 = gen_reg_rtx (V8SFmode);
29649 /* Shuffle within the 128-bit lanes to produce:
29650 { 0 2 1 3 4 6 5 7 } and { 8 a 9 b c e d f }. */
29651 expand_vselect (t1, d->op0, perm1, 8);
29652 expand_vselect (t2, d->op1, perm1, 8);
29654 /* Shuffle the lanes around to produce:
29655 { 0 2 1 3 8 a 9 b } and { 4 6 5 7 c e d f }. */
29656 emit_insn (gen_avx_vperm2f128v8sf3 (t3, t1, t2, GEN_INT (0x20)));
29657 emit_insn (gen_avx_vperm2f128v8sf3 (t4, t1, t2, GEN_INT (0x31)));
29659 /* Now a vpermil2p will produce the result required. */
29660 /* ??? The vpermil2p requires a vector constant. Another option
29661 is a unpck[lh]ps to merge the two vectors to produce
29662 { 0 4 2 6 8 c a e } or { 1 5 3 7 9 d b f }. Then use another
29663 vpermilps to get the elements into the final order. */
29664 d->op0 = t3;
29665 d->op1 = t4;
29666 memcpy (d->perm, odd ? permo : perme, 8);
29667 expand_vec_perm_vpermil (d);
29669 break;
29671 case V2DFmode:
29672 case V4SFmode:
29673 case V2DImode:
29674 case V4SImode:
29675 /* These are always directly implementable by expand_vec_perm_1. */
29676 gcc_unreachable ();
29678 case V8HImode:
29679 if (TARGET_SSSE3)
29680 return expand_vec_perm_pshufb2 (d);
29681 else
29683 /* We need 2*log2(N)-1 operations to achieve odd/even
29684 with interleave. */
29685 t1 = gen_reg_rtx (V8HImode);
29686 t2 = gen_reg_rtx (V8HImode);
29687 emit_insn (gen_vec_interleave_highv8hi (t1, d->op0, d->op1));
29688 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->op0, d->op1));
29689 emit_insn (gen_vec_interleave_highv8hi (t2, d->target, t1));
29690 emit_insn (gen_vec_interleave_lowv8hi (d->target, d->target, t1));
29691 if (odd)
29692 t3 = gen_vec_interleave_highv8hi (d->target, d->target, t2);
29693 else
29694 t3 = gen_vec_interleave_lowv8hi (d->target, d->target, t2);
29695 emit_insn (t3);
29697 break;
29699 case V16QImode:
29700 if (TARGET_SSSE3)
29701 return expand_vec_perm_pshufb2 (d);
29702 else
29704 t1 = gen_reg_rtx (V16QImode);
29705 t2 = gen_reg_rtx (V16QImode);
29706 t3 = gen_reg_rtx (V16QImode);
29707 emit_insn (gen_vec_interleave_highv16qi (t1, d->op0, d->op1));
29708 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->op0, d->op1));
29709 emit_insn (gen_vec_interleave_highv16qi (t2, d->target, t1));
29710 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t1));
29711 emit_insn (gen_vec_interleave_highv16qi (t3, d->target, t2));
29712 emit_insn (gen_vec_interleave_lowv16qi (d->target, d->target, t2));
29713 if (odd)
29714 t3 = gen_vec_interleave_highv16qi (d->target, d->target, t3);
29715 else
29716 t3 = gen_vec_interleave_lowv16qi (d->target, d->target, t3);
29717 emit_insn (t3);
29719 break;
29721 default:
29722 gcc_unreachable ();
29725 return true;
29728 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29729 extract-even and extract-odd permutations. */
29731 static bool
29732 expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
29734 unsigned i, odd, nelt = d->nelt;
29736 odd = d->perm[0];
29737 if (odd != 0 && odd != 1)
29738 return false;
29740 for (i = 1; i < nelt; ++i)
29741 if (d->perm[i] != 2 * i + odd)
29742 return false;
29744 return expand_vec_perm_even_odd_1 (d, odd);
29747 /* A subroutine of ix86_expand_vec_perm_builtin_1. Implement broadcast
29748 permutations. We assume that expand_vec_perm_1 has already failed. */
29750 static bool
29751 expand_vec_perm_broadcast_1 (struct expand_vec_perm_d *d)
29753 unsigned elt = d->perm[0], nelt2 = d->nelt / 2;
29754 enum machine_mode vmode = d->vmode;
29755 unsigned char perm2[4];
29756 rtx op0 = d->op0;
29757 bool ok;
29759 switch (vmode)
29761 case V4DFmode:
29762 case V8SFmode:
29763 /* These are special-cased in sse.md so that we can optionally
29764 use the vbroadcast instruction. They expand to two insns
29765 if the input happens to be in a register. */
29766 gcc_unreachable ();
29768 case V2DFmode:
29769 case V2DImode:
29770 case V4SFmode:
29771 case V4SImode:
29772 /* These are always implementable using standard shuffle patterns. */
29773 gcc_unreachable ();
29775 case V8HImode:
29776 case V16QImode:
29777 /* These can be implemented via interleave. We save one insn by
29778 stopping once we have promoted to V4SImode and then use pshufd. */
29781 optab otab = vec_interleave_low_optab;
29783 if (elt >= nelt2)
29785 otab = vec_interleave_high_optab;
29786 elt -= nelt2;
29788 nelt2 /= 2;
29790 op0 = expand_binop (vmode, otab, op0, op0, NULL, 0, OPTAB_DIRECT);
29791 vmode = get_mode_wider_vector (vmode);
29792 op0 = gen_lowpart (vmode, op0);
29794 while (vmode != V4SImode);
29796 memset (perm2, elt, 4);
29797 ok = expand_vselect (gen_lowpart (V4SImode, d->target), op0, perm2, 4);
29798 gcc_assert (ok);
29799 return true;
29801 default:
29802 gcc_unreachable ();
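/* Illustration only (hypothetical scalar model of the interleave-based
   broadcast above): interleaving a vector with itself doubles the element
   width, and choosing the low or high interleave keeps the wanted element;
   after enough steps it sits in one of four 32-bit lanes that pshufd can
   replicate.  This sketch just tracks which lane that is.  */
static unsigned int
sketch_broadcast_lane (unsigned int elt, unsigned int nelt)
{
  unsigned int nelt2 = nelt / 2;
  while (nelt > 4)
    {
      if (elt >= nelt2)    /* in the high half: interleave high,
                              renumbering ELT within that half */
        elt -= nelt2;
      nelt /= 2;           /* elements are now twice as wide */
      nelt2 /= 2;
    }
  return elt;              /* V4SI lane holding the element */
}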
29806 /* A subroutine of ix86_expand_vec_perm_builtin_1. Pattern match
29807 broadcast permutations. */
29809 static bool
29810 expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
29812 unsigned i, elt, nelt = d->nelt;
29814 if (d->op0 != d->op1)
29815 return false;
29817 elt = d->perm[0];
29818 for (i = 1; i < nelt; ++i)
29819 if (d->perm[i] != elt)
29820 return false;
29822 return expand_vec_perm_broadcast_1 (d);
29825 /* The guts of ix86_expand_vec_perm_builtin, also used by the ok hook.
29826 With all of the interface bits taken care of, perform the expansion
29827 in D and return true on success. */
29829 static bool
29830 ix86_expand_vec_perm_builtin_1 (struct expand_vec_perm_d *d)
29832 /* Try a single instruction expansion. */
29833 if (expand_vec_perm_1 (d))
29834 return true;
29836 /* Try sequences of two instructions. */
29838 if (expand_vec_perm_pshuflw_pshufhw (d))
29839 return true;
29841 if (expand_vec_perm_palignr (d))
29842 return true;
29844 if (expand_vec_perm_interleave2 (d))
29845 return true;
29847 if (expand_vec_perm_broadcast (d))
29848 return true;
29850 /* Try sequences of three instructions. */
29852 if (expand_vec_perm_pshufb2 (d))
29853 return true;
29855 /* ??? Look for narrow permutations whose element orderings would
29856 allow the promotion to a wider mode. */
29858 /* ??? Look for sequences of interleave or a wider permute that place
29859 the data into the correct lanes for a half-vector shuffle like
29860 pshuf[lh]w or vpermilps. */
29862 /* ??? Look for sequences of interleave that produce the desired results.
29863 The combinatorics of punpck[lh] get pretty ugly... */
29865 if (expand_vec_perm_even_odd (d))
29866 return true;
29868 return false;
29871 /* Extract the values from the vector CST into the permutation array in D.
29872 Return 0 on error, 1 if all values from the permutation come from the
29873 first vector, 2 if all values from the second vector, and 3 otherwise. */
29875 static int
29876 extract_vec_perm_cst (struct expand_vec_perm_d *d, tree cst)
29878 tree list = TREE_VECTOR_CST_ELTS (cst);
29879 unsigned i, nelt = d->nelt;
29880 int ret = 0;
29882 for (i = 0; i < nelt; ++i, list = TREE_CHAIN (list))
29884 unsigned HOST_WIDE_INT e;
29886 if (!host_integerp (TREE_VALUE (list), 1))
29887 return 0;
29888 e = tree_low_cst (TREE_VALUE (list), 1);
29889 if (e >= 2 * nelt)
29890 return 0;
29892 ret |= (e < nelt ? 1 : 2);
29893 d->perm[i] = e;
29895 gcc_assert (list == NULL);
29897 /* For all elements from the second vector, fold them to the first. */
29898 if (ret == 2)
29899 for (i = 0; i < nelt; ++i)
29900 d->perm[i] -= nelt;
29902 return ret;
29905 static rtx
29906 ix86_expand_vec_perm_builtin (tree exp)
29908 struct expand_vec_perm_d d;
29909 tree arg0, arg1, arg2;
29911 arg0 = CALL_EXPR_ARG (exp, 0);
29912 arg1 = CALL_EXPR_ARG (exp, 1);
29913 arg2 = CALL_EXPR_ARG (exp, 2);
29915 d.vmode = TYPE_MODE (TREE_TYPE (arg0));
29916 d.nelt = GET_MODE_NUNITS (d.vmode);
29917 d.testing_p = false;
29918 gcc_assert (VECTOR_MODE_P (d.vmode));
29920 if (TREE_CODE (arg2) != VECTOR_CST)
29922 error_at (EXPR_LOCATION (exp),
29923 "vector permutation requires vector constant");
29924 goto exit_error;
29927 switch (extract_vec_perm_cst (&d, arg2))
29929 default:
29930 gcc_unreachable();
29932 case 0:
29933 error_at (EXPR_LOCATION (exp), "invalid vector permutation constant");
29934 goto exit_error;
29936 case 3:
29937 if (!operand_equal_p (arg0, arg1, 0))
29939 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
29940 d.op0 = force_reg (d.vmode, d.op0);
29941 d.op1 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
29942 d.op1 = force_reg (d.vmode, d.op1);
29943 break;
29946 /* The elements of PERM do not suggest that only the first operand
29947 is used, but both operands are identical. Allow easier matching
29948 of the permutation by folding the permutation into the single
29949 input vector. */
29951 unsigned i, nelt = d.nelt;
29952 for (i = 0; i < nelt; ++i)
29953 if (d.perm[i] >= nelt)
29954 d.perm[i] -= nelt;
29956 /* FALLTHRU */
29958 case 1:
29959 d.op0 = expand_expr (arg0, NULL_RTX, d.vmode, EXPAND_NORMAL);
29960 d.op0 = force_reg (d.vmode, d.op0);
29961 d.op1 = d.op0;
29962 break;
29964 case 2:
29965 d.op0 = expand_expr (arg1, NULL_RTX, d.vmode, EXPAND_NORMAL);
29966 d.op0 = force_reg (d.vmode, d.op0);
29967 d.op1 = d.op0;
29968 break;
29971 d.target = gen_reg_rtx (d.vmode);
29972 if (ix86_expand_vec_perm_builtin_1 (&d))
29973 return d.target;
29975 /* For compiler generated permutations, we should never get here, because
29976 the compiler should also be checking the ok hook. But since this is a
29977 builtin the user has access to, don't abort. */
29978 switch (d.nelt)
29980 case 2:
29981 sorry ("vector permutation (%d %d)", d.perm[0], d.perm[1]);
29982 break;
29983 case 4:
29984 sorry ("vector permutation (%d %d %d %d)",
29985 d.perm[0], d.perm[1], d.perm[2], d.perm[3]);
29986 break;
29987 case 8:
29988 sorry ("vector permutation (%d %d %d %d %d %d %d %d)",
29989 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
29990 d.perm[4], d.perm[5], d.perm[6], d.perm[7]);
29991 break;
29992 case 16:
29993 sorry ("vector permutation "
29994 "(%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d)",
29995 d.perm[0], d.perm[1], d.perm[2], d.perm[3],
29996 d.perm[4], d.perm[5], d.perm[6], d.perm[7],
29997 d.perm[8], d.perm[9], d.perm[10], d.perm[11],
29998 d.perm[12], d.perm[13], d.perm[14], d.perm[15]);
29999 break;
30000 default:
30001 gcc_unreachable ();
30003 exit_error:
30004 return CONST0_RTX (d.vmode);
30007 /* Implement targetm.vectorize.builtin_vec_perm_ok. */
30009 static bool
30010 ix86_vectorize_builtin_vec_perm_ok (tree vec_type, tree mask)
30012 struct expand_vec_perm_d d;
30013 int vec_mask;
30014 bool ret, one_vec;
30016 d.vmode = TYPE_MODE (vec_type);
30017 d.nelt = GET_MODE_NUNITS (d.vmode);
30018 d.testing_p = true;
30020 /* Given sufficient ISA support we can just return true here
30021 for selected vector modes. */
30022 if (GET_MODE_SIZE (d.vmode) == 16)
30024 /* All implementable with a single vpperm insn. */
30025 if (TARGET_XOP)
30026 return true;
30027 /* All implementable with 2 pshufb + 1 ior. */
30028 if (TARGET_SSSE3)
30029 return true;
30030 /* All implementable with shufpd or unpck[lh]pd. */
30031 if (d.nelt == 2)
30032 return true;
30035 vec_mask = extract_vec_perm_cst (&d, mask);
30037 /* This hook cannot be called in response to something that the
30038 user does (unlike the builtin expander) so we shouldn't ever see
30039 an error generated from the extract. */
30040 gcc_assert (vec_mask > 0 && vec_mask <= 3);
30041 one_vec = (vec_mask != 3);
30043 /* Implementable with shufps or pshufd. */
30044 if (one_vec && (d.vmode == V4SFmode || d.vmode == V4SImode))
30045 return true;
30047 /* Otherwise we have to go through the motions and see if we can
30048 figure out how to generate the requested permutation. */
30049 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
30050 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
30051 if (!one_vec)
30052 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
30054 start_sequence ();
30055 ret = ix86_expand_vec_perm_builtin_1 (&d);
30056 end_sequence ();
30058 return ret;
30061 void
30062 ix86_expand_vec_extract_even_odd (rtx targ, rtx op0, rtx op1, unsigned odd)
30064 struct expand_vec_perm_d d;
30065 unsigned i, nelt;
30067 d.target = targ;
30068 d.op0 = op0;
30069 d.op1 = op1;
30070 d.vmode = GET_MODE (targ);
30071 d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
30072 d.testing_p = false;
30074 for (i = 0; i < nelt; ++i)
30075 d.perm[i] = i * 2 + odd;
30077 /* We'll either be able to implement the permutation directly... */
30078 if (expand_vec_perm_1 (&d))
30079 return;
30081 /* ... or we use the special-case patterns. */
30082 expand_vec_perm_even_odd_1 (&d, odd);
30085 /* This function returns the calling ABI specific va_list type node,
30086 i.e. the va_list type specific to FNDECL. */
30088 tree
30089 ix86_fn_abi_va_list (tree fndecl)
30091 if (!TARGET_64BIT)
30092 return va_list_type_node;
30093 gcc_assert (fndecl != NULL_TREE);
30095 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
30096 return ms_va_list_type_node;
30097 else
30098 return sysv_va_list_type_node;
30101 /* Returns the canonical va_list type specified by TYPE. If there
30102 is no valid TYPE provided, it returns NULL_TREE. */
30104 tree
30105 ix86_canonical_va_list_type (tree type)
30107 tree wtype, htype;
30109 /* Resolve references and pointers to va_list type. */
30110 if (INDIRECT_REF_P (type))
30111 type = TREE_TYPE (type);
30112 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
30113 type = TREE_TYPE (type);
30115 if (TARGET_64BIT)
30117 wtype = va_list_type_node;
30118 gcc_assert (wtype != NULL_TREE);
30119 htype = type;
30120 if (TREE_CODE (wtype) == ARRAY_TYPE)
30122 /* If va_list is an array type, the argument may have decayed
30123 to a pointer type, e.g. by being passed to another function.
30124 In that case, unwrap both types so that we can compare the
30125 underlying records. */
30126 if (TREE_CODE (htype) == ARRAY_TYPE
30127 || POINTER_TYPE_P (htype))
30129 wtype = TREE_TYPE (wtype);
30130 htype = TREE_TYPE (htype);
30133 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30134 return va_list_type_node;
30135 wtype = sysv_va_list_type_node;
30136 gcc_assert (wtype != NULL_TREE);
30137 htype = type;
30138 if (TREE_CODE (wtype) == ARRAY_TYPE)
30140 /* If va_list is an array type, the argument may have decayed
30141 to a pointer type, e.g. by being passed to another function.
30142 In that case, unwrap both types so that we can compare the
30143 underlying records. */
30144 if (TREE_CODE (htype) == ARRAY_TYPE
30145 || POINTER_TYPE_P (htype))
30147 wtype = TREE_TYPE (wtype);
30148 htype = TREE_TYPE (htype);
30151 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30152 return sysv_va_list_type_node;
30153 wtype = ms_va_list_type_node;
30154 gcc_assert (wtype != NULL_TREE);
30155 htype = type;
30156 if (TREE_CODE (wtype) == ARRAY_TYPE)
30158 /* If va_list is an array type, the argument may have decayed
30159 to a pointer type, e.g. by being passed to another function.
30160 In that case, unwrap both types so that we can compare the
30161 underlying records. */
30162 if (TREE_CODE (htype) == ARRAY_TYPE
30163 || POINTER_TYPE_P (htype))
30165 wtype = TREE_TYPE (wtype);
30166 htype = TREE_TYPE (htype);
30169 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
30170 return ms_va_list_type_node;
30171 return NULL_TREE;
30173 return std_canonical_va_list_type (type);
30176 /* Iterate through the target-specific builtin types for va_list.
30177 IDX denotes the iterator, *PTREE is set to the result type of
30178 the va_list builtin, and *PNAME to its internal type.
30179 Returns zero if there is no element for this index, otherwise
30180 IDX should be increased upon the next call.
30181 Note, do not iterate a base builtin's name like __builtin_va_list.
30182 Used from c_common_nodes_and_builtins. */
30185 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
30187 if (!TARGET_64BIT)
30188 return 0;
30189 switch (idx) {
30190 case 0:
30191 *ptree = ms_va_list_type_node;
30192 *pname = "__builtin_ms_va_list";
30193 break;
30194 case 1:
30195 *ptree = sysv_va_list_type_node;
30196 *pname = "__builtin_sysv_va_list";
30197 break;
30198 default:
30199 return 0;
30201 return 1;
30204 /* Initialize the GCC target structure. */
30205 #undef TARGET_RETURN_IN_MEMORY
30206 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
30208 #undef TARGET_LEGITIMIZE_ADDRESS
30209 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
30211 #undef TARGET_ATTRIBUTE_TABLE
30212 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
30213 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30214 # undef TARGET_MERGE_DECL_ATTRIBUTES
30215 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
30216 #endif
30218 #undef TARGET_COMP_TYPE_ATTRIBUTES
30219 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
30221 #undef TARGET_INIT_BUILTINS
30222 #define TARGET_INIT_BUILTINS ix86_init_builtins
30223 #undef TARGET_BUILTIN_DECL
30224 #define TARGET_BUILTIN_DECL ix86_builtin_decl
30225 #undef TARGET_EXPAND_BUILTIN
30226 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
30228 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
30229 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
30230 ix86_builtin_vectorized_function
30232 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
30233 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
30235 #undef TARGET_BUILTIN_RECIPROCAL
30236 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
30238 #undef TARGET_ASM_FUNCTION_EPILOGUE
30239 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
30241 #undef TARGET_ENCODE_SECTION_INFO
30242 #ifndef SUBTARGET_ENCODE_SECTION_INFO
30243 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
30244 #else
30245 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
30246 #endif
30248 #undef TARGET_ASM_OPEN_PAREN
30249 #define TARGET_ASM_OPEN_PAREN ""
30250 #undef TARGET_ASM_CLOSE_PAREN
30251 #define TARGET_ASM_CLOSE_PAREN ""
30253 #undef TARGET_ASM_BYTE_OP
30254 #define TARGET_ASM_BYTE_OP ASM_BYTE
30256 #undef TARGET_ASM_ALIGNED_HI_OP
30257 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
30258 #undef TARGET_ASM_ALIGNED_SI_OP
30259 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
30260 #ifdef ASM_QUAD
30261 #undef TARGET_ASM_ALIGNED_DI_OP
30262 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
30263 #endif
30265 #undef TARGET_ASM_UNALIGNED_HI_OP
30266 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
30267 #undef TARGET_ASM_UNALIGNED_SI_OP
30268 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
30269 #undef TARGET_ASM_UNALIGNED_DI_OP
30270 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
30272 #undef TARGET_SCHED_ADJUST_COST
30273 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
30274 #undef TARGET_SCHED_ISSUE_RATE
30275 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
30276 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
30277 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
30278 ia32_multipass_dfa_lookahead
30280 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
30281 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
30283 #ifdef HAVE_AS_TLS
30284 #undef TARGET_HAVE_TLS
30285 #define TARGET_HAVE_TLS true
30286 #endif
30287 #undef TARGET_CANNOT_FORCE_CONST_MEM
30288 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
30289 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
30290 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
30292 #undef TARGET_DELEGITIMIZE_ADDRESS
30293 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
30295 #undef TARGET_MS_BITFIELD_LAYOUT_P
30296 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
30298 #if TARGET_MACHO
30299 #undef TARGET_BINDS_LOCAL_P
30300 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
30301 #endif
30302 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
30303 #undef TARGET_BINDS_LOCAL_P
30304 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
30305 #endif
30307 #undef TARGET_ASM_OUTPUT_MI_THUNK
30308 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
30309 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
30310 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
30312 #undef TARGET_ASM_FILE_START
30313 #define TARGET_ASM_FILE_START x86_file_start
30315 #undef TARGET_DEFAULT_TARGET_FLAGS
30316 #define TARGET_DEFAULT_TARGET_FLAGS \
30317 (TARGET_DEFAULT \
30318 | TARGET_SUBTARGET_DEFAULT \
30319 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT \
30320 | MASK_FUSED_MADD)
30322 #undef TARGET_HANDLE_OPTION
30323 #define TARGET_HANDLE_OPTION ix86_handle_option
30325 #undef TARGET_RTX_COSTS
30326 #define TARGET_RTX_COSTS ix86_rtx_costs
30327 #undef TARGET_ADDRESS_COST
30328 #define TARGET_ADDRESS_COST ix86_address_cost
30330 #undef TARGET_FIXED_CONDITION_CODE_REGS
30331 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
30332 #undef TARGET_CC_MODES_COMPATIBLE
30333 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
30335 #undef TARGET_MACHINE_DEPENDENT_REORG
30336 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
30338 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
30339 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
30341 #undef TARGET_BUILD_BUILTIN_VA_LIST
30342 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
30344 #undef TARGET_FN_ABI_VA_LIST
30345 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
30347 #undef TARGET_CANONICAL_VA_LIST_TYPE
30348 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
30350 #undef TARGET_EXPAND_BUILTIN_VA_START
30351 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
30353 #undef TARGET_MD_ASM_CLOBBERS
30354 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
30356 #undef TARGET_PROMOTE_PROTOTYPES
30357 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
30358 #undef TARGET_STRUCT_VALUE_RTX
30359 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
30360 #undef TARGET_SETUP_INCOMING_VARARGS
30361 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
30362 #undef TARGET_MUST_PASS_IN_STACK
30363 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
30364 #undef TARGET_PASS_BY_REFERENCE
30365 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
30366 #undef TARGET_INTERNAL_ARG_POINTER
30367 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
30368 #undef TARGET_UPDATE_STACK_BOUNDARY
30369 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
30370 #undef TARGET_GET_DRAP_RTX
30371 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
30372 #undef TARGET_STRICT_ARGUMENT_NAMING
30373 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
30374 #undef TARGET_STATIC_CHAIN
30375 #define TARGET_STATIC_CHAIN ix86_static_chain
30376 #undef TARGET_TRAMPOLINE_INIT
30377 #define TARGET_TRAMPOLINE_INIT ix86_trampoline_init
30379 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
30380 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
30382 #undef TARGET_SCALAR_MODE_SUPPORTED_P
30383 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
30385 #undef TARGET_VECTOR_MODE_SUPPORTED_P
30386 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
30388 #undef TARGET_C_MODE_FOR_SUFFIX
30389 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
30391 #ifdef HAVE_AS_TLS
30392 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
30393 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
30394 #endif
30396 #ifdef SUBTARGET_INSERT_ATTRIBUTES
30397 #undef TARGET_INSERT_ATTRIBUTES
30398 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
30399 #endif
30401 #undef TARGET_MANGLE_TYPE
30402 #define TARGET_MANGLE_TYPE ix86_mangle_type
30404 #undef TARGET_STACK_PROTECT_FAIL
30405 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
30407 #undef TARGET_FUNCTION_VALUE
30408 #define TARGET_FUNCTION_VALUE ix86_function_value
30410 #undef TARGET_SECONDARY_RELOAD
30411 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
30413 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
30414 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
30415 ix86_builtin_vectorization_cost
30416 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
30417 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM \
30418 ix86_vectorize_builtin_vec_perm
30419 #undef TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK
30420 #define TARGET_VECTORIZE_BUILTIN_VEC_PERM_OK \
30421 ix86_vectorize_builtin_vec_perm_ok
30423 #undef TARGET_SET_CURRENT_FUNCTION
30424 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
30426 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
30427 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
30429 #undef TARGET_OPTION_SAVE
30430 #define TARGET_OPTION_SAVE ix86_function_specific_save
30432 #undef TARGET_OPTION_RESTORE
30433 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
30435 #undef TARGET_OPTION_PRINT
30436 #define TARGET_OPTION_PRINT ix86_function_specific_print
30438 #undef TARGET_CAN_INLINE_P
30439 #define TARGET_CAN_INLINE_P ix86_can_inline_p
30441 #undef TARGET_EXPAND_TO_RTL_HOOK
30442 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
30444 #undef TARGET_LEGITIMATE_ADDRESS_P
30445 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
30447 #undef TARGET_IRA_COVER_CLASSES
30448 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
30450 #undef TARGET_FRAME_POINTER_REQUIRED
30451 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
30453 #undef TARGET_CAN_ELIMINATE
30454 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
30456 struct gcc_target targetm = TARGET_INITIALIZER;
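/* Editor's note -- illustrative sketch of the pattern used above, not
   additional configuration.  Each hook is overridden by #undef'ing the
   default macro and #define'ing it to the i386 implementation before
   TARGET_INITIALIZER is expanded; the initializer then picks up whichever
   definitions are in scope at that point.  A minimal, hypothetical port
   would follow the same shape (my_port_return_in_memory is made up):  */
#if 0
#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY my_port_return_in_memory  /* hypothetical hook */

struct gcc_target targetm = TARGET_INITIALIZER;
#endif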
30458 #include "gt-i386.h"