1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
52 #ifndef CHECK_STACK_LIMIT
53 #define CHECK_STACK_LIMIT (-1)
54 #endif
56 /* Return index of given mode in mult and division cost tables. */
57 #define MODE_INDEX(mode) \
58 ((mode) == QImode ? 0 \
59 : (mode) == HImode ? 1 \
60 : (mode) == SImode ? 2 \
61 : (mode) == DImode ? 3 \
62 : 4)
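/* Illustrative note (not part of the original source): MODE_INDEX selects a
   column of the per-mode cost arrays in the tables below, so, assuming the
   field names match the initializer comments, the cost of starting an HImode
   multiply on the current target would be fetched roughly as

     ix86_cost->mult_init[MODE_INDEX (HImode)]

   and a DImode divide/mod roughly as ix86_cost->divide[MODE_INDEX (DImode)].  */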
64 /* Processor costs (relative to an add) */
65 static const
66 struct processor_costs size_cost = { /* costs for tuning for size */
67 2, /* cost of an add instruction */
68 3, /* cost of a lea instruction */
69 2, /* variable shift costs */
70 3, /* constant shift costs */
71 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
72 0, /* cost of multiply per each bit set */
73 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
74 3, /* cost of movsx */
75 3, /* cost of movzx */
76 0, /* "large" insn */
77 2, /* MOVE_RATIO */
78 2, /* cost for loading QImode using movzbl */
79 {2, 2, 2}, /* cost of loading integer registers
80 in QImode, HImode and SImode.
81 Relative to reg-reg move (2). */
82 {2, 2, 2}, /* cost of storing integer registers */
83 2, /* cost of reg,reg fld/fst */
84 {2, 2, 2}, /* cost of loading fp registers
85 in SFmode, DFmode and XFmode */
86 {2, 2, 2}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
87 3, /* cost of moving MMX register */
88 {3, 3}, /* cost of loading MMX registers
89 in SImode and DImode */
90 {3, 3}, /* cost of storing MMX registers
91 in SImode and DImode */
92 3, /* cost of moving SSE register */
93 {3, 3, 3}, /* cost of loading SSE registers
94 in SImode, DImode and TImode */
95 {3, 3, 3}, /* cost of storing SSE registers
96 in SImode, DImode and TImode */
97 3, /* MMX or SSE register to integer */
98 0, /* size of prefetch block */
99 0, /* number of parallel prefetches */
100 1, /* Branch cost */
101 2, /* cost of FADD and FSUB insns. */
102 2, /* cost of FMUL instruction. */
103 2, /* cost of FDIV instruction. */
104 2, /* cost of FABS instruction. */
105 2, /* cost of FCHS instruction. */
106 2, /* cost of FSQRT instruction. */
109 /* Processor costs (relative to an add) */
110 static const
111 struct processor_costs i386_cost = { /* 386 specific costs */
112 1, /* cost of an add instruction */
113 1, /* cost of a lea instruction */
114 3, /* variable shift costs */
115 2, /* constant shift costs */
116 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
117 1, /* cost of multiply per each bit set */
118 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
119 3, /* cost of movsx */
120 2, /* cost of movzx */
121 15, /* "large" insn */
122 3, /* MOVE_RATIO */
123 4, /* cost for loading QImode using movzbl */
124 {2, 4, 2}, /* cost of loading integer registers
125 in QImode, HImode and SImode.
126 Relative to reg-reg move (2). */
127 {2, 4, 2}, /* cost of storing integer registers */
128 2, /* cost of reg,reg fld/fst */
129 {8, 8, 8}, /* cost of loading fp registers
130 in SFmode, DFmode and XFmode */
131 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
132 2, /* cost of moving MMX register */
133 {4, 8}, /* cost of loading MMX registers
134 in SImode and DImode */
135 {4, 8}, /* cost of storing MMX registers
136 in SImode and DImode */
137 2, /* cost of moving SSE register */
138 {4, 8, 16}, /* cost of loading SSE registers
139 in SImode, DImode and TImode */
140 {4, 8, 16}, /* cost of storing SSE registers
141 in SImode, DImode and TImode */
142 3, /* MMX or SSE register to integer */
143 0, /* size of prefetch block */
144 0, /* number of parallel prefetches */
145 1, /* Branch cost */
146 23, /* cost of FADD and FSUB insns. */
147 27, /* cost of FMUL instruction. */
148 88, /* cost of FDIV instruction. */
149 22, /* cost of FABS instruction. */
150 24, /* cost of FCHS instruction. */
151 122, /* cost of FSQRT instruction. */
154 static const
155 struct processor_costs i486_cost = { /* 486 specific costs */
156 1, /* cost of an add instruction */
157 1, /* cost of a lea instruction */
158 3, /* variable shift costs */
159 2, /* constant shift costs */
160 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
161 1, /* cost of multiply per each bit set */
162 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
163 3, /* cost of movsx */
164 2, /* cost of movzx */
165 15, /* "large" insn */
166 3, /* MOVE_RATIO */
167 4, /* cost for loading QImode using movzbl */
168 {2, 4, 2}, /* cost of loading integer registers
169 in QImode, HImode and SImode.
170 Relative to reg-reg move (2). */
171 {2, 4, 2}, /* cost of storing integer registers */
172 2, /* cost of reg,reg fld/fst */
173 {8, 8, 8}, /* cost of loading fp registers
174 in SFmode, DFmode and XFmode */
175 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
176 2, /* cost of moving MMX register */
177 {4, 8}, /* cost of loading MMX registers
178 in SImode and DImode */
179 {4, 8}, /* cost of storing MMX registers
180 in SImode and DImode */
181 2, /* cost of moving SSE register */
182 {4, 8, 16}, /* cost of loading SSE registers
183 in SImode, DImode and TImode */
184 {4, 8, 16}, /* cost of storing SSE registers
185 in SImode, DImode and TImode */
186 3, /* MMX or SSE register to integer */
187 0, /* size of prefetch block */
188 0, /* number of parallel prefetches */
189 1, /* Branch cost */
190 8, /* cost of FADD and FSUB insns. */
191 16, /* cost of FMUL instruction. */
192 73, /* cost of FDIV instruction. */
193 3, /* cost of FABS instruction. */
194 3, /* cost of FCHS instruction. */
195 83, /* cost of FSQRT instruction. */
198 static const
199 struct processor_costs pentium_cost = {
200 1, /* cost of an add instruction */
201 1, /* cost of a lea instruction */
202 4, /* variable shift costs */
203 1, /* constant shift costs */
204 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
205 0, /* cost of multiply per each bit set */
206 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
207 3, /* cost of movsx */
208 2, /* cost of movzx */
209 8, /* "large" insn */
210 6, /* MOVE_RATIO */
211 6, /* cost for loading QImode using movzbl */
212 {2, 4, 2}, /* cost of loading integer registers
213 in QImode, HImode and SImode.
214 Relative to reg-reg move (2). */
215 {2, 4, 2}, /* cost of storing integer registers */
216 2, /* cost of reg,reg fld/fst */
217 {2, 2, 6}, /* cost of loading fp registers
218 in SFmode, DFmode and XFmode */
219 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
220 8, /* cost of moving MMX register */
221 {8, 8}, /* cost of loading MMX registers
222 in SImode and DImode */
223 {8, 8}, /* cost of storing MMX registers
224 in SImode and DImode */
225 2, /* cost of moving SSE register */
226 {4, 8, 16}, /* cost of loading SSE registers
227 in SImode, DImode and TImode */
228 {4, 8, 16}, /* cost of storing SSE registers
229 in SImode, DImode and TImode */
230 3, /* MMX or SSE register to integer */
231 0, /* size of prefetch block */
232 0, /* number of parallel prefetches */
233 2, /* Branch cost */
234 3, /* cost of FADD and FSUB insns. */
235 3, /* cost of FMUL instruction. */
236 39, /* cost of FDIV instruction. */
237 1, /* cost of FABS instruction. */
238 1, /* cost of FCHS instruction. */
239 70, /* cost of FSQRT instruction. */
242 static const
243 struct processor_costs pentiumpro_cost = {
244 1, /* cost of an add instruction */
245 1, /* cost of a lea instruction */
246 1, /* variable shift costs */
247 1, /* constant shift costs */
248 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
249 0, /* cost of multiply per each bit set */
250 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
251 1, /* cost of movsx */
252 1, /* cost of movzx */
253 8, /* "large" insn */
254 6, /* MOVE_RATIO */
255 2, /* cost for loading QImode using movzbl */
256 {4, 4, 4}, /* cost of loading integer registers
257 in QImode, HImode and SImode.
258 Relative to reg-reg move (2). */
259 {2, 2, 2}, /* cost of storing integer registers */
260 2, /* cost of reg,reg fld/fst */
261 {2, 2, 6}, /* cost of loading fp registers
262 in SFmode, DFmode and XFmode */
263 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
264 2, /* cost of moving MMX register */
265 {2, 2}, /* cost of loading MMX registers
266 in SImode and DImode */
267 {2, 2}, /* cost of storing MMX registers
268 in SImode and DImode */
269 2, /* cost of moving SSE register */
270 {2, 2, 8}, /* cost of loading SSE registers
271 in SImode, DImode and TImode */
272 {2, 2, 8}, /* cost of storing SSE registers
273 in SImode, DImode and TImode */
274 3, /* MMX or SSE register to integer */
275 32, /* size of prefetch block */
276 6, /* number of parallel prefetches */
277 2, /* Branch cost */
278 3, /* cost of FADD and FSUB insns. */
279 5, /* cost of FMUL instruction. */
280 56, /* cost of FDIV instruction. */
281 2, /* cost of FABS instruction. */
282 2, /* cost of FCHS instruction. */
283 56, /* cost of FSQRT instruction. */
286 static const
287 struct processor_costs k6_cost = {
288 1, /* cost of an add instruction */
289 2, /* cost of a lea instruction */
290 1, /* variable shift costs */
291 1, /* constant shift costs */
292 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
293 0, /* cost of multiply per each bit set */
294 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
295 2, /* cost of movsx */
296 2, /* cost of movzx */
297 8, /* "large" insn */
298 4, /* MOVE_RATIO */
299 3, /* cost for loading QImode using movzbl */
300 {4, 5, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 3, 2}, /* cost of storing integer registers */
304 4, /* cost of reg,reg fld/fst */
305 {6, 6, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
307 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 6, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 1, /* number of parallel prefetches */
321 1, /* Branch cost */
322 2, /* cost of FADD and FSUB insns. */
323 2, /* cost of FMUL instruction. */
324 56, /* cost of FDIV instruction. */
325 2, /* cost of FABS instruction. */
326 2, /* cost of FCHS instruction. */
327 56, /* cost of FSQRT instruction. */
330 static const
331 struct processor_costs athlon_cost = {
332 1, /* cost of an add instruction */
333 2, /* cost of a lea instruction */
334 1, /* variable shift costs */
335 1, /* constant shift costs */
336 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
337 0, /* cost of multiply per each bit set */
338 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
339 1, /* cost of movsx */
340 1, /* cost of movzx */
341 8, /* "large" insn */
342 9, /* MOVE_RATIO */
343 4, /* cost for loading QImode using movzbl */
344 {3, 4, 3}, /* cost of loading integer registers
345 in QImode, HImode and SImode.
346 Relative to reg-reg move (2). */
347 {3, 4, 3}, /* cost of storing integer registers */
348 4, /* cost of reg,reg fld/fst */
349 {4, 4, 12}, /* cost of loading fp registers
350 in SFmode, DFmode and XFmode */
351 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
352 2, /* cost of moving MMX register */
353 {4, 4}, /* cost of loading MMX registers
354 in SImode and DImode */
355 {4, 4}, /* cost of storing MMX registers
356 in SImode and DImode */
357 2, /* cost of moving SSE register */
358 {4, 4, 6}, /* cost of loading SSE registers
359 in SImode, DImode and TImode */
360 {4, 4, 5}, /* cost of storing SSE registers
361 in SImode, DImode and TImode */
362 5, /* MMX or SSE register to integer */
363 64, /* size of prefetch block */
364 6, /* number of parallel prefetches */
365 5, /* Branch cost */
366 4, /* cost of FADD and FSUB insns. */
367 4, /* cost of FMUL instruction. */
368 24, /* cost of FDIV instruction. */
369 2, /* cost of FABS instruction. */
370 2, /* cost of FCHS instruction. */
371 35, /* cost of FSQRT instruction. */
374 static const
375 struct processor_costs k8_cost = {
376 1, /* cost of an add instruction */
377 2, /* cost of a lea instruction */
378 1, /* variable shift costs */
379 1, /* constant shift costs */
380 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
381 0, /* cost of multiply per each bit set */
382 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
383 1, /* cost of movsx */
384 1, /* cost of movzx */
385 8, /* "large" insn */
386 9, /* MOVE_RATIO */
387 4, /* cost for loading QImode using movzbl */
388 {3, 4, 3}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {3, 4, 3}, /* cost of storing integer registers */
392 4, /* cost of reg,reg fld/fst */
393 {4, 4, 12}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
396 2, /* cost of moving MMX register */
397 {3, 3}, /* cost of loading MMX registers
398 in SImode and DImode */
399 {4, 4}, /* cost of storing MMX registers
400 in SImode and DImode */
401 2, /* cost of moving SSE register */
402 {4, 3, 6}, /* cost of loading SSE registers
403 in SImode, DImode and TImode */
404 {4, 4, 5}, /* cost of storing SSE registers
405 in SImode, DImode and TImode */
406 5, /* MMX or SSE register to integer */
407 64, /* size of prefetch block */
408 6, /* number of parallel prefetches */
409 5, /* Branch cost */
410 4, /* cost of FADD and FSUB insns. */
411 4, /* cost of FMUL instruction. */
412 19, /* cost of FDIV instruction. */
413 2, /* cost of FABS instruction. */
414 2, /* cost of FCHS instruction. */
415 35, /* cost of FSQRT instruction. */
418 static const
419 struct processor_costs pentium4_cost = {
420 1, /* cost of an add instruction */
421 3, /* cost of a lea instruction */
422 4, /* variable shift costs */
423 4, /* constant shift costs */
424 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
425 0, /* cost of multiply per each bit set */
426 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
427 1, /* cost of movsx */
428 1, /* cost of movzx */
429 16, /* "large" insn */
430 6, /* MOVE_RATIO */
431 2, /* cost for loading QImode using movzbl */
432 {4, 5, 4}, /* cost of loading integer registers
433 in QImode, HImode and SImode.
434 Relative to reg-reg move (2). */
435 {2, 3, 2}, /* cost of storing integer registers */
436 2, /* cost of reg,reg fld/fst */
437 {2, 2, 6}, /* cost of loading fp registers
438 in SFmode, DFmode and XFmode */
439 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
440 2, /* cost of moving MMX register */
441 {2, 2}, /* cost of loading MMX registers
442 in SImode and DImode */
443 {2, 2}, /* cost of storing MMX registers
444 in SImode and DImode */
445 12, /* cost of moving SSE register */
446 {12, 12, 12}, /* cost of loading SSE registers
447 in SImode, DImode and TImode */
448 {2, 2, 8}, /* cost of storing SSE registers
449 in SImode, DImode and TImode */
450 10, /* MMX or SSE register to integer */
451 64, /* size of prefetch block */
452 6, /* number of parallel prefetches */
453 2, /* Branch cost */
454 5, /* cost of FADD and FSUB insns. */
455 7, /* cost of FMUL instruction. */
456 43, /* cost of FDIV instruction. */
457 2, /* cost of FABS instruction. */
458 2, /* cost of FCHS instruction. */
459 43, /* cost of FSQRT instruction. */
462 static const
463 struct processor_costs nocona_cost = {
464 1, /* cost of an add instruction */
465 1, /* cost of a lea instruction */
466 1, /* variable shift costs */
467 1, /* constant shift costs */
468 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
469 0, /* cost of multiply per each bit set */
470 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
471 1, /* cost of movsx */
472 1, /* cost of movzx */
473 16, /* "large" insn */
474 9, /* MOVE_RATIO */
475 4, /* cost for loading QImode using movzbl */
476 {4, 4, 4}, /* cost of loading integer registers
477 in QImode, HImode and SImode.
478 Relative to reg-reg move (2). */
479 {4, 4, 4}, /* cost of storing integer registers */
480 3, /* cost of reg,reg fld/fst */
481 {12, 12, 12}, /* cost of loading fp registers
482 in SFmode, DFmode and XFmode */
483 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
484 6, /* cost of moving MMX register */
485 {12, 12}, /* cost of loading MMX registers
486 in SImode and DImode */
487 {12, 12}, /* cost of storing MMX registers
488 in SImode and DImode */
489 6, /* cost of moving SSE register */
490 {12, 12, 12}, /* cost of loading SSE registers
491 in SImode, DImode and TImode */
492 {12, 12, 12}, /* cost of storing SSE registers
493 in SImode, DImode and TImode */
494 8, /* MMX or SSE register to integer */
495 128, /* size of prefetch block */
496 8, /* number of parallel prefetches */
497 1, /* Branch cost */
498 6, /* cost of FADD and FSUB insns. */
499 8, /* cost of FMUL instruction. */
500 40, /* cost of FDIV instruction. */
501 3, /* cost of FABS instruction. */
502 3, /* cost of FCHS instruction. */
503 44, /* cost of FSQRT instruction. */
506 const struct processor_costs *ix86_cost = &pentium_cost;
508 /* Processor feature/optimization bitmasks. */
509 #define m_386 (1<<PROCESSOR_I386)
510 #define m_486 (1<<PROCESSOR_I486)
511 #define m_PENT (1<<PROCESSOR_PENTIUM)
512 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
513 #define m_K6 (1<<PROCESSOR_K6)
514 #define m_ATHLON (1<<PROCESSOR_ATHLON)
515 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
516 #define m_K8 (1<<PROCESSOR_K8)
517 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
518 #define m_NOCONA (1<<PROCESSOR_NOCONA)
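/* Sketch of how these masks are consumed (an assumption, not shown in this
   file): i386.h typically wraps each x86_* bitmask below in a TARGET_*
   tuning macro that tests the bit for the CPU we are tuning for, roughly

     #define TARGET_USE_LEAVE (x86_use_leave & (1 << ix86_tune))

   so adding a processor's m_* bit to a mask enables that optimization for
   the corresponding -mtune setting.  */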
520 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
521 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
522 const int x86_zero_extend_with_and = m_486 | m_PENT;
523 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
524 const int x86_double_with_add = ~m_386;
525 const int x86_use_bit_test = m_386;
526 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
527 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
528 const int x86_fisttp = m_NOCONA;
529 const int x86_3dnow_a = m_ATHLON_K8;
530 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
531 /* Branch hints were put in P4 based on simulation results, but after
532 P4 was released no performance benefit was observed from branch
533 hints; they also increase code size.  As a result, icc never
534 generates branch hints.  */
535 const int x86_branch_hints = 0;
536 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
537 const int x86_partial_reg_stall = m_PPRO;
538 const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
539 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
540 const int x86_use_mov0 = m_K6;
541 const int x86_use_cltd = ~(m_PENT | m_K6);
542 const int x86_read_modify_write = ~m_PENT;
543 const int x86_read_modify = ~(m_PENT | m_PPRO);
544 const int x86_split_long_moves = m_PPRO;
545 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
546 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
547 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
548 const int x86_qimode_math = ~(0);
549 const int x86_promote_qi_regs = 0;
550 const int x86_himode_math = ~(m_PPRO);
551 const int x86_promote_hi_regs = m_PPRO;
552 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
553 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
554 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
555 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
556 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
557 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
558 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
559 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
560 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
561 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
562 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
563 const int x86_shift1 = ~m_486;
564 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
565 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
566 /* Set for machines where the type and dependencies are resolved on SSE
567 register parts instead of whole registers, so we may maintain just the
568 lower part of scalar values in the proper format, leaving the upper part
569 undefined.  */
570 const int x86_sse_split_regs = m_ATHLON_K8;
571 const int x86_sse_typeless_stores = m_ATHLON_K8;
572 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
573 const int x86_use_ffreep = m_ATHLON_K8;
574 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
576 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
577 integer data in xmm registers, which results in pretty abysmal code.  */
578 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
580 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
581 /* Some CPU cores are not able to predict more than 4 branch instructions in
582 a 16-byte window.  */
583 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
584 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
585 const int x86_use_bt = m_ATHLON_K8;
586 /* Compare and exchange was added for 80486. */
587 const int x86_cmpxchg = ~m_386;
588 /* Exchange and add was added for 80486. */
589 const int x86_xadd = ~m_386;
591 /* If the average insn count for a single function invocation is
592 lower than this constant, emit fast (but longer) prologue and
593 epilogue code.  */
594 #define FAST_PROLOGUE_INSN_COUNT 20
596 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively.  */
597 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
598 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
599 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
601 /* Array of the smallest class containing reg number REGNO, indexed by
602 REGNO. Used by REGNO_REG_CLASS in i386.h. */
604 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
606 /* ax, dx, cx, bx */
607 AREG, DREG, CREG, BREG,
608 /* si, di, bp, sp */
609 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
610 /* FP registers */
611 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
612 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
613 /* arg pointer */
614 NON_Q_REGS,
615 /* flags, fpsr, dirflag, frame */
616 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
617 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
618 SSE_REGS, SSE_REGS,
619 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
620 MMX_REGS, MMX_REGS,
621 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
622 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
623 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
624 SSE_REGS, SSE_REGS,
627 /* The "default" register map used in 32bit mode. */
629 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
631 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
632 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
633 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
634 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
635 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
636 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
637 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
640 static int const x86_64_int_parameter_registers[6] =
642 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
643 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
646 static int const x86_64_int_return_registers[4] =
648 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
651 /* The "default" register map used in 64bit mode. */
652 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
654 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
655 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
656 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
657 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
658 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
659 8,9,10,11,12,13,14,15, /* extended integer registers */
660 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
663 /* Define the register numbers to be used in Dwarf debugging information.
664 The SVR4 reference port C compiler uses the following register numbers
665 in its Dwarf output code:
666 0 for %eax (gcc regno = 0)
667 1 for %ecx (gcc regno = 2)
668 2 for %edx (gcc regno = 1)
669 3 for %ebx (gcc regno = 3)
670 4 for %esp (gcc regno = 7)
671 5 for %ebp (gcc regno = 6)
672 6 for %esi (gcc regno = 4)
673 7 for %edi (gcc regno = 5)
674 The following three DWARF register numbers are never generated by
675 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
676 believes these numbers have these meanings.
677 8 for %eip (no gcc equivalent)
678 9 for %eflags (gcc regno = 17)
679 10 for %trapno (no gcc equivalent)
680 It is not at all clear how we should number the FP stack registers
681 for the x86 architecture. If the version of SDB on x86/svr4 were
682 a bit less brain dead with respect to floating-point then we would
683 have a precedent to follow with respect to DWARF register numbers
684 for x86 FP registers, but the SDB on x86/svr4 is so completely
685 broken with respect to FP registers that it is hardly worth thinking
686 of it as something to strive for compatibility with.
687 The version of x86/svr4 SDB I have at the moment does (partially)
688 seem to believe that DWARF register number 11 is associated with
689 the x86 register %st(0), but that's about all. Higher DWARF
690 register numbers don't seem to be associated with anything in
691 particular, and even for DWARF regno 11, SDB only seems to under-
692 stand that it should say that a variable lives in %st(0) (when
693 asked via an `=' command) if we said it was in DWARF regno 11,
694 but SDB still prints garbage when asked for the value of the
695 variable in question (via a `/' command).
696 (Also note that the labels SDB prints for various FP stack regs
697 when doing an `x' command are all wrong.)
698 Note that these problems generally don't affect the native SVR4
699 C compiler because it doesn't allow the use of -O with -g and
700 because when it is *not* optimizing, it allocates a memory
701 location for each floating-point variable, and the memory
702 location is what gets described in the DWARF AT_location
703 attribute for the variable in question.
704 Regardless of the severe mental illness of the x86/svr4 SDB, we
705 do something sensible here and we use the following DWARF
706 register numbers. Note that these are all stack-top-relative
707 numbers.
708 11 for %st(0) (gcc regno = 8)
709 12 for %st(1) (gcc regno = 9)
710 13 for %st(2) (gcc regno = 10)
711 14 for %st(3) (gcc regno = 11)
712 15 for %st(4) (gcc regno = 12)
713 16 for %st(5) (gcc regno = 13)
714 17 for %st(6) (gcc regno = 14)
715 18 for %st(7) (gcc regno = 15)
717 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
719 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
720 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
721 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
722 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
723 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
724 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
725 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
728 /* Test and compare insns in i386.md store the information needed to
729 generate branch and scc insns here. */
731 rtx ix86_compare_op0 = NULL_RTX;
732 rtx ix86_compare_op1 = NULL_RTX;
733 rtx ix86_compare_emitted = NULL_RTX;
735 /* Size of the register save area. */
736 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
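/* Worked example (assuming the usual 64-bit values REGPARM_MAX == 6,
   UNITS_PER_WORD == 8 and SSE_REGPARM_MAX == 8): the register save area is
   6 * 8 + 8 * 16 = 176 bytes, matching the x86-64 psABI register save area
   consumed by va_arg.  */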
738 /* Define the structure for the machine field in struct function. */
740 struct stack_local_entry GTY(())
742 unsigned short mode;
743 unsigned short n;
744 rtx rtl;
745 struct stack_local_entry *next;
748 /* Structure describing stack frame layout.
749 Stack grows downward:
751 [arguments]
752 <- ARG_POINTER
753 saved pc
755 saved frame pointer if frame_pointer_needed
756 <- HARD_FRAME_POINTER
757 [saved regs]
759 [padding1]            \
761 [va_arg registers]     |
762                         > to_allocate         <- FRAME_POINTER
763 [frame]                |
765 [padding2]            /
767 struct ix86_frame
769 int nregs;
770 int padding1;
771 int va_arg_size;
772 HOST_WIDE_INT frame;
773 int padding2;
774 int outgoing_arguments_size;
775 int red_zone_size;
777 HOST_WIDE_INT to_allocate;
778 /* The offsets relative to ARG_POINTER. */
779 HOST_WIDE_INT frame_pointer_offset;
780 HOST_WIDE_INT hard_frame_pointer_offset;
781 HOST_WIDE_INT stack_pointer_offset;
783 /* When save_regs_using_mov is set, emit prologue using
784 move instead of push instructions. */
785 bool save_regs_using_mov;
788 /* Code model option. */
789 enum cmodel ix86_cmodel;
790 /* Asm dialect. */
791 enum asm_dialect ix86_asm_dialect = ASM_ATT;
792 /* TLS dialect.  */
793 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
795 /* Which unit we are generating floating point math for. */
796 enum fpmath_unit ix86_fpmath;
798 /* Which cpu we are scheduling for.  */
799 enum processor_type ix86_tune;
800 /* Which instruction set architecture to use. */
801 enum processor_type ix86_arch;
803 /* True if the sse prefetch instruction is not a NOOP.  */
804 int x86_prefetch_sse;
806 /* ix86_regparm_string as a number */
807 static int ix86_regparm;
809 /* Preferred alignment for stack boundary in bits. */
810 unsigned int ix86_preferred_stack_boundary;
812 /* Values 1-5: see jump.c */
813 int ix86_branch_cost;
815 /* Variables which are this size or smaller are put in the data/bss
816 or ldata/lbss sections. */
818 int ix86_section_threshold = 65536;
820 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
821 char internal_label_prefix[16];
822 int internal_label_prefix_len;
824 static bool ix86_handle_option (size_t, const char *, int);
825 static void output_pic_addr_const (FILE *, rtx, int);
826 static void put_condition_code (enum rtx_code, enum machine_mode,
827 int, int, FILE *);
828 static const char *get_some_local_dynamic_name (void);
829 static int get_some_local_dynamic_name_1 (rtx *, void *);
830 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
831 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
832 rtx *);
833 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
834 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
835 enum machine_mode);
836 static rtx get_thread_pointer (int);
837 static rtx legitimize_tls_address (rtx, enum tls_model, int);
838 static void get_pc_thunk_name (char [32], unsigned int);
839 static rtx gen_push (rtx);
840 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
841 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
842 static struct machine_function * ix86_init_machine_status (void);
843 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
844 static int ix86_nsaved_regs (void);
845 static void ix86_emit_save_regs (void);
846 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
847 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
848 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
849 static HOST_WIDE_INT ix86_GOT_alias_set (void);
850 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
851 static rtx ix86_expand_aligntest (rtx, int);
852 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
853 static int ix86_issue_rate (void);
854 static int ix86_adjust_cost (rtx, rtx, rtx, int);
855 static int ia32_multipass_dfa_lookahead (void);
856 static void ix86_init_mmx_sse_builtins (void);
857 static rtx x86_this_parameter (tree);
858 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
859 HOST_WIDE_INT, tree);
860 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
861 static void x86_file_start (void);
862 static void ix86_reorg (void);
863 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
864 static tree ix86_build_builtin_va_list (void);
865 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
866 tree, int *, int);
867 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
868 static bool ix86_vector_mode_supported_p (enum machine_mode);
870 static int ix86_address_cost (rtx);
871 static bool ix86_cannot_force_const_mem (rtx);
872 static rtx ix86_delegitimize_address (rtx);
874 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
876 struct builtin_description;
877 static rtx ix86_expand_sse_comi (const struct builtin_description *,
878 tree, rtx);
879 static rtx ix86_expand_sse_compare (const struct builtin_description *,
880 tree, rtx);
881 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
882 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
883 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
884 static rtx ix86_expand_store_builtin (enum insn_code, tree);
885 static rtx safe_vector_operand (rtx, enum machine_mode);
886 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
887 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
888 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
889 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
890 static int ix86_fp_comparison_cost (enum rtx_code code);
891 static unsigned int ix86_select_alt_pic_regnum (void);
892 static int ix86_save_reg (unsigned int, int);
893 static void ix86_compute_frame_layout (struct ix86_frame *);
894 static int ix86_comp_type_attributes (tree, tree);
895 static int ix86_function_regparm (tree, tree);
896 const struct attribute_spec ix86_attribute_table[];
897 static bool ix86_function_ok_for_sibcall (tree, tree);
898 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
899 static int ix86_value_regno (enum machine_mode, tree, tree);
900 static bool contains_128bit_aligned_vector_p (tree);
901 static rtx ix86_struct_value_rtx (tree, int);
902 static bool ix86_ms_bitfield_layout_p (tree);
903 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
904 static int extended_reg_mentioned_1 (rtx *, void *);
905 static bool ix86_rtx_costs (rtx, int, int, int *);
906 static int min_insn_size (rtx);
907 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
908 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
909 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
910 tree, bool);
911 static void ix86_init_builtins (void);
912 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
913 static const char *ix86_mangle_fundamental_type (tree);
914 static tree ix86_stack_protect_fail (void);
916 /* This function is only used on Solaris. */
917 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
918 ATTRIBUTE_UNUSED;
920 /* Register class used for passing a given 64-bit part of an argument.
921 These represent classes as documented by the psABI, with the exception
922 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
923 uses SF or DFmode moves instead of DImode moves to avoid reformatting penalties.
925 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
926 whenever possible (the upper half then contains only padding).
928 enum x86_64_reg_class
930 X86_64_NO_CLASS,
931 X86_64_INTEGER_CLASS,
932 X86_64_INTEGERSI_CLASS,
933 X86_64_SSE_CLASS,
934 X86_64_SSESF_CLASS,
935 X86_64_SSEDF_CLASS,
936 X86_64_SSEUP_CLASS,
937 X86_64_X87_CLASS,
938 X86_64_X87UP_CLASS,
939 X86_64_COMPLEX_X87_CLASS,
940 X86_64_MEMORY_CLASS
942 static const char * const x86_64_reg_class_name[] = {
943 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
944 "sseup", "x87", "x87up", "cplx87", "no"
947 #define MAX_CLASSES 4
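/* Illustrative example (an assumption based on the comment above, not part
   of the original file): a 16-byte struct { double d; int i; } passed by
   value would be classified as { X86_64_SSEDF_CLASS, X86_64_INTEGERSI_CLASS },
   so the double travels in an SSE register while the int travels in a
   general-purpose register, using SImode since the upper half is padding.  */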
949 /* Table of constants used by fldpi, fldln2, etc.... */
950 static REAL_VALUE_TYPE ext_80387_constants_table [5];
951 static bool ext_80387_constants_init = 0;
952 static void init_ext_80387_constants (void);
953 static bool ix86_in_large_data_p (tree);
954 static void ix86_encode_section_info (tree, rtx, int);
955 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
956 static void x86_64_elf_select_section (tree decl, int reloc,
957 unsigned HOST_WIDE_INT align)
958 ATTRIBUTE_UNUSED;
960 /* Initialize the GCC target structure. */
961 #undef TARGET_ATTRIBUTE_TABLE
962 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
963 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
964 # undef TARGET_MERGE_DECL_ATTRIBUTES
965 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
966 #endif
968 #undef TARGET_COMP_TYPE_ATTRIBUTES
969 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
971 #undef TARGET_INIT_BUILTINS
972 #define TARGET_INIT_BUILTINS ix86_init_builtins
973 #undef TARGET_EXPAND_BUILTIN
974 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
976 #undef TARGET_ASM_FUNCTION_EPILOGUE
977 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
979 #undef TARGET_ENCODE_SECTION_INFO
980 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
982 #undef TARGET_ASM_OPEN_PAREN
983 #define TARGET_ASM_OPEN_PAREN ""
984 #undef TARGET_ASM_CLOSE_PAREN
985 #define TARGET_ASM_CLOSE_PAREN ""
987 #undef TARGET_ASM_ALIGNED_HI_OP
988 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
989 #undef TARGET_ASM_ALIGNED_SI_OP
990 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
991 #ifdef ASM_QUAD
992 #undef TARGET_ASM_ALIGNED_DI_OP
993 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
994 #endif
996 #undef TARGET_ASM_UNALIGNED_HI_OP
997 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
998 #undef TARGET_ASM_UNALIGNED_SI_OP
999 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1000 #undef TARGET_ASM_UNALIGNED_DI_OP
1001 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1003 #undef TARGET_SCHED_ADJUST_COST
1004 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1005 #undef TARGET_SCHED_ISSUE_RATE
1006 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1007 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1008 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1009 ia32_multipass_dfa_lookahead
1011 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1012 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1014 #ifdef HAVE_AS_TLS
1015 #undef TARGET_HAVE_TLS
1016 #define TARGET_HAVE_TLS true
1017 #endif
1018 #undef TARGET_CANNOT_FORCE_CONST_MEM
1019 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1021 #undef TARGET_DELEGITIMIZE_ADDRESS
1022 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1024 #undef TARGET_MS_BITFIELD_LAYOUT_P
1025 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1027 #if TARGET_MACHO
1028 #undef TARGET_BINDS_LOCAL_P
1029 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1030 #endif
1032 #undef TARGET_ASM_OUTPUT_MI_THUNK
1033 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1034 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1035 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1037 #undef TARGET_ASM_FILE_START
1038 #define TARGET_ASM_FILE_START x86_file_start
1040 #undef TARGET_DEFAULT_TARGET_FLAGS
1041 #define TARGET_DEFAULT_TARGET_FLAGS \
1042 (TARGET_DEFAULT \
1043 | TARGET_64BIT_DEFAULT \
1044 | TARGET_SUBTARGET_DEFAULT \
1045 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1047 #undef TARGET_HANDLE_OPTION
1048 #define TARGET_HANDLE_OPTION ix86_handle_option
1050 #undef TARGET_RTX_COSTS
1051 #define TARGET_RTX_COSTS ix86_rtx_costs
1052 #undef TARGET_ADDRESS_COST
1053 #define TARGET_ADDRESS_COST ix86_address_cost
1055 #undef TARGET_FIXED_CONDITION_CODE_REGS
1056 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1057 #undef TARGET_CC_MODES_COMPATIBLE
1058 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1060 #undef TARGET_MACHINE_DEPENDENT_REORG
1061 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1063 #undef TARGET_BUILD_BUILTIN_VA_LIST
1064 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1066 #undef TARGET_MD_ASM_CLOBBERS
1067 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1069 #undef TARGET_PROMOTE_PROTOTYPES
1070 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1071 #undef TARGET_STRUCT_VALUE_RTX
1072 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1073 #undef TARGET_SETUP_INCOMING_VARARGS
1074 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1075 #undef TARGET_MUST_PASS_IN_STACK
1076 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1077 #undef TARGET_PASS_BY_REFERENCE
1078 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1080 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1081 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1083 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1084 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1086 #ifdef HAVE_AS_TLS
1087 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1088 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1089 #endif
1091 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1092 #undef TARGET_INSERT_ATTRIBUTES
1093 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1094 #endif
1096 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1097 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1099 #undef TARGET_STACK_PROTECT_FAIL
1100 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1102 #undef TARGET_FUNCTION_VALUE
1103 #define TARGET_FUNCTION_VALUE ix86_function_value
1105 struct gcc_target targetm = TARGET_INITIALIZER;
1108 /* The svr4 ABI for the i386 says that records and unions are returned
1109 in memory. */
1110 #ifndef DEFAULT_PCC_STRUCT_RETURN
1111 #define DEFAULT_PCC_STRUCT_RETURN 1
1112 #endif
1114 /* Implement TARGET_HANDLE_OPTION. */
1116 static bool
1117 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1119 switch (code)
1121 case OPT_m3dnow:
1122 if (!value)
1124 target_flags &= ~MASK_3DNOW_A;
1125 target_flags_explicit |= MASK_3DNOW_A;
1127 return true;
1129 case OPT_mmmx:
1130 if (!value)
1132 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1133 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1135 return true;
1137 case OPT_msse:
1138 if (!value)
1140 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1141 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1143 return true;
1145 case OPT_msse2:
1146 if (!value)
1148 target_flags &= ~MASK_SSE3;
1149 target_flags_explicit |= MASK_SSE3;
1151 return true;
1153 default:
1154 return true;
1158 /* Sometimes certain combinations of command options do not make
1159 sense on a particular target machine. You can define a macro
1160 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1161 defined, is executed once just after all the command options have
1162 been parsed.
1164 Don't use this macro to turn on various extra optimizations for
1165 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1167 void
1168 override_options (void)
1170 int i;
1171 int ix86_tune_defaulted = 0;
1173 /* Comes from final.c -- no real reason to change it. */
1174 #define MAX_CODE_ALIGN 16
1176 static struct ptt
1178 const struct processor_costs *cost; /* Processor costs */
1179 const int target_enable; /* Target flags to enable. */
1180 const int target_disable; /* Target flags to disable. */
1181 const int align_loop; /* Default alignments. */
1182 const int align_loop_max_skip;
1183 const int align_jump;
1184 const int align_jump_max_skip;
1185 const int align_func;
1187 const processor_target_table[PROCESSOR_max] =
1189 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1190 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1191 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1192 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1193 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1194 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1195 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1196 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1197 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1200 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1201 static struct pta
1203 const char *const name; /* processor name or nickname. */
1204 const enum processor_type processor;
1205 const enum pta_flags
1207 PTA_SSE = 1,
1208 PTA_SSE2 = 2,
1209 PTA_SSE3 = 4,
1210 PTA_MMX = 8,
1211 PTA_PREFETCH_SSE = 16,
1212 PTA_3DNOW = 32,
1213 PTA_3DNOW_A = 64,
1214 PTA_64BIT = 128
1215 } flags;
1217 const processor_alias_table[] =
1219 {"i386", PROCESSOR_I386, 0},
1220 {"i486", PROCESSOR_I486, 0},
1221 {"i586", PROCESSOR_PENTIUM, 0},
1222 {"pentium", PROCESSOR_PENTIUM, 0},
1223 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1224 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1225 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1226 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1227 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1228 {"i686", PROCESSOR_PENTIUMPRO, 0},
1229 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1230 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1231 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1232 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1233 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1234 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1235 | PTA_MMX | PTA_PREFETCH_SSE},
1236 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1237 | PTA_MMX | PTA_PREFETCH_SSE},
1238 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1239 | PTA_MMX | PTA_PREFETCH_SSE},
1240 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1241 | PTA_MMX | PTA_PREFETCH_SSE},
1242 {"k6", PROCESSOR_K6, PTA_MMX},
1243 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1244 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1245 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1246 | PTA_3DNOW_A},
1247 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1248 | PTA_3DNOW | PTA_3DNOW_A},
1249 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1250 | PTA_3DNOW_A | PTA_SSE},
1251 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1252 | PTA_3DNOW_A | PTA_SSE},
1253 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1254 | PTA_3DNOW_A | PTA_SSE},
1255 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1256 | PTA_SSE | PTA_SSE2 },
1257 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1258 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1259 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1260 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1261 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1262 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1263 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1264 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1267 int const pta_size = ARRAY_SIZE (processor_alias_table);
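/* Example of how the alias table is used (illustrative): -march=k8 matches
   the "k8" entry above, so ix86_arch becomes PROCESSOR_K8 and, unless the
   user explicitly disabled them, the PTA_* flags turn on MMX, 3DNow!,
   3DNow!-A, SSE and SSE2 in the loop over processor_alias_table below.  */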
1269 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1270 SUBTARGET_OVERRIDE_OPTIONS;
1271 #endif
1273 /* Set the default values for switches whose default depends on TARGET_64BIT
1274 in case they weren't overwritten by command line options. */
1275 if (TARGET_64BIT)
1277 if (flag_omit_frame_pointer == 2)
1278 flag_omit_frame_pointer = 1;
1279 if (flag_asynchronous_unwind_tables == 2)
1280 flag_asynchronous_unwind_tables = 1;
1281 if (flag_pcc_struct_return == 2)
1282 flag_pcc_struct_return = 0;
1284 else
1286 if (flag_omit_frame_pointer == 2)
1287 flag_omit_frame_pointer = 0;
1288 if (flag_asynchronous_unwind_tables == 2)
1289 flag_asynchronous_unwind_tables = 0;
1290 if (flag_pcc_struct_return == 2)
1291 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1294 if (!ix86_tune_string && ix86_arch_string)
1295 ix86_tune_string = ix86_arch_string;
1296 if (!ix86_tune_string)
1298 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1299 ix86_tune_defaulted = 1;
1301 if (!ix86_arch_string)
1302 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1304 if (ix86_cmodel_string != 0)
1306 if (!strcmp (ix86_cmodel_string, "small"))
1307 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1308 else if (!strcmp (ix86_cmodel_string, "medium"))
1309 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1310 else if (flag_pic)
1311 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1312 else if (!strcmp (ix86_cmodel_string, "32"))
1313 ix86_cmodel = CM_32;
1314 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1315 ix86_cmodel = CM_KERNEL;
1316 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1317 ix86_cmodel = CM_LARGE;
1318 else
1319 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1321 else
1323 ix86_cmodel = CM_32;
1324 if (TARGET_64BIT)
1325 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1327 if (ix86_asm_string != 0)
1329 if (!strcmp (ix86_asm_string, "intel"))
1330 ix86_asm_dialect = ASM_INTEL;
1331 else if (!strcmp (ix86_asm_string, "att"))
1332 ix86_asm_dialect = ASM_ATT;
1333 else
1334 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1336 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1337 error ("code model %qs not supported in the %s bit mode",
1338 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1339 if (ix86_cmodel == CM_LARGE)
1340 sorry ("code model %<large%> not supported yet");
1341 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1342 sorry ("%i-bit mode not compiled in",
1343 (target_flags & MASK_64BIT) ? 64 : 32);
1345 for (i = 0; i < pta_size; i++)
1346 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1348 ix86_arch = processor_alias_table[i].processor;
1349 /* Default cpu tuning to the architecture. */
1350 ix86_tune = ix86_arch;
1351 if (processor_alias_table[i].flags & PTA_MMX
1352 && !(target_flags_explicit & MASK_MMX))
1353 target_flags |= MASK_MMX;
1354 if (processor_alias_table[i].flags & PTA_3DNOW
1355 && !(target_flags_explicit & MASK_3DNOW))
1356 target_flags |= MASK_3DNOW;
1357 if (processor_alias_table[i].flags & PTA_3DNOW_A
1358 && !(target_flags_explicit & MASK_3DNOW_A))
1359 target_flags |= MASK_3DNOW_A;
1360 if (processor_alias_table[i].flags & PTA_SSE
1361 && !(target_flags_explicit & MASK_SSE))
1362 target_flags |= MASK_SSE;
1363 if (processor_alias_table[i].flags & PTA_SSE2
1364 && !(target_flags_explicit & MASK_SSE2))
1365 target_flags |= MASK_SSE2;
1366 if (processor_alias_table[i].flags & PTA_SSE3
1367 && !(target_flags_explicit & MASK_SSE3))
1368 target_flags |= MASK_SSE3;
1369 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1370 x86_prefetch_sse = true;
1371 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1372 error ("CPU you selected does not support x86-64 "
1373 "instruction set");
1374 break;
1377 if (i == pta_size)
1378 error ("bad value (%s) for -march= switch", ix86_arch_string);
1380 for (i = 0; i < pta_size; i++)
1381 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1383 ix86_tune = processor_alias_table[i].processor;
1384 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1386 if (ix86_tune_defaulted)
1388 ix86_tune_string = "x86-64";
1389 for (i = 0; i < pta_size; i++)
1390 if (! strcmp (ix86_tune_string,
1391 processor_alias_table[i].name))
1392 break;
1393 ix86_tune = processor_alias_table[i].processor;
1395 else
1396 error ("CPU you selected does not support x86-64 "
1397 "instruction set");
1399 /* Intel CPUs have always interpreted SSE prefetch instructions as
1400 NOPs; so, we can enable SSE prefetch instructions even when
1401 -mtune (rather than -march) points us to a processor that has them.
1402 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1403 higher processors. */
1404 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1405 x86_prefetch_sse = true;
1406 break;
1408 if (i == pta_size)
1409 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1411 if (optimize_size)
1412 ix86_cost = &size_cost;
1413 else
1414 ix86_cost = processor_target_table[ix86_tune].cost;
1415 target_flags |= processor_target_table[ix86_tune].target_enable;
1416 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1418 /* Arrange to set up i386_stack_locals for all functions. */
1419 init_machine_status = ix86_init_machine_status;
1421 /* Validate -mregparm= value. */
1422 if (ix86_regparm_string)
1424 i = atoi (ix86_regparm_string);
1425 if (i < 0 || i > REGPARM_MAX)
1426 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1427 else
1428 ix86_regparm = i;
1430 else
1431 if (TARGET_64BIT)
1432 ix86_regparm = REGPARM_MAX;
1434 /* If the user has provided any of the -malign-* options,
1435 warn and use that value only if -falign-* is not set.
1436 Remove this code in GCC 3.2 or later. */
1437 if (ix86_align_loops_string)
1439 warning (0, "-malign-loops is obsolete, use -falign-loops");
1440 if (align_loops == 0)
1442 i = atoi (ix86_align_loops_string);
1443 if (i < 0 || i > MAX_CODE_ALIGN)
1444 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1445 else
1446 align_loops = 1 << i;
1450 if (ix86_align_jumps_string)
1452 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
1453 if (align_jumps == 0)
1455 i = atoi (ix86_align_jumps_string);
1456 if (i < 0 || i > MAX_CODE_ALIGN)
1457 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1458 else
1459 align_jumps = 1 << i;
1463 if (ix86_align_funcs_string)
1465 warning (0, "-malign-functions is obsolete, use -falign-functions");
1466 if (align_functions == 0)
1468 i = atoi (ix86_align_funcs_string);
1469 if (i < 0 || i > MAX_CODE_ALIGN)
1470 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1471 else
1472 align_functions = 1 << i;
1476 /* Default align_* from the processor table. */
1477 if (align_loops == 0)
1479 align_loops = processor_target_table[ix86_tune].align_loop;
1480 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1482 if (align_jumps == 0)
1484 align_jumps = processor_target_table[ix86_tune].align_jump;
1485 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1487 if (align_functions == 0)
1489 align_functions = processor_target_table[ix86_tune].align_func;
1492 /* Validate -mpreferred-stack-boundary= value, or provide default.
1493 The default of 128 bits is for Pentium III's SSE __m128, but we
1494 don't want additional code to keep the stack aligned when
1495 optimizing for code size. */
1496 ix86_preferred_stack_boundary = (optimize_size
1497 ? TARGET_64BIT ? 128 : 32
1498 : 128);
1499 if (ix86_preferred_stack_boundary_string)
1501 i = atoi (ix86_preferred_stack_boundary_string);
1502 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1503 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1504 TARGET_64BIT ? 4 : 2);
1505 else
1506 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
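/* Worked example (illustrative): -mpreferred-stack-boundary=4 gives
   (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits, i.e. the 16-byte
   alignment that is also the non-optimize_size default above.  */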
1509 /* Validate -mbranch-cost= value, or provide default. */
1510 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1511 if (ix86_branch_cost_string)
1513 i = atoi (ix86_branch_cost_string);
1514 if (i < 0 || i > 5)
1515 error ("-mbranch-cost=%d is not between 0 and 5", i);
1516 else
1517 ix86_branch_cost = i;
1519 if (ix86_section_threshold_string)
1521 i = atoi (ix86_section_threshold_string);
1522 if (i < 0)
1523 error ("-mlarge-data-threshold=%d is negative", i);
1524 else
1525 ix86_section_threshold = i;
1528 if (ix86_tls_dialect_string)
1530 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1531 ix86_tls_dialect = TLS_DIALECT_GNU;
1532 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1533 ix86_tls_dialect = TLS_DIALECT_SUN;
1534 else
1535 error ("bad value (%s) for -mtls-dialect= switch",
1536 ix86_tls_dialect_string);
1539 /* Keep nonleaf frame pointers. */
1540 if (flag_omit_frame_pointer)
1541 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1542 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1543 flag_omit_frame_pointer = 1;
1545 /* If we're doing fast math, we don't care about comparison order
1546 wrt NaNs. This lets us use a shorter comparison sequence. */
1547 if (flag_unsafe_math_optimizations)
1548 target_flags &= ~MASK_IEEE_FP;
1550 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1551 since the insns won't need emulation. */
1552 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1553 target_flags &= ~MASK_NO_FANCY_MATH_387;
1555 /* Likewise, if the target doesn't have a 387, or we've specified
1556 software floating point, don't use 387 inline intrinsics. */
1557 if (!TARGET_80387)
1558 target_flags |= MASK_NO_FANCY_MATH_387;
1560 /* Turn on SSE2 builtins for -msse3. */
1561 if (TARGET_SSE3)
1562 target_flags |= MASK_SSE2;
1564 /* Turn on SSE builtins for -msse2. */
1565 if (TARGET_SSE2)
1566 target_flags |= MASK_SSE;
1568 /* Turn on MMX builtins for -msse. */
1569 if (TARGET_SSE)
1571 target_flags |= MASK_MMX & ~target_flags_explicit;
1572 x86_prefetch_sse = true;
1575 /* Turn on MMX builtins for 3Dnow. */
1576 if (TARGET_3DNOW)
1577 target_flags |= MASK_MMX;
1579 if (TARGET_64BIT)
1581 if (TARGET_ALIGN_DOUBLE)
1582 error ("-malign-double makes no sense in the 64bit mode");
1583 if (TARGET_RTD)
1584 error ("-mrtd calling convention not supported in the 64bit mode");
1586 /* Enable by default the SSE and MMX builtins. Do allow the user to
1587 explicitly disable any of these. In particular, disabling SSE and
1588 MMX for kernel code is extremely useful. */
1589 target_flags
1590 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1591 & ~target_flags_explicit);
1593 else
1595 /* The i386 ABI does not specify a red zone. It still makes sense to use it
1596 when the programmer takes care to keep the stack from being destroyed. */
1597 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1598 target_flags |= MASK_NO_RED_ZONE;
1601 /* Accept -msseregparm only if at least SSE support is enabled. */
1602 if (TARGET_SSEREGPARM
1603 && ! TARGET_SSE)
1604 error ("-msseregparm used without SSE enabled");
1606 ix86_fpmath = TARGET_FPMATH_DEFAULT;
1608 if (ix86_fpmath_string != 0)
1610 if (! strcmp (ix86_fpmath_string, "387"))
1611 ix86_fpmath = FPMATH_387;
1612 else if (! strcmp (ix86_fpmath_string, "sse"))
1614 if (!TARGET_SSE)
1616 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1617 ix86_fpmath = FPMATH_387;
1619 else
1620 ix86_fpmath = FPMATH_SSE;
1622 else if (! strcmp (ix86_fpmath_string, "387,sse")
1623 || ! strcmp (ix86_fpmath_string, "sse,387"))
1625 if (!TARGET_SSE)
1627 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1628 ix86_fpmath = FPMATH_387;
1630 else if (!TARGET_80387)
1632 warning (0, "387 instruction set disabled, using SSE arithmetics");
1633 ix86_fpmath = FPMATH_SSE;
1635 else
1636 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1638 else
1639 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1642 /* If the i387 is disabled, then do not return values in it. */
1643 if (!TARGET_80387)
1644 target_flags &= ~MASK_FLOAT_RETURNS;
1646 if ((x86_accumulate_outgoing_args & TUNEMASK)
1647 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1648 && !optimize_size)
1649 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1651 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1653 char *p;
1654 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1655 p = strchr (internal_label_prefix, 'X');
1656 internal_label_prefix_len = p - internal_label_prefix;
1657 *p = '\0';
1660 /* When the scheduling description is not available, disable the scheduler pass
1661 so it won't slow down the compilation and make x87 code slower. */
1662 if (!TARGET_SCHEDULE)
1663 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1666 /* Switch to the appropriate section for output of DECL.
1667 DECL is either a `VAR_DECL' node or a constant of some sort.
1668 RELOC indicates whether forming the initial value of DECL requires
1669 link-time relocations. */
1671 static void
1672 x86_64_elf_select_section (tree decl, int reloc,
1673 unsigned HOST_WIDE_INT align)
1675 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1676 && ix86_in_large_data_p (decl))
1678 const char *sname = NULL;
1679 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1681 case SECCAT_DATA:
1682 sname = ".ldata";
1683 break;
1684 case SECCAT_DATA_REL:
1685 sname = ".ldata.rel";
1686 break;
1687 case SECCAT_DATA_REL_LOCAL:
1688 sname = ".ldata.rel.local";
1689 break;
1690 case SECCAT_DATA_REL_RO:
1691 sname = ".ldata.rel.ro";
1692 break;
1693 case SECCAT_DATA_REL_RO_LOCAL:
1694 sname = ".ldata.rel.ro.local";
1695 break;
1696 case SECCAT_BSS:
1697 sname = ".lbss";
1698 break;
1699 case SECCAT_RODATA:
1700 case SECCAT_RODATA_MERGE_STR:
1701 case SECCAT_RODATA_MERGE_STR_INIT:
1702 case SECCAT_RODATA_MERGE_CONST:
1703 sname = ".lrodata";
1704 break;
1705 case SECCAT_SRODATA:
1706 case SECCAT_SDATA:
1707 case SECCAT_SBSS:
1708 gcc_unreachable ();
1709 case SECCAT_TEXT:
1710 case SECCAT_TDATA:
1711 case SECCAT_TBSS:
1712 /* We don't split these for the medium model. Place them into
1713 default sections and hope for the best. */
1714 break;
1716 if (sname)
1718 named_section (decl, sname, reloc);
1719 return;
1722 default_elf_select_section (decl, reloc, align);
1725 /* Build up a unique section name, expressed as a
1726 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
1727 RELOC indicates whether the initial value of EXP requires
1728 link-time relocations. */
1730 static void
1731 x86_64_elf_unique_section (tree decl, int reloc)
1733 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1734 && ix86_in_large_data_p (decl))
1736 const char *prefix = NULL;
1737 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
1738 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
1740 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1742 case SECCAT_DATA:
1743 case SECCAT_DATA_REL:
1744 case SECCAT_DATA_REL_LOCAL:
1745 case SECCAT_DATA_REL_RO:
1746 case SECCAT_DATA_REL_RO_LOCAL:
1747 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
1748 break;
1749 case SECCAT_BSS:
1750 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
1751 break;
1752 case SECCAT_RODATA:
1753 case SECCAT_RODATA_MERGE_STR:
1754 case SECCAT_RODATA_MERGE_STR_INIT:
1755 case SECCAT_RODATA_MERGE_CONST:
1756 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
1757 break;
1758 case SECCAT_SRODATA:
1759 case SECCAT_SDATA:
1760 case SECCAT_SBSS:
1761 gcc_unreachable ();
1762 case SECCAT_TEXT:
1763 case SECCAT_TDATA:
1764 case SECCAT_TBSS:
1765 /* We don't split these for the medium model. Place them into
1766 default sections and hope for the best. */
1767 break;
1769 if (prefix)
1771 const char *name;
1772 size_t nlen, plen;
1773 char *string;
1774 plen = strlen (prefix);
1776 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1777 name = targetm.strip_name_encoding (name);
1778 nlen = strlen (name);
1780 string = alloca (nlen + plen + 1);
1781 memcpy (string, prefix, plen);
1782 memcpy (string + plen, name, nlen + 1);
1784 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
1785 return;
1788 default_unique_section (decl, reloc);
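/* Hedged example (not from the original sources): with -mcmodel=medium and a
   unique section requested for a hypothetical large array "buf", the code
   above would name the section ".ldata.buf" (or ".gnu.linkonce.ld.buf" for a
   one-only copy without COMDAT groups) instead of the usual ".data.buf".  */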
1791 /* This says how to output assembler code to declare an
1792 uninitialized external linkage data object.
1794 For medium model x86-64 we need to use the .largecomm directive for
1795 large objects. */
1796 void
1797 x86_elf_aligned_common (FILE *file,
1798 const char *name, unsigned HOST_WIDE_INT size,
1799 int align)
1801 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1802 && size > (unsigned int)ix86_section_threshold)
1803 fprintf (file, ".largecomm\t");
1804 else
1805 fprintf (file, "%s", COMMON_ASM_OP);
1806 assemble_name (file, name);
1807 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
1808 size, align / BITS_PER_UNIT);
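/* Hedged illustration (not part of the original sources): with -mcmodel=medium
   and -mlarge-data-threshold=65536, a hypothetical 1 MB object
       char big[1 << 20];
   would be announced roughly as
       .largecomm  big,1048576,32
   while smaller commons continue to use COMMON_ASM_OP (".comm" on typical
   ELF targets).  */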
1811 /* Utility function for targets to use in implementing
1812 ASM_OUTPUT_ALIGNED_BSS. */
1814 void
1815 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
1816 const char *name, unsigned HOST_WIDE_INT size,
1817 int align)
1819 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1820 && size > (unsigned int)ix86_section_threshold)
1821 named_section (decl, ".lbss", 0);
1822 else
1823 bss_section ();
1824 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
1825 #ifdef ASM_DECLARE_OBJECT_NAME
1826 last_assemble_variable_decl = decl;
1827 ASM_DECLARE_OBJECT_NAME (file, name, decl);
1828 #else
1829 /* The standard thing is just to output a label for the object. */
1830 ASM_OUTPUT_LABEL (file, name);
1831 #endif /* ASM_DECLARE_OBJECT_NAME */
1832 ASM_OUTPUT_SKIP (file, size ? size : 1);
1835 void
1836 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1838 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1839 make the problem with not enough registers even worse. */
1840 #ifdef INSN_SCHEDULING
1841 if (level > 1)
1842 flag_schedule_insns = 0;
1843 #endif
1845 if (TARGET_MACHO)
1846 /* The Darwin libraries never set errno, so we might as well
1847 avoid calling them when that's the only reason we would. */
1848 flag_errno_math = 0;
1850 /* The default values of these switches depend on TARGET_64BIT,
1851 which is not known at this moment. Mark these values with 2 and
1852 let the user override them. If there is no command line option
1853 specifying them, we will set the defaults in override_options. */
1854 if (optimize >= 1)
1855 flag_omit_frame_pointer = 2;
1856 flag_pcc_struct_return = 2;
1857 flag_asynchronous_unwind_tables = 2;
1858 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1859 SUBTARGET_OPTIMIZATION_OPTIONS;
1860 #endif
1863 /* Table of valid machine attributes. */
1864 const struct attribute_spec ix86_attribute_table[] =
1866 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1867 /* Stdcall attribute says callee is responsible for popping arguments
1868 if they are not variable. */
1869 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1870 /* Fastcall attribute says callee is responsible for popping arguments
1871 if they are not variable. */
1872 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1873 /* Cdecl attribute says the callee is a normal C declaration */
1874 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1875 /* Regparm attribute specifies how many integer arguments are to be
1876 passed in registers. */
1877 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
1878 /* Sseregparm attribute says we are using x86_64 calling conventions
1879 for FP arguments. */
1880 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1881 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1882 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1883 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1884 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1885 #endif
1886 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1887 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1888 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1889 SUBTARGET_ATTRIBUTE_TABLE,
1890 #endif
1891 { NULL, 0, 0, false, false, false, NULL }
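/* Usage sketch (illustrative only, not part of the table above): these
   attributes attach to function types in user code, e.g.
       int  __attribute__ ((regparm (3))) f (int a, int b, int c);
       void __attribute__ ((fastcall))    g (int x, int y);
   and are validated by ix86_handle_cconv_attribute below.  */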
1894 /* Decide whether we can make a sibling call to a function. DECL is the
1895 declaration of the function being targeted by the call and EXP is the
1896 CALL_EXPR representing the call. */
1898 static bool
1899 ix86_function_ok_for_sibcall (tree decl, tree exp)
1901 tree func;
1902 rtx a, b;
1904 /* If we are generating position-independent code, we cannot sibcall
1905 optimize any indirect call, or a direct call to a global function,
1906 as the PLT requires %ebx be live. */
1907 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1908 return false;
1910 if (decl)
1911 func = decl;
1912 else
1914 func = TREE_TYPE (TREE_OPERAND (exp, 0));
1915 if (POINTER_TYPE_P (func))
1916 func = TREE_TYPE (func);
1919 /* Check that the return value locations are the same. For example,
1920 if we are returning floats on the 80387 register stack, we cannot
1921 make a sibcall from a function that doesn't return a float to a
1922 function that does or, conversely, from a function that does return
1923 a float to a function that doesn't; the necessary stack adjustment
1924 would not be executed. This is also the place we notice
1925 differences in the return value ABI. */
1926 a = ix86_function_value (TREE_TYPE (exp), func, false);
1927 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
1928 cfun->decl, false);
1929 if (! rtx_equal_p (a, b))
1930 return false;
1932 /* If this call is indirect, we'll need to be able to use a call-clobbered
1933 register for the address of the target function. Make sure that all
1934 such registers are not used for passing parameters. */
1935 if (!decl && !TARGET_64BIT)
1937 tree type;
1939 /* We're looking at the CALL_EXPR, we need the type of the function. */
1940 type = TREE_OPERAND (exp, 0); /* pointer expression */
1941 type = TREE_TYPE (type); /* pointer type */
1942 type = TREE_TYPE (type); /* function type */
1944 if (ix86_function_regparm (type, NULL) >= 3)
1946 /* ??? Need to count the actual number of registers to be used,
1947 not the possible number of registers. Fix later. */
1948 return false;
1952 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1953 /* Dllimport'd functions are also called indirectly. */
1954 if (decl && lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl))
1955 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
1956 return false;
1957 #endif
1959 /* Otherwise okay. That also includes certain types of indirect calls. */
1960 return true;
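/* Illustrative example (an assumption, not from the original sources): when
   compiling 32-bit PIC code, a tail call such as
       extern int bar (int);
       int foo (int x) { return bar (x); }
   is rejected by the first test above because the call to the global "bar"
   goes through the PLT and %ebx must stay live, so an ordinary call plus
   return is emitted instead of a sibcall.  */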
1963 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
1964 calling convention attributes;
1965 arguments as in struct attribute_spec.handler. */
1967 static tree
1968 ix86_handle_cconv_attribute (tree *node, tree name,
1969 tree args,
1970 int flags ATTRIBUTE_UNUSED,
1971 bool *no_add_attrs)
1973 if (TREE_CODE (*node) != FUNCTION_TYPE
1974 && TREE_CODE (*node) != METHOD_TYPE
1975 && TREE_CODE (*node) != FIELD_DECL
1976 && TREE_CODE (*node) != TYPE_DECL)
1978 warning (OPT_Wattributes, "%qs attribute only applies to functions",
1979 IDENTIFIER_POINTER (name));
1980 *no_add_attrs = true;
1981 return NULL_TREE;
1984 /* Can combine regparm with all attributes but fastcall. */
1985 if (is_attribute_p ("regparm", name))
1987 tree cst;
1989 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1991 error ("fastcall and regparm attributes are not compatible");
1994 cst = TREE_VALUE (args);
1995 if (TREE_CODE (cst) != INTEGER_CST)
1997 warning (OPT_Wattributes,
1998 "%qs attribute requires an integer constant argument",
1999 IDENTIFIER_POINTER (name));
2000 *no_add_attrs = true;
2002 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2004 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2005 IDENTIFIER_POINTER (name), REGPARM_MAX);
2006 *no_add_attrs = true;
2009 return NULL_TREE;
2012 if (TARGET_64BIT)
2014 warning (OPT_Wattributes, "%qs attribute ignored",
2015 IDENTIFIER_POINTER (name));
2016 *no_add_attrs = true;
2017 return NULL_TREE;
2020 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2021 if (is_attribute_p ("fastcall", name))
2023 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2025 error ("fastcall and cdecl attributes are not compatible");
2027 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2029 error ("fastcall and stdcall attributes are not compatible");
2031 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2033 error ("fastcall and regparm attributes are not compatible");
2037 /* Can combine stdcall with fastcall (redundant), regparm and
2038 sseregparm. */
2039 else if (is_attribute_p ("stdcall", name))
2041 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2043 error ("stdcall and cdecl attributes are not compatible");
2045 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2047 error ("stdcall and fastcall attributes are not compatible");
2051 /* Can combine cdecl with regparm and sseregparm. */
2052 else if (is_attribute_p ("cdecl", name))
2054 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2056 error ("stdcall and cdecl attributes are not compatible");
2058 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2060 error ("fastcall and cdecl attributes are not compatible");
2064 /* Can combine sseregparm with all attributes. */
2066 return NULL_TREE;
2069 /* Return 0 if the attributes for two types are incompatible, 1 if they
2070 are compatible, and 2 if they are nearly compatible (which causes a
2071 warning to be generated). */
2073 static int
2074 ix86_comp_type_attributes (tree type1, tree type2)
2076 /* Check for mismatch of non-default calling convention. */
2077 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2079 if (TREE_CODE (type1) != FUNCTION_TYPE)
2080 return 1;
2082 /* Check for mismatched fastcall/regparm types. */
2083 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2084 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2085 || (ix86_function_regparm (type1, NULL)
2086 != ix86_function_regparm (type2, NULL)))
2087 return 0;
2089 /* Check for mismatched sseregparm types. */
2090 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2091 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2092 return 0;
2094 /* Check for mismatched return types (cdecl vs stdcall). */
2095 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2096 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2097 return 0;
2099 return 1;
2102 /* Return the regparm value for a function with the indicated TYPE and DECL.
2103 DECL may be NULL when calling the function indirectly
2104 or considering a libcall. */
2106 static int
2107 ix86_function_regparm (tree type, tree decl)
2109 tree attr;
2110 int regparm = ix86_regparm;
2111 bool user_convention = false;
2113 if (!TARGET_64BIT)
2115 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2116 if (attr)
2118 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2119 user_convention = true;
2122 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2124 regparm = 2;
2125 user_convention = true;
2128 /* Use register calling convention for local functions when possible. */
2129 if (!TARGET_64BIT && !user_convention && decl
2130 && flag_unit_at_a_time && !profile_flag)
2132 struct cgraph_local_info *i = cgraph_local_info (decl);
2133 if (i && i->local)
2135 /* We can't use regparm(3) for nested functions as these use
2136 the static chain pointer in the third argument. */
2137 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
2138 regparm = 2;
2139 else
2140 regparm = 3;
2144 return regparm;
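/* Worked example (hedged, not part of the original sources): with regparm
   resolved to 3 on ia32, the first three integer-sized arguments of a
   hypothetical
       int __attribute__ ((regparm (3))) f (int a, int b, int c);
   travel in %eax, %edx and %ecx respectively; any further arguments go on the
   stack.  Fastcall instead fixes regparm to 2 and uses %ecx and %edx.  */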
2147 /* Return 1 or 2, if we can pass up to 8 SFmode (1) and DFmode (2) arguments
2148 in SSE registers for a function with the indicated TYPE and DECL.
2149 DECL may be NULL when calling the function indirectly
2150 or considering a libcall. Otherwise return 0. */
2152 static int
2153 ix86_function_sseregparm (tree type, tree decl)
2155 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2156 by the sseregparm attribute. */
2157 if (TARGET_SSEREGPARM
2158 || (type
2159 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2161 if (!TARGET_SSE)
2163 if (decl)
2164 error ("Calling %qD with attribute sseregparm without "
2165 "SSE/SSE2 enabled", decl);
2166 else
2167 error ("Calling %qT with attribute sseregparm without "
2168 "SSE/SSE2 enabled", type);
2169 return 0;
2172 return 2;
2175 /* For local functions, pass SFmode (and DFmode for SSE2) arguments
2176 in SSE registers even for 32-bit mode and not just 3, but up to
2177 8 SSE arguments in registers. */
2178 if (!TARGET_64BIT && decl
2179 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2181 struct cgraph_local_info *i = cgraph_local_info (decl);
2182 if (i && i->local)
2183 return TARGET_SSE2 ? 2 : 1;
2186 return 0;
2189 /* Return true if EAX is live at the start of the function. Used by
2190 ix86_expand_prologue to determine if we need special help before
2191 calling allocate_stack_worker. */
2193 static bool
2194 ix86_eax_live_at_start_p (void)
2196 /* Cheat. Don't bother working forward from ix86_function_regparm
2197 to the function type to whether an actual argument is located in
2198 eax. Instead just look at cfg info, which is still close enough
2199 to correct at this point. This gives false positives for broken
2200 functions that might use uninitialized data that happens to be
2201 allocated in eax, but who cares? */
2202 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2205 /* Value is the number of bytes of arguments automatically
2206 popped when returning from a subroutine call.
2207 FUNDECL is the declaration node of the function (as a tree),
2208 FUNTYPE is the data type of the function (as a tree),
2209 or for a library call it is an identifier node for the subroutine name.
2210 SIZE is the number of bytes of arguments passed on the stack.
2212 On the 80386, the RTD insn may be used to pop them if the number
2213 of args is fixed, but if the number is variable then the caller
2214 must pop them all. RTD can't be used for library calls now
2215 because the library is compiled with the Unix compiler.
2216 Use of RTD is a selectable option, since it is incompatible with
2217 standard Unix calling sequences. If the option is not selected,
2218 the caller must always pop the args.
2220 The attribute stdcall is equivalent to RTD on a per module basis. */
2223 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2225 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2227 /* Cdecl functions override -mrtd, and never pop the stack. */
2228 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2230 /* Stdcall and fastcall functions will pop the stack if not
2231 variable args. */
2232 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2233 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2234 rtd = 1;
2236 if (rtd
2237 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2238 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2239 == void_type_node)))
2240 return size;
2243 /* Lose any fake structure return argument if it is passed on the stack. */
2244 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2245 && !TARGET_64BIT
2246 && !KEEP_AGGREGATE_RETURN_POINTER)
2248 int nregs = ix86_function_regparm (funtype, fundecl);
2250 if (!nregs)
2251 return GET_MODE_SIZE (Pmode);
2254 return 0;
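/* Illustrative sketch (hedged, not from the original sources): for a
   hypothetical
       void __attribute__ ((stdcall)) f (int a, int b, int c);
   this function returns 12, so the callee pops its arguments with "ret $12";
   a cdecl function returns 0 and leaves the popping to the caller.  */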
2257 /* Argument support functions. */
2259 /* Return true when the register REGNO may be used to pass function parameters. */
2260 bool
2261 ix86_function_arg_regno_p (int regno)
2263 int i;
2264 if (!TARGET_64BIT)
2265 return (regno < REGPARM_MAX
2266 || (TARGET_MMX && MMX_REGNO_P (regno)
2267 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2268 || (TARGET_SSE && SSE_REGNO_P (regno)
2269 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2271 if (TARGET_SSE && SSE_REGNO_P (regno)
2272 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2273 return true;
2274 /* RAX is used as a hidden argument to va_arg functions. */
2275 if (!regno)
2276 return true;
2277 for (i = 0; i < REGPARM_MAX; i++)
2278 if (regno == x86_64_int_parameter_registers[i])
2279 return true;
2280 return false;
2283 /* Return true if we do not know how to pass TYPE solely in registers. */
2285 static bool
2286 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2288 if (must_pass_in_stack_var_size_or_pad (mode, type))
2289 return true;
2291 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2292 The layout_type routine is crafty and tries to trick us into passing
2293 currently unsupported vector types on the stack by using TImode. */
2294 return (!TARGET_64BIT && mode == TImode
2295 && type && TREE_CODE (type) != VECTOR_TYPE);
2298 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2299 for a call to a function whose data type is FNTYPE.
2300 For a library call, FNTYPE is 0. */
2302 void
2303 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2304 tree fntype, /* tree ptr for function decl */
2305 rtx libname, /* SYMBOL_REF of library name or 0 */
2306 tree fndecl)
2308 static CUMULATIVE_ARGS zero_cum;
2309 tree param, next_param;
2311 if (TARGET_DEBUG_ARG)
2313 fprintf (stderr, "\ninit_cumulative_args (");
2314 if (fntype)
2315 fprintf (stderr, "fntype code = %s, ret code = %s",
2316 tree_code_name[(int) TREE_CODE (fntype)],
2317 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2318 else
2319 fprintf (stderr, "no fntype");
2321 if (libname)
2322 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2325 *cum = zero_cum;
2327 /* Set up the number of registers to use for passing arguments. */
2328 cum->nregs = ix86_regparm;
2329 if (TARGET_SSE)
2330 cum->sse_nregs = SSE_REGPARM_MAX;
2331 if (TARGET_MMX)
2332 cum->mmx_nregs = MMX_REGPARM_MAX;
2333 cum->warn_sse = true;
2334 cum->warn_mmx = true;
2335 cum->maybe_vaarg = false;
2337 /* Use the ecx and edx registers if the function has the fastcall attribute;
2338 else look for regparm information. */
2339 if (fntype && !TARGET_64BIT)
2341 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2343 cum->nregs = 2;
2344 cum->fastcall = 1;
2346 else
2347 cum->nregs = ix86_function_regparm (fntype, fndecl);
2350 /* Set up the number of SSE registers used for passing SFmode
2351 and DFmode arguments. Warn for mismatching ABI. */
2352 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2354 /* Determine if this function has variable arguments. This is
2355 indicated by the last argument being 'void_type_node' if there
2356 are no variable arguments. If there are variable arguments, then
2357 we won't pass anything in registers in 32-bit mode. */
2359 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2361 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2362 param != 0; param = next_param)
2364 next_param = TREE_CHAIN (param);
2365 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2367 if (!TARGET_64BIT)
2369 cum->nregs = 0;
2370 cum->sse_nregs = 0;
2371 cum->mmx_nregs = 0;
2372 cum->warn_sse = 0;
2373 cum->warn_mmx = 0;
2374 cum->fastcall = 0;
2375 cum->float_in_sse = 0;
2377 cum->maybe_vaarg = true;
2381 if ((!fntype && !libname)
2382 || (fntype && !TYPE_ARG_TYPES (fntype)))
2383 cum->maybe_vaarg = true;
2385 if (TARGET_DEBUG_ARG)
2386 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2388 return;
2391 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2392 But in the case of vector types, it is some vector mode.
2394 When we have only some of our vector isa extensions enabled, then there
2395 are some modes for which vector_mode_supported_p is false. For these
2396 modes, the generic vector support in gcc will choose some non-vector mode
2397 in order to implement the type. By computing the natural mode, we'll
2398 select the proper ABI location for the operand and not depend on whatever
2399 the middle-end decides to do with these vector types. */
2401 static enum machine_mode
2402 type_natural_mode (tree type)
2404 enum machine_mode mode = TYPE_MODE (type);
2406 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2408 HOST_WIDE_INT size = int_size_in_bytes (type);
2409 if ((size == 8 || size == 16)
2410 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2411 && TYPE_VECTOR_SUBPARTS (type) > 1)
2413 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2415 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2416 mode = MIN_MODE_VECTOR_FLOAT;
2417 else
2418 mode = MIN_MODE_VECTOR_INT;
2420 /* Get the mode which has this inner mode and number of units. */
2421 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2422 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2423 && GET_MODE_INNER (mode) == innermode)
2424 return mode;
2426 gcc_unreachable ();
2430 return mode;
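/* Hedged example (not in the original sources): for a user type such as
       typedef float v4sf __attribute__ ((vector_size (16)));
   TYPE_MODE may be a non-vector mode when SSE is disabled, but the loop above
   still finds V4SFmode (4 units of SFmode in 16 bytes), so the ABI location
   of the argument does not depend on which ISA extensions are enabled.  */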
2433 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2434 this may not agree with the mode that the type system has chosen for the
2435 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2436 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2438 static rtx
2439 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2440 unsigned int regno)
2442 rtx tmp;
2444 if (orig_mode != BLKmode)
2445 tmp = gen_rtx_REG (orig_mode, regno);
2446 else
2448 tmp = gen_rtx_REG (mode, regno);
2449 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2450 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2453 return tmp;
2456 /* x86-64 register passing implementation. See the x86-64 PS ABI for details.
2457 The goal of this code is to classify each 8-byte chunk of the incoming argument
2458 by register class and assign registers accordingly. */
2460 /* Return the union class of CLASS1 and CLASS2.
2461 See the x86-64 PS ABI for details. */
2463 static enum x86_64_reg_class
2464 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2466 /* Rule #1: If both classes are equal, this is the resulting class. */
2467 if (class1 == class2)
2468 return class1;
2470 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2471 the other class. */
2472 if (class1 == X86_64_NO_CLASS)
2473 return class2;
2474 if (class2 == X86_64_NO_CLASS)
2475 return class1;
2477 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2478 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2479 return X86_64_MEMORY_CLASS;
2481 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2482 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2483 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2484 return X86_64_INTEGERSI_CLASS;
2485 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2486 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2487 return X86_64_INTEGER_CLASS;
2489 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2490 MEMORY is used. */
2491 if (class1 == X86_64_X87_CLASS
2492 || class1 == X86_64_X87UP_CLASS
2493 || class1 == X86_64_COMPLEX_X87_CLASS
2494 || class2 == X86_64_X87_CLASS
2495 || class2 == X86_64_X87UP_CLASS
2496 || class2 == X86_64_COMPLEX_X87_CLASS)
2497 return X86_64_MEMORY_CLASS;
2499 /* Rule #6: Otherwise class SSE is used. */
2500 return X86_64_SSE_CLASS;
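/* Worked examples of the rules above (illustrative only):
     INTEGER  merged with SSE    gives INTEGER  (rule #4),
     NO_CLASS merged with SSESF  gives SSESF    (rule #2),
     X87      merged with SSE    gives MEMORY   (rule #5).  */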
2503 /* Classify the argument of type TYPE and mode MODE.
2504 CLASSES will be filled by the register class used to pass each word
2505 of the operand. The number of words is returned. In case the parameter
2506 should be passed in memory, 0 is returned. As a special case for zero
2507 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2509 BIT_OFFSET is used internally for handling records and specifies the
2510 offset in bits modulo 256 to avoid overflow cases.
2512 See the x86-64 PS ABI for details.
2515 static int
2516 classify_argument (enum machine_mode mode, tree type,
2517 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2519 HOST_WIDE_INT bytes =
2520 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2521 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2523 /* Variable sized entities are always passed/returned in memory. */
2524 if (bytes < 0)
2525 return 0;
2527 if (mode != VOIDmode
2528 && targetm.calls.must_pass_in_stack (mode, type))
2529 return 0;
2531 if (type && AGGREGATE_TYPE_P (type))
2533 int i;
2534 tree field;
2535 enum x86_64_reg_class subclasses[MAX_CLASSES];
2537 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2538 if (bytes > 16)
2539 return 0;
2541 for (i = 0; i < words; i++)
2542 classes[i] = X86_64_NO_CLASS;
2544 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2545 signal the memory class, so handle it as a special case. */
2546 if (!words)
2548 classes[0] = X86_64_NO_CLASS;
2549 return 1;
2552 /* Classify each field of record and merge classes. */
2553 switch (TREE_CODE (type))
2555 case RECORD_TYPE:
2556 /* For classes first merge in the field of the subclasses. */
2557 if (TYPE_BINFO (type))
2559 tree binfo, base_binfo;
2560 int basenum;
2562 for (binfo = TYPE_BINFO (type), basenum = 0;
2563 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2565 int num;
2566 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2567 tree type = BINFO_TYPE (base_binfo);
2569 num = classify_argument (TYPE_MODE (type),
2570 type, subclasses,
2571 (offset + bit_offset) % 256);
2572 if (!num)
2573 return 0;
2574 for (i = 0; i < num; i++)
2576 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2577 classes[i + pos] =
2578 merge_classes (subclasses[i], classes[i + pos]);
2582 /* And now merge the fields of structure. */
2583 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2585 if (TREE_CODE (field) == FIELD_DECL)
2587 int num;
2589 /* Bitfields are always classified as integer. Handle them
2590 early, since later code would consider them to be
2591 misaligned integers. */
2592 if (DECL_BIT_FIELD (field))
2594 for (i = int_bit_position (field) / 8 / 8;
2595 i < (int_bit_position (field)
2596 + tree_low_cst (DECL_SIZE (field), 0)
2597 + 63) / 8 / 8; i++)
2598 classes[i] =
2599 merge_classes (X86_64_INTEGER_CLASS,
2600 classes[i]);
2602 else
2604 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2605 TREE_TYPE (field), subclasses,
2606 (int_bit_position (field)
2607 + bit_offset) % 256);
2608 if (!num)
2609 return 0;
2610 for (i = 0; i < num; i++)
2612 int pos =
2613 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2614 classes[i + pos] =
2615 merge_classes (subclasses[i], classes[i + pos]);
2620 break;
2622 case ARRAY_TYPE:
2623 /* Arrays are handled as small records. */
2625 int num;
2626 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2627 TREE_TYPE (type), subclasses, bit_offset);
2628 if (!num)
2629 return 0;
2631 /* The partial classes are now full classes. */
2632 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2633 subclasses[0] = X86_64_SSE_CLASS;
2634 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2635 subclasses[0] = X86_64_INTEGER_CLASS;
2637 for (i = 0; i < words; i++)
2638 classes[i] = subclasses[i % num];
2640 break;
2642 case UNION_TYPE:
2643 case QUAL_UNION_TYPE:
2644 /* Unions are similar to RECORD_TYPE but the offset is always 0.
2647 /* Unions are not derived. */
2648 gcc_assert (!TYPE_BINFO (type)
2649 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2650 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2652 if (TREE_CODE (field) == FIELD_DECL)
2654 int num;
2655 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2656 TREE_TYPE (field), subclasses,
2657 bit_offset);
2658 if (!num)
2659 return 0;
2660 for (i = 0; i < num; i++)
2661 classes[i] = merge_classes (subclasses[i], classes[i]);
2664 break;
2666 default:
2667 gcc_unreachable ();
2670 /* Final merger cleanup. */
2671 for (i = 0; i < words; i++)
2673 /* If one class is MEMORY, everything should be passed in
2674 memory. */
2675 if (classes[i] == X86_64_MEMORY_CLASS)
2676 return 0;
2678 /* The X86_64_SSEUP_CLASS should be always preceded by
2679 X86_64_SSE_CLASS. */
2680 if (classes[i] == X86_64_SSEUP_CLASS
2681 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2682 classes[i] = X86_64_SSE_CLASS;
2684 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2685 if (classes[i] == X86_64_X87UP_CLASS
2686 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2687 classes[i] = X86_64_SSE_CLASS;
2689 return words;
2692 /* Compute alignment needed. We align all types to natural boundaries with
2693 the exception of XFmode, which is aligned to 64 bits. */
2694 if (mode != VOIDmode && mode != BLKmode)
2696 int mode_alignment = GET_MODE_BITSIZE (mode);
2698 if (mode == XFmode)
2699 mode_alignment = 128;
2700 else if (mode == XCmode)
2701 mode_alignment = 256;
2702 if (COMPLEX_MODE_P (mode))
2703 mode_alignment /= 2;
2704 /* Misaligned fields are always returned in memory. */
2705 if (bit_offset % mode_alignment)
2706 return 0;
2709 /* for V1xx modes, just use the base mode */
2710 if (VECTOR_MODE_P (mode)
2711 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2712 mode = GET_MODE_INNER (mode);
2714 /* Classification of atomic types. */
2715 switch (mode)
2717 case DImode:
2718 case SImode:
2719 case HImode:
2720 case QImode:
2721 case CSImode:
2722 case CHImode:
2723 case CQImode:
2724 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2725 classes[0] = X86_64_INTEGERSI_CLASS;
2726 else
2727 classes[0] = X86_64_INTEGER_CLASS;
2728 return 1;
2729 case CDImode:
2730 case TImode:
2731 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2732 return 2;
2733 case CTImode:
2734 return 0;
2735 case SFmode:
2736 if (!(bit_offset % 64))
2737 classes[0] = X86_64_SSESF_CLASS;
2738 else
2739 classes[0] = X86_64_SSE_CLASS;
2740 return 1;
2741 case DFmode:
2742 classes[0] = X86_64_SSEDF_CLASS;
2743 return 1;
2744 case XFmode:
2745 classes[0] = X86_64_X87_CLASS;
2746 classes[1] = X86_64_X87UP_CLASS;
2747 return 2;
2748 case TFmode:
2749 classes[0] = X86_64_SSE_CLASS;
2750 classes[1] = X86_64_SSEUP_CLASS;
2751 return 2;
2752 case SCmode:
2753 classes[0] = X86_64_SSE_CLASS;
2754 return 1;
2755 case DCmode:
2756 classes[0] = X86_64_SSEDF_CLASS;
2757 classes[1] = X86_64_SSEDF_CLASS;
2758 return 2;
2759 case XCmode:
2760 classes[0] = X86_64_COMPLEX_X87_CLASS;
2761 return 1;
2762 case TCmode:
2763 /* This mode is larger than 16 bytes. */
2764 return 0;
2765 case V4SFmode:
2766 case V4SImode:
2767 case V16QImode:
2768 case V8HImode:
2769 case V2DFmode:
2770 case V2DImode:
2771 classes[0] = X86_64_SSE_CLASS;
2772 classes[1] = X86_64_SSEUP_CLASS;
2773 return 2;
2774 case V2SFmode:
2775 case V2SImode:
2776 case V4HImode:
2777 case V8QImode:
2778 classes[0] = X86_64_SSE_CLASS;
2779 return 1;
2780 case BLKmode:
2781 case VOIDmode:
2782 return 0;
2783 default:
2784 gcc_assert (VECTOR_MODE_P (mode));
2786 if (bytes > 16)
2787 return 0;
2789 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2791 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2792 classes[0] = X86_64_INTEGERSI_CLASS;
2793 else
2794 classes[0] = X86_64_INTEGER_CLASS;
2795 classes[1] = X86_64_INTEGER_CLASS;
2796 return 1 + (bytes > 8);
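/* Hedged worked example (not from the original sources): for a hypothetical
       struct s { double d; int i; };     16 bytes
   the first eightbyte is classified X86_64_SSEDF_CLASS and the second
   X86_64_INTEGER_CLASS, so 2 is returned and the structure is passed in one
   SSE register and one integer register rather than in memory.  */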
2800 /* Examine the argument and set the number of registers required in each
2801 class. Return 0 iff the parameter should be passed in memory. */
2802 static int
2803 examine_argument (enum machine_mode mode, tree type, int in_return,
2804 int *int_nregs, int *sse_nregs)
2806 enum x86_64_reg_class class[MAX_CLASSES];
2807 int n = classify_argument (mode, type, class, 0);
2809 *int_nregs = 0;
2810 *sse_nregs = 0;
2811 if (!n)
2812 return 0;
2813 for (n--; n >= 0; n--)
2814 switch (class[n])
2816 case X86_64_INTEGER_CLASS:
2817 case X86_64_INTEGERSI_CLASS:
2818 (*int_nregs)++;
2819 break;
2820 case X86_64_SSE_CLASS:
2821 case X86_64_SSESF_CLASS:
2822 case X86_64_SSEDF_CLASS:
2823 (*sse_nregs)++;
2824 break;
2825 case X86_64_NO_CLASS:
2826 case X86_64_SSEUP_CLASS:
2827 break;
2828 case X86_64_X87_CLASS:
2829 case X86_64_X87UP_CLASS:
2830 if (!in_return)
2831 return 0;
2832 break;
2833 case X86_64_COMPLEX_X87_CLASS:
2834 return in_return ? 2 : 0;
2835 case X86_64_MEMORY_CLASS:
2836 gcc_unreachable ();
2838 return 1;
2841 /* Construct container for the argument used by GCC interface. See
2842 FUNCTION_ARG for the detailed description. */
2844 static rtx
2845 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2846 tree type, int in_return, int nintregs, int nsseregs,
2847 const int *intreg, int sse_regno)
2849 enum machine_mode tmpmode;
2850 int bytes =
2851 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2852 enum x86_64_reg_class class[MAX_CLASSES];
2853 int n;
2854 int i;
2855 int nexps = 0;
2856 int needed_sseregs, needed_intregs;
2857 rtx exp[MAX_CLASSES];
2858 rtx ret;
2860 n = classify_argument (mode, type, class, 0);
2861 if (TARGET_DEBUG_ARG)
2863 if (!n)
2864 fprintf (stderr, "Memory class\n");
2865 else
2867 fprintf (stderr, "Classes:");
2868 for (i = 0; i < n; i++)
2870 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2872 fprintf (stderr, "\n");
2875 if (!n)
2876 return NULL;
2877 if (!examine_argument (mode, type, in_return, &needed_intregs,
2878 &needed_sseregs))
2879 return NULL;
2880 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2881 return NULL;
2883 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2884 some less clueful developer tries to use floating-point anyway. */
2885 if (needed_sseregs && !TARGET_SSE)
2887 static bool issued_error;
2888 if (!issued_error)
2890 issued_error = true;
2891 if (in_return)
2892 error ("SSE register return with SSE disabled");
2893 else
2894 error ("SSE register argument with SSE disabled");
2896 return NULL;
2899 /* First construct simple cases. Avoid SCmode, since we want to use
2900 a single register to pass this type. */
2901 if (n == 1 && mode != SCmode)
2902 switch (class[0])
2904 case X86_64_INTEGER_CLASS:
2905 case X86_64_INTEGERSI_CLASS:
2906 return gen_rtx_REG (mode, intreg[0]);
2907 case X86_64_SSE_CLASS:
2908 case X86_64_SSESF_CLASS:
2909 case X86_64_SSEDF_CLASS:
2910 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2911 case X86_64_X87_CLASS:
2912 case X86_64_COMPLEX_X87_CLASS:
2913 return gen_rtx_REG (mode, FIRST_STACK_REG);
2914 case X86_64_NO_CLASS:
2915 /* Zero sized array, struct or class. */
2916 return NULL;
2917 default:
2918 gcc_unreachable ();
2920 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2921 && mode != BLKmode)
2922 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2923 if (n == 2
2924 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2925 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2926 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2927 && class[1] == X86_64_INTEGER_CLASS
2928 && (mode == CDImode || mode == TImode || mode == TFmode)
2929 && intreg[0] + 1 == intreg[1])
2930 return gen_rtx_REG (mode, intreg[0]);
2932 /* Otherwise figure out the entries of the PARALLEL. */
2933 for (i = 0; i < n; i++)
2935 switch (class[i])
2937 case X86_64_NO_CLASS:
2938 break;
2939 case X86_64_INTEGER_CLASS:
2940 case X86_64_INTEGERSI_CLASS:
2941 /* Merge TImodes on aligned occasions here too. */
2942 if (i * 8 + 8 > bytes)
2943 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2944 else if (class[i] == X86_64_INTEGERSI_CLASS)
2945 tmpmode = SImode;
2946 else
2947 tmpmode = DImode;
2948 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
2949 if (tmpmode == BLKmode)
2950 tmpmode = DImode;
2951 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2952 gen_rtx_REG (tmpmode, *intreg),
2953 GEN_INT (i*8));
2954 intreg++;
2955 break;
2956 case X86_64_SSESF_CLASS:
2957 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2958 gen_rtx_REG (SFmode,
2959 SSE_REGNO (sse_regno)),
2960 GEN_INT (i*8));
2961 sse_regno++;
2962 break;
2963 case X86_64_SSEDF_CLASS:
2964 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2965 gen_rtx_REG (DFmode,
2966 SSE_REGNO (sse_regno)),
2967 GEN_INT (i*8));
2968 sse_regno++;
2969 break;
2970 case X86_64_SSE_CLASS:
2971 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2972 tmpmode = TImode;
2973 else
2974 tmpmode = DImode;
2975 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2976 gen_rtx_REG (tmpmode,
2977 SSE_REGNO (sse_regno)),
2978 GEN_INT (i*8));
2979 if (tmpmode == TImode)
2980 i++;
2981 sse_regno++;
2982 break;
2983 default:
2984 gcc_unreachable ();
2988 /* Empty aligned struct, union or class. */
2989 if (nexps == 0)
2990 return NULL;
2992 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2993 for (i = 0; i < nexps; i++)
2994 XVECEXP (ret, 0, i) = exp [i];
2995 return ret;
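/* Illustrative note (hedged, not from the original sources): continuing the
   struct { double d; int i; } example, the loop above builds a PARALLEL with
   a DFmode piece in an SSE register at offset 0 and a DImode piece in an
   integer register at offset 8; FUNCTION_ARG then hands that PARALLEL back as
   the argument location.  */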
2998 /* Update the data in CUM to advance over an argument
2999 of mode MODE and data type TYPE.
3000 (TYPE is null for libcalls where that information may not be available.) */
3002 void
3003 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3004 tree type, int named)
3006 int bytes =
3007 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3008 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3010 if (type)
3011 mode = type_natural_mode (type);
3013 if (TARGET_DEBUG_ARG)
3014 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3015 "mode=%s, named=%d)\n\n",
3016 words, cum->words, cum->nregs, cum->sse_nregs,
3017 GET_MODE_NAME (mode), named);
3019 if (TARGET_64BIT)
3021 int int_nregs, sse_nregs;
3022 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3023 cum->words += words;
3024 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3026 cum->nregs -= int_nregs;
3027 cum->sse_nregs -= sse_nregs;
3028 cum->regno += int_nregs;
3029 cum->sse_regno += sse_nregs;
3031 else
3032 cum->words += words;
3034 else
3036 switch (mode)
3038 default:
3039 break;
3041 case BLKmode:
3042 if (bytes < 0)
3043 break;
3044 /* FALLTHRU */
3046 case DImode:
3047 case SImode:
3048 case HImode:
3049 case QImode:
3050 cum->words += words;
3051 cum->nregs -= words;
3052 cum->regno += words;
3054 if (cum->nregs <= 0)
3056 cum->nregs = 0;
3057 cum->regno = 0;
3059 break;
3061 case DFmode:
3062 if (cum->float_in_sse < 2)
3063 break;
3064 case SFmode:
3065 if (cum->float_in_sse < 1)
3066 break;
3067 /* FALLTHRU */
3069 case TImode:
3070 case V16QImode:
3071 case V8HImode:
3072 case V4SImode:
3073 case V2DImode:
3074 case V4SFmode:
3075 case V2DFmode:
3076 if (!type || !AGGREGATE_TYPE_P (type))
3078 cum->sse_words += words;
3079 cum->sse_nregs -= 1;
3080 cum->sse_regno += 1;
3081 if (cum->sse_nregs <= 0)
3083 cum->sse_nregs = 0;
3084 cum->sse_regno = 0;
3087 break;
3089 case V8QImode:
3090 case V4HImode:
3091 case V2SImode:
3092 case V2SFmode:
3093 if (!type || !AGGREGATE_TYPE_P (type))
3095 cum->mmx_words += words;
3096 cum->mmx_nregs -= 1;
3097 cum->mmx_regno += 1;
3098 if (cum->mmx_nregs <= 0)
3100 cum->mmx_nregs = 0;
3101 cum->mmx_regno = 0;
3104 break;
3109 /* Define where to put the arguments to a function.
3110 Value is zero to push the argument on the stack,
3111 or a hard register in which to store the argument.
3113 MODE is the argument's machine mode.
3114 TYPE is the data type of the argument (as a tree).
3115 This is null for libcalls where that information may
3116 not be available.
3117 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3118 the preceding args and about the function being called.
3119 NAMED is nonzero if this argument is a named parameter
3120 (otherwise it is an extra parameter matching an ellipsis). */
3123 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3124 tree type, int named)
3126 enum machine_mode mode = orig_mode;
3127 rtx ret = NULL_RTX;
3128 int bytes =
3129 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3130 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3131 static bool warnedsse, warnedmmx;
3133 /* To simplify the code below, represent vector types with a vector mode
3134 even if MMX/SSE are not active. */
3135 if (type && TREE_CODE (type) == VECTOR_TYPE)
3136 mode = type_natural_mode (type);
3138 /* Handle a hidden AL argument containing the number of registers for varargs
3139 x86-64 functions. For the i386 ABI, just return constm1_rtx to avoid
3140 any AL settings. */
3141 if (mode == VOIDmode)
3143 if (TARGET_64BIT)
3144 return GEN_INT (cum->maybe_vaarg
3145 ? (cum->sse_nregs < 0
3146 ? SSE_REGPARM_MAX
3147 : cum->sse_regno)
3148 : -1);
3149 else
3150 return constm1_rtx;
3152 if (TARGET_64BIT)
3153 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3154 cum->sse_nregs,
3155 &x86_64_int_parameter_registers [cum->regno],
3156 cum->sse_regno);
3157 else
3158 switch (mode)
3160 /* For now, pass fp/complex values on the stack. */
3161 default:
3162 break;
3164 case BLKmode:
3165 if (bytes < 0)
3166 break;
3167 /* FALLTHRU */
3168 case DImode:
3169 case SImode:
3170 case HImode:
3171 case QImode:
3172 if (words <= cum->nregs)
3174 int regno = cum->regno;
3176 /* Fastcall allocates the first two DWORD (SImode) or
3177 smaller arguments to ECX and EDX. */
3178 if (cum->fastcall)
3180 if (mode == BLKmode || mode == DImode)
3181 break;
3183 /* ECX, not EAX, is the first allocated register. */
3184 if (regno == 0)
3185 regno = 2;
3187 ret = gen_rtx_REG (mode, regno);
3189 break;
3190 case DFmode:
3191 if (cum->float_in_sse < 2)
3192 break;
3193 case SFmode:
3194 if (cum->float_in_sse < 1)
3195 break;
3196 /* FALLTHRU */
3197 case TImode:
3198 case V16QImode:
3199 case V8HImode:
3200 case V4SImode:
3201 case V2DImode:
3202 case V4SFmode:
3203 case V2DFmode:
3204 if (!type || !AGGREGATE_TYPE_P (type))
3206 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3208 warnedsse = true;
3209 warning (0, "SSE vector argument without SSE enabled "
3210 "changes the ABI");
3212 if (cum->sse_nregs)
3213 ret = gen_reg_or_parallel (mode, orig_mode,
3214 cum->sse_regno + FIRST_SSE_REG);
3216 break;
3217 case V8QImode:
3218 case V4HImode:
3219 case V2SImode:
3220 case V2SFmode:
3221 if (!type || !AGGREGATE_TYPE_P (type))
3223 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3225 warnedmmx = true;
3226 warning (0, "MMX vector argument without MMX enabled "
3227 "changes the ABI");
3229 if (cum->mmx_nregs)
3230 ret = gen_reg_or_parallel (mode, orig_mode,
3231 cum->mmx_regno + FIRST_MMX_REG);
3233 break;
3236 if (TARGET_DEBUG_ARG)
3238 fprintf (stderr,
3239 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3240 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3242 if (ret)
3243 print_simple_rtl (stderr, ret);
3244 else
3245 fprintf (stderr, ", stack");
3247 fprintf (stderr, " )\n");
3250 return ret;
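/* Illustrative sketch (hedged, not part of the original sources): for a
   32-bit fastcall function
       void __attribute__ ((fastcall)) g (int x, int y, int z);
   the code above places X in %ecx (register number 0 is remapped to 2) and
   Y in %edx, while Z falls through to the stack once cum->nregs is
   exhausted.  */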
3253 /* A C expression that indicates when an argument must be passed by
3254 reference. If nonzero for an argument, a copy of that argument is
3255 made in memory and a pointer to the argument is passed instead of
3256 the argument itself. The pointer is passed in whatever way is
3257 appropriate for passing a pointer to that type. */
3259 static bool
3260 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3261 enum machine_mode mode ATTRIBUTE_UNUSED,
3262 tree type, bool named ATTRIBUTE_UNUSED)
3264 if (!TARGET_64BIT)
3265 return 0;
3267 if (type && int_size_in_bytes (type) == -1)
3269 if (TARGET_DEBUG_ARG)
3270 fprintf (stderr, "function_arg_pass_by_reference\n");
3271 return 1;
3274 return 0;
3277 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3278 ABI. Only called if TARGET_SSE. */
3279 static bool
3280 contains_128bit_aligned_vector_p (tree type)
3282 enum machine_mode mode = TYPE_MODE (type);
3283 if (SSE_REG_MODE_P (mode)
3284 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3285 return true;
3286 if (TYPE_ALIGN (type) < 128)
3287 return false;
3289 if (AGGREGATE_TYPE_P (type))
3291 /* Walk the aggregates recursively. */
3292 switch (TREE_CODE (type))
3294 case RECORD_TYPE:
3295 case UNION_TYPE:
3296 case QUAL_UNION_TYPE:
3298 tree field;
3300 if (TYPE_BINFO (type))
3302 tree binfo, base_binfo;
3303 int i;
3305 for (binfo = TYPE_BINFO (type), i = 0;
3306 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3307 if (contains_128bit_aligned_vector_p
3308 (BINFO_TYPE (base_binfo)))
3309 return true;
3311 /* And now merge the fields of structure. */
3312 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3314 if (TREE_CODE (field) == FIELD_DECL
3315 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3316 return true;
3318 break;
3321 case ARRAY_TYPE:
3322 /* Just for use if some language passes arrays by value. */
3323 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3324 return true;
3326 default:
3327 gcc_unreachable ();
3330 return false;
3333 /* Gives the alignment boundary, in bits, of an argument with the
3334 specified mode and type. */
3337 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3339 int align;
3340 if (type)
3341 align = TYPE_ALIGN (type);
3342 else
3343 align = GET_MODE_ALIGNMENT (mode);
3344 if (align < PARM_BOUNDARY)
3345 align = PARM_BOUNDARY;
3346 if (!TARGET_64BIT)
3348 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3349 make an exception for SSE modes since these require 128bit
3350 alignment.
3352 The handling here differs from field_alignment. ICC aligns MMX
3353 arguments to 4 byte boundaries, while structure fields are aligned
3354 to 8 byte boundaries. */
3355 if (!TARGET_SSE)
3356 align = PARM_BOUNDARY;
3357 else if (!type)
3359 if (!SSE_REG_MODE_P (mode))
3360 align = PARM_BOUNDARY;
3362 else
3364 if (!contains_128bit_aligned_vector_p (type))
3365 align = PARM_BOUNDARY;
3368 if (align > 128)
3369 align = 128;
3370 return align;
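/* Hedged example (not in the original sources): on ia32 with SSE enabled, an
   argument whose type contains a 128-bit aligned vector (e.g. __m128) gets a
   128-bit boundary here, while a plain int or double stays at PARM_BOUNDARY;
   the result is capped at 128 bits in all cases.  */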
3373 /* Return true if N is a possible register number of function value. */
3374 bool
3375 ix86_function_value_regno_p (int regno)
3377 if (regno == 0
3378 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3379 || (regno == FIRST_SSE_REG && TARGET_SSE))
3380 return true;
3382 if (!TARGET_64BIT
3383 && (regno == FIRST_MMX_REG && TARGET_MMX))
3384 return true;
3386 return false;
3389 /* Define how to find the value returned by a function.
3390 VALTYPE is the data type of the value (as a tree).
3391 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3392 otherwise, FUNC is 0. */
3394 ix86_function_value (tree valtype, tree fntype_or_decl,
3395 bool outgoing ATTRIBUTE_UNUSED)
3397 enum machine_mode natmode = type_natural_mode (valtype);
3399 if (TARGET_64BIT)
3401 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3402 1, REGPARM_MAX, SSE_REGPARM_MAX,
3403 x86_64_int_return_registers, 0);
3404 /* For zero sized structures, construct_container returns NULL, but we
3405 need to keep the rest of the compiler happy by returning a meaningful value. */
3406 if (!ret)
3407 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3408 return ret;
3410 else
3412 tree fn = NULL_TREE, fntype;
3413 if (fntype_or_decl
3414 && DECL_P (fntype_or_decl))
3415 fn = fntype_or_decl;
3416 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
3417 return gen_rtx_REG (TYPE_MODE (valtype),
3418 ix86_value_regno (natmode, fn, fntype));
3422 /* Return nonzero iff TYPE is returned in memory. */
3424 ix86_return_in_memory (tree type)
3426 int needed_intregs, needed_sseregs, size;
3427 enum machine_mode mode = type_natural_mode (type);
3429 if (TARGET_64BIT)
3430 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3432 if (mode == BLKmode)
3433 return 1;
3435 size = int_size_in_bytes (type);
3437 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3438 return 0;
3440 if (VECTOR_MODE_P (mode) || mode == TImode)
3442 /* User-created vectors small enough to fit in EAX. */
3443 if (size < 8)
3444 return 0;
3446 /* MMX/3dNow values are returned in MM0,
3447 except when it doesn't exist. */
3448 if (size == 8)
3449 return (TARGET_MMX ? 0 : 1);
3451 /* SSE values are returned in XMM0, except when it doesn't exist. */
3452 if (size == 16)
3453 return (TARGET_SSE ? 0 : 1);
3456 if (mode == XFmode)
3457 return 0;
3459 if (size > 12)
3460 return 1;
3461 return 0;
3464 /* When returning SSE vector types, we have a choice of either
3465 (1) being abi incompatible with a -march switch, or
3466 (2) generating an error.
3467 Given no good solution, I think the safest thing is one warning.
3468 The user won't be able to use -Werror, but....
3470 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3471 called in response to actually generating a caller or callee that
3472 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3473 via aggregate_value_p for general type probing from tree-ssa. */
3475 static rtx
3476 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3478 static bool warnedsse, warnedmmx;
3480 if (type)
3482 /* Look at the return type of the function, not the function type. */
3483 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3485 if (!TARGET_SSE && !warnedsse)
3487 if (mode == TImode
3488 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3490 warnedsse = true;
3491 warning (0, "SSE vector return without SSE enabled "
3492 "changes the ABI");
3496 if (!TARGET_MMX && !warnedmmx)
3498 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3500 warnedmmx = true;
3501 warning (0, "MMX vector return without MMX enabled "
3502 "changes the ABI");
3507 return NULL;
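/* Editorial example of the one-time warning issued above: compiling a caller
   or callee such as

     typedef float example_vec __attribute__ ((vector_size (16)));
     example_vec example_make (void);

   on ia32 without SSE enabled (e.g. -mno-sse) reports "SSE vector return
   without SSE enabled changes the ABI", because the value can no longer be
   returned in %xmm0 and comes back in memory instead.  The analogous MMX
   warning fires for 8-byte vector returns without -mmmx.  */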
3510 /* Define how to find the value returned by a library function
3511 assuming the value has mode MODE. */
3513 ix86_libcall_value (enum machine_mode mode)
3515 if (TARGET_64BIT)
3517 switch (mode)
3519 case SFmode:
3520 case SCmode:
3521 case DFmode:
3522 case DCmode:
3523 case TFmode:
3524 return gen_rtx_REG (mode, FIRST_SSE_REG);
3525 case XFmode:
3526 case XCmode:
3527 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3528 case TCmode:
3529 return NULL;
3530 default:
3531 return gen_rtx_REG (mode, 0);
3534 else
3535 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
3538 /* Given a mode, return the register to use for a return value. */
3540 static int
3541 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
3543 gcc_assert (!TARGET_64BIT);
3545 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3546 we prevent this case when mmx is not available. */
3547 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3548 return FIRST_MMX_REG;
3550 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3551 we prevent this case when sse is not available. */
3552 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3553 return FIRST_SSE_REG;
3555 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3556 if (GET_MODE_CLASS (mode) != MODE_FLOAT || !TARGET_FLOAT_RETURNS_IN_80387)
3557 return 0;
3559 /* Floating point return values in %st(0), except for local functions when
3560 SSE math is enabled or for functions with sseregparm attribute. */
3561 if ((func || fntype)
3562 && (mode == SFmode || mode == DFmode))
3564 int sse_level = ix86_function_sseregparm (fntype, func);
3565 if ((sse_level >= 1 && mode == SFmode)
3566 || (sse_level == 2 && mode == DFmode))
3567 return FIRST_SSE_REG;
3570 return FIRST_FLOAT_REG;
3573 /* Create the va_list data type. */
3575 static tree
3576 ix86_build_builtin_va_list (void)
3578 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3580 /* For i386 we use plain pointer to argument area. */
3581 if (!TARGET_64BIT)
3582 return build_pointer_type (char_type_node);
3584 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3585 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3587 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3588 unsigned_type_node);
3589 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3590 unsigned_type_node);
3591 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3592 ptr_type_node);
3593 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3594 ptr_type_node);
3596 va_list_gpr_counter_field = f_gpr;
3597 va_list_fpr_counter_field = f_fpr;
3599 DECL_FIELD_CONTEXT (f_gpr) = record;
3600 DECL_FIELD_CONTEXT (f_fpr) = record;
3601 DECL_FIELD_CONTEXT (f_ovf) = record;
3602 DECL_FIELD_CONTEXT (f_sav) = record;
3604 TREE_CHAIN (record) = type_decl;
3605 TYPE_NAME (record) = type_decl;
3606 TYPE_FIELDS (record) = f_gpr;
3607 TREE_CHAIN (f_gpr) = f_fpr;
3608 TREE_CHAIN (f_fpr) = f_ovf;
3609 TREE_CHAIN (f_ovf) = f_sav;
3611 layout_type (record);
3613 /* The correct type is an array type of one element. */
3614 return build_array_type (record, build_index_type (size_zero_node));
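/* Editorial note: the record built above corresponds to the x86-64 psABI
   va_list layout, i.e. the C-level equivalent is roughly

     typedef struct __va_list_tag
     {
       unsigned int gp_offset;    - byte offset into reg_save_area of the
                                    next general-purpose argument register
       unsigned int fp_offset;    - byte offset into reg_save_area of the
                                    next SSE argument register
       void *overflow_arg_area;   - next stack-passed argument
       void *reg_save_area;       - register save area set up by the prologue
     } va_list[1];

   Building it as an array of one element is what makes va_list decay to a
   pointer when passed to functions such as vfprintf.  */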
3617 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3619 static void
3620 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3621 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3622 int no_rtl)
3624 CUMULATIVE_ARGS next_cum;
3625 rtx save_area = NULL_RTX, mem;
3626 rtx label;
3627 rtx label_ref;
3628 rtx tmp_reg;
3629 rtx nsse_reg;
3630 int set;
3631 tree fntype;
3632 int stdarg_p;
3633 int i;
3635 if (!TARGET_64BIT)
3636 return;
3638 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3639 return;
3641 /* Indicate that we need to allocate space on the stack for the varargs save area. */
3642 ix86_save_varrargs_registers = 1;
3644 cfun->stack_alignment_needed = 128;
3646 fntype = TREE_TYPE (current_function_decl);
3647 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3648 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3649 != void_type_node));
3651 /* For varargs, we do not want to skip the dummy va_dcl argument.
3652 For stdargs, we do want to skip the last named argument. */
3653 next_cum = *cum;
3654 if (stdarg_p)
3655 function_arg_advance (&next_cum, mode, type, 1);
3657 if (!no_rtl)
3658 save_area = frame_pointer_rtx;
3660 set = get_varargs_alias_set ();
3662 for (i = next_cum.regno;
3663 i < ix86_regparm
3664 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3665 i++)
3667 mem = gen_rtx_MEM (Pmode,
3668 plus_constant (save_area, i * UNITS_PER_WORD));
3669 set_mem_alias_set (mem, set);
3670 emit_move_insn (mem, gen_rtx_REG (Pmode,
3671 x86_64_int_parameter_registers[i]));
3674 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3676 /* Now emit code to save SSE registers. The AX parameter contains the number
3677 of SSE parameter registers used to call this function. We use the
3678 sse_prologue_save insn template, which produces a computed jump across
3679 the SSE saves. We need some preparation work to get this working. */
3681 label = gen_label_rtx ();
3682 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3684 /* Compute address to jump to :
3685 label - 4*eax + nnamed_sse_arguments*4 (matching the GEN_INT (4) multiplier below). */
3686 tmp_reg = gen_reg_rtx (Pmode);
3687 nsse_reg = gen_reg_rtx (Pmode);
3688 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3689 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3690 gen_rtx_MULT (Pmode, nsse_reg,
3691 GEN_INT (4))));
3692 if (next_cum.sse_regno)
3693 emit_move_insn
3694 (nsse_reg,
3695 gen_rtx_CONST (DImode,
3696 gen_rtx_PLUS (DImode,
3697 label_ref,
3698 GEN_INT (next_cum.sse_regno * 4))));
3699 else
3700 emit_move_insn (nsse_reg, label_ref);
3701 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3703 /* Compute the address of the memory block we save into. We always use a pointer
3704 pointing 127 bytes after the first byte to store - this is needed to keep
3705 each instruction size limited to 4 bytes (signed 8-bit displacements). */
3706 tmp_reg = gen_reg_rtx (Pmode);
3707 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3708 plus_constant (save_area,
3709 8 * REGPARM_MAX + 127)));
3710 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3711 set_mem_alias_set (mem, set);
3712 set_mem_align (mem, BITS_PER_WORD);
3714 /* And finally do the dirty job! */
3715 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3716 GEN_INT (next_cum.sse_regno), label));
3721 /* Implement va_start. */
3723 void
3724 ix86_va_start (tree valist, rtx nextarg)
3726 HOST_WIDE_INT words, n_gpr, n_fpr;
3727 tree f_gpr, f_fpr, f_ovf, f_sav;
3728 tree gpr, fpr, ovf, sav, t;
3730 /* Only 64bit target needs something special. */
3731 if (!TARGET_64BIT)
3733 std_expand_builtin_va_start (valist, nextarg);
3734 return;
3737 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3738 f_fpr = TREE_CHAIN (f_gpr);
3739 f_ovf = TREE_CHAIN (f_fpr);
3740 f_sav = TREE_CHAIN (f_ovf);
3742 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3743 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3744 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3745 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3746 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3748 /* Count number of gp and fp argument registers used. */
3749 words = current_function_args_info.words;
3750 n_gpr = current_function_args_info.regno;
3751 n_fpr = current_function_args_info.sse_regno;
3753 if (TARGET_DEBUG_ARG)
3754 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3755 (int) words, (int) n_gpr, (int) n_fpr);
3757 if (cfun->va_list_gpr_size)
3759 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3760 build_int_cst (NULL_TREE, n_gpr * 8));
3761 TREE_SIDE_EFFECTS (t) = 1;
3762 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3765 if (cfun->va_list_fpr_size)
3767 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3768 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3769 TREE_SIDE_EFFECTS (t) = 1;
3770 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3773 /* Find the overflow area. */
3774 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3775 if (words != 0)
3776 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3777 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3778 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3779 TREE_SIDE_EFFECTS (t) = 1;
3780 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3782 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3784 /* Find the register save area.
3785 The function prologue saves it right above the stack frame. */
3786 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3787 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3788 TREE_SIDE_EFFECTS (t) = 1;
3789 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
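/* Editorial worked example of the assignments above (hypothetical function):
   for

     void example_log (const char *fmt, double d, ...);

   the named arguments consume one general-purpose register (%rdi) and one
   SSE register (%xmm0), so at va_start time gp_offset is set to 1*8 = 8,
   fp_offset to 8*REGPARM_MAX + 1*16 = 48 + 16 = 64, overflow_arg_area points
   just past any named stack-passed words, and reg_save_area points at the
   block spilled by ix86_setup_incoming_varargs above.  */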
3793 /* Implement va_arg. */
3795 tree
3796 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3798 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3799 tree f_gpr, f_fpr, f_ovf, f_sav;
3800 tree gpr, fpr, ovf, sav, t;
3801 int size, rsize;
3802 tree lab_false, lab_over = NULL_TREE;
3803 tree addr, t2;
3804 rtx container;
3805 int indirect_p = 0;
3806 tree ptrtype;
3807 enum machine_mode nat_mode;
3809 /* Only 64bit target needs something special. */
3810 if (!TARGET_64BIT)
3811 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3813 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3814 f_fpr = TREE_CHAIN (f_gpr);
3815 f_ovf = TREE_CHAIN (f_fpr);
3816 f_sav = TREE_CHAIN (f_ovf);
3818 valist = build_va_arg_indirect_ref (valist);
3819 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3820 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3821 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3822 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3824 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3825 if (indirect_p)
3826 type = build_pointer_type (type);
3827 size = int_size_in_bytes (type);
3828 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3830 nat_mode = type_natural_mode (type);
3831 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3832 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3834 /* Pull the value out of the saved registers. */
3836 addr = create_tmp_var (ptr_type_node, "addr");
3837 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3839 if (container)
3841 int needed_intregs, needed_sseregs;
3842 bool need_temp;
3843 tree int_addr, sse_addr;
3845 lab_false = create_artificial_label ();
3846 lab_over = create_artificial_label ();
3848 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3850 need_temp = (!REG_P (container)
3851 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3852 || TYPE_ALIGN (type) > 128));
3854 /* If we are passing a structure, verify that it occupies a consecutive block
3855 in the register save area. If not, we need to do moves. */
3856 if (!need_temp && !REG_P (container))
3858 /* Verify that all registers are strictly consecutive */
3859 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3861 int i;
3863 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3865 rtx slot = XVECEXP (container, 0, i);
3866 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3867 || INTVAL (XEXP (slot, 1)) != i * 16)
3868 need_temp = 1;
3871 else
3873 int i;
3875 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3877 rtx slot = XVECEXP (container, 0, i);
3878 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3879 || INTVAL (XEXP (slot, 1)) != i * 8)
3880 need_temp = 1;
3884 if (!need_temp)
3886 int_addr = addr;
3887 sse_addr = addr;
3889 else
3891 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3892 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3893 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3894 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3897 /* First ensure that we fit completely in registers. */
3898 if (needed_intregs)
3900 t = build_int_cst (TREE_TYPE (gpr),
3901 (REGPARM_MAX - needed_intregs + 1) * 8);
3902 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3903 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3904 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3905 gimplify_and_add (t, pre_p);
3907 if (needed_sseregs)
3909 t = build_int_cst (TREE_TYPE (fpr),
3910 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3911 + REGPARM_MAX * 8);
3912 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3913 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3914 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3915 gimplify_and_add (t, pre_p);
3918 /* Compute index to start of area used for integer regs. */
3919 if (needed_intregs)
3921 /* int_addr = gpr + sav; */
3922 t = fold_convert (ptr_type_node, gpr);
3923 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3924 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3925 gimplify_and_add (t, pre_p);
3927 if (needed_sseregs)
3929 /* sse_addr = fpr + sav; */
3930 t = fold_convert (ptr_type_node, fpr);
3931 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3932 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3933 gimplify_and_add (t, pre_p);
3935 if (need_temp)
3937 int i;
3938 tree temp = create_tmp_var (type, "va_arg_tmp");
3940 /* addr = &temp; */
3941 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3942 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3943 gimplify_and_add (t, pre_p);
3945 for (i = 0; i < XVECLEN (container, 0); i++)
3947 rtx slot = XVECEXP (container, 0, i);
3948 rtx reg = XEXP (slot, 0);
3949 enum machine_mode mode = GET_MODE (reg);
3950 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3951 tree addr_type = build_pointer_type (piece_type);
3952 tree src_addr, src;
3953 int src_offset;
3954 tree dest_addr, dest;
3956 if (SSE_REGNO_P (REGNO (reg)))
3958 src_addr = sse_addr;
3959 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3961 else
3963 src_addr = int_addr;
3964 src_offset = REGNO (reg) * 8;
3966 src_addr = fold_convert (addr_type, src_addr);
3967 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3968 size_int (src_offset)));
3969 src = build_va_arg_indirect_ref (src_addr);
3971 dest_addr = fold_convert (addr_type, addr);
3972 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3973 size_int (INTVAL (XEXP (slot, 1)))));
3974 dest = build_va_arg_indirect_ref (dest_addr);
3976 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3977 gimplify_and_add (t, pre_p);
3981 if (needed_intregs)
3983 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3984 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
3985 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3986 gimplify_and_add (t, pre_p);
3988 if (needed_sseregs)
3990 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3991 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
3992 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3993 gimplify_and_add (t, pre_p);
3996 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3997 gimplify_and_add (t, pre_p);
3999 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4000 append_to_statement_list (t, pre_p);
4003 /* ... otherwise out of the overflow area. */
4005 /* Care for on-stack alignment if needed. */
4006 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
4007 t = ovf;
4008 else
4010 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4011 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4012 build_int_cst (TREE_TYPE (ovf), align - 1));
4013 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
4014 build_int_cst (TREE_TYPE (t), -align));
4016 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4018 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
4019 gimplify_and_add (t2, pre_p);
4021 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4022 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4023 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
4024 gimplify_and_add (t, pre_p);
4026 if (container)
4028 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4029 append_to_statement_list (t, pre_p);
4032 ptrtype = build_pointer_type (type);
4033 addr = fold_convert (ptrtype, addr);
4035 if (indirect_p)
4036 addr = build_va_arg_indirect_ref (addr);
4037 return build_va_arg_indirect_ref (addr);
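/* Editorial sketch of the code gimplified above, written as the equivalent C
   for fetching a plain int on x86-64 (names are illustrative only):

     int
     example_next_int (va_list ap)
     {
       int val;
       if (ap->gp_offset >= 48)   - 48 == (REGPARM_MAX - 1 + 1) * 8
         {
           val = *(int *) ap->overflow_arg_area;
           ap->overflow_arg_area = (char *) ap->overflow_arg_area + 8;
         }
       else
         {
           val = *(int *) ((char *) ap->reg_save_area + ap->gp_offset);
           ap->gp_offset += 8;
         }
       return val;
     }

   Aggregates spread across both integer and SSE registers take the need_temp
   path above and are reassembled piecewise into a temporary before use.  */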
4040 /* Return nonzero if OPNUM's MEM should be matched
4041 in movabs* patterns. */
4044 ix86_check_movabs (rtx insn, int opnum)
4046 rtx set, mem;
4048 set = PATTERN (insn);
4049 if (GET_CODE (set) == PARALLEL)
4050 set = XVECEXP (set, 0, 0);
4051 gcc_assert (GET_CODE (set) == SET);
4052 mem = XEXP (set, opnum);
4053 while (GET_CODE (mem) == SUBREG)
4054 mem = SUBREG_REG (mem);
4055 gcc_assert (GET_CODE (mem) == MEM);
4056 return (volatile_ok || !MEM_VOLATILE_P (mem));
4059 /* Initialize the table of extra 80387 mathematical constants. */
4061 static void
4062 init_ext_80387_constants (void)
4064 static const char * cst[5] =
4066 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4067 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4068 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4069 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4070 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4072 int i;
4074 for (i = 0; i < 5; i++)
4076 real_from_string (&ext_80387_constants_table[i], cst[i]);
4077 /* Ensure each constant is rounded to XFmode precision. */
4078 real_convert (&ext_80387_constants_table[i],
4079 XFmode, &ext_80387_constants_table[i]);
4082 ext_80387_constants_init = 1;
4085 /* Return true if the constant is something that can be loaded with
4086 a special instruction. */
4089 standard_80387_constant_p (rtx x)
4091 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4092 return -1;
4094 if (x == CONST0_RTX (GET_MODE (x)))
4095 return 1;
4096 if (x == CONST1_RTX (GET_MODE (x)))
4097 return 2;
4099 /* For XFmode constants, try to find a special 80387 instruction when
4100 optimizing for size or on those CPUs that benefit from them. */
4101 if (GET_MODE (x) == XFmode
4102 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4104 REAL_VALUE_TYPE r;
4105 int i;
4107 if (! ext_80387_constants_init)
4108 init_ext_80387_constants ();
4110 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4111 for (i = 0; i < 5; i++)
4112 if (real_identical (&r, &ext_80387_constants_table[i]))
4113 return i + 3;
4116 return 0;
4119 /* Return the opcode of the special instruction to be used to load
4120 the constant X. */
4122 const char *
4123 standard_80387_constant_opcode (rtx x)
4125 switch (standard_80387_constant_p (x))
4127 case 1:
4128 return "fldz";
4129 case 2:
4130 return "fld1";
4131 case 3:
4132 return "fldlg2";
4133 case 4:
4134 return "fldln2";
4135 case 5:
4136 return "fldl2e";
4137 case 6:
4138 return "fldl2t";
4139 case 7:
4140 return "fldpi";
4141 default:
4142 gcc_unreachable ();
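/* Editorial example (hypothetical functions): the source-level effect of the
   special constants handled above.  When a constant matches one of these
   values exactly, it is materialized with a one-instruction 387 load rather
   than a load from memory:

     long double example_zero (void) { return 0.0L; }  - fldz
     long double example_one (void)  { return 1.0L; }  - fld1

   The extended constants (cases 3 through 7, e.g. ln 2 for fldln2 or pi for
   fldpi) are only considered for XFmode values, and only when optimizing for
   size or when the tuning flags ask for them, as checked in
   standard_80387_constant_p above.  */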
4146 /* Return the CONST_DOUBLE representing the 80387 constant that is
4147 loaded by the specified special instruction. The argument IDX
4148 matches the return value from standard_80387_constant_p. */
4151 standard_80387_constant_rtx (int idx)
4153 int i;
4155 if (! ext_80387_constants_init)
4156 init_ext_80387_constants ();
4158 switch (idx)
4160 case 3:
4161 case 4:
4162 case 5:
4163 case 6:
4164 case 7:
4165 i = idx - 3;
4166 break;
4168 default:
4169 gcc_unreachable ();
4172 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4173 XFmode);
4176 /* Return 1 if X is an FP constant we can load into an SSE register without using memory.
4179 standard_sse_constant_p (rtx x)
4181 if (x == const0_rtx)
4182 return 1;
4183 return (x == CONST0_RTX (GET_MODE (x)));
4186 /* Returns 1 if OP contains a symbol reference */
4189 symbolic_reference_mentioned_p (rtx op)
4191 const char *fmt;
4192 int i;
4194 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4195 return 1;
4197 fmt = GET_RTX_FORMAT (GET_CODE (op));
4198 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4200 if (fmt[i] == 'E')
4202 int j;
4204 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4205 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4206 return 1;
4209 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4210 return 1;
4213 return 0;
4216 /* Return 1 if it is appropriate to emit `ret' instructions in the
4217 body of a function. Do this only if the epilogue is simple, needing a
4218 couple of insns. Prior to reloading, we can't tell how many registers
4219 must be saved, so return 0 then. Return 0 if there is no frame
4220 marker to de-allocate. */
4223 ix86_can_use_return_insn_p (void)
4225 struct ix86_frame frame;
4227 if (! reload_completed || frame_pointer_needed)
4228 return 0;
4230 /* Don't allow more than 32K bytes of pop, since that's all we can do
4231 with one instruction. */
4232 if (current_function_pops_args
4233 && current_function_args_size >= 32768)
4234 return 0;
4236 ix86_compute_frame_layout (&frame);
4237 return frame.to_allocate == 0 && frame.nregs == 0;
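/* Editorial illustration (hypothetical declaration): callee-popped argument
   bytes are what make current_function_pops_args nonzero.  For

     void __attribute__ ((stdcall)) example_cb (int a, int b);

   the callee returns with "ret $8".  The single-instruction form takes a
   16-bit immediate, which is why the check above refuses large argument
   sizes and why ix86_expand_epilogue below falls back to an explicit add
   plus an indirect jump once 64K is exceeded.  */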
4240 /* Value should be nonzero if functions must have frame pointers.
4241 Zero means the frame pointer need not be set up (and parms may
4242 be accessed via the stack pointer) in functions that seem suitable. */
4245 ix86_frame_pointer_required (void)
4247 /* If we accessed previous frames, then the generated code expects
4248 to be able to access the saved ebp value in our frame. */
4249 if (cfun->machine->accesses_prev_frame)
4250 return 1;
4252 /* Several x86 OSes need a frame pointer for other reasons,
4253 usually pertaining to setjmp. */
4254 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4255 return 1;
4257 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4258 the frame pointer by default. Turn it back on now if we've not
4259 got a leaf function. */
4260 if (TARGET_OMIT_LEAF_FRAME_POINTER
4261 && (!current_function_is_leaf))
4262 return 1;
4264 if (current_function_profile)
4265 return 1;
4267 return 0;
4270 /* Record that the current function accesses previous call frames. */
4272 void
4273 ix86_setup_frame_addresses (void)
4275 cfun->machine->accesses_prev_frame = 1;
4278 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
4279 # define USE_HIDDEN_LINKONCE 1
4280 #else
4281 # define USE_HIDDEN_LINKONCE 0
4282 #endif
4284 static int pic_labels_used;
4286 /* Fills in the label name that should be used for a pc thunk for
4287 the given register. */
4289 static void
4290 get_pc_thunk_name (char name[32], unsigned int regno)
4292 if (USE_HIDDEN_LINKONCE)
4293 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4294 else
4295 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4299 /* This function generates the pc thunks used for -fpic; each thunk loads
4300 its register with the return address of the caller and then returns. */
4302 void
4303 ix86_file_end (void)
4305 rtx xops[2];
4306 int regno;
4308 for (regno = 0; regno < 8; ++regno)
4310 char name[32];
4312 if (! ((pic_labels_used >> regno) & 1))
4313 continue;
4315 get_pc_thunk_name (name, regno);
4317 if (USE_HIDDEN_LINKONCE)
4319 tree decl;
4321 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4322 error_mark_node);
4323 TREE_PUBLIC (decl) = 1;
4324 TREE_STATIC (decl) = 1;
4325 DECL_ONE_ONLY (decl) = 1;
4327 (*targetm.asm_out.unique_section) (decl, 0);
4328 named_section (decl, NULL, 0);
4330 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4331 fputs ("\t.hidden\t", asm_out_file);
4332 assemble_name (asm_out_file, name);
4333 fputc ('\n', asm_out_file);
4334 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4336 else
4338 text_section ();
4339 ASM_OUTPUT_LABEL (asm_out_file, name);
4342 xops[0] = gen_rtx_REG (SImode, regno);
4343 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4344 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4345 output_asm_insn ("ret", xops);
4348 if (NEED_INDICATE_EXEC_STACK)
4349 file_end_indicate_exec_stack ();
4352 /* Emit code for the SET_GOT patterns. */
4354 const char *
4355 output_set_got (rtx dest)
4357 rtx xops[3];
4359 xops[0] = dest;
4360 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4362 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4364 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4366 if (!flag_pic)
4367 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4368 else
4369 output_asm_insn ("call\t%a2", xops);
4371 #if TARGET_MACHO
4372 /* Output the "canonical" label name ("Lxx$pb") here too. This
4373 is what will be referred to by the Mach-O PIC subsystem. */
4374 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4375 #endif
4376 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4377 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4379 if (flag_pic)
4380 output_asm_insn ("pop{l}\t%0", xops);
4382 else
4384 char name[32];
4385 get_pc_thunk_name (name, REGNO (dest));
4386 pic_labels_used |= 1 << REGNO (dest);
4388 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4389 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4390 output_asm_insn ("call\t%X2", xops);
4393 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4394 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4395 else if (!TARGET_MACHO)
4396 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
4398 return "";
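/* Editorial note on the sequences emitted above (register names assume the
   usual %ebx GOT pointer).  With deep branch prediction enabled, the GOT
   pointer is typically set up as

       call  __i686.get_pc_thunk.bx
       addl  $_GLOBAL_OFFSET_TABLE_, %ebx

   where the thunk emitted by ix86_file_end above is simply

     __i686.get_pc_thunk.bx:
       movl  (%esp), %ebx
       ret

   Without deep branch prediction the older call/pop form is used instead,
   roughly

       call  .L2
     .L2:
       popl  %ebx
       addl  $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   the thunk variant keeping the CPU's return-address predictor balanced.  */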
4401 /* Generate a "push" pattern for input ARG. */
4403 static rtx
4404 gen_push (rtx arg)
4406 return gen_rtx_SET (VOIDmode,
4407 gen_rtx_MEM (Pmode,
4408 gen_rtx_PRE_DEC (Pmode,
4409 stack_pointer_rtx)),
4410 arg);
4413 /* Return >= 0 if there is an unused call-clobbered register available
4414 for the entire function. */
4416 static unsigned int
4417 ix86_select_alt_pic_regnum (void)
4419 if (current_function_is_leaf && !current_function_profile)
4421 int i;
4422 for (i = 2; i >= 0; --i)
4423 if (!regs_ever_live[i])
4424 return i;
4427 return INVALID_REGNUM;
4430 /* Return 1 if we need to save REGNO. */
4431 static int
4432 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4434 if (pic_offset_table_rtx
4435 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4436 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4437 || current_function_profile
4438 || current_function_calls_eh_return
4439 || current_function_uses_const_pool))
4441 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4442 return 0;
4443 return 1;
4446 if (current_function_calls_eh_return && maybe_eh_return)
4448 unsigned i;
4449 for (i = 0; ; i++)
4451 unsigned test = EH_RETURN_DATA_REGNO (i);
4452 if (test == INVALID_REGNUM)
4453 break;
4454 if (test == regno)
4455 return 1;
4459 return (regs_ever_live[regno]
4460 && !call_used_regs[regno]
4461 && !fixed_regs[regno]
4462 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4465 /* Return number of registers to be saved on the stack. */
4467 static int
4468 ix86_nsaved_regs (void)
4470 int nregs = 0;
4471 int regno;
4473 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4474 if (ix86_save_reg (regno, true))
4475 nregs++;
4476 return nregs;
4479 /* Return the offset between two registers, one to be eliminated, and the other
4480 its replacement, at the start of a routine. */
4482 HOST_WIDE_INT
4483 ix86_initial_elimination_offset (int from, int to)
4485 struct ix86_frame frame;
4486 ix86_compute_frame_layout (&frame);
4488 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4489 return frame.hard_frame_pointer_offset;
4490 else if (from == FRAME_POINTER_REGNUM
4491 && to == HARD_FRAME_POINTER_REGNUM)
4492 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4493 else
4495 gcc_assert (to == STACK_POINTER_REGNUM);
4497 if (from == ARG_POINTER_REGNUM)
4498 return frame.stack_pointer_offset;
4500 gcc_assert (from == FRAME_POINTER_REGNUM);
4501 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4505 /* Fill the ix86_frame structure describing the frame of the currently compiled function. */
4507 static void
4508 ix86_compute_frame_layout (struct ix86_frame *frame)
4510 HOST_WIDE_INT total_size;
4511 unsigned int stack_alignment_needed;
4512 HOST_WIDE_INT offset;
4513 unsigned int preferred_alignment;
4514 HOST_WIDE_INT size = get_frame_size ();
4516 frame->nregs = ix86_nsaved_regs ();
4517 total_size = size;
4519 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4520 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4522 /* During reload iteration the number of registers saved can change.
4523 Recompute the value as needed. Do not recompute when the number of registers
4524 didn't change, as reload makes multiple calls to this function and does not
4525 expect the decision to change within a single iteration. */
4526 if (!optimize_size
4527 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4529 int count = frame->nregs;
4531 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4532 /* The fast prologue uses move instead of push to save registers. This
4533 is significantly longer, but also executes faster as modern hardware
4534 can execute the moves in parallel, but can't do that for push/pop.
4536 Be careful about choosing which prologue to emit: when the function takes
4537 many instructions to execute we may use the slow version, as well as when
4538 the function is known to be outside a hot spot (this is known with
4539 feedback only). Weight the size of the function by the number of registers
4540 to save, as it is cheap to use one or two push instructions but very
4541 slow to use many of them. */
4542 if (count)
4543 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4544 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4545 || (flag_branch_probabilities
4546 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4547 cfun->machine->use_fast_prologue_epilogue = false;
4548 else
4549 cfun->machine->use_fast_prologue_epilogue
4550 = !expensive_function_p (count);
4552 if (TARGET_PROLOGUE_USING_MOVE
4553 && cfun->machine->use_fast_prologue_epilogue)
4554 frame->save_regs_using_mov = true;
4555 else
4556 frame->save_regs_using_mov = false;
4559 /* Skip return address and saved base pointer. */
4560 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4562 frame->hard_frame_pointer_offset = offset;
4564 /* Do some sanity checking of stack_alignment_needed and
4565 preferred_alignment, since the i386 port is the only one using these
4566 features, and they may break easily. */
4568 gcc_assert (!size || stack_alignment_needed);
4569 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4570 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4571 gcc_assert (stack_alignment_needed
4572 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4574 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4575 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4577 /* Register save area */
4578 offset += frame->nregs * UNITS_PER_WORD;
4580 /* Va-arg area */
4581 if (ix86_save_varrargs_registers)
4583 offset += X86_64_VARARGS_SIZE;
4584 frame->va_arg_size = X86_64_VARARGS_SIZE;
4586 else
4587 frame->va_arg_size = 0;
4589 /* Align start of frame for local function. */
4590 frame->padding1 = ((offset + stack_alignment_needed - 1)
4591 & -stack_alignment_needed) - offset;
4593 offset += frame->padding1;
4595 /* Frame pointer points here. */
4596 frame->frame_pointer_offset = offset;
4598 offset += size;
4600 /* Add outgoing arguments area. Can be skipped if we eliminated
4601 all the function calls as dead code.
4602 Skipping is however impossible when the function calls alloca. The alloca
4603 expander assumes that the last current_function_outgoing_args_size bytes
4604 of the stack frame are unused. */
4605 if (ACCUMULATE_OUTGOING_ARGS
4606 && (!current_function_is_leaf || current_function_calls_alloca))
4608 offset += current_function_outgoing_args_size;
4609 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4611 else
4612 frame->outgoing_arguments_size = 0;
4614 /* Align stack boundary. Only needed if we're calling another function
4615 or using alloca. */
4616 if (!current_function_is_leaf || current_function_calls_alloca)
4617 frame->padding2 = ((offset + preferred_alignment - 1)
4618 & -preferred_alignment) - offset;
4619 else
4620 frame->padding2 = 0;
4622 offset += frame->padding2;
4624 /* We've reached end of stack frame. */
4625 frame->stack_pointer_offset = offset;
4627 /* Size prologue needs to allocate. */
4628 frame->to_allocate =
4629 (size + frame->padding1 + frame->padding2
4630 + frame->outgoing_arguments_size + frame->va_arg_size);
4632 if ((!frame->to_allocate && frame->nregs <= 1)
4633 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4634 frame->save_regs_using_mov = false;
4636 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4637 && current_function_is_leaf)
4639 frame->red_zone_size = frame->to_allocate;
4640 if (frame->save_regs_using_mov)
4641 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4642 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4643 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4645 else
4646 frame->red_zone_size = 0;
4647 frame->to_allocate -= frame->red_zone_size;
4648 frame->stack_pointer_offset -= frame->red_zone_size;
4649 #if 0
4650 fprintf (stderr, "nregs: %i\n", frame->nregs);
4651 fprintf (stderr, "size: %i\n", size);
4652 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4653 fprintf (stderr, "padding1: %i\n", frame->padding1);
4654 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4655 fprintf (stderr, "padding2: %i\n", frame->padding2);
4656 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4657 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4658 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4659 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4660 frame->hard_frame_pointer_offset);
4661 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4662 #endif
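/* Editorial sketch of the layout computed above, from higher to lower
   addresses (field names refer to struct ix86_frame):

     return address
     saved frame pointer          - only if frame_pointer_needed
                                  <- hard_frame_pointer_offset
     saved registers              - nregs * UNITS_PER_WORD
     varargs register save area   - va_arg_size, 64-bit varargs only
     padding1                     - pad to stack_alignment_needed
                                  <- frame_pointer_offset
     local variables              - get_frame_size ()
     outgoing argument area       - outgoing_arguments_size
     padding2                     - pad to preferred_alignment
                                  <- stack_pointer_offset

   to_allocate covers everything below the register save area; for 64-bit
   leaf functions part of it may live in the red zone instead of being
   allocated explicitly, as handled at the end of the function above.  */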
4665 /* Emit code to save registers in the prologue. */
4667 static void
4668 ix86_emit_save_regs (void)
4670 int regno;
4671 rtx insn;
4673 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4674 if (ix86_save_reg (regno, true))
4676 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4677 RTX_FRAME_RELATED_P (insn) = 1;
4681 /* Emit code to save registers using MOV insns. First register
4682 is stored at POINTER + OFFSET. */
4683 static void
4684 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4686 int regno;
4687 rtx insn;
4689 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4690 if (ix86_save_reg (regno, true))
4692 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4693 Pmode, offset),
4694 gen_rtx_REG (Pmode, regno));
4695 RTX_FRAME_RELATED_P (insn) = 1;
4696 offset += UNITS_PER_WORD;
4700 /* Expand prologue or epilogue stack adjustment.
4701 The pattern exists to put a dependency on all ebp-based memory accesses.
4702 STYLE should be negative if instructions should be marked as frame related,
4703 zero if the %r11 register is live and cannot be freely used, and positive
4704 otherwise. */
4706 static void
4707 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4709 rtx insn;
4711 if (! TARGET_64BIT)
4712 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4713 else if (x86_64_immediate_operand (offset, DImode))
4714 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4715 else
4717 rtx r11;
4718 /* r11 is used by indirect sibcall return as well, set before the
4719 epilogue and used after the epilogue. ATM indirect sibcall
4720 shouldn't be used together with huge frame sizes in one
4721 function because of the frame_size check in sibcall.c. */
4722 gcc_assert (style);
4723 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4724 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4725 if (style < 0)
4726 RTX_FRAME_RELATED_P (insn) = 1;
4727 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4728 offset));
4730 if (style < 0)
4731 RTX_FRAME_RELATED_P (insn) = 1;
4734 /* Expand the prologue into a bunch of separate insns. */
4736 void
4737 ix86_expand_prologue (void)
4739 rtx insn;
4740 bool pic_reg_used;
4741 struct ix86_frame frame;
4742 HOST_WIDE_INT allocate;
4744 ix86_compute_frame_layout (&frame);
4746 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4747 slower on all targets. Also sdb doesn't like it. */
4749 if (frame_pointer_needed)
4751 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4752 RTX_FRAME_RELATED_P (insn) = 1;
4754 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4755 RTX_FRAME_RELATED_P (insn) = 1;
4758 allocate = frame.to_allocate;
4760 if (!frame.save_regs_using_mov)
4761 ix86_emit_save_regs ();
4762 else
4763 allocate += frame.nregs * UNITS_PER_WORD;
4765 /* When using the red zone we may start register saving before allocating
4766 the stack frame, saving one cycle of the prologue. */
4767 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4768 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4769 : stack_pointer_rtx,
4770 -frame.nregs * UNITS_PER_WORD);
4772 if (allocate == 0)
4774 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4775 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4776 GEN_INT (-allocate), -1);
4777 else
4779 /* Only valid for Win32. */
4780 rtx eax = gen_rtx_REG (SImode, 0);
4781 bool eax_live = ix86_eax_live_at_start_p ();
4782 rtx t;
4784 gcc_assert (!TARGET_64BIT);
4786 if (eax_live)
4788 emit_insn (gen_push (eax));
4789 allocate -= 4;
4792 emit_move_insn (eax, GEN_INT (allocate));
4794 insn = emit_insn (gen_allocate_stack_worker (eax));
4795 RTX_FRAME_RELATED_P (insn) = 1;
4796 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4797 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4798 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4799 t, REG_NOTES (insn));
4801 if (eax_live)
4803 if (frame_pointer_needed)
4804 t = plus_constant (hard_frame_pointer_rtx,
4805 allocate
4806 - frame.to_allocate
4807 - frame.nregs * UNITS_PER_WORD);
4808 else
4809 t = plus_constant (stack_pointer_rtx, allocate);
4810 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4814 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4816 if (!frame_pointer_needed || !frame.to_allocate)
4817 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4818 else
4819 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4820 -frame.nregs * UNITS_PER_WORD);
4823 pic_reg_used = false;
4824 if (pic_offset_table_rtx
4825 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4826 || current_function_profile))
4828 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4830 if (alt_pic_reg_used != INVALID_REGNUM)
4831 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4833 pic_reg_used = true;
4836 if (pic_reg_used)
4838 if (TARGET_64BIT)
4839 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
4840 else
4841 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4843 /* Even with accurate pre-reload life analysis, we can wind up
4844 deleting all references to the pic register after reload.
4845 Consider if cross-jumping unifies two sides of a branch
4846 controlled by a comparison vs the only read from a global.
4847 In which case, allow the set_got to be deleted, though we're
4848 too late to do anything about the ebx save in the prologue. */
4849 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4852 /* Prevent function calls from being scheduled before the call to mcount.
4853 In the pic_reg_used case, make sure that the got load isn't deleted. */
4854 if (current_function_profile)
4855 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4858 /* Emit code to restore saved registers using MOV insns. First register
4859 is restored from POINTER + OFFSET. */
4860 static void
4861 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4862 int maybe_eh_return)
4864 int regno;
4865 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4867 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4868 if (ix86_save_reg (regno, maybe_eh_return))
4870 /* Ensure that adjust_address won't be forced to produce pointer
4871 out of range allowed by x86-64 instruction set. */
4872 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4874 rtx r11;
4876 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4877 emit_move_insn (r11, GEN_INT (offset));
4878 emit_insn (gen_adddi3 (r11, r11, pointer));
4879 base_address = gen_rtx_MEM (Pmode, r11);
4880 offset = 0;
4882 emit_move_insn (gen_rtx_REG (Pmode, regno),
4883 adjust_address (base_address, Pmode, offset));
4884 offset += UNITS_PER_WORD;
4888 /* Restore function stack, frame, and registers. */
4890 void
4891 ix86_expand_epilogue (int style)
4893 int regno;
4894 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4895 struct ix86_frame frame;
4896 HOST_WIDE_INT offset;
4898 ix86_compute_frame_layout (&frame);
4900 /* Calculate start of saved registers relative to ebp. Special care
4901 must be taken for the normal return case of a function using
4902 eh_return: the eax and edx registers are marked as saved, but not
4903 restored along this path. */
4904 offset = frame.nregs;
4905 if (current_function_calls_eh_return && style != 2)
4906 offset -= 2;
4907 offset *= -UNITS_PER_WORD;
4909 /* If we're only restoring one register and sp is not valid, then
4910 use a move instruction to restore the register, since it's
4911 less work than reloading sp and popping the register.
4913 The default code results in a stack adjustment using an add/lea instruction,
4914 while this code results in a LEAVE instruction (or its discrete equivalent),
4915 so it is profitable in some other cases as well, especially when there
4916 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4917 is set and there is exactly one register to pop. This heuristic may need some
4918 tuning in the future. */
4919 if ((!sp_valid && frame.nregs <= 1)
4920 || (TARGET_EPILOGUE_USING_MOVE
4921 && cfun->machine->use_fast_prologue_epilogue
4922 && (frame.nregs > 1 || frame.to_allocate))
4923 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4924 || (frame_pointer_needed && TARGET_USE_LEAVE
4925 && cfun->machine->use_fast_prologue_epilogue
4926 && frame.nregs == 1)
4927 || current_function_calls_eh_return)
4929 /* Restore registers. We can use ebp or esp to address the memory
4930 locations. If both are available, default to ebp, since offsets
4931 are known to be small. The only exception is esp pointing directly to the
4932 end of the block of saved registers, where we may simplify the addressing
4933 mode. */
4935 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4936 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4937 frame.to_allocate, style == 2);
4938 else
4939 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4940 offset, style == 2);
4942 /* eh_return epilogues need %ecx added to the stack pointer. */
4943 if (style == 2)
4945 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4947 if (frame_pointer_needed)
4949 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4950 tmp = plus_constant (tmp, UNITS_PER_WORD);
4951 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4953 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4954 emit_move_insn (hard_frame_pointer_rtx, tmp);
4956 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4957 const0_rtx, style);
4959 else
4961 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4962 tmp = plus_constant (tmp, (frame.to_allocate
4963 + frame.nregs * UNITS_PER_WORD));
4964 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4967 else if (!frame_pointer_needed)
4968 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4969 GEN_INT (frame.to_allocate
4970 + frame.nregs * UNITS_PER_WORD),
4971 style);
4972 /* If not an i386, mov & pop is faster than "leave". */
4973 else if (TARGET_USE_LEAVE || optimize_size
4974 || !cfun->machine->use_fast_prologue_epilogue)
4975 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4976 else
4978 pro_epilogue_adjust_stack (stack_pointer_rtx,
4979 hard_frame_pointer_rtx,
4980 const0_rtx, style);
4981 if (TARGET_64BIT)
4982 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4983 else
4984 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4987 else
4989 /* First step is to deallocate the stack frame so that we can
4990 pop the registers. */
4991 if (!sp_valid)
4993 gcc_assert (frame_pointer_needed);
4994 pro_epilogue_adjust_stack (stack_pointer_rtx,
4995 hard_frame_pointer_rtx,
4996 GEN_INT (offset), style);
4998 else if (frame.to_allocate)
4999 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5000 GEN_INT (frame.to_allocate), style);
5002 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5003 if (ix86_save_reg (regno, false))
5005 if (TARGET_64BIT)
5006 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5007 else
5008 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5010 if (frame_pointer_needed)
5012 /* Leave results in shorter dependency chains on CPUs that are
5013 able to grok it fast. */
5014 if (TARGET_USE_LEAVE)
5015 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5016 else if (TARGET_64BIT)
5017 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5018 else
5019 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5023 /* Sibcall epilogues don't want a return instruction. */
5024 if (style == 0)
5025 return;
5027 if (current_function_pops_args && current_function_args_size)
5029 rtx popc = GEN_INT (current_function_pops_args);
5031 /* The i386 can only pop 64K bytes with one ret. If asked to pop more, pop
5032 the return address, do an explicit add, and jump indirectly to the
5033 caller. */
5035 if (current_function_pops_args >= 65536)
5037 rtx ecx = gen_rtx_REG (SImode, 2);
5039 /* There is no "pascal" calling convention in 64bit ABI. */
5040 gcc_assert (!TARGET_64BIT);
5042 emit_insn (gen_popsi1 (ecx));
5043 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5044 emit_jump_insn (gen_return_indirect_internal (ecx));
5046 else
5047 emit_jump_insn (gen_return_pop_internal (popc));
5049 else
5050 emit_jump_insn (gen_return_internal ());
5053 /* Reset from the function's potential modifications. */
5055 static void
5056 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5057 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5059 if (pic_offset_table_rtx)
5060 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5063 /* Extract the parts of an RTL expression that is a valid memory address
5064 for an instruction. Return 0 if the structure of the address is
5065 grossly off. Return -1 if the address contains ASHIFT, so it is not
5066 strictly valid, but is still used for computing the length of the lea instruction. */
5069 ix86_decompose_address (rtx addr, struct ix86_address *out)
5071 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5072 rtx base_reg, index_reg;
5073 HOST_WIDE_INT scale = 1;
5074 rtx scale_rtx = NULL_RTX;
5075 int retval = 1;
5076 enum ix86_address_seg seg = SEG_DEFAULT;
5078 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5079 base = addr;
5080 else if (GET_CODE (addr) == PLUS)
5082 rtx addends[4], op;
5083 int n = 0, i;
5085 op = addr;
5088 if (n >= 4)
5089 return 0;
5090 addends[n++] = XEXP (op, 1);
5091 op = XEXP (op, 0);
5093 while (GET_CODE (op) == PLUS);
5094 if (n >= 4)
5095 return 0;
5096 addends[n] = op;
5098 for (i = n; i >= 0; --i)
5100 op = addends[i];
5101 switch (GET_CODE (op))
5103 case MULT:
5104 if (index)
5105 return 0;
5106 index = XEXP (op, 0);
5107 scale_rtx = XEXP (op, 1);
5108 break;
5110 case UNSPEC:
5111 if (XINT (op, 1) == UNSPEC_TP
5112 && TARGET_TLS_DIRECT_SEG_REFS
5113 && seg == SEG_DEFAULT)
5114 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5115 else
5116 return 0;
5117 break;
5119 case REG:
5120 case SUBREG:
5121 if (!base)
5122 base = op;
5123 else if (!index)
5124 index = op;
5125 else
5126 return 0;
5127 break;
5129 case CONST:
5130 case CONST_INT:
5131 case SYMBOL_REF:
5132 case LABEL_REF:
5133 if (disp)
5134 return 0;
5135 disp = op;
5136 break;
5138 default:
5139 return 0;
5143 else if (GET_CODE (addr) == MULT)
5145 index = XEXP (addr, 0); /* index*scale */
5146 scale_rtx = XEXP (addr, 1);
5148 else if (GET_CODE (addr) == ASHIFT)
5150 rtx tmp;
5152 /* We're called for lea too, which implements ashift on occasion. */
5153 index = XEXP (addr, 0);
5154 tmp = XEXP (addr, 1);
5155 if (GET_CODE (tmp) != CONST_INT)
5156 return 0;
5157 scale = INTVAL (tmp);
5158 if ((unsigned HOST_WIDE_INT) scale > 3)
5159 return 0;
5160 scale = 1 << scale;
5161 retval = -1;
5163 else
5164 disp = addr; /* displacement */
5166 /* Extract the integral value of scale. */
5167 if (scale_rtx)
5169 if (GET_CODE (scale_rtx) != CONST_INT)
5170 return 0;
5171 scale = INTVAL (scale_rtx);
5174 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
5175 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
5177 /* Allow the arg pointer and stack pointer as the index if there is no scaling. */
5178 if (base_reg && index_reg && scale == 1
5179 && (index_reg == arg_pointer_rtx
5180 || index_reg == frame_pointer_rtx
5181 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
5183 rtx tmp;
5184 tmp = base, base = index, index = tmp;
5185 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
5188 /* Special case: %ebp cannot be encoded as a base without a displacement. */
5189 if ((base_reg == hard_frame_pointer_rtx
5190 || base_reg == frame_pointer_rtx
5191 || base_reg == arg_pointer_rtx) && !disp)
5192 disp = const0_rtx;
5194 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
5195 Avoid this by transforming to [%esi+0]. */
5196 if (ix86_tune == PROCESSOR_K6 && !optimize_size
5197 && base_reg && !index_reg && !disp
5198 && REG_P (base_reg)
5199 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
5200 disp = const0_rtx;
5202 /* Special case: encode reg+reg instead of reg*2. */
5203 if (!base && index && scale && scale == 2)
5204 base = index, base_reg = index_reg, scale = 1;
5206 /* Special case: scaling cannot be encoded without base or displacement. */
5207 if (!base && !disp && index && scale != 1)
5208 disp = const0_rtx;
5210 out->base = base;
5211 out->index = index;
5212 out->disp = disp;
5213 out->scale = scale;
5214 out->seg = seg;
5216 return retval;
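/* Editorial worked example, not part of the original source: the memory
   operand of "movl 12(%ebx,%esi,4), %eax" reaches this function as

     (plus (plus (mult (reg %esi) (const_int 4)) (reg %ebx)) (const_int 12))

   and decomposes into out->base = %ebx, out->index = %esi, out->scale = 4,
   out->disp = (const_int 12), out->seg = SEG_DEFAULT, with a return value
   of 1.  An address written with ASHIFT, as lea sometimes is, decomposes
   the same way but returns -1 to mark it as not strictly valid while still
   being usable for computing the lea length.  */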
5219 /* Return cost of the memory address x.
5220 For i386, it is better to use a complex address than let gcc copy
5221 the address into a reg and make a new pseudo. But not if the address
5222 requires two regs - that would mean more pseudos with longer
5223 lifetimes. */
5224 static int
5225 ix86_address_cost (rtx x)
5227 struct ix86_address parts;
5228 int cost = 1;
5229 int ok = ix86_decompose_address (x, &parts);
5231 gcc_assert (ok);
5233 if (parts.base && GET_CODE (parts.base) == SUBREG)
5234 parts.base = SUBREG_REG (parts.base);
5235 if (parts.index && GET_CODE (parts.index) == SUBREG)
5236 parts.index = SUBREG_REG (parts.index);
5238 /* More complex memory references are better. */
5239 if (parts.disp && parts.disp != const0_rtx)
5240 cost--;
5241 if (parts.seg != SEG_DEFAULT)
5242 cost--;
5244 /* Attempt to minimize number of registers in the address. */
5245 if ((parts.base
5246 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5247 || (parts.index
5248 && (!REG_P (parts.index)
5249 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5250 cost++;
5252 if (parts.base
5253 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5254 && parts.index
5255 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5256 && parts.base != parts.index)
5257 cost++;
5259 /* The AMD-K6 doesn't like addresses with the ModR/M byte set to 00_xxx_100b,
5260 since its predecode logic can't detect the length of such instructions
5261 and they degenerate to vector decoded. Increase the cost of such
5262 addresses here. The penalty is at least 2 cycles. It may be worthwhile
5263 to split such addresses or even to refuse them entirely.
5265 The following addressing modes are affected:
5266 [base+scale*index]
5267 [scale*index+disp]
5268 [base+index]
5270 The first and last cases may be avoidable by explicitly coding a zero
5271 displacement in the memory address, but I don't have an AMD-K6 machine handy
5272 to check this theory. */
5274 if (TARGET_K6
5275 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5276 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5277 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5278 cost += 10;
5280 return cost;
5283 /* If X is a machine specific address (i.e. a symbol or label being
5284 referenced as a displacement from the GOT implemented using an
5285 UNSPEC), then return the base term. Otherwise return X. */
5288 ix86_find_base_term (rtx x)
5290 rtx term;
5292 if (TARGET_64BIT)
5294 if (GET_CODE (x) != CONST)
5295 return x;
5296 term = XEXP (x, 0);
5297 if (GET_CODE (term) == PLUS
5298 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5299 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5300 term = XEXP (term, 0);
5301 if (GET_CODE (term) != UNSPEC
5302 || XINT (term, 1) != UNSPEC_GOTPCREL)
5303 return x;
5305 term = XVECEXP (term, 0, 0);
5307 if (GET_CODE (term) != SYMBOL_REF
5308 && GET_CODE (term) != LABEL_REF)
5309 return x;
5311 return term;
5314 term = ix86_delegitimize_address (x);
5316 if (GET_CODE (term) != SYMBOL_REF
5317 && GET_CODE (term) != LABEL_REF)
5318 return x;
5320 return term;
5323 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
5324 this is used to form addresses to local data when -fPIC is in
5325 use. */
5327 static bool
5328 darwin_local_data_pic (rtx disp)
5330 if (GET_CODE (disp) == MINUS)
5332 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5333 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5334 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5336 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5337 if (! strcmp (sym_name, "<pic base>"))
5338 return true;
5342 return false;
5345 /* Determine if a given RTX is a valid constant. We already know this
5346 satisfies CONSTANT_P. */
5348 bool
5349 legitimate_constant_p (rtx x)
5351 switch (GET_CODE (x))
5353 case CONST:
5354 x = XEXP (x, 0);
5356 if (GET_CODE (x) == PLUS)
5358 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5359 return false;
5360 x = XEXP (x, 0);
5363 if (TARGET_MACHO && darwin_local_data_pic (x))
5364 return true;
5366 /* Only some unspecs are valid as "constants". */
5367 if (GET_CODE (x) == UNSPEC)
5368 switch (XINT (x, 1))
5370 case UNSPEC_GOTOFF:
5371 return TARGET_64BIT;
5372 case UNSPEC_TPOFF:
5373 case UNSPEC_NTPOFF:
5374 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5375 case UNSPEC_DTPOFF:
5376 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5377 default:
5378 return false;
5381 /* We must have drilled down to a symbol. */
5382 if (!symbolic_operand (x, Pmode))
5383 return false;
5384 /* FALLTHRU */
5386 case SYMBOL_REF:
5387 /* TLS symbols are never valid. */
5388 if (tls_symbolic_operand (x, Pmode))
5389 return false;
5390 break;
5392 default:
5393 break;
5396 /* Otherwise we handle everything else in the move patterns. */
5397 return true;
5400 /* Determine if it's legal to put X into the constant pool. This
5401 is not possible for the address of thread-local symbols, which
5402 is checked above. */
5404 static bool
5405 ix86_cannot_force_const_mem (rtx x)
5407 return !legitimate_constant_p (x);
5410 /* Determine if a given RTX is a valid constant address. */
5412 bool
5413 constant_address_p (rtx x)
5415 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5418 /* Nonzero if the constant value X is a legitimate general operand
5419 when generating PIC code. It is given that flag_pic is on and
5420 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5422 bool
5423 legitimate_pic_operand_p (rtx x)
5425 rtx inner;
5427 switch (GET_CODE (x))
5429 case CONST:
5430 inner = XEXP (x, 0);
5431 if (GET_CODE (inner) == PLUS
5432 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
5433 inner = XEXP (inner, 0);
5435 /* Only some unspecs are valid as "constants". */
5436 if (GET_CODE (inner) == UNSPEC)
5437 switch (XINT (inner, 1))
5439 case UNSPEC_GOTOFF:
5440 return TARGET_64BIT;
5441 case UNSPEC_TPOFF:
5442 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5443 default:
5444 return false;
5446 /* FALLTHRU */
5448 case SYMBOL_REF:
5449 case LABEL_REF:
5450 return legitimate_pic_address_disp_p (x);
5452 default:
5453 return true;
5457 /* Determine if a given CONST RTX is a valid memory displacement
5458 in PIC mode. */
5461 legitimate_pic_address_disp_p (rtx disp)
5463 bool saw_plus;
5465 /* In 64bit mode we can allow direct addresses of symbols and labels
5466 when they are not dynamic symbols. */
5467 if (TARGET_64BIT)
5469 /* TLS references should always be enclosed in UNSPEC. */
5470 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5471 return 0;
5472 if (GET_CODE (disp) == SYMBOL_REF
5473 && !SYMBOL_REF_FAR_ADDR_P (disp)
5474 && SYMBOL_REF_LOCAL_P (disp))
5475 return 1;
5476 if (GET_CODE (disp) == LABEL_REF)
5477 return 1;
5478 if (GET_CODE (disp) == CONST
5479 && GET_CODE (XEXP (disp, 0)) == PLUS)
5481 rtx op0 = XEXP (XEXP (disp, 0), 0);
5482 rtx op1 = XEXP (XEXP (disp, 0), 1);
5484 /* TLS references should always be enclosed in UNSPEC. */
5485 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5486 return 0;
5487 if (((GET_CODE (op0) == SYMBOL_REF
5488 && !SYMBOL_REF_FAR_ADDR_P (op0)
5489 && SYMBOL_REF_LOCAL_P (op0))
5490 || GET_CODE (op0) == LABEL_REF)
5491 && GET_CODE (op1) == CONST_INT
5492 && INTVAL (op1) < 16*1024*1024
5493 && INTVAL (op1) >= -16*1024*1024)
5494 return 1;
5497 if (GET_CODE (disp) != CONST)
5498 return 0;
5499 disp = XEXP (disp, 0);
5501 if (TARGET_64BIT)
5503 /* It is unsafe to allow PLUS expressions here; that would limit the allowed
5504 distance of GOT tables. We should not need these anyway. */
5505 if (GET_CODE (disp) != UNSPEC
5506 || (XINT (disp, 1) != UNSPEC_GOTPCREL
5507 && XINT (disp, 1) != UNSPEC_GOTOFF))
5508 return 0;
5510 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5511 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5512 return 0;
5513 return 1;
5516 saw_plus = false;
5517 if (GET_CODE (disp) == PLUS)
5519 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5520 return 0;
5521 disp = XEXP (disp, 0);
5522 saw_plus = true;
5525 if (TARGET_MACHO && darwin_local_data_pic (disp))
5526 return 1;
5528 if (GET_CODE (disp) != UNSPEC)
5529 return 0;
5531 switch (XINT (disp, 1))
5533 case UNSPEC_GOT:
5534 if (saw_plus)
5535 return false;
5536 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5537 case UNSPEC_GOTOFF:
5538 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5539 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5540 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5541 return false;
5542 case UNSPEC_GOTTPOFF:
5543 case UNSPEC_GOTNTPOFF:
5544 case UNSPEC_INDNTPOFF:
5545 if (saw_plus)
5546 return false;
5547 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5548 case UNSPEC_NTPOFF:
5549 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5550 case UNSPEC_DTPOFF:
5551 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5554 return 0;
5557 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5558 memory address for an instruction. The MODE argument is the machine mode
5559 for the MEM expression that wants to use this address.
5561 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5562 convert common non-canonical forms to canonical form so that they will
5563 be recognized. */
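/* As a rough example, a canonical base + index*scale + disp address such as
     (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 16))
   decomposes into base, index, scale == 4 and disp == 16; anything
   ix86_decompose_address cannot handle is rejected below.  */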
5566 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5568 struct ix86_address parts;
5569 rtx base, index, disp;
5570 HOST_WIDE_INT scale;
5571 const char *reason = NULL;
5572 rtx reason_rtx = NULL_RTX;
5574 if (TARGET_DEBUG_ADDR)
5576 fprintf (stderr,
5577 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5578 GET_MODE_NAME (mode), strict);
5579 debug_rtx (addr);
5582 if (ix86_decompose_address (addr, &parts) <= 0)
5584 reason = "decomposition failed";
5585 goto report_error;
5588 base = parts.base;
5589 index = parts.index;
5590 disp = parts.disp;
5591 scale = parts.scale;
5593 /* Validate base register.
5595 Don't allow SUBREGs that span more than a word here. It can lead to spill
5596 failures when the base is one word out of a two-word structure, which is
5597 represented internally as a DImode int. */
5599 if (base)
5601 rtx reg;
5602 reason_rtx = base;
5604 if (REG_P (base))
5605 reg = base;
5606 else if (GET_CODE (base) == SUBREG
5607 && REG_P (SUBREG_REG (base))
5608 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5609 <= UNITS_PER_WORD)
5610 reg = SUBREG_REG (base);
5611 else
5613 reason = "base is not a register";
5614 goto report_error;
5617 if (GET_MODE (base) != Pmode)
5619 reason = "base is not in Pmode";
5620 goto report_error;
5623 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5624 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5626 reason = "base is not valid";
5627 goto report_error;
5631 /* Validate index register.
5633 Don't allow SUBREGs that span more than a word here -- same as above. */
5635 if (index)
5637 rtx reg;
5638 reason_rtx = index;
5640 if (REG_P (index))
5641 reg = index;
5642 else if (GET_CODE (index) == SUBREG
5643 && REG_P (SUBREG_REG (index))
5644 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5645 <= UNITS_PER_WORD)
5646 reg = SUBREG_REG (index);
5647 else
5649 reason = "index is not a register";
5650 goto report_error;
5653 if (GET_MODE (index) != Pmode)
5655 reason = "index is not in Pmode";
5656 goto report_error;
5659 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5660 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5662 reason = "index is not valid";
5663 goto report_error;
5667 /* Validate scale factor. */
5668 if (scale != 1)
5670 reason_rtx = GEN_INT (scale);
5671 if (!index)
5673 reason = "scale without index";
5674 goto report_error;
5677 if (scale != 2 && scale != 4 && scale != 8)
5679 reason = "scale is not a valid multiplier";
5680 goto report_error;
5684 /* Validate displacement. */
5685 if (disp)
5687 reason_rtx = disp;
5689 if (GET_CODE (disp) == CONST
5690 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5691 switch (XINT (XEXP (disp, 0), 1))
5693 case UNSPEC_GOT:
5694 case UNSPEC_GOTOFF:
5695 case UNSPEC_GOTPCREL:
5696 gcc_assert (flag_pic);
5697 goto is_legitimate_pic;
5699 case UNSPEC_GOTTPOFF:
5700 case UNSPEC_GOTNTPOFF:
5701 case UNSPEC_INDNTPOFF:
5702 case UNSPEC_NTPOFF:
5703 case UNSPEC_DTPOFF:
5704 break;
5706 default:
5707 reason = "invalid address unspec";
5708 goto report_error;
5711 else if (flag_pic && (SYMBOLIC_CONST (disp)
5712 #if TARGET_MACHO
5713 && !machopic_operand_p (disp)
5714 #endif
5717 is_legitimate_pic:
5718 if (TARGET_64BIT && (index || base))
5720 /* foo@dtpoff(%rX) is ok. */
5721 if (GET_CODE (disp) != CONST
5722 || GET_CODE (XEXP (disp, 0)) != PLUS
5723 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5724 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5725 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5726 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5728 reason = "non-constant pic memory reference";
5729 goto report_error;
5732 else if (! legitimate_pic_address_disp_p (disp))
5734 reason = "displacement is an invalid pic construct";
5735 goto report_error;
5738 /* This code used to verify that a symbolic pic displacement
5739 includes the pic_offset_table_rtx register.
5741 While this is a good idea, unfortunately these constructs may
5742 be created by the "adds using lea" optimization for incorrect
5743 code like:
5745 int a;
5746 int foo(int i)
5748 return *(&a+i);
5751 This code is nonsensical, but results in addressing the
5752 GOT table with a pic_offset_table_rtx base. We can't
5753 just refuse it easily, since it gets matched by the
5754 "addsi3" pattern, which later gets split to an lea when
5755 the output register differs from the input. While this
5756 could be handled by a separate addsi pattern for this case
5757 that never results in an lea, disabling this test seems to
5758 be the easier and correct fix for the crash. */
5760 else if (GET_CODE (disp) != LABEL_REF
5761 && GET_CODE (disp) != CONST_INT
5762 && (GET_CODE (disp) != CONST
5763 || !legitimate_constant_p (disp))
5764 && (GET_CODE (disp) != SYMBOL_REF
5765 || !legitimate_constant_p (disp)))
5767 reason = "displacement is not constant";
5768 goto report_error;
5770 else if (TARGET_64BIT
5771 && !x86_64_immediate_operand (disp, VOIDmode))
5773 reason = "displacement is out of range";
5774 goto report_error;
5778 /* Everything looks valid. */
5779 if (TARGET_DEBUG_ADDR)
5780 fprintf (stderr, "Success.\n");
5781 return TRUE;
5783 report_error:
5784 if (TARGET_DEBUG_ADDR)
5786 fprintf (stderr, "Error: %s\n", reason);
5787 debug_rtx (reason_rtx);
5789 return FALSE;
5792 /* Return a unique alias set for the GOT. */
5794 static HOST_WIDE_INT
5795 ix86_GOT_alias_set (void)
5797 static HOST_WIDE_INT set = -1;
5798 if (set == -1)
5799 set = new_alias_set ();
5800 return set;
5803 /* Return a legitimate reference for ORIG (an address) using the
5804 register REG. If REG is 0, a new pseudo is generated.
5806 There are two types of references that must be handled:
5808 1. Global data references must load the address from the GOT, via
5809 the PIC reg. An insn is emitted to do this load, and the reg is
5810 returned.
5812 2. Static data references, constant pool addresses, and code labels
5813 compute the address as an offset from the GOT, whose base is in
5814 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5815 differentiate them from global data objects. The returned
5816 address is the PIC reg + an unspec constant.
5818 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5819 reg also appears in the address. */
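/* Concretely, for 32-bit PIC a global data reference is rewritten as a load
   from the GOT,
     (mem (plus pic_offset_table_rtx (const (unspec [sym] UNSPEC_GOT))))
   while a local reference becomes an offset from the PIC register,
     (plus pic_offset_table_rtx (const (unspec [sym] UNSPEC_GOTOFF)))
   corresponding to foo@GOT(%ebx) and foo@GOTOFF(%ebx) in the output.  */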
5821 static rtx
5822 legitimize_pic_address (rtx orig, rtx reg)
5824 rtx addr = orig;
5825 rtx new = orig;
5826 rtx base;
5828 #if TARGET_MACHO
5829 if (reg == 0)
5830 reg = gen_reg_rtx (Pmode);
5831 /* Use the generic Mach-O PIC machinery. */
5832 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5833 #endif
5835 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5836 new = addr;
5837 else if (TARGET_64BIT
5838 && ix86_cmodel != CM_SMALL_PIC
5839 && local_symbolic_operand (addr, Pmode))
5841 rtx tmpreg;
5842 /* This symbol may be referenced via a displacement from the PIC
5843 base address (@GOTOFF). */
5845 if (reload_in_progress)
5846 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5847 if (GET_CODE (addr) == CONST)
5848 addr = XEXP (addr, 0);
5849 if (GET_CODE (addr) == PLUS)
5851 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5852 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5854 else
5855 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5856 new = gen_rtx_CONST (Pmode, new);
5857 if (!reg)
5858 tmpreg = gen_reg_rtx (Pmode);
5859 else
5860 tmpreg = reg;
5861 emit_move_insn (tmpreg, new);
5863 if (reg != 0)
5865 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
5866 tmpreg, 1, OPTAB_DIRECT);
5867 new = reg;
5869 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
5871 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5873 /* This symbol may be referenced via a displacement from the PIC
5874 base address (@GOTOFF). */
5876 if (reload_in_progress)
5877 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5878 if (GET_CODE (addr) == CONST)
5879 addr = XEXP (addr, 0);
5880 if (GET_CODE (addr) == PLUS)
5882 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5883 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5885 else
5886 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5887 new = gen_rtx_CONST (Pmode, new);
5888 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5890 if (reg != 0)
5892 emit_move_insn (reg, new);
5893 new = reg;
5896 else if (GET_CODE (addr) == SYMBOL_REF)
5898 if (TARGET_64BIT)
5900 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5901 new = gen_rtx_CONST (Pmode, new);
5902 new = gen_const_mem (Pmode, new);
5903 set_mem_alias_set (new, ix86_GOT_alias_set ());
5905 if (reg == 0)
5906 reg = gen_reg_rtx (Pmode);
5907 /* Use gen_movsi directly, otherwise the address is loaded
5908 into a register for CSE. We don't want to CSE these addresses;
5909 instead we CSE addresses from the GOT table, so skip this. */
5910 emit_insn (gen_movsi (reg, new));
5911 new = reg;
5913 else
5915 /* This symbol must be referenced via a load from the
5916 Global Offset Table (@GOT). */
5918 if (reload_in_progress)
5919 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5920 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5921 new = gen_rtx_CONST (Pmode, new);
5922 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5923 new = gen_const_mem (Pmode, new);
5924 set_mem_alias_set (new, ix86_GOT_alias_set ());
5926 if (reg == 0)
5927 reg = gen_reg_rtx (Pmode);
5928 emit_move_insn (reg, new);
5929 new = reg;
5932 else
5934 if (GET_CODE (addr) == CONST)
5936 addr = XEXP (addr, 0);
5938 /* We must match stuff we generated before. Assume the only
5939 unspecs that can get here are ours. Not that we could do
5940 anything with them anyway.... */
5941 if (GET_CODE (addr) == UNSPEC
5942 || (GET_CODE (addr) == PLUS
5943 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5944 return orig;
5945 gcc_assert (GET_CODE (addr) == PLUS);
5947 if (GET_CODE (addr) == PLUS)
5949 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5951 /* Check first to see if this is a constant offset from a @GOTOFF
5952 symbol reference. */
5953 if (local_symbolic_operand (op0, Pmode)
5954 && GET_CODE (op1) == CONST_INT)
5956 if (!TARGET_64BIT)
5958 if (reload_in_progress)
5959 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5960 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5961 UNSPEC_GOTOFF);
5962 new = gen_rtx_PLUS (Pmode, new, op1);
5963 new = gen_rtx_CONST (Pmode, new);
5964 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5966 if (reg != 0)
5968 emit_move_insn (reg, new);
5969 new = reg;
5972 else
5974 if (INTVAL (op1) < -16*1024*1024
5975 || INTVAL (op1) >= 16*1024*1024)
5976 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5979 else
5981 base = legitimize_pic_address (XEXP (addr, 0), reg);
5982 new = legitimize_pic_address (XEXP (addr, 1),
5983 base == reg ? NULL_RTX : reg);
5985 if (GET_CODE (new) == CONST_INT)
5986 new = plus_constant (base, INTVAL (new));
5987 else
5989 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5991 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5992 new = XEXP (new, 1);
5994 new = gen_rtx_PLUS (Pmode, base, new);
5999 return new;
6002 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6004 static rtx
6005 get_thread_pointer (int to_reg)
6007 rtx tp, reg, insn;
6009 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6010 if (!to_reg)
6011 return tp;
6013 reg = gen_reg_rtx (Pmode);
6014 insn = gen_rtx_SET (VOIDmode, reg, tp);
6015 insn = emit_insn (insn);
6017 return reg;
6020 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6021 false if we expect this to be used for a memory address and true if
6022 we expect to load the address into a register. */
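/* In outline: GLOBAL_DYNAMIC and LOCAL_DYNAMIC emit a call to the TLS
   resolver (gen_tls_global_dynamic_* / gen_tls_local_dynamic_base_*),
   INITIAL_EXEC loads the thread-pointer offset from the GOT (@GOTTPOFF,
   @GOTNTPOFF or @INDNTPOFF) and adds the thread pointer, and LOCAL_EXEC adds
   a link-time constant offset (@TPOFF or @NTPOFF) to the thread pointer
   directly.  */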
6024 static rtx
6025 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6027 rtx dest, base, off, pic;
6028 int type;
6030 switch (model)
6032 case TLS_MODEL_GLOBAL_DYNAMIC:
6033 dest = gen_reg_rtx (Pmode);
6034 if (TARGET_64BIT)
6036 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6038 start_sequence ();
6039 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
6040 insns = get_insns ();
6041 end_sequence ();
6043 emit_libcall_block (insns, dest, rax, x);
6045 else
6046 emit_insn (gen_tls_global_dynamic_32 (dest, x));
6047 break;
6049 case TLS_MODEL_LOCAL_DYNAMIC:
6050 base = gen_reg_rtx (Pmode);
6051 if (TARGET_64BIT)
6053 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
6055 start_sequence ();
6056 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
6057 insns = get_insns ();
6058 end_sequence ();
6060 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
6061 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
6062 emit_libcall_block (insns, base, rax, note);
6064 else
6065 emit_insn (gen_tls_local_dynamic_base_32 (base));
6067 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
6068 off = gen_rtx_CONST (Pmode, off);
6070 return gen_rtx_PLUS (Pmode, base, off);
6072 case TLS_MODEL_INITIAL_EXEC:
6073 if (TARGET_64BIT)
6075 pic = NULL;
6076 type = UNSPEC_GOTNTPOFF;
6078 else if (flag_pic)
6080 if (reload_in_progress)
6081 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6082 pic = pic_offset_table_rtx;
6083 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
6085 else if (!TARGET_GNU_TLS)
6087 pic = gen_reg_rtx (Pmode);
6088 emit_insn (gen_set_got (pic));
6089 type = UNSPEC_GOTTPOFF;
6091 else
6093 pic = NULL;
6094 type = UNSPEC_INDNTPOFF;
6097 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
6098 off = gen_rtx_CONST (Pmode, off);
6099 if (pic)
6100 off = gen_rtx_PLUS (Pmode, pic, off);
6101 off = gen_const_mem (Pmode, off);
6102 set_mem_alias_set (off, ix86_GOT_alias_set ());
6104 if (TARGET_64BIT || TARGET_GNU_TLS)
6106 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6107 off = force_reg (Pmode, off);
6108 return gen_rtx_PLUS (Pmode, base, off);
6110 else
6112 base = get_thread_pointer (true);
6113 dest = gen_reg_rtx (Pmode);
6114 emit_insn (gen_subsi3 (dest, base, off));
6116 break;
6118 case TLS_MODEL_LOCAL_EXEC:
6119 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
6120 (TARGET_64BIT || TARGET_GNU_TLS)
6121 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
6122 off = gen_rtx_CONST (Pmode, off);
6124 if (TARGET_64BIT || TARGET_GNU_TLS)
6126 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6127 return gen_rtx_PLUS (Pmode, base, off);
6129 else
6131 base = get_thread_pointer (true);
6132 dest = gen_reg_rtx (Pmode);
6133 emit_insn (gen_subsi3 (dest, base, off));
6135 break;
6137 default:
6138 gcc_unreachable ();
6141 return dest;
6144 /* Try machine-dependent ways of modifying an illegitimate address
6145 to be legitimate. If we find one, return the new, valid address.
6146 This macro is used in only one place: `memory_address' in explow.c.
6148 OLDX is the address as it was before break_out_memory_refs was called.
6149 In some cases it is useful to look at this to decide what needs to be done.
6151 MODE and WIN are passed so that this macro can use
6152 GO_IF_LEGITIMATE_ADDRESS.
6154 It is always safe for this macro to do nothing. It exists to recognize
6155 opportunities to optimize the output.
6157 For the 80386, we handle X+REG by loading X into a register R and
6158 using R+REG. R will go in a general reg and indexing will be used.
6159 However, if REG is a broken-out memory address or multiplication,
6160 nothing needs to be done because REG can certainly go in a general reg.
6162 When -fpic is used, special handling is needed for symbolic references.
6163 See comments by legitimize_pic_address in i386.c for details. */
6166 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
6168 int changed = 0;
6169 unsigned log;
6171 if (TARGET_DEBUG_ADDR)
6173 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
6174 GET_MODE_NAME (mode));
6175 debug_rtx (x);
6178 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
6179 if (log)
6180 return legitimize_tls_address (x, log, false);
6181 if (GET_CODE (x) == CONST
6182 && GET_CODE (XEXP (x, 0)) == PLUS
6183 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6184 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
6186 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
6187 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
6190 if (flag_pic && SYMBOLIC_CONST (x))
6191 return legitimize_pic_address (x, 0);
6193 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
6194 if (GET_CODE (x) == ASHIFT
6195 && GET_CODE (XEXP (x, 1)) == CONST_INT
6196 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
6198 changed = 1;
6199 log = INTVAL (XEXP (x, 1));
6200 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
6201 GEN_INT (1 << log));
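/* For example, (ashift (reg) (const_int 3)) becomes (mult (reg) (const_int 8)),
   which can then be matched as an index scaled by 8.  */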
6204 if (GET_CODE (x) == PLUS)
6206 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
6208 if (GET_CODE (XEXP (x, 0)) == ASHIFT
6209 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6210 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
6212 changed = 1;
6213 log = INTVAL (XEXP (XEXP (x, 0), 1));
6214 XEXP (x, 0) = gen_rtx_MULT (Pmode,
6215 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
6216 GEN_INT (1 << log));
6219 if (GET_CODE (XEXP (x, 1)) == ASHIFT
6220 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
6221 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
6223 changed = 1;
6224 log = INTVAL (XEXP (XEXP (x, 1), 1));
6225 XEXP (x, 1) = gen_rtx_MULT (Pmode,
6226 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
6227 GEN_INT (1 << log));
6230 /* Put multiply first if it isn't already. */
6231 if (GET_CODE (XEXP (x, 1)) == MULT)
6233 rtx tmp = XEXP (x, 0);
6234 XEXP (x, 0) = XEXP (x, 1);
6235 XEXP (x, 1) = tmp;
6236 changed = 1;
6239 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
6240 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
6241 created by virtual register instantiation, register elimination, and
6242 similar optimizations. */
6243 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
6245 changed = 1;
6246 x = gen_rtx_PLUS (Pmode,
6247 gen_rtx_PLUS (Pmode, XEXP (x, 0),
6248 XEXP (XEXP (x, 1), 0)),
6249 XEXP (XEXP (x, 1), 1));
6252 /* Canonicalize
6253 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
6254 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
6255 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
6256 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6257 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
6258 && CONSTANT_P (XEXP (x, 1)))
6260 rtx constant;
6261 rtx other = NULL_RTX;
6263 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6265 constant = XEXP (x, 1);
6266 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6268 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6270 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6271 other = XEXP (x, 1);
6273 else
6274 constant = 0;
6276 if (constant)
6278 changed = 1;
6279 x = gen_rtx_PLUS (Pmode,
6280 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6281 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6282 plus_constant (other, INTVAL (constant)));
6286 if (changed && legitimate_address_p (mode, x, FALSE))
6287 return x;
6289 if (GET_CODE (XEXP (x, 0)) == MULT)
6291 changed = 1;
6292 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6295 if (GET_CODE (XEXP (x, 1)) == MULT)
6297 changed = 1;
6298 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6301 if (changed
6302 && GET_CODE (XEXP (x, 1)) == REG
6303 && GET_CODE (XEXP (x, 0)) == REG)
6304 return x;
6306 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6308 changed = 1;
6309 x = legitimize_pic_address (x, 0);
6312 if (changed && legitimate_address_p (mode, x, FALSE))
6313 return x;
6315 if (GET_CODE (XEXP (x, 0)) == REG)
6317 rtx temp = gen_reg_rtx (Pmode);
6318 rtx val = force_operand (XEXP (x, 1), temp);
6319 if (val != temp)
6320 emit_move_insn (temp, val);
6322 XEXP (x, 1) = temp;
6323 return x;
6326 else if (GET_CODE (XEXP (x, 1)) == REG)
6328 rtx temp = gen_reg_rtx (Pmode);
6329 rtx val = force_operand (XEXP (x, 0), temp);
6330 if (val != temp)
6331 emit_move_insn (temp, val);
6333 XEXP (x, 0) = temp;
6334 return x;
6338 return x;
6341 /* Print an integer constant expression in assembler syntax. Addition
6342 and subtraction are the only arithmetic that may appear in these
6343 expressions. FILE is the stdio stream to write to, X is the rtx, and
6344 CODE is the operand print code from the output string. */
6346 static void
6347 output_pic_addr_const (FILE *file, rtx x, int code)
6349 char buf[256];
6351 switch (GET_CODE (x))
6353 case PC:
6354 gcc_assert (flag_pic);
6355 putc ('.', file);
6356 break;
6358 case SYMBOL_REF:
6359 assemble_name (file, XSTR (x, 0));
6360 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6361 fputs ("@PLT", file);
6362 break;
6364 case LABEL_REF:
6365 x = XEXP (x, 0);
6366 /* FALLTHRU */
6367 case CODE_LABEL:
6368 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6369 assemble_name (asm_out_file, buf);
6370 break;
6372 case CONST_INT:
6373 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6374 break;
6376 case CONST:
6377 /* This used to output parentheses around the expression,
6378 but that does not work on the 386 (either ATT or BSD assembler). */
6379 output_pic_addr_const (file, XEXP (x, 0), code);
6380 break;
6382 case CONST_DOUBLE:
6383 if (GET_MODE (x) == VOIDmode)
6385 /* We can use %d if the number is <32 bits and positive. */
6386 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6387 fprintf (file, "0x%lx%08lx",
6388 (unsigned long) CONST_DOUBLE_HIGH (x),
6389 (unsigned long) CONST_DOUBLE_LOW (x));
6390 else
6391 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6393 else
6394 /* We can't handle floating point constants;
6395 PRINT_OPERAND must handle them. */
6396 output_operand_lossage ("floating constant misused");
6397 break;
6399 case PLUS:
6400 /* Some assemblers need integer constants to appear first. */
6401 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6403 output_pic_addr_const (file, XEXP (x, 0), code);
6404 putc ('+', file);
6405 output_pic_addr_const (file, XEXP (x, 1), code);
6407 else
6409 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6410 output_pic_addr_const (file, XEXP (x, 1), code);
6411 putc ('+', file);
6412 output_pic_addr_const (file, XEXP (x, 0), code);
6414 break;
6416 case MINUS:
6417 if (!TARGET_MACHO)
6418 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6419 output_pic_addr_const (file, XEXP (x, 0), code);
6420 putc ('-', file);
6421 output_pic_addr_const (file, XEXP (x, 1), code);
6422 if (!TARGET_MACHO)
6423 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6424 break;
6426 case UNSPEC:
6427 gcc_assert (XVECLEN (x, 0) == 1);
6428 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6429 switch (XINT (x, 1))
6431 case UNSPEC_GOT:
6432 fputs ("@GOT", file);
6433 break;
6434 case UNSPEC_GOTOFF:
6435 fputs ("@GOTOFF", file);
6436 break;
6437 case UNSPEC_GOTPCREL:
6438 fputs ("@GOTPCREL(%rip)", file);
6439 break;
6440 case UNSPEC_GOTTPOFF:
6441 /* FIXME: This might be @TPOFF in Sun ld too. */
6442 fputs ("@GOTTPOFF", file);
6443 break;
6444 case UNSPEC_TPOFF:
6445 fputs ("@TPOFF", file);
6446 break;
6447 case UNSPEC_NTPOFF:
6448 if (TARGET_64BIT)
6449 fputs ("@TPOFF", file);
6450 else
6451 fputs ("@NTPOFF", file);
6452 break;
6453 case UNSPEC_DTPOFF:
6454 fputs ("@DTPOFF", file);
6455 break;
6456 case UNSPEC_GOTNTPOFF:
6457 if (TARGET_64BIT)
6458 fputs ("@GOTTPOFF(%rip)", file);
6459 else
6460 fputs ("@GOTNTPOFF", file);
6461 break;
6462 case UNSPEC_INDNTPOFF:
6463 fputs ("@INDNTPOFF", file);
6464 break;
6465 default:
6466 output_operand_lossage ("invalid UNSPEC as operand");
6467 break;
6469 break;
6471 default:
6472 output_operand_lossage ("invalid expression as operand");
6476 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6477 We need to emit DTP-relative relocations. */
6479 static void
6480 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6482 fputs (ASM_LONG, file);
6483 output_addr_const (file, x);
6484 fputs ("@DTPOFF", file);
6485 switch (size)
6487 case 4:
6488 break;
6489 case 8:
6490 fputs (", 0", file);
6491 break;
6492 default:
6493 gcc_unreachable ();
6497 /* In the name of slightly smaller debug output, and to cater to
6498 general assembler lossage, recognize PIC+GOTOFF and turn it back
6499 into a direct symbol reference. */
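/* For example, (plus pic_offset_table_rtx (const (unspec [sym] UNSPEC_GOTOFF)))
   is turned back into plain sym so that debug output can refer to the symbol
   directly.  */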
6501 static rtx
6502 ix86_delegitimize_address (rtx orig_x)
6504 rtx x = orig_x, y;
6506 if (GET_CODE (x) == MEM)
6507 x = XEXP (x, 0);
6509 if (TARGET_64BIT)
6511 if (GET_CODE (x) != CONST
6512 || GET_CODE (XEXP (x, 0)) != UNSPEC
6513 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6514 || GET_CODE (orig_x) != MEM)
6515 return orig_x;
6516 return XVECEXP (XEXP (x, 0), 0, 0);
6519 if (GET_CODE (x) != PLUS
6520 || GET_CODE (XEXP (x, 1)) != CONST)
6521 return orig_x;
6523 if (GET_CODE (XEXP (x, 0)) == REG
6524 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6525 /* %ebx + GOT/GOTOFF */
6526 y = NULL;
6527 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6529 /* %ebx + %reg * scale + GOT/GOTOFF */
6530 y = XEXP (x, 0);
6531 if (GET_CODE (XEXP (y, 0)) == REG
6532 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6533 y = XEXP (y, 1);
6534 else if (GET_CODE (XEXP (y, 1)) == REG
6535 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6536 y = XEXP (y, 0);
6537 else
6538 return orig_x;
6539 if (GET_CODE (y) != REG
6540 && GET_CODE (y) != MULT
6541 && GET_CODE (y) != ASHIFT)
6542 return orig_x;
6544 else
6545 return orig_x;
6547 x = XEXP (XEXP (x, 1), 0);
6548 if (GET_CODE (x) == UNSPEC
6549 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6550 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6552 if (y)
6553 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6554 return XVECEXP (x, 0, 0);
6557 if (GET_CODE (x) == PLUS
6558 && GET_CODE (XEXP (x, 0)) == UNSPEC
6559 && GET_CODE (XEXP (x, 1)) == CONST_INT
6560 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6561 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6562 && GET_CODE (orig_x) != MEM)))
6564 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6565 if (y)
6566 return gen_rtx_PLUS (Pmode, y, x);
6567 return x;
6570 return orig_x;
6573 static void
6574 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6575 int fp, FILE *file)
6577 const char *suffix;
6579 if (mode == CCFPmode || mode == CCFPUmode)
6581 enum rtx_code second_code, bypass_code;
6582 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6583 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
6584 code = ix86_fp_compare_code_to_integer (code);
6585 mode = CCmode;
6587 if (reverse)
6588 code = reverse_condition (code);
6590 switch (code)
6592 case EQ:
6593 suffix = "e";
6594 break;
6595 case NE:
6596 suffix = "ne";
6597 break;
6598 case GT:
6599 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
6600 suffix = "g";
6601 break;
6602 case GTU:
6603 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6604 Those same assemblers have the same but opposite lossage on cmov. */
6605 gcc_assert (mode == CCmode);
6606 suffix = fp ? "nbe" : "a";
6607 break;
6608 case LT:
6609 switch (mode)
6611 case CCNOmode:
6612 case CCGOCmode:
6613 suffix = "s";
6614 break;
6616 case CCmode:
6617 case CCGCmode:
6618 suffix = "l";
6619 break;
6621 default:
6622 gcc_unreachable ();
6624 break;
6625 case LTU:
6626 gcc_assert (mode == CCmode);
6627 suffix = "b";
6628 break;
6629 case GE:
6630 switch (mode)
6632 case CCNOmode:
6633 case CCGOCmode:
6634 suffix = "ns";
6635 break;
6637 case CCmode:
6638 case CCGCmode:
6639 suffix = "ge";
6640 break;
6642 default:
6643 gcc_unreachable ();
6645 break;
6646 case GEU:
6647 /* ??? As above. */
6648 gcc_assert (mode == CCmode);
6649 suffix = fp ? "nb" : "ae";
6650 break;
6651 case LE:
6652 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
6653 suffix = "le";
6654 break;
6655 case LEU:
6656 gcc_assert (mode == CCmode);
6657 suffix = "be";
6658 break;
6659 case UNORDERED:
6660 suffix = fp ? "u" : "p";
6661 break;
6662 case ORDERED:
6663 suffix = fp ? "nu" : "np";
6664 break;
6665 default:
6666 gcc_unreachable ();
6668 fputs (suffix, file);
6671 /* Print the name of register X to FILE based on its machine mode and number.
6672 If CODE is 'w', pretend the mode is HImode.
6673 If CODE is 'b', pretend the mode is QImode.
6674 If CODE is 'k', pretend the mode is SImode.
6675 If CODE is 'q', pretend the mode is DImode.
6676 If CODE is 'h', pretend the reg is the 'high' byte register.
6677 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
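/* For example, with X being (reg 0): code 'q' prints "rax" (on 64-bit
   targets), 'k' prints "eax", 'w' prints "ax", 'b' prints "al" and 'h'
   prints "ah", each preceded by '%' in AT&T output; the REX registers
   instead print as r8..r15 with a b/w/d suffix for the narrower sizes.  */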
6679 void
6680 print_reg (rtx x, int code, FILE *file)
6682 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
6683 && REGNO (x) != FRAME_POINTER_REGNUM
6684 && REGNO (x) != FLAGS_REG
6685 && REGNO (x) != FPSR_REG);
6687 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6688 putc ('%', file);
6690 if (code == 'w' || MMX_REG_P (x))
6691 code = 2;
6692 else if (code == 'b')
6693 code = 1;
6694 else if (code == 'k')
6695 code = 4;
6696 else if (code == 'q')
6697 code = 8;
6698 else if (code == 'y')
6699 code = 3;
6700 else if (code == 'h')
6701 code = 0;
6702 else
6703 code = GET_MODE_SIZE (GET_MODE (x));
6705 /* Irritatingly, AMD extended registers use a different naming convention
6706 from the normal registers. */
6707 if (REX_INT_REG_P (x))
6709 gcc_assert (TARGET_64BIT);
6710 switch (code)
6712 case 0:
6713 error ("extended registers have no high halves");
6714 break;
6715 case 1:
6716 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6717 break;
6718 case 2:
6719 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6720 break;
6721 case 4:
6722 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6723 break;
6724 case 8:
6725 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6726 break;
6727 default:
6728 error ("unsupported operand size for extended register");
6729 break;
6731 return;
6733 switch (code)
6735 case 3:
6736 if (STACK_TOP_P (x))
6738 fputs ("st(0)", file);
6739 break;
6741 /* FALLTHRU */
6742 case 8:
6743 case 4:
6744 case 12:
6745 if (! ANY_FP_REG_P (x))
6746 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6747 /* FALLTHRU */
6748 case 16:
6749 case 2:
6750 normal:
6751 fputs (hi_reg_name[REGNO (x)], file);
6752 break;
6753 case 1:
6754 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6755 goto normal;
6756 fputs (qi_reg_name[REGNO (x)], file);
6757 break;
6758 case 0:
6759 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6760 goto normal;
6761 fputs (qi_high_reg_name[REGNO (x)], file);
6762 break;
6763 default:
6764 gcc_unreachable ();
6768 /* Locate some local-dynamic symbol still in use by this function
6769 so that we can print its name in some tls_local_dynamic_base
6770 pattern. */
6772 static const char *
6773 get_some_local_dynamic_name (void)
6775 rtx insn;
6777 if (cfun->machine->some_ld_name)
6778 return cfun->machine->some_ld_name;
6780 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6781 if (INSN_P (insn)
6782 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6783 return cfun->machine->some_ld_name;
6785 gcc_unreachable ();
6788 static int
6789 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6791 rtx x = *px;
6793 if (GET_CODE (x) == SYMBOL_REF
6794 && local_dynamic_symbolic_operand (x, Pmode))
6796 cfun->machine->some_ld_name = XSTR (x, 0);
6797 return 1;
6800 return 0;
6803 /* Meaning of CODE:
6804 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6805 C -- print opcode suffix for set/cmov insn.
6806 c -- like C, but print reversed condition
6807 F,f -- likewise, but for floating-point.
6808 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6809 otherwise nothing
6810 R -- print the prefix for register names.
6811 z -- print the opcode suffix for the size of the current operand.
6812 * -- print a star (in certain assembler syntax)
6813 A -- print an absolute memory reference.
6814 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6815 s -- print a shift double count, followed by the assembler's argument
6816 delimiter.
6817 b -- print the QImode name of the register for the indicated operand.
6818 %b0 would print %al if operands[0] is reg 0.
6819 w -- likewise, print the HImode name of the register.
6820 k -- likewise, print the SImode name of the register.
6821 q -- likewise, print the DImode name of the register.
6822 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6823 y -- print "st(0)" instead of "st" as a register.
6824 D -- print condition for SSE cmp instruction.
6825 P -- if PIC, print an @PLT suffix.
6826 X -- don't print any sort of PIC '@' suffix for a symbol.
6827 & -- print some in-use local-dynamic symbol name.
6828 H -- print a memory address offset by 8; used for sse high-parts
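/* For instance, with operands[0] being (reg:QI 0), %b0 prints %al and %w0
   prints %ax, while %C1 applied to a comparison prints the set/cmov condition
   suffix ("e", "ne", "g", ...) computed by put_condition_code above.  */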
6831 void
6832 print_operand (FILE *file, rtx x, int code)
6834 if (code)
6836 switch (code)
6838 case '*':
6839 if (ASSEMBLER_DIALECT == ASM_ATT)
6840 putc ('*', file);
6841 return;
6843 case '&':
6844 assemble_name (file, get_some_local_dynamic_name ());
6845 return;
6847 case 'A':
6848 switch (ASSEMBLER_DIALECT)
6850 case ASM_ATT:
6851 putc ('*', file);
6852 break;
6854 case ASM_INTEL:
6855 /* Intel syntax. For absolute addresses, registers should not
6856 be surrounded by brackets. */
6857 if (GET_CODE (x) != REG)
6859 putc ('[', file);
6860 PRINT_OPERAND (file, x, 0);
6861 putc (']', file);
6862 return;
6864 break;
6866 default:
6867 gcc_unreachable ();
6870 PRINT_OPERAND (file, x, 0);
6871 return;
6874 case 'L':
6875 if (ASSEMBLER_DIALECT == ASM_ATT)
6876 putc ('l', file);
6877 return;
6879 case 'W':
6880 if (ASSEMBLER_DIALECT == ASM_ATT)
6881 putc ('w', file);
6882 return;
6884 case 'B':
6885 if (ASSEMBLER_DIALECT == ASM_ATT)
6886 putc ('b', file);
6887 return;
6889 case 'Q':
6890 if (ASSEMBLER_DIALECT == ASM_ATT)
6891 putc ('l', file);
6892 return;
6894 case 'S':
6895 if (ASSEMBLER_DIALECT == ASM_ATT)
6896 putc ('s', file);
6897 return;
6899 case 'T':
6900 if (ASSEMBLER_DIALECT == ASM_ATT)
6901 putc ('t', file);
6902 return;
6904 case 'z':
6905 /* 387 opcodes don't get size suffixes if the operands are
6906 registers. */
6907 if (STACK_REG_P (x))
6908 return;
6910 /* Likewise if using Intel opcodes. */
6911 if (ASSEMBLER_DIALECT == ASM_INTEL)
6912 return;
6914 /* Derive the opcode size suffix from the size of the operand. */
6915 switch (GET_MODE_SIZE (GET_MODE (x)))
6917 case 2:
6918 #ifdef HAVE_GAS_FILDS_FISTS
6919 putc ('s', file);
6920 #endif
6921 return;
6923 case 4:
6924 if (GET_MODE (x) == SFmode)
6926 putc ('s', file);
6927 return;
6929 else
6930 putc ('l', file);
6931 return;
6933 case 12:
6934 case 16:
6935 putc ('t', file);
6936 return;
6938 case 8:
6939 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6941 #ifdef GAS_MNEMONICS
6942 putc ('q', file);
6943 #else
6944 putc ('l', file);
6945 putc ('l', file);
6946 #endif
6948 else
6949 putc ('l', file);
6950 return;
6952 default:
6953 gcc_unreachable ();
6956 case 'b':
6957 case 'w':
6958 case 'k':
6959 case 'q':
6960 case 'h':
6961 case 'y':
6962 case 'X':
6963 case 'P':
6964 break;
6966 case 's':
6967 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6969 PRINT_OPERAND (file, x, 0);
6970 putc (',', file);
6972 return;
6974 case 'D':
6975 /* Little bit of braindamage here. The SSE compare instructions
6976 use completely different names for the comparisons than the
6977 fp conditional moves do. */
6978 switch (GET_CODE (x))
6980 case EQ:
6981 case UNEQ:
6982 fputs ("eq", file);
6983 break;
6984 case LT:
6985 case UNLT:
6986 fputs ("lt", file);
6987 break;
6988 case LE:
6989 case UNLE:
6990 fputs ("le", file);
6991 break;
6992 case UNORDERED:
6993 fputs ("unord", file);
6994 break;
6995 case NE:
6996 case LTGT:
6997 fputs ("neq", file);
6998 break;
6999 case UNGE:
7000 case GE:
7001 fputs ("nlt", file);
7002 break;
7003 case UNGT:
7004 case GT:
7005 fputs ("nle", file);
7006 break;
7007 case ORDERED:
7008 fputs ("ord", file);
7009 break;
7010 default:
7011 gcc_unreachable ();
7013 return;
7014 case 'O':
7015 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7016 if (ASSEMBLER_DIALECT == ASM_ATT)
7018 switch (GET_MODE (x))
7020 case HImode: putc ('w', file); break;
7021 case SImode:
7022 case SFmode: putc ('l', file); break;
7023 case DImode:
7024 case DFmode: putc ('q', file); break;
7025 default: gcc_unreachable ();
7027 putc ('.', file);
7029 #endif
7030 return;
7031 case 'C':
7032 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
7033 return;
7034 case 'F':
7035 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7036 if (ASSEMBLER_DIALECT == ASM_ATT)
7037 putc ('.', file);
7038 #endif
7039 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
7040 return;
7042 /* Like above, but reverse condition */
7043 case 'c':
7044 /* Check to see if argument to %c is really a constant
7045 and not a condition code which needs to be reversed. */
7046 if (!COMPARISON_P (x))
7048 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
7049 return;
7051 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
7052 return;
7053 case 'f':
7054 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7055 if (ASSEMBLER_DIALECT == ASM_ATT)
7056 putc ('.', file);
7057 #endif
7058 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
7059 return;
7061 case 'H':
7062 /* It doesn't actually matter what mode we use here, as we're
7063 only going to use this for printing. */
7064 x = adjust_address_nv (x, DImode, 8);
7065 break;
7067 case '+':
7069 rtx x;
7071 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
7072 return;
7074 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
7075 if (x)
7077 int pred_val = INTVAL (XEXP (x, 0));
7079 if (pred_val < REG_BR_PROB_BASE * 45 / 100
7080 || pred_val > REG_BR_PROB_BASE * 55 / 100)
7082 int taken = pred_val > REG_BR_PROB_BASE / 2;
7083 int cputaken = final_forward_branch_p (current_output_insn) == 0;
7085 /* Emit hints only when the default branch prediction
7086 heuristics would fail. */
7087 if (taken != cputaken)
7089 /* We use the 3e (DS) prefix for taken branches and
7090 the 2e (CS) prefix for not-taken branches. */
7091 if (taken)
7092 fputs ("ds ; ", file);
7093 else
7094 fputs ("cs ; ", file);
7098 return;
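/* So a forward branch that is predicted taken (which the CPU's static
   prediction would get wrong) comes out as, e.g., "ds ; jne .L3" instead of
   a plain "jne .L3" (label name illustrative).  */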
7100 default:
7101 output_operand_lossage ("invalid operand code '%c'", code);
7105 if (GET_CODE (x) == REG)
7106 print_reg (x, code, file);
7108 else if (GET_CODE (x) == MEM)
7110 /* No `byte ptr' prefix for call instructions. */
7111 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
7113 const char * size;
7114 switch (GET_MODE_SIZE (GET_MODE (x)))
7116 case 1: size = "BYTE"; break;
7117 case 2: size = "WORD"; break;
7118 case 4: size = "DWORD"; break;
7119 case 8: size = "QWORD"; break;
7120 case 12: size = "XWORD"; break;
7121 case 16: size = "XMMWORD"; break;
7122 default:
7123 gcc_unreachable ();
7126 /* Check for explicit size override (codes 'b', 'w' and 'k') */
7127 if (code == 'b')
7128 size = "BYTE";
7129 else if (code == 'w')
7130 size = "WORD";
7131 else if (code == 'k')
7132 size = "DWORD";
7134 fputs (size, file);
7135 fputs (" PTR ", file);
7138 x = XEXP (x, 0);
7139 /* Avoid (%rip) for call operands. */
7140 if (CONSTANT_ADDRESS_P (x) && code == 'P'
7141 && GET_CODE (x) != CONST_INT)
7142 output_addr_const (file, x);
7143 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
7144 output_operand_lossage ("invalid constraints for operand");
7145 else
7146 output_address (x);
7149 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
7151 REAL_VALUE_TYPE r;
7152 long l;
7154 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7155 REAL_VALUE_TO_TARGET_SINGLE (r, l);
7157 if (ASSEMBLER_DIALECT == ASM_ATT)
7158 putc ('$', file);
7159 fprintf (file, "0x%08lx", l);
7162 /* These float cases don't actually occur as immediate operands. */
7163 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
7165 char dstr[30];
7167 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7168 fprintf (file, "%s", dstr);
7171 else if (GET_CODE (x) == CONST_DOUBLE
7172 && GET_MODE (x) == XFmode)
7174 char dstr[30];
7176 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7177 fprintf (file, "%s", dstr);
7180 else
7182 /* We have patterns that allow zero sets of memory, for instance.
7183 In 64-bit mode, we should probably support all 8-byte vectors,
7184 since we can in fact encode that into an immediate. */
7185 if (GET_CODE (x) == CONST_VECTOR)
7187 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
7188 x = const0_rtx;
7191 if (code != 'P')
7193 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
7195 if (ASSEMBLER_DIALECT == ASM_ATT)
7196 putc ('$', file);
7198 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
7199 || GET_CODE (x) == LABEL_REF)
7201 if (ASSEMBLER_DIALECT == ASM_ATT)
7202 putc ('$', file);
7203 else
7204 fputs ("OFFSET FLAT:", file);
7207 if (GET_CODE (x) == CONST_INT)
7208 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7209 else if (flag_pic)
7210 output_pic_addr_const (file, x, code);
7211 else
7212 output_addr_const (file, x);
7216 /* Print a memory operand whose address is ADDR. */
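/* In AT&T syntax the result has the shape disp(base,index,scale), e.g.
   "-4(%ebp)" or "16(%eax,%ebx,4)"; in Intel syntax the same addresses print
   as "[ebp-4]" and "[eax+16+ebx*4]".  */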
7218 void
7219 print_operand_address (FILE *file, rtx addr)
7221 struct ix86_address parts;
7222 rtx base, index, disp;
7223 int scale;
7224 int ok = ix86_decompose_address (addr, &parts);
7226 gcc_assert (ok);
7228 base = parts.base;
7229 index = parts.index;
7230 disp = parts.disp;
7231 scale = parts.scale;
7233 switch (parts.seg)
7235 case SEG_DEFAULT:
7236 break;
7237 case SEG_FS:
7238 case SEG_GS:
7239 if (USER_LABEL_PREFIX[0] == 0)
7240 putc ('%', file);
7241 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
7242 break;
7243 default:
7244 gcc_unreachable ();
7247 if (!base && !index)
7249 /* Displacement-only addresses require special attention. */
7251 if (GET_CODE (disp) == CONST_INT)
7253 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
7255 if (USER_LABEL_PREFIX[0] == 0)
7256 putc ('%', file);
7257 fputs ("ds:", file);
7259 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
7261 else if (flag_pic)
7262 output_pic_addr_const (file, disp, 0);
7263 else
7264 output_addr_const (file, disp);
7266 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
7267 if (TARGET_64BIT
7268 && ((GET_CODE (disp) == SYMBOL_REF
7269 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
7270 || GET_CODE (disp) == LABEL_REF
7271 || (GET_CODE (disp) == CONST
7272 && GET_CODE (XEXP (disp, 0)) == PLUS
7273 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
7274 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
7275 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
7276 fputs ("(%rip)", file);
7278 else
7280 if (ASSEMBLER_DIALECT == ASM_ATT)
7282 if (disp)
7284 if (flag_pic)
7285 output_pic_addr_const (file, disp, 0);
7286 else if (GET_CODE (disp) == LABEL_REF)
7287 output_asm_label (disp);
7288 else
7289 output_addr_const (file, disp);
7292 putc ('(', file);
7293 if (base)
7294 print_reg (base, 0, file);
7295 if (index)
7297 putc (',', file);
7298 print_reg (index, 0, file);
7299 if (scale != 1)
7300 fprintf (file, ",%d", scale);
7302 putc (')', file);
7304 else
7306 rtx offset = NULL_RTX;
7308 if (disp)
7310 /* Pull out the offset of a symbol; print any symbol itself. */
7311 if (GET_CODE (disp) == CONST
7312 && GET_CODE (XEXP (disp, 0)) == PLUS
7313 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7315 offset = XEXP (XEXP (disp, 0), 1);
7316 disp = gen_rtx_CONST (VOIDmode,
7317 XEXP (XEXP (disp, 0), 0));
7320 if (flag_pic)
7321 output_pic_addr_const (file, disp, 0);
7322 else if (GET_CODE (disp) == LABEL_REF)
7323 output_asm_label (disp);
7324 else if (GET_CODE (disp) == CONST_INT)
7325 offset = disp;
7326 else
7327 output_addr_const (file, disp);
7330 putc ('[', file);
7331 if (base)
7333 print_reg (base, 0, file);
7334 if (offset)
7336 if (INTVAL (offset) >= 0)
7337 putc ('+', file);
7338 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7341 else if (offset)
7342 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7343 else
7344 putc ('0', file);
7346 if (index)
7348 putc ('+', file);
7349 print_reg (index, 0, file);
7350 if (scale != 1)
7351 fprintf (file, "*%d", scale);
7353 putc (']', file);
7358 bool
7359 output_addr_const_extra (FILE *file, rtx x)
7361 rtx op;
7363 if (GET_CODE (x) != UNSPEC)
7364 return false;
7366 op = XVECEXP (x, 0, 0);
7367 switch (XINT (x, 1))
7369 case UNSPEC_GOTTPOFF:
7370 output_addr_const (file, op);
7371 /* FIXME: This might be @TPOFF in Sun ld. */
7372 fputs ("@GOTTPOFF", file);
7373 break;
7374 case UNSPEC_TPOFF:
7375 output_addr_const (file, op);
7376 fputs ("@TPOFF", file);
7377 break;
7378 case UNSPEC_NTPOFF:
7379 output_addr_const (file, op);
7380 if (TARGET_64BIT)
7381 fputs ("@TPOFF", file);
7382 else
7383 fputs ("@NTPOFF", file);
7384 break;
7385 case UNSPEC_DTPOFF:
7386 output_addr_const (file, op);
7387 fputs ("@DTPOFF", file);
7388 break;
7389 case UNSPEC_GOTNTPOFF:
7390 output_addr_const (file, op);
7391 if (TARGET_64BIT)
7392 fputs ("@GOTTPOFF(%rip)", file);
7393 else
7394 fputs ("@GOTNTPOFF", file);
7395 break;
7396 case UNSPEC_INDNTPOFF:
7397 output_addr_const (file, op);
7398 fputs ("@INDNTPOFF", file);
7399 break;
7401 default:
7402 return false;
7405 return true;
7408 /* Split one or more DImode RTL references into pairs of SImode
7409 references. The RTL can be REG, offsettable MEM, integer constant, or
7410 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7411 split and "num" is its length. lo_half and hi_half are output arrays
7412 that parallel "operands". */
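/* E.g. a DImode (reg) is split into SImode subregs at byte offsets 0 and 4,
   and a DImode (mem) into two SImode memory references at offsets +0 and +4,
   so the low and high halves can be handled separately.  */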
7414 void
7415 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7417 while (num--)
7419 rtx op = operands[num];
7421 /* simplify_subreg refuses to split volatile memory addresses,
7422 but we still have to handle them. */
7423 if (GET_CODE (op) == MEM)
7425 lo_half[num] = adjust_address (op, SImode, 0);
7426 hi_half[num] = adjust_address (op, SImode, 4);
7428 else
7430 lo_half[num] = simplify_gen_subreg (SImode, op,
7431 GET_MODE (op) == VOIDmode
7432 ? DImode : GET_MODE (op), 0);
7433 hi_half[num] = simplify_gen_subreg (SImode, op,
7434 GET_MODE (op) == VOIDmode
7435 ? DImode : GET_MODE (op), 4);
7439 /* Split one or more TImode RTL references into pairs of DImode
7440 references. The RTL can be REG, offsettable MEM, integer constant, or
7441 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
7442 split and "num" is its length. lo_half and hi_half are output arrays
7443 that parallel "operands". */
7445 void
7446 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7448 while (num--)
7450 rtx op = operands[num];
7452 /* simplify_subreg refuses to split volatile memory addresses, but we
7453 still have to handle them. */
7454 if (GET_CODE (op) == MEM)
7456 lo_half[num] = adjust_address (op, DImode, 0);
7457 hi_half[num] = adjust_address (op, DImode, 8);
7459 else
7461 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7462 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
7467 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7468 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7469 is the expression of the binary operation. The output may either be
7470 emitted here, or returned to the caller, like all output_* functions.
7472 There is no guarantee that the operands are the same mode, as they
7473 might be within FLOAT or FLOAT_EXTEND expressions. */
7475 #ifndef SYSV386_COMPAT
7476 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7477 wants to fix the assemblers because that causes incompatibility
7478 with gcc. No-one wants to fix gcc because that causes
7479 incompatibility with assemblers... You can use the option of
7480 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7481 #define SYSV386_COMPAT 1
7482 #endif
7484 const char *
7485 output_387_binary_op (rtx insn, rtx *operands)
7487 static char buf[30];
7488 const char *p;
7489 const char *ssep;
7490 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
7492 #ifdef ENABLE_CHECKING
7493 /* Even if we do not want to check the inputs, this documents the input
7494 constraints, which helps in understanding the following code. */
7495 if (STACK_REG_P (operands[0])
7496 && ((REG_P (operands[1])
7497 && REGNO (operands[0]) == REGNO (operands[1])
7498 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7499 || (REG_P (operands[2])
7500 && REGNO (operands[0]) == REGNO (operands[2])
7501 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7502 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7503 ; /* ok */
7504 else
7505 gcc_assert (is_sse);
7506 #endif
7508 switch (GET_CODE (operands[3]))
7510 case PLUS:
7511 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7512 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7513 p = "fiadd";
7514 else
7515 p = "fadd";
7516 ssep = "add";
7517 break;
7519 case MINUS:
7520 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7521 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7522 p = "fisub";
7523 else
7524 p = "fsub";
7525 ssep = "sub";
7526 break;
7528 case MULT:
7529 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7530 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7531 p = "fimul";
7532 else
7533 p = "fmul";
7534 ssep = "mul";
7535 break;
7537 case DIV:
7538 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7539 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7540 p = "fidiv";
7541 else
7542 p = "fdiv";
7543 ssep = "div";
7544 break;
7546 default:
7547 gcc_unreachable ();
7550 if (is_sse)
7552 strcpy (buf, ssep);
7553 if (GET_MODE (operands[0]) == SFmode)
7554 strcat (buf, "ss\t{%2, %0|%0, %2}");
7555 else
7556 strcat (buf, "sd\t{%2, %0|%0, %2}");
7557 return buf;
7559 strcpy (buf, p);
7561 switch (GET_CODE (operands[3]))
7563 case MULT:
7564 case PLUS:
7565 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7567 rtx temp = operands[2];
7568 operands[2] = operands[1];
7569 operands[1] = temp;
7572 /* We know operands[0] == operands[1]. */
7574 if (GET_CODE (operands[2]) == MEM)
7576 p = "%z2\t%2";
7577 break;
7580 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7582 if (STACK_TOP_P (operands[0]))
7583 /* How is it that we are storing to a dead operand[2]?
7584 Well, presumably operands[1] is dead too. We can't
7585 store the result to st(0) as st(0) gets popped on this
7586 instruction. Instead store to operands[2] (which I
7587 think has to be st(1)). st(1) will be popped later.
7588 gcc <= 2.8.1 didn't have this check and generated
7589 assembly code that the Unixware assembler rejected. */
7590 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7591 else
7592 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7593 break;
7596 if (STACK_TOP_P (operands[0]))
7597 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7598 else
7599 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7600 break;
7602 case MINUS:
7603 case DIV:
7604 if (GET_CODE (operands[1]) == MEM)
7606 p = "r%z1\t%1";
7607 break;
7610 if (GET_CODE (operands[2]) == MEM)
7612 p = "%z2\t%2";
7613 break;
7616 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7618 #if SYSV386_COMPAT
7619 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7620 derived assemblers, confusingly reverse the direction of
7621 the operation for fsub{r} and fdiv{r} when the
7622 destination register is not st(0). The Intel assembler
7623 doesn't have this brain damage. Read !SYSV386_COMPAT to
7624 figure out what the hardware really does. */
7625 if (STACK_TOP_P (operands[0]))
7626 p = "{p\t%0, %2|rp\t%2, %0}";
7627 else
7628 p = "{rp\t%2, %0|p\t%0, %2}";
7629 #else
7630 if (STACK_TOP_P (operands[0]))
7631 /* As above for fmul/fadd, we can't store to st(0). */
7632 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7633 else
7634 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7635 #endif
7636 break;
7639 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7641 #if SYSV386_COMPAT
7642 if (STACK_TOP_P (operands[0]))
7643 p = "{rp\t%0, %1|p\t%1, %0}";
7644 else
7645 p = "{p\t%1, %0|rp\t%0, %1}";
7646 #else
7647 if (STACK_TOP_P (operands[0]))
7648 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7649 else
7650 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7651 #endif
7652 break;
7655 if (STACK_TOP_P (operands[0]))
7657 if (STACK_TOP_P (operands[1]))
7658 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7659 else
7660 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7661 break;
7663 else if (STACK_TOP_P (operands[1]))
7665 #if SYSV386_COMPAT
7666 p = "{\t%1, %0|r\t%0, %1}";
7667 #else
7668 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7669 #endif
7671 else
7673 #if SYSV386_COMPAT
7674 p = "{r\t%2, %0|\t%0, %2}";
7675 #else
7676 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7677 #endif
7679 break;
7681 default:
7682 gcc_unreachable ();
7685 strcat (buf, p);
7686 return buf;
7689 /* Return the mode needed for ENTITY in INSN for the optimize_mode_switching pass. */
7692 ix86_mode_needed (int entity, rtx insn)
7694 enum attr_i387_cw mode;
7696 /* The mode UNINITIALIZED is used to store the control word after a
7697 function call or ASM pattern. The mode ANY specifies that the function
7698 has no requirements on the control word and makes no changes to the
7699 bits we are interested in. */
7701 if (CALL_P (insn)
7702 || (NONJUMP_INSN_P (insn)
7703 && (asm_noperands (PATTERN (insn)) >= 0
7704 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
7705 return I387_CW_UNINITIALIZED;
7707 if (recog_memoized (insn) < 0)
7708 return I387_CW_ANY;
7710 mode = get_attr_i387_cw (insn);
7712 switch (entity)
7714 case I387_TRUNC:
7715 if (mode == I387_CW_TRUNC)
7716 return mode;
7717 break;
7719 case I387_FLOOR:
7720 if (mode == I387_CW_FLOOR)
7721 return mode;
7722 break;
7724 case I387_CEIL:
7725 if (mode == I387_CW_CEIL)
7726 return mode;
7727 break;
7729 case I387_MASK_PM:
7730 if (mode == I387_CW_MASK_PM)
7731 return mode;
7732 break;
7734 default:
7735 gcc_unreachable ();
7738 return I387_CW_ANY;
7741 /* Output code to initialize the control word copies used by the trunc?f?i and
7742 rounding patterns. The current control word is saved, and a copy
7743 modified according to MODE is stored in the stack slot for that mode. */
7745 void
7746 emit_i387_cw_initialization (int mode)
7748 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
7749 rtx new_mode;
7751 int slot;
7753 rtx reg = gen_reg_rtx (HImode);
7755 emit_insn (gen_x86_fnstcw_1 (stored_mode));
7756 emit_move_insn (reg, stored_mode);
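/* The rounding behavior lives in bits 10-11 (the RC field) of the x87
   control word: 00 = round to nearest, 01 = round down, 10 = round up,
   11 = truncate toward zero; bit 5 is the precision exception mask.
   The constants 0x0c00, 0x0400, 0x0800 and 0x0020 used below select
   exactly those bits.  */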
7758 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
7760 switch (mode)
7762 case I387_CW_TRUNC:
7763 /* round toward zero (truncate) */
7764 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7765 slot = SLOT_CW_TRUNC;
7766 break;
7768 case I387_CW_FLOOR:
7769 /* round down toward -oo */
7770 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7771 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7772 slot = SLOT_CW_FLOOR;
7773 break;
7775 case I387_CW_CEIL:
7776 /* round up toward +oo */
7777 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7778 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7779 slot = SLOT_CW_CEIL;
7780 break;
7782 case I387_CW_MASK_PM:
7783 /* mask precision exception for nearbyint() */
7784 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7785 slot = SLOT_CW_MASK_PM;
7786 break;
7788 default:
7789 gcc_unreachable ();
7792 else
7794 switch (mode)
7796 case I387_CW_TRUNC:
7797 /* round toward zero (truncate) */
7798 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7799 slot = SLOT_CW_TRUNC;
7800 break;
7802 case I387_CW_FLOOR:
7803 /* round down toward -oo */
7804 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7805 slot = SLOT_CW_FLOOR;
7806 break;
7808 case I387_CW_CEIL:
7809 /* round up toward +oo */
7810 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7811 slot = SLOT_CW_CEIL;
7812 break;
7814 case I387_CW_MASK_PM:
7815 /* mask precision exception for nearbyint() */
7816 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7817 slot = SLOT_CW_MASK_PM;
7818 break;
7820 default:
7821 gcc_unreachable ();
7825 gcc_assert (slot < MAX_386_STACK_LOCALS);
7827 new_mode = assign_386_stack_local (HImode, slot);
7828 emit_move_insn (new_mode, reg);
7831 /* Output code for INSN to convert a float to a signed int. OPERANDS
7832 are the insn operands. The output may be [HSD]Imode and the input
7833 operand may be [SDX]Fmode. */
7835 const char *
7836 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
7838 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7839 int dimode_p = GET_MODE (operands[0]) == DImode;
7840 int round_mode = get_attr_i387_cw (insn);
7842 /* Jump through a hoop or two for DImode, since the hardware has no
7843 non-popping instruction. We used to do this a different way, but
7844 that was somewhat fragile and broke with post-reload splitters. */
7845 if ((dimode_p || fisttp) && !stack_top_dies)
7846 output_asm_insn ("fld\t%y1", operands);
7848 gcc_assert (STACK_TOP_P (operands[1]));
7849 gcc_assert (GET_CODE (operands[0]) == MEM);
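/* With SSE3's fisttp the value can be truncated directly, regardless of
   the rounding mode currently loaded in the control word.  Otherwise we
   wrap the fist/fistp in a pair of fldcw instructions; as the templates
   below assume, operands[3] holds the truncating control word and
   operands[2] the saved original one.  */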
7851 if (fisttp)
7852 output_asm_insn ("fisttp%z0\t%0", operands);
7853 else
7855 if (round_mode != I387_CW_ANY)
7856 output_asm_insn ("fldcw\t%3", operands);
7857 if (stack_top_dies || dimode_p)
7858 output_asm_insn ("fistp%z0\t%0", operands);
7859 else
7860 output_asm_insn ("fist%z0\t%0", operands);
7861 if (round_mode != I387_CW_ANY)
7862 output_asm_insn ("fldcw\t%2", operands);
7865 return "";
7868 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7869 should be used. UNORDERED_P is true when fucom should be used. */
7871 const char *
7872 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7874 int stack_top_dies;
7875 rtx cmp_op0, cmp_op1;
7876 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
7878 if (eflags_p)
7880 cmp_op0 = operands[0];
7881 cmp_op1 = operands[1];
7883 else
7885 cmp_op0 = operands[1];
7886 cmp_op1 = operands[2];
7889 if (is_sse)
7891 if (GET_MODE (operands[0]) == SFmode)
7892 if (unordered_p)
7893 return "ucomiss\t{%1, %0|%0, %1}";
7894 else
7895 return "comiss\t{%1, %0|%0, %1}";
7896 else
7897 if (unordered_p)
7898 return "ucomisd\t{%1, %0|%0, %1}";
7899 else
7900 return "comisd\t{%1, %0|%0, %1}";
7903 gcc_assert (STACK_TOP_P (cmp_op0));
7905 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7907 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7909 if (stack_top_dies)
7911 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7912 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7914 else
7915 return "ftst\n\tfnstsw\t%0";
7918 if (STACK_REG_P (cmp_op1)
7919 && stack_top_dies
7920 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7921 && REGNO (cmp_op1) != FIRST_STACK_REG)
7923 /* If the top of the 387 stack dies, and the other operand is
7924 also a stack register that dies, then this must be a
7925 `fcompp' float compare. */
7927 if (eflags_p)
7929 /* There is no double popping fcomi variant. Fortunately,
7930 eflags is immune from the fstp's cc clobbering. */
7931 if (unordered_p)
7932 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7933 else
7934 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7935 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7937 else
7939 if (unordered_p)
7940 return "fucompp\n\tfnstsw\t%0";
7941 else
7942 return "fcompp\n\tfnstsw\t%0";
7945 else
7947 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
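/* The mask computed below mirrors this encoding: bit 3 is eflags_p,
   bit 2 says the second operand is an integer in memory, bit 1 is
   unordered_p and bit 0 is stack_top_dies, so alt[] is simply indexed
   by that 4-bit value.  */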
7949 static const char * const alt[16] =
7951 "fcom%z2\t%y2\n\tfnstsw\t%0",
7952 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7953 "fucom%z2\t%y2\n\tfnstsw\t%0",
7954 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7956 "ficom%z2\t%y2\n\tfnstsw\t%0",
7957 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7958 NULL,
7959 NULL,
7961 "fcomi\t{%y1, %0|%0, %y1}",
7962 "fcomip\t{%y1, %0|%0, %y1}",
7963 "fucomi\t{%y1, %0|%0, %y1}",
7964 "fucomip\t{%y1, %0|%0, %y1}",
7966 NULL,
7967 NULL,
7968 NULL,
7969 NULL
7972 int mask;
7973 const char *ret;
7975 mask = eflags_p << 3;
7976 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
7977 mask |= unordered_p << 1;
7978 mask |= stack_top_dies;
7980 gcc_assert (mask < 16);
7981 ret = alt[mask];
7982 gcc_assert (ret);
7984 return ret;
7988 void
7989 ix86_output_addr_vec_elt (FILE *file, int value)
7991 const char *directive = ASM_LONG;
7993 #ifdef ASM_QUAD
7994 if (TARGET_64BIT)
7995 directive = ASM_QUAD;
7996 #else
7997 gcc_assert (!TARGET_64BIT);
7998 #endif
8000 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
8003 void
8004 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
8006 if (TARGET_64BIT)
8007 fprintf (file, "%s%s%d-%s%d\n",
8008 ASM_LONG, LPREFIX, value, LPREFIX, rel);
8009 else if (HAVE_AS_GOTOFF_IN_DATA)
8010 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
8011 #if TARGET_MACHO
8012 else if (TARGET_MACHO)
8014 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
8015 machopic_output_function_base_name (file);
8016 fprintf(file, "\n");
8018 #endif
8019 else
8020 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
8021 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
8024 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
8025 for the target. */
8027 void
8028 ix86_expand_clear (rtx dest)
8030 rtx tmp;
8032 /* We play register width games, which are only valid after reload. */
8033 gcc_assert (reload_completed);
8035 /* Avoid HImode and its attendant prefix byte. */
8036 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
8037 dest = gen_rtx_REG (SImode, REGNO (dest));
8039 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
8041 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
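/* "xor reg, reg" is both shorter than "mov $0, reg" (2 bytes vs. 5 for a
   32-bit register) and on most CPUs breaks the dependency on the old
   register value, but it clobbers the flags, hence the CLOBBER of the CC
   register added below.  */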
8042 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
8044 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
8045 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
8048 emit_insn (tmp);
8051 /* X is an unchanging MEM. If it is a constant pool reference, return
8052 the constant pool rtx, else NULL. */
8055 maybe_get_pool_constant (rtx x)
8057 x = ix86_delegitimize_address (XEXP (x, 0));
8059 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
8060 return get_pool_constant (x);
8062 return NULL_RTX;
8065 void
8066 ix86_expand_move (enum machine_mode mode, rtx operands[])
8068 int strict = (reload_in_progress || reload_completed);
8069 rtx op0, op1;
8070 enum tls_model model;
8072 op0 = operands[0];
8073 op1 = operands[1];
8075 if (GET_CODE (op1) == SYMBOL_REF)
8077 model = SYMBOL_REF_TLS_MODEL (op1);
8078 if (model)
8080 op1 = legitimize_tls_address (op1, model, true);
8081 op1 = force_operand (op1, op0);
8082 if (op1 == op0)
8083 return;
8086 else if (GET_CODE (op1) == CONST
8087 && GET_CODE (XEXP (op1, 0)) == PLUS
8088 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
8090 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
8091 if (model)
8093 rtx addend = XEXP (XEXP (op1, 0), 1);
8094 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
8095 op1 = force_operand (op1, NULL);
8096 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
8097 op0, 1, OPTAB_DIRECT);
8098 if (op1 == op0)
8099 return;
8103 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
8105 #if TARGET_MACHO
8106 if (MACHOPIC_PURE)
8108 rtx temp = ((reload_in_progress
8109 || ((op0 && GET_CODE (op0) == REG)
8110 && mode == Pmode))
8111 ? op0 : gen_reg_rtx (Pmode));
8112 op1 = machopic_indirect_data_reference (op1, temp);
8113 op1 = machopic_legitimize_pic_address (op1, mode,
8114 temp == op1 ? 0 : temp);
8116 else if (MACHOPIC_INDIRECT)
8117 op1 = machopic_indirect_data_reference (op1, 0);
8118 if (op0 == op1)
8119 return;
8120 #else
8121 if (GET_CODE (op0) == MEM)
8122 op1 = force_reg (Pmode, op1);
8123 else
8124 op1 = legitimize_address (op1, op1, Pmode);
8125 #endif /* TARGET_MACHO */
8127 else
8129 if (GET_CODE (op0) == MEM
8130 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
8131 || !push_operand (op0, mode))
8132 && GET_CODE (op1) == MEM)
8133 op1 = force_reg (mode, op1);
8135 if (push_operand (op0, mode)
8136 && ! general_no_elim_operand (op1, mode))
8137 op1 = copy_to_mode_reg (mode, op1);
8139 /* Force large constants in 64-bit compilation into a register
8140 to get them CSEed. */
8141 if (TARGET_64BIT && mode == DImode
8142 && immediate_operand (op1, mode)
8143 && !x86_64_zext_immediate_operand (op1, VOIDmode)
8144 && !register_operand (op0, mode)
8145 && optimize && !reload_completed && !reload_in_progress)
8146 op1 = copy_to_mode_reg (mode, op1);
8148 if (FLOAT_MODE_P (mode))
8150 /* If we are loading a floating point constant to a register,
8151 force the value to memory now, since we'll get better code
8152 out of the back end. */
8154 if (strict)
8156 else if (GET_CODE (op1) == CONST_DOUBLE)
8158 op1 = validize_mem (force_const_mem (mode, op1));
8159 if (!register_operand (op0, mode))
8161 rtx temp = gen_reg_rtx (mode);
8162 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
8163 emit_move_insn (op0, temp);
8164 return;
8170 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8173 void
8174 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
8176 rtx op0 = operands[0], op1 = operands[1];
8178 /* Force constants other than zero into memory. We do not know how
8179 the instructions used to build constants modify the upper 64 bits
8180 of the register; once we have that information we may be able
8181 to handle some of them more efficiently. */
8182 if ((reload_in_progress | reload_completed) == 0
8183 && register_operand (op0, mode)
8184 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
8185 op1 = validize_mem (force_const_mem (mode, op1));
8187 /* Make operand1 a register if it isn't already. */
8188 if (!no_new_pseudos
8189 && !register_operand (op0, mode)
8190 && !register_operand (op1, mode))
8192 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
8193 return;
8196 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8199 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
8200 straight to ix86_expand_vector_move. */
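/* Strategy sketch: when optimizing for size a plain movups/movdqu is used
   since it has the shortest encoding; for integer vectors on SSE2 movdqu
   is the natural unaligned access; otherwise the access is split into two
   halves (loadlps/loadhps, loadlpd/loadhpd and their store counterparts),
   which this code prefers to a single unaligned 128-bit access on the
   CPUs it targets.  */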
8202 void
8203 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
8205 rtx op0, op1, m;
8207 op0 = operands[0];
8208 op1 = operands[1];
8210 if (MEM_P (op1))
8212 /* If we're optimizing for size, movups is the smallest. */
8213 if (optimize_size)
8215 op0 = gen_lowpart (V4SFmode, op0);
8216 op1 = gen_lowpart (V4SFmode, op1);
8217 emit_insn (gen_sse_movups (op0, op1));
8218 return;
8221 /* ??? If we have typed data, then it would appear that using
8222 movdqu is the only way to get unaligned data loaded with
8223 integer type. */
8224 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8226 op0 = gen_lowpart (V16QImode, op0);
8227 op1 = gen_lowpart (V16QImode, op1);
8228 emit_insn (gen_sse2_movdqu (op0, op1));
8229 return;
8232 if (TARGET_SSE2 && mode == V2DFmode)
8234 rtx zero;
8236 /* When SSE registers are split into halves, we can avoid
8237 writing to the top half twice. */
8238 if (TARGET_SSE_SPLIT_REGS)
8240 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8241 zero = op0;
8243 else
8245 /* ??? Not sure about the best option for the Intel chips.
8246 The following would seem to satisfy; the register is
8247 entirely cleared, breaking the dependency chain. We
8248 then store to the upper half, with a dependency depth
8249 of one. A rumor has it that Intel recommends two movsd
8250 followed by an unpacklpd, but this is unconfirmed. And
8251 given that the dependency depth of the unpacklpd would
8252 still be one, I'm not sure why this would be better. */
8253 zero = CONST0_RTX (V2DFmode);
8256 m = adjust_address (op1, DFmode, 0);
8257 emit_insn (gen_sse2_loadlpd (op0, zero, m));
8258 m = adjust_address (op1, DFmode, 8);
8259 emit_insn (gen_sse2_loadhpd (op0, op0, m));
8261 else
8263 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
8264 emit_move_insn (op0, CONST0_RTX (mode));
8265 else
8266 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8268 if (mode != V4SFmode)
8269 op0 = gen_lowpart (V4SFmode, op0);
8270 m = adjust_address (op1, V2SFmode, 0);
8271 emit_insn (gen_sse_loadlps (op0, op0, m));
8272 m = adjust_address (op1, V2SFmode, 8);
8273 emit_insn (gen_sse_loadhps (op0, op0, m));
8276 else if (MEM_P (op0))
8278 /* If we're optimizing for size, movups is the smallest. */
8279 if (optimize_size)
8281 op0 = gen_lowpart (V4SFmode, op0);
8282 op1 = gen_lowpart (V4SFmode, op1);
8283 emit_insn (gen_sse_movups (op0, op1));
8284 return;
8287 /* ??? Similar to above, only less clear because of "typeless
8288 stores". */
8289 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
8290 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8292 op0 = gen_lowpart (V16QImode, op0);
8293 op1 = gen_lowpart (V16QImode, op1);
8294 emit_insn (gen_sse2_movdqu (op0, op1));
8295 return;
8298 if (TARGET_SSE2 && mode == V2DFmode)
8300 m = adjust_address (op0, DFmode, 0);
8301 emit_insn (gen_sse2_storelpd (m, op1));
8302 m = adjust_address (op0, DFmode, 8);
8303 emit_insn (gen_sse2_storehpd (m, op1));
8305 else
8307 if (mode != V4SFmode)
8308 op1 = gen_lowpart (V4SFmode, op1);
8309 m = adjust_address (op0, V2SFmode, 0);
8310 emit_insn (gen_sse_storelps (m, op1));
8311 m = adjust_address (op0, V2SFmode, 8);
8312 emit_insn (gen_sse_storehps (m, op1));
8315 else
8316 gcc_unreachable ();
8319 /* Expand a push in MODE. This is some mode for which we do not support
8320 proper push instructions, at least from the registers that we expect
8321 the value to live in. */
8323 void
8324 ix86_expand_push (enum machine_mode mode, rtx x)
8326 rtx tmp;
8328 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
8329 GEN_INT (-GET_MODE_SIZE (mode)),
8330 stack_pointer_rtx, 1, OPTAB_DIRECT);
8331 if (tmp != stack_pointer_rtx)
8332 emit_move_insn (stack_pointer_rtx, tmp);
8334 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
8335 emit_move_insn (tmp, x);
8338 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
8339 destination to use for the operation. If different from the true
8340 destination in operands[0], a copy operation will be required. */
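/* Most x86 integer instructions are two-address, i.e. they compute
   "dst = dst OP src", so one source must end up matching the destination
   and at most one operand may live in memory; the fixups below (swapping
   commutative operands, copying through registers) establish exactly that
   shape.  */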
8343 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
8344 rtx operands[])
8346 int matching_memory;
8347 rtx src1, src2, dst;
8349 dst = operands[0];
8350 src1 = operands[1];
8351 src2 = operands[2];
8353 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8354 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8355 && (rtx_equal_p (dst, src2)
8356 || immediate_operand (src1, mode)))
8358 rtx temp = src1;
8359 src1 = src2;
8360 src2 = temp;
8363 /* If the destination is memory, and we do not have matching source
8364 operands, do things in registers. */
8365 matching_memory = 0;
8366 if (GET_CODE (dst) == MEM)
8368 if (rtx_equal_p (dst, src1))
8369 matching_memory = 1;
8370 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8371 && rtx_equal_p (dst, src2))
8372 matching_memory = 2;
8373 else
8374 dst = gen_reg_rtx (mode);
8377 /* Both source operands cannot be in memory. */
8378 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8380 if (matching_memory != 2)
8381 src2 = force_reg (mode, src2);
8382 else
8383 src1 = force_reg (mode, src1);
8386 /* If the operation is not commutative, source 1 cannot be a constant
8387 or non-matching memory. */
8388 if ((CONSTANT_P (src1)
8389 || (!matching_memory && GET_CODE (src1) == MEM))
8390 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8391 src1 = force_reg (mode, src1);
8393 src1 = operands[1] = src1;
8394 src2 = operands[2] = src2;
8395 return dst;
8398 /* Similarly, but assume that the destination has already been
8399 set up properly. */
8401 void
8402 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
8403 enum machine_mode mode, rtx operands[])
8405 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
8406 gcc_assert (dst == operands[0]);
8409 /* Attempt to expand a binary operator. Make the expansion closer to the
8410 actual machine than just general_operand, which will allow 3 separate
8411 memory references (one output, two input) in a single insn. */
8413 void
8414 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8415 rtx operands[])
8417 rtx src1, src2, dst, op, clob;
8419 dst = ix86_fixup_binary_operands (code, mode, operands);
8420 src1 = operands[1];
8421 src2 = operands[2];
8423 /* Emit the instruction. */
8425 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8426 if (reload_in_progress)
8428 /* Reload doesn't know about the flags register, and doesn't know that
8429 it doesn't want to clobber it. We can only do this with PLUS. */
8430 gcc_assert (code == PLUS);
8431 emit_insn (op);
8433 else
8435 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8436 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8439 /* Fix up the destination if needed. */
8440 if (dst != operands[0])
8441 emit_move_insn (operands[0], dst);
8444 /* Return TRUE or FALSE depending on whether the binary operator meets the
8445 appropriate constraints. */
8448 ix86_binary_operator_ok (enum rtx_code code,
8449 enum machine_mode mode ATTRIBUTE_UNUSED,
8450 rtx operands[3])
8452 /* Both source operands cannot be in memory. */
8453 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8454 return 0;
8455 /* If the operation is not commutative, source 1 cannot be a constant. */
8456 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8457 return 0;
8458 /* If the destination is memory, we must have a matching source operand. */
8459 if (GET_CODE (operands[0]) == MEM
8460 && ! (rtx_equal_p (operands[0], operands[1])
8461 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8462 && rtx_equal_p (operands[0], operands[2]))))
8463 return 0;
8464 /* If the operation is not commutative and source 1 is memory, we must
8465 have a matching destination. */
8466 if (GET_CODE (operands[1]) == MEM
8467 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
8468 && ! rtx_equal_p (operands[0], operands[1]))
8469 return 0;
8470 return 1;
8473 /* Attempt to expand a unary operator. Make the expansion closer to the
8474 actual machine than just general_operand, which will allow 2 separate
8475 memory references (one output, one input) in a single insn. */
8477 void
8478 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8479 rtx operands[])
8481 int matching_memory;
8482 rtx src, dst, op, clob;
8484 dst = operands[0];
8485 src = operands[1];
8487 /* If the destination is memory, and we do not have matching source
8488 operands, do things in registers. */
8489 matching_memory = 0;
8490 if (MEM_P (dst))
8492 if (rtx_equal_p (dst, src))
8493 matching_memory = 1;
8494 else
8495 dst = gen_reg_rtx (mode);
8498 /* When source operand is memory, destination must match. */
8499 if (MEM_P (src) && !matching_memory)
8500 src = force_reg (mode, src);
8502 /* Emit the instruction. */
8504 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8505 if (reload_in_progress || code == NOT)
8507 /* Reload doesn't know about the flags register, and doesn't know that
8508 it doesn't want to clobber it. */
8509 gcc_assert (code == NOT);
8510 emit_insn (op);
8512 else
8514 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8515 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8518 /* Fix up the destination if needed. */
8519 if (dst != operands[0])
8520 emit_move_insn (operands[0], dst);
8523 /* Return TRUE or FALSE depending on whether the unary operator meets the
8524 appropriate constraints. */
8527 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8528 enum machine_mode mode ATTRIBUTE_UNUSED,
8529 rtx operands[2] ATTRIBUTE_UNUSED)
8531 /* If one of the operands is memory, source and destination must match. */
8532 if ((GET_CODE (operands[0]) == MEM
8533 || GET_CODE (operands[1]) == MEM)
8534 && ! rtx_equal_p (operands[0], operands[1]))
8535 return FALSE;
8536 return TRUE;
8539 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
8540 Create a mask for the sign bit in MODE for an SSE register. If VECT is
8541 true, then replicate the mask for all elements of the vector register.
8542 If INVERT is true, then create a mask excluding the sign bit. */
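/* For example, for DFmode the mask built here is 0x8000000000000000 in the
   low element (replicated to both elements of the V2DF vector when VECT is
   true), and 0x7fffffffffffffff when INVERT is true; for SFmode the same
   thing with 0x80000000 across a V4SF vector.  */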
8545 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
8547 enum machine_mode vec_mode;
8548 HOST_WIDE_INT hi, lo;
8549 int shift = 63;
8550 rtvec v;
8551 rtx mask;
8553 /* Find the sign bit, sign extended to 2*HWI. */
8554 if (mode == SFmode)
8555 lo = 0x80000000, hi = lo < 0;
8556 else if (HOST_BITS_PER_WIDE_INT >= 64)
8557 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8558 else
8559 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8561 if (invert)
8562 lo = ~lo, hi = ~hi;
8564 /* Force this value into the low part of a fp vector constant. */
8565 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8566 mask = gen_lowpart (mode, mask);
8568 if (mode == SFmode)
8570 if (vect)
8571 v = gen_rtvec (4, mask, mask, mask, mask);
8572 else
8573 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8574 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8575 vec_mode = V4SFmode;
8577 else
8579 if (vect)
8580 v = gen_rtvec (2, mask, mask);
8581 else
8582 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8583 vec_mode = V2DFmode;
8586 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
8589 /* Generate code for floating point ABS or NEG. */
8591 void
8592 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
8593 rtx operands[])
8595 rtx mask, set, use, clob, dst, src;
8596 bool matching_memory;
8597 bool use_sse = false;
8598 bool vector_mode = VECTOR_MODE_P (mode);
8599 enum machine_mode elt_mode = mode;
8601 if (vector_mode)
8603 elt_mode = GET_MODE_INNER (mode);
8604 use_sse = true;
8606 else if (TARGET_SSE_MATH)
8607 use_sse = SSE_FLOAT_MODE_P (mode);
8609 /* NEG and ABS performed with SSE use bitwise mask operations.
8610 Create the appropriate mask now. */
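/* Negation is an XOR with the sign-bit mask, absolute value an AND with
   the inverted mask, which is why the mask is built with INVERT set for
   ABS.  */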
8611 if (use_sse)
8612 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
8613 else
8615 /* When not using SSE, we don't use the mask, but prefer to keep the
8616 same general form of the insn pattern to reduce duplication when
8617 it comes time to split. */
8618 mask = const0_rtx;
8621 dst = operands[0];
8622 src = operands[1];
8624 /* If the destination is memory, and we don't have matching source
8625 operands, do things in registers. */
8626 matching_memory = false;
8627 if (MEM_P (dst))
8629 if (rtx_equal_p (dst, src))
8630 matching_memory = true;
8631 else
8632 dst = gen_reg_rtx (mode);
8634 if (MEM_P (src) && !matching_memory)
8635 src = force_reg (mode, src);
8637 if (vector_mode)
8639 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8640 set = gen_rtx_SET (VOIDmode, dst, set);
8641 emit_insn (set);
8643 else
8645 set = gen_rtx_fmt_e (code, mode, src);
8646 set = gen_rtx_SET (VOIDmode, dst, set);
8647 use = gen_rtx_USE (VOIDmode, mask);
8648 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8649 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8652 if (dst != operands[0])
8653 emit_move_insn (operands[0], dst);
8656 /* Expand a copysign operation. Special case operand 0 being a constant. */
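/* The underlying bit trick is copysign (x, y) = (x & ~signmask) | (y & signmask),
   where signmask has only the sign bit set.  NMASK below is the inverted
   mask applied to x and MASK the plain sign-bit mask applied to y.  */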
8658 void
8659 ix86_expand_copysign (rtx operands[])
8661 enum machine_mode mode, vmode;
8662 rtx dest, op0, op1, mask, nmask;
8664 dest = operands[0];
8665 op0 = operands[1];
8666 op1 = operands[2];
8668 mode = GET_MODE (dest);
8669 vmode = mode == SFmode ? V4SFmode : V2DFmode;
8671 if (GET_CODE (op0) == CONST_DOUBLE)
8673 rtvec v;
8675 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
8676 op0 = simplify_unary_operation (ABS, mode, op0, mode);
8678 if (op0 == CONST0_RTX (mode))
8679 op0 = CONST0_RTX (vmode);
8680 else
8682 if (mode == SFmode)
8683 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
8684 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8685 else
8686 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
8687 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
8690 mask = ix86_build_signbit_mask (mode, 0, 0);
8692 if (mode == SFmode)
8693 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
8694 else
8695 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
8697 else
8699 nmask = ix86_build_signbit_mask (mode, 0, 1);
8700 mask = ix86_build_signbit_mask (mode, 0, 0);
8702 if (mode == SFmode)
8703 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
8704 else
8705 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
8709 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
8710 be a constant, and so has already been expanded into a vector constant. */
8712 void
8713 ix86_split_copysign_const (rtx operands[])
8715 enum machine_mode mode, vmode;
8716 rtx dest, op0, op1, mask, x;
8718 dest = operands[0];
8719 op0 = operands[1];
8720 op1 = operands[2];
8721 mask = operands[3];
8723 mode = GET_MODE (dest);
8724 vmode = GET_MODE (mask);
8726 dest = simplify_gen_subreg (vmode, dest, mode, 0);
8727 x = gen_rtx_AND (vmode, dest, mask);
8728 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8730 if (op0 != CONST0_RTX (vmode))
8732 x = gen_rtx_IOR (vmode, dest, op0);
8733 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8737 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
8738 so we have to do two masks. */
8740 void
8741 ix86_split_copysign_var (rtx operands[])
8743 enum machine_mode mode, vmode;
8744 rtx dest, scratch, op0, op1, mask, nmask, x;
8746 dest = operands[0];
8747 scratch = operands[1];
8748 op0 = operands[2];
8749 op1 = operands[3];
8750 nmask = operands[4];
8751 mask = operands[5];
8753 mode = GET_MODE (dest);
8754 vmode = GET_MODE (mask);
8756 if (rtx_equal_p (op0, op1))
8758 /* Shouldn't happen often (it's useless, obviously), but when it does
8759 we'd generate incorrect code if we continue below. */
8760 emit_move_insn (dest, op0);
8761 return;
8764 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
8766 gcc_assert (REGNO (op1) == REGNO (scratch));
8768 x = gen_rtx_AND (vmode, scratch, mask);
8769 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8771 dest = mask;
8772 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8773 x = gen_rtx_NOT (vmode, dest);
8774 x = gen_rtx_AND (vmode, x, op0);
8775 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8777 else
8779 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
8781 x = gen_rtx_AND (vmode, scratch, mask);
8783 else /* alternative 2,4 */
8785 gcc_assert (REGNO (mask) == REGNO (scratch));
8786 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
8787 x = gen_rtx_AND (vmode, scratch, op1);
8789 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8791 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
8793 dest = simplify_gen_subreg (vmode, op0, mode, 0);
8794 x = gen_rtx_AND (vmode, dest, nmask);
8796 else /* alternative 3,4 */
8798 gcc_assert (REGNO (nmask) == REGNO (dest));
8799 dest = nmask;
8800 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8801 x = gen_rtx_AND (vmode, dest, op0);
8803 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8806 x = gen_rtx_IOR (vmode, dest, scratch);
8807 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8810 /* Return TRUE or FALSE depending on whether the first SET in INSN
8811 has source and destination with matching CC modes, and whether the
8812 CC mode is at least as constrained as REQ_MODE. */
8815 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
8817 rtx set;
8818 enum machine_mode set_mode;
8820 set = PATTERN (insn);
8821 if (GET_CODE (set) == PARALLEL)
8822 set = XVECEXP (set, 0, 0);
8823 gcc_assert (GET_CODE (set) == SET);
8824 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
8826 set_mode = GET_MODE (SET_DEST (set));
8827 switch (set_mode)
8829 case CCNOmode:
8830 if (req_mode != CCNOmode
8831 && (req_mode != CCmode
8832 || XEXP (SET_SRC (set), 1) != const0_rtx))
8833 return 0;
8834 break;
8835 case CCmode:
8836 if (req_mode == CCGCmode)
8837 return 0;
8838 /* FALLTHRU */
8839 case CCGCmode:
8840 if (req_mode == CCGOCmode || req_mode == CCNOmode)
8841 return 0;
8842 /* FALLTHRU */
8843 case CCGOCmode:
8844 if (req_mode == CCZmode)
8845 return 0;
8846 /* FALLTHRU */
8847 case CCZmode:
8848 break;
8850 default:
8851 gcc_unreachable ();
8854 return (GET_MODE (SET_SRC (set)) == set_mode);
8857 /* Generate insn patterns to do an integer compare of OPERANDS. */
8859 static rtx
8860 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
8862 enum machine_mode cmpmode;
8863 rtx tmp, flags;
8865 cmpmode = SELECT_CC_MODE (code, op0, op1);
8866 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
8868 /* This is very simple, but making the interface the same as in the
8869 FP case makes the rest of the code easier. */
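/* I.e. we emit (set (reg:CC FLAGS_REG) (compare op0 op1)) and hand back
   something of the form (code (reg:CC FLAGS_REG) (const_int 0)) for the
   consumer to wrap in a jump, set or cmov.  */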
8870 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
8871 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
8873 /* Return the test that should be put into the flags user, i.e.
8874 the bcc, scc, or cmov instruction. */
8875 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
8878 /* Figure out whether to use ordered or unordered fp comparisons.
8879 Return the appropriate mode to use. */
8881 enum machine_mode
8882 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
8884 /* ??? In order to make all comparisons reversible, we do all comparisons
8885 non-trapping when compiling for IEEE. Once gcc is able to distinguish
8886 all forms of trapping and nontrapping comparisons, we can make inequality
8887 comparisons trapping again, since it results in better code when using
8888 FCOM based compares. */
8889 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
8892 enum machine_mode
8893 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
8895 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8896 return ix86_fp_compare_mode (code);
8897 switch (code)
8899 /* Only zero flag is needed. */
8900 case EQ: /* ZF=0 */
8901 case NE: /* ZF!=0 */
8902 return CCZmode;
8903 /* Codes needing carry flag. */
8904 case GEU: /* CF=0 */
8905 case GTU: /* CF=0 & ZF=0 */
8906 case LTU: /* CF=1 */
8907 case LEU: /* CF=1 | ZF=1 */
8908 return CCmode;
8909 /* Codes possibly doable only with sign flag when
8910 comparing against zero. */
8911 case GE: /* SF=OF or SF=0 */
8912 case LT: /* SF<>OF or SF=1 */
8913 if (op1 == const0_rtx)
8914 return CCGOCmode;
8915 else
8916 /* For other cases the carry flag is not required. */
8917 return CCGCmode;
8918 /* Codes doable only with the sign flag when comparing
8919 against zero, but we lack a jump instruction for them,
8920 so we need to use relational tests against the overflow
8921 flag, which thus needs to be zero. */
8922 case GT: /* ZF=0 & SF=OF */
8923 case LE: /* ZF=1 | SF<>OF */
8924 if (op1 == const0_rtx)
8925 return CCNOmode;
8926 else
8927 return CCGCmode;
8928 /* The strcmp pattern does (use flags) and combine may ask us for the
8929 proper mode. */
8930 case USE:
8931 return CCmode;
8932 default:
8933 gcc_unreachable ();
8937 /* Return the fixed registers used for condition codes. */
8939 static bool
8940 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
8942 *p1 = FLAGS_REG;
8943 *p2 = FPSR_REG;
8944 return true;
8947 /* If two condition code modes are compatible, return a condition code
8948 mode which is compatible with both. Otherwise, return
8949 VOIDmode. */
8951 static enum machine_mode
8952 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
8954 if (m1 == m2)
8955 return m1;
8957 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
8958 return VOIDmode;
8960 if ((m1 == CCGCmode && m2 == CCGOCmode)
8961 || (m1 == CCGOCmode && m2 == CCGCmode))
8962 return CCGCmode;
8964 switch (m1)
8966 default:
8967 gcc_unreachable ();
8969 case CCmode:
8970 case CCGCmode:
8971 case CCGOCmode:
8972 case CCNOmode:
8973 case CCZmode:
8974 switch (m2)
8976 default:
8977 return VOIDmode;
8979 case CCmode:
8980 case CCGCmode:
8981 case CCGOCmode:
8982 case CCNOmode:
8983 case CCZmode:
8984 return CCmode;
8987 case CCFPmode:
8988 case CCFPUmode:
8989 /* These are only compatible with themselves, which we already
8990 checked above. */
8991 return VOIDmode;
8995 /* Return true if we should use an FCOMI instruction for this fp comparison. */
8998 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
9000 enum rtx_code swapped_code = swap_condition (code);
9001 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
9002 || (ix86_fp_comparison_cost (swapped_code)
9003 == ix86_fp_comparison_fcomi_cost (swapped_code)));
9006 /* Swap, force into registers, or otherwise massage the two operands
9007 to a fp comparison. The operands are updated in place; the new
9008 comparison code is returned. */
9010 static enum rtx_code
9011 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
9013 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
9014 rtx op0 = *pop0, op1 = *pop1;
9015 enum machine_mode op_mode = GET_MODE (op0);
9016 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
9018 /* All of the unordered compare instructions only work on registers.
9019 The same is true of the fcomi compare instructions. The same is
9020 true of the XFmode compare instructions if not comparing with
9021 zero (ftst insn is used in this case). */
9023 if (!is_sse
9024 && (fpcmp_mode == CCFPUmode
9025 || (op_mode == XFmode
9026 && ! (standard_80387_constant_p (op0) == 1
9027 || standard_80387_constant_p (op1) == 1))
9028 || ix86_use_fcomi_compare (code)))
9030 op0 = force_reg (op_mode, op0);
9031 op1 = force_reg (op_mode, op1);
9033 else
9035 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
9036 things around if they appear profitable, otherwise force op0
9037 into a register. */
9039 if (standard_80387_constant_p (op0) == 0
9040 || (GET_CODE (op0) == MEM
9041 && ! (standard_80387_constant_p (op1) == 0
9042 || GET_CODE (op1) == MEM)))
9044 rtx tmp;
9045 tmp = op0, op0 = op1, op1 = tmp;
9046 code = swap_condition (code);
9049 if (GET_CODE (op0) != REG)
9050 op0 = force_reg (op_mode, op0);
9052 if (CONSTANT_P (op1))
9054 int tmp = standard_80387_constant_p (op1);
9055 if (tmp == 0)
9056 op1 = validize_mem (force_const_mem (op_mode, op1));
9057 else if (tmp == 1)
9059 if (TARGET_CMOVE)
9060 op1 = force_reg (op_mode, op1);
9062 else
9063 op1 = force_reg (op_mode, op1);
9067 /* Try to rearrange the comparison to make it cheaper. */
9068 if (ix86_fp_comparison_cost (code)
9069 > ix86_fp_comparison_cost (swap_condition (code))
9070 && (GET_CODE (op1) == REG || !no_new_pseudos))
9072 rtx tmp;
9073 tmp = op0, op0 = op1, op1 = tmp;
9074 code = swap_condition (code);
9075 if (GET_CODE (op0) != REG)
9076 op0 = force_reg (op_mode, op0);
9079 *pop0 = op0;
9080 *pop1 = op1;
9081 return code;
9084 /* Convert the comparison codes we use to represent an FP comparison to the
9085 integer code that will result in a proper branch. Return UNKNOWN if no such
9086 code is available. */
9088 enum rtx_code
9089 ix86_fp_compare_code_to_integer (enum rtx_code code)
9091 switch (code)
9093 case GT:
9094 return GTU;
9095 case GE:
9096 return GEU;
9097 case ORDERED:
9098 case UNORDERED:
9099 return code;
9100 break;
9101 case UNEQ:
9102 return EQ;
9103 break;
9104 case UNLT:
9105 return LTU;
9106 break;
9107 case UNLE:
9108 return LEU;
9109 break;
9110 case LTGT:
9111 return NE;
9112 break;
9113 default:
9114 return UNKNOWN;
9118 /* Split comparison code CODE into comparisons we can do using branch
9119 instructions. BYPASS_CODE is the comparison code for a branch that will
9120 branch around FIRST_CODE and SECOND_CODE. If one of the branches
9121 is not required, its value is set to UNKNOWN.
9122 We never require more than two branches. */
9124 void
9125 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
9126 enum rtx_code *first_code,
9127 enum rtx_code *second_code)
9129 *first_code = code;
9130 *bypass_code = UNKNOWN;
9131 *second_code = UNKNOWN;
9133 /* The fcomi comparison sets flags as follows:
9135 cmp ZF PF CF
9136 > 0 0 0
9137 < 0 0 1
9138 = 1 0 0
9139 un 1 1 1 */
9141 switch (code)
9143 case GT: /* GTU - CF=0 & ZF=0 */
9144 case GE: /* GEU - CF=0 */
9145 case ORDERED: /* PF=0 */
9146 case UNORDERED: /* PF=1 */
9147 case UNEQ: /* EQ - ZF=1 */
9148 case UNLT: /* LTU - CF=1 */
9149 case UNLE: /* LEU - CF=1 | ZF=1 */
9150 case LTGT: /* EQ - ZF=0 */
9151 break;
9152 case LT: /* LTU - CF=1 - fails on unordered */
9153 *first_code = UNLT;
9154 *bypass_code = UNORDERED;
9155 break;
9156 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
9157 *first_code = UNLE;
9158 *bypass_code = UNORDERED;
9159 break;
9160 case EQ: /* EQ - ZF=1 - fails on unordered */
9161 *first_code = UNEQ;
9162 *bypass_code = UNORDERED;
9163 break;
9164 case NE: /* NE - ZF=0 - fails on unordered */
9165 *first_code = LTGT;
9166 *second_code = UNORDERED;
9167 break;
9168 case UNGE: /* GEU - CF=0 - fails on unordered */
9169 *first_code = GE;
9170 *second_code = UNORDERED;
9171 break;
9172 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
9173 *first_code = GT;
9174 *second_code = UNORDERED;
9175 break;
9176 default:
9177 gcc_unreachable ();
9179 if (!TARGET_IEEE_FP)
9181 *second_code = UNKNOWN;
9182 *bypass_code = UNKNOWN;
9186 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
9187 All of the following functions use the number of instructions as a cost metric.
9188 In the future this should be tweaked to compute bytes for optimize_size and
9189 take into account the performance of various instructions on various CPUs. */
9190 static int
9191 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
9193 if (!TARGET_IEEE_FP)
9194 return 4;
9195 /* The cost of code output by ix86_expand_fp_compare. */
9196 switch (code)
9198 case UNLE:
9199 case UNLT:
9200 case LTGT:
9201 case GT:
9202 case GE:
9203 case UNORDERED:
9204 case ORDERED:
9205 case UNEQ:
9206 return 4;
9207 break;
9208 case LT:
9209 case NE:
9210 case EQ:
9211 case UNGE:
9212 return 5;
9213 break;
9214 case LE:
9215 case UNGT:
9216 return 6;
9217 break;
9218 default:
9219 gcc_unreachable ();
9223 /* Return the cost of a comparison done using the fcomi operation.
9224 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9225 static int
9226 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
9228 enum rtx_code bypass_code, first_code, second_code;
9229 /* Return an arbitrarily high cost when the instruction is not supported - this
9230 prevents gcc from using it. */
9231 if (!TARGET_CMOVE)
9232 return 1024;
9233 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9234 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
9237 /* Return the cost of a comparison done using the sahf operation.
9238 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9239 static int
9240 ix86_fp_comparison_sahf_cost (enum rtx_code code)
9242 enum rtx_code bypass_code, first_code, second_code;
9243 /* Return an arbitrarily high cost when the instruction is not preferred - this
9244 prevents gcc from using it. */
9245 if (!TARGET_USE_SAHF && !optimize_size)
9246 return 1024;
9247 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9248 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
9251 /* Compute cost of the comparison done using any method.
9252 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9253 static int
9254 ix86_fp_comparison_cost (enum rtx_code code)
9256 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9257 int min;
9259 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9260 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9262 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9263 if (min > sahf_cost)
9264 min = sahf_cost;
9265 if (min > fcomi_cost)
9266 min = fcomi_cost;
9267 return min;
9270 /* Generate insn patterns to do a floating point compare of OPERANDS. */
9272 static rtx
9273 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9274 rtx *second_test, rtx *bypass_test)
9276 enum machine_mode fpcmp_mode, intcmp_mode;
9277 rtx tmp, tmp2;
9278 int cost = ix86_fp_comparison_cost (code);
9279 enum rtx_code bypass_code, first_code, second_code;
9281 fpcmp_mode = ix86_fp_compare_mode (code);
9282 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9284 if (second_test)
9285 *second_test = NULL_RTX;
9286 if (bypass_test)
9287 *bypass_test = NULL_RTX;
9289 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9291 /* Do fcomi/sahf based test when profitable. */
9292 if ((bypass_code == UNKNOWN || bypass_test)
9293 && (second_code == UNKNOWN || second_test)
9294 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9296 if (TARGET_CMOVE)
9298 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9299 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9300 tmp);
9301 emit_insn (tmp);
9303 else
9305 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9306 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9307 if (!scratch)
9308 scratch = gen_reg_rtx (HImode);
9309 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9310 emit_insn (gen_x86_sahf_1 (scratch));
9313 /* The FP codes work out to act like unsigned. */
9314 intcmp_mode = fpcmp_mode;
9315 code = first_code;
9316 if (bypass_code != UNKNOWN)
9317 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9318 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9319 const0_rtx);
9320 if (second_code != UNKNOWN)
9321 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9322 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9323 const0_rtx);
9325 else
9327 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9328 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9329 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9330 if (!scratch)
9331 scratch = gen_reg_rtx (HImode);
9332 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9334 /* In the unordered case, we have to check C2 for NaN's, which
9335 doesn't happen to work out to anything nice combination-wise.
9336 So do some bit twiddling on the value we've got in AH to come
9337 up with an appropriate set of condition codes. */
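/* After the fnstsw, the interesting FPU condition bits end up in AH:
   C0 is bit 0 (0x01), C2 is bit 2 (0x04) and C3 is bit 6 (0x40), so the
   constants used below such as 0x45 (C3|C2|C0), 0x05 (C2|C0) and
   0x44 (C3|C2) are simply combinations of those bits.  */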
9339 intcmp_mode = CCNOmode;
9340 switch (code)
9342 case GT:
9343 case UNGT:
9344 if (code == GT || !TARGET_IEEE_FP)
9346 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9347 code = EQ;
9349 else
9351 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9352 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9353 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9354 intcmp_mode = CCmode;
9355 code = GEU;
9357 break;
9358 case LT:
9359 case UNLT:
9360 if (code == LT && TARGET_IEEE_FP)
9362 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9363 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9364 intcmp_mode = CCmode;
9365 code = EQ;
9367 else
9369 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9370 code = NE;
9372 break;
9373 case GE:
9374 case UNGE:
9375 if (code == GE || !TARGET_IEEE_FP)
9377 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9378 code = EQ;
9380 else
9382 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9383 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9384 GEN_INT (0x01)));
9385 code = NE;
9387 break;
9388 case LE:
9389 case UNLE:
9390 if (code == LE && TARGET_IEEE_FP)
9392 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9393 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9394 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9395 intcmp_mode = CCmode;
9396 code = LTU;
9398 else
9400 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9401 code = NE;
9403 break;
9404 case EQ:
9405 case UNEQ:
9406 if (code == EQ && TARGET_IEEE_FP)
9408 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9409 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9410 intcmp_mode = CCmode;
9411 code = EQ;
9413 else
9415 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9416 code = NE;
9417 break;
9419 break;
9420 case NE:
9421 case LTGT:
9422 if (code == NE && TARGET_IEEE_FP)
9424 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9425 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9426 GEN_INT (0x40)));
9427 code = NE;
9429 else
9431 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9432 code = EQ;
9434 break;
9436 case UNORDERED:
9437 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9438 code = NE;
9439 break;
9440 case ORDERED:
9441 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9442 code = EQ;
9443 break;
9445 default:
9446 gcc_unreachable ();
9450 /* Return the test that should be put into the flags user, i.e.
9451 the bcc, scc, or cmov instruction. */
9452 return gen_rtx_fmt_ee (code, VOIDmode,
9453 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9454 const0_rtx);
9458 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9460 rtx op0, op1, ret;
9461 op0 = ix86_compare_op0;
9462 op1 = ix86_compare_op1;
9464 if (second_test)
9465 *second_test = NULL_RTX;
9466 if (bypass_test)
9467 *bypass_test = NULL_RTX;
9469 if (ix86_compare_emitted)
9471 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9472 ix86_compare_emitted = NULL_RTX;
9474 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9475 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9476 second_test, bypass_test);
9477 else
9478 ret = ix86_expand_int_compare (code, op0, op1);
9480 return ret;
9483 /* Return true if the CODE will result in nontrivial jump sequence. */
9484 bool
9485 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9487 enum rtx_code bypass_code, first_code, second_code;
9488 if (!TARGET_CMOVE)
9489 return true;
9490 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9491 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9494 void
9495 ix86_expand_branch (enum rtx_code code, rtx label)
9497 rtx tmp;
9499 switch (GET_MODE (ix86_compare_op0))
9501 case QImode:
9502 case HImode:
9503 case SImode:
9504 simple:
9505 tmp = ix86_expand_compare (code, NULL, NULL);
9506 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9507 gen_rtx_LABEL_REF (VOIDmode, label),
9508 pc_rtx);
9509 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9510 return;
9512 case SFmode:
9513 case DFmode:
9514 case XFmode:
9516 rtvec vec;
9517 int use_fcomi;
9518 enum rtx_code bypass_code, first_code, second_code;
9520 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9521 &ix86_compare_op1);
9523 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9525 /* Check whether we will use the natural sequence with one jump. If
9526 so, we can expand the jump early. Otherwise delay expansion by
9527 creating a compound insn so as not to confuse the optimizers. */
9528 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9529 && TARGET_CMOVE)
9531 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9532 gen_rtx_LABEL_REF (VOIDmode, label),
9533 pc_rtx, NULL_RTX, NULL_RTX);
9535 else
9537 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9538 ix86_compare_op0, ix86_compare_op1);
9539 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9540 gen_rtx_LABEL_REF (VOIDmode, label),
9541 pc_rtx);
9542 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9544 use_fcomi = ix86_use_fcomi_compare (code);
9545 vec = rtvec_alloc (3 + !use_fcomi);
9546 RTVEC_ELT (vec, 0) = tmp;
9547 RTVEC_ELT (vec, 1)
9548 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9549 RTVEC_ELT (vec, 2)
9550 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9551 if (! use_fcomi)
9552 RTVEC_ELT (vec, 3)
9553 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9555 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9557 return;
9560 case DImode:
9561 if (TARGET_64BIT)
9562 goto simple;
9563 case TImode:
9564 /* Expand DImode branch into multiple compare+branch. */
9566 rtx lo[2], hi[2], label2;
9567 enum rtx_code code1, code2, code3;
9568 enum machine_mode submode;
9570 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9572 tmp = ix86_compare_op0;
9573 ix86_compare_op0 = ix86_compare_op1;
9574 ix86_compare_op1 = tmp;
9575 code = swap_condition (code);
9577 if (GET_MODE (ix86_compare_op0) == DImode)
9579 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9580 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9581 submode = SImode;
9583 else
9585 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
9586 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
9587 submode = DImode;
9590 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9591 avoid two branches. This costs one extra insn, so disable when
9592 optimizing for size. */
9594 if ((code == EQ || code == NE)
9595 && (!optimize_size
9596 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9598 rtx xor0, xor1;
9600 xor1 = hi[0];
9601 if (hi[1] != const0_rtx)
9602 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
9603 NULL_RTX, 0, OPTAB_WIDEN);
9605 xor0 = lo[0];
9606 if (lo[1] != const0_rtx)
9607 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
9608 NULL_RTX, 0, OPTAB_WIDEN);
9610 tmp = expand_binop (submode, ior_optab, xor1, xor0,
9611 NULL_RTX, 0, OPTAB_WIDEN);
9613 ix86_compare_op0 = tmp;
9614 ix86_compare_op1 = const0_rtx;
9615 ix86_expand_branch (code, label);
9616 return;
9619 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
9620 op1 is a constant and its low word is zero, then we can just
9621 examine the high word. */
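/* With lo[1] == 0 the low-word comparison can never make the result true
   (nothing is unsigned-less-than zero), so LT, LTU, GE and GEU reduce to
   the same comparison applied to the high words alone.  */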
9623 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9624 switch (code)
9626 case LT: case LTU: case GE: case GEU:
9627 ix86_compare_op0 = hi[0];
9628 ix86_compare_op1 = hi[1];
9629 ix86_expand_branch (code, label);
9630 return;
9631 default:
9632 break;
9635 /* Otherwise, we need two or three jumps. */
9637 label2 = gen_label_rtx ();
9639 code1 = code;
9640 code2 = swap_condition (code);
9641 code3 = unsigned_condition (code);
9643 switch (code)
9645 case LT: case GT: case LTU: case GTU:
9646 break;
9648 case LE: code1 = LT; code2 = GT; break;
9649 case GE: code1 = GT; code2 = LT; break;
9650 case LEU: code1 = LTU; code2 = GTU; break;
9651 case GEU: code1 = GTU; code2 = LTU; break;
9653 case EQ: code1 = UNKNOWN; code2 = NE; break;
9654 case NE: code2 = UNKNOWN; break;
9656 default:
9657 gcc_unreachable ();
9661 * a < b =>
9662 * if (hi(a) < hi(b)) goto true;
9663 * if (hi(a) > hi(b)) goto false;
9664 * if (lo(a) < lo(b)) goto true;
9665 * false:
9668 ix86_compare_op0 = hi[0];
9669 ix86_compare_op1 = hi[1];
9671 if (code1 != UNKNOWN)
9672 ix86_expand_branch (code1, label);
9673 if (code2 != UNKNOWN)
9674 ix86_expand_branch (code2, label2);
9676 ix86_compare_op0 = lo[0];
9677 ix86_compare_op1 = lo[1];
9678 ix86_expand_branch (code3, label);
9680 if (code2 != UNKNOWN)
9681 emit_label (label2);
9682 return;
9685 default:
9686 gcc_unreachable ();
9690 /* Split branch based on floating point condition. */
9691 void
9692 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9693 rtx target1, rtx target2, rtx tmp, rtx pushed)
9695 rtx second, bypass;
9696 rtx label = NULL_RTX;
9697 rtx condition;
9698 int bypass_probability = -1, second_probability = -1, probability = -1;
9699 rtx i;
9701 if (target2 != pc_rtx)
9703 rtx tmp = target2;
9704 code = reverse_condition_maybe_unordered (code);
9705 target2 = target1;
9706 target1 = tmp;
9709 condition = ix86_expand_fp_compare (code, op1, op2,
9710 tmp, &second, &bypass);
9712 /* Remove pushed operand from stack. */
9713 if (pushed)
9714 ix86_free_from_memory (GET_MODE (pushed));
9716 if (split_branch_probability >= 0)
9718 /* Distribute the probabilities across the jumps.
9719 Assume that BYPASS and SECOND always test
9720 for UNORDERED. */
9721 probability = split_branch_probability;
9723 /* A value of 1 is low enough that there is no need for the probability
9724 to be updated. Later we may run some experiments and see
9725 if unordered values are more frequent in practice. */
9726 if (bypass)
9727 bypass_probability = 1;
9728 if (second)
9729 second_probability = 1;
9731 if (bypass != NULL_RTX)
9733 label = gen_label_rtx ();
9734 i = emit_jump_insn (gen_rtx_SET
9735 (VOIDmode, pc_rtx,
9736 gen_rtx_IF_THEN_ELSE (VOIDmode,
9737 bypass,
9738 gen_rtx_LABEL_REF (VOIDmode,
9739 label),
9740 pc_rtx)));
9741 if (bypass_probability >= 0)
9742 REG_NOTES (i)
9743 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9744 GEN_INT (bypass_probability),
9745 REG_NOTES (i));
9747 i = emit_jump_insn (gen_rtx_SET
9748 (VOIDmode, pc_rtx,
9749 gen_rtx_IF_THEN_ELSE (VOIDmode,
9750 condition, target1, target2)));
9751 if (probability >= 0)
9752 REG_NOTES (i)
9753 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9754 GEN_INT (probability),
9755 REG_NOTES (i));
9756 if (second != NULL_RTX)
9758 i = emit_jump_insn (gen_rtx_SET
9759 (VOIDmode, pc_rtx,
9760 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9761 target2)));
9762 if (second_probability >= 0)
9763 REG_NOTES (i)
9764 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9765 GEN_INT (second_probability),
9766 REG_NOTES (i));
9768 if (label != NULL_RTX)
9769 emit_label (label);
9773 ix86_expand_setcc (enum rtx_code code, rtx dest)
9775 rtx ret, tmp, tmpreg, equiv;
9776 rtx second_test, bypass_test;
9778 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
9779 return 0; /* FAIL */
9781 gcc_assert (GET_MODE (dest) == QImode);
9783 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9784 PUT_MODE (ret, QImode);
9786 tmp = dest;
9787 tmpreg = dest;
9789 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9790 if (bypass_test || second_test)
9792 rtx test = second_test;
9793 int bypass = 0;
9794 rtx tmp2 = gen_reg_rtx (QImode);
9795 if (bypass_test)
9797 gcc_assert (!second_test);
9798 test = bypass_test;
9799 bypass = 1;
9800 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9802 PUT_MODE (test, QImode);
9803 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9805 if (bypass)
9806 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9807 else
9808 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9811 /* Attach a REG_EQUAL note describing the comparison result. */
9812 if (ix86_compare_op0 && ix86_compare_op1)
9814 equiv = simplify_gen_relational (code, QImode,
9815 GET_MODE (ix86_compare_op0),
9816 ix86_compare_op0, ix86_compare_op1);
9817 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9820 return 1; /* DONE */
9823 /* Expand a comparison setting or clearing the carry flag. Return true when
9824 successful and set *POP to the comparison for the operation. */
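/* Illustrative sketch: once the comparison is reduced to an LTU/GEU form,
   the caller can materialize the result from the carry flag alone, in the
   spirit of
	cmpl op0,op1
	sbbl dest,dest
   which leaves dest = -1 exactly when the carry flag was set; this is the
   basis of the sbb sequences used further below.  */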
9825 static bool
9826 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9828 enum machine_mode mode =
9829 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9831 /* Do not handle DImode compares that go through a special path. Also we can't
9832 deal with FP compares yet. It would be possible to add this. */
9833 if (mode == (TARGET_64BIT ? TImode : DImode))
9834 return false;
9835 if (FLOAT_MODE_P (mode))
9837 rtx second_test = NULL, bypass_test = NULL;
9838 rtx compare_op, compare_seq;
9840 /* Shortcut: the following common codes never translate into carry flag compares. */
9841 if (code == EQ || code == NE || code == UNEQ || code == LTGT
9842 || code == ORDERED || code == UNORDERED)
9843 return false;
9845 /* These comparisons require the zero flag; swap operands so they won't. */
9846 if ((code == GT || code == UNLE || code == LE || code == UNGT)
9847 && !TARGET_IEEE_FP)
9849 rtx tmp = op0;
9850 op0 = op1;
9851 op1 = tmp;
9852 code = swap_condition (code);
9855 /* Try to expand the comparison and verify that we end up with a carry flag
9856 based comparison. This fails to be true only when we decide to expand the
9857 comparison using arithmetic, which is not a common scenario. */
9858 start_sequence ();
9859 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9860 &second_test, &bypass_test);
9861 compare_seq = get_insns ();
9862 end_sequence ();
9864 if (second_test || bypass_test)
9865 return false;
9866 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9867 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9868 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
9869 else
9870 code = GET_CODE (compare_op);
9871 if (code != LTU && code != GEU)
9872 return false;
9873 emit_insn (compare_seq);
9874 *pop = compare_op;
9875 return true;
9877 if (!INTEGRAL_MODE_P (mode))
9878 return false;
9879 switch (code)
9881 case LTU:
9882 case GEU:
9883 break;
9885 /* Convert a==0 into (unsigned)a<1. */
9886 case EQ:
9887 case NE:
9888 if (op1 != const0_rtx)
9889 return false;
9890 op1 = const1_rtx;
9891 code = (code == EQ ? LTU : GEU);
9892 break;
9894 /* Convert a>b into b<a or a>=b-1. */
9895 case GTU:
9896 case LEU:
9897 if (GET_CODE (op1) == CONST_INT)
9899 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
9900 /* Bail out on overflow. We could still swap the operands, but that
9901 would force loading of the constant into a register. */
9902 if (op1 == const0_rtx
9903 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
9904 return false;
9905 code = (code == GTU ? GEU : LTU);
9907 else
9909 rtx tmp = op1;
9910 op1 = op0;
9911 op0 = tmp;
9912 code = (code == GTU ? LTU : GEU);
9914 break;
9916 /* Convert a>=0 into (unsigned)a<0x80000000. */
9917 case LT:
9918 case GE:
9919 if (mode == DImode || op1 != const0_rtx)
9920 return false;
9921 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9922 code = (code == LT ? GEU : LTU);
9923 break;
9924 case LE:
9925 case GT:
9926 if (mode == DImode || op1 != constm1_rtx)
9927 return false;
9928 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9929 code = (code == LE ? GEU : LTU);
9930 break;
9932 default:
9933 return false;
9935 /* Swapping operands may cause a constant to appear as the first operand. */
9936 if (!nonimmediate_operand (op0, VOIDmode))
9938 if (no_new_pseudos)
9939 return false;
9940 op0 = force_reg (mode, op0);
9942 ix86_compare_op0 = op0;
9943 ix86_compare_op1 = op1;
9944 *pop = ix86_expand_compare (code, NULL, NULL);
9945 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
9946 return true;
9950 ix86_expand_int_movcc (rtx operands[])
9952 enum rtx_code code = GET_CODE (operands[1]), compare_code;
9953 rtx compare_seq, compare_op;
9954 rtx second_test, bypass_test;
9955 enum machine_mode mode = GET_MODE (operands[0]);
9956 bool sign_bit_compare_p = false;
9958 start_sequence ();
9959 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9960 compare_seq = get_insns ();
9961 end_sequence ();
9963 compare_code = GET_CODE (compare_op);
9965 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
9966 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
9967 sign_bit_compare_p = true;
9969 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
9970 HImode insns, we'd be swallowed in word prefix ops. */
9972 if ((mode != HImode || TARGET_FAST_PREFIX)
9973 && (mode != (TARGET_64BIT ? TImode : DImode))
9974 && GET_CODE (operands[2]) == CONST_INT
9975 && GET_CODE (operands[3]) == CONST_INT)
9977 rtx out = operands[0];
9978 HOST_WIDE_INT ct = INTVAL (operands[2]);
9979 HOST_WIDE_INT cf = INTVAL (operands[3]);
9980 HOST_WIDE_INT diff;
9982 diff = ct - cf;
9983 /* Sign bit compares are better done using shifts than by using
9984 sbb. */
9985 if (sign_bit_compare_p
9986 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9987 ix86_compare_op1, &compare_op))
9989 /* Detect overlap between destination and compare sources. */
9990 rtx tmp = out;
9992 if (!sign_bit_compare_p)
9994 bool fpcmp = false;
9996 compare_code = GET_CODE (compare_op);
9998 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9999 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10001 fpcmp = true;
10002 compare_code = ix86_fp_compare_code_to_integer (compare_code);
10005 /* To simplify the rest of the code, restrict to the GEU case. */
10006 if (compare_code == LTU)
10008 HOST_WIDE_INT tmp = ct;
10009 ct = cf;
10010 cf = tmp;
10011 compare_code = reverse_condition (compare_code);
10012 code = reverse_condition (code);
10014 else
10016 if (fpcmp)
10017 PUT_CODE (compare_op,
10018 reverse_condition_maybe_unordered
10019 (GET_CODE (compare_op)));
10020 else
10021 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10023 diff = ct - cf;
10025 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
10026 || reg_overlap_mentioned_p (out, ix86_compare_op1))
10027 tmp = gen_reg_rtx (mode);
10029 if (mode == DImode)
10030 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
10031 else
10032 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
10034 else
10036 if (code == GT || code == GE)
10037 code = reverse_condition (code);
10038 else
10040 HOST_WIDE_INT tmp = ct;
10041 ct = cf;
10042 cf = tmp;
10043 diff = ct - cf;
10045 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
10046 ix86_compare_op1, VOIDmode, 0, -1);
10049 if (diff == 1)
10052 * cmpl op0,op1
10053 * sbbl dest,dest
10054 * [addl dest, ct]
10056 * Size 5 - 8.
10058 if (ct)
10059 tmp = expand_simple_binop (mode, PLUS,
10060 tmp, GEN_INT (ct),
10061 copy_rtx (tmp), 1, OPTAB_DIRECT);
10063 else if (cf == -1)
10066 * cmpl op0,op1
10067 * sbbl dest,dest
10068 * orl $ct, dest
10070 * Size 8.
10072 tmp = expand_simple_binop (mode, IOR,
10073 tmp, GEN_INT (ct),
10074 copy_rtx (tmp), 1, OPTAB_DIRECT);
10076 else if (diff == -1 && ct)
10079 * cmpl op0,op1
10080 * sbbl dest,dest
10081 * notl dest
10082 * [addl dest, cf]
10084 * Size 8 - 11.
10086 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10087 if (cf)
10088 tmp = expand_simple_binop (mode, PLUS,
10089 copy_rtx (tmp), GEN_INT (cf),
10090 copy_rtx (tmp), 1, OPTAB_DIRECT);
10092 else
10095 * cmpl op0,op1
10096 * sbbl dest,dest
10097 * [notl dest]
10098 * andl cf - ct, dest
10099 * [addl dest, ct]
10101 * Size 8 - 11.
10104 if (cf == 0)
10106 cf = ct;
10107 ct = 0;
10108 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10111 tmp = expand_simple_binop (mode, AND,
10112 copy_rtx (tmp),
10113 gen_int_mode (cf - ct, mode),
10114 copy_rtx (tmp), 1, OPTAB_DIRECT);
10115 if (ct)
10116 tmp = expand_simple_binop (mode, PLUS,
10117 copy_rtx (tmp), GEN_INT (ct),
10118 copy_rtx (tmp), 1, OPTAB_DIRECT);
10121 if (!rtx_equal_p (tmp, out))
10122 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
10124 return 1; /* DONE */
10127 if (diff < 0)
10129 HOST_WIDE_INT tmp;
10130 tmp = ct, ct = cf, cf = tmp;
10131 diff = -diff;
10132 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10134 /* We may be reversing an unordered compare to a normal compare, which
10135 is not valid in general (we may convert a non-trapping condition
10136 to a trapping one); however, on i386 we currently emit all
10137 comparisons unordered. */
10138 compare_code = reverse_condition_maybe_unordered (compare_code);
10139 code = reverse_condition_maybe_unordered (code);
10141 else
10143 compare_code = reverse_condition (compare_code);
10144 code = reverse_condition (code);
10148 compare_code = UNKNOWN;
10149 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
10150 && GET_CODE (ix86_compare_op1) == CONST_INT)
10152 if (ix86_compare_op1 == const0_rtx
10153 && (code == LT || code == GE))
10154 compare_code = code;
10155 else if (ix86_compare_op1 == constm1_rtx)
10157 if (code == LE)
10158 compare_code = LT;
10159 else if (code == GT)
10160 compare_code = GE;
10164 /* Optimize dest = (op0 < 0) ? -1 : cf. */
10165 if (compare_code != UNKNOWN
10166 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
10167 && (cf == -1 || ct == -1))
10169 /* If the lea code below could be used, only optimize
10170 if it results in a 2-insn sequence. */
10172 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
10173 || diff == 3 || diff == 5 || diff == 9)
10174 || (compare_code == LT && ct == -1)
10175 || (compare_code == GE && cf == -1))
10178 * notl op1 (if necessary)
10179 * sarl $31, op1
10180 * orl cf, op1
10182 if (ct != -1)
10184 cf = ct;
10185 ct = -1;
10186 code = reverse_condition (code);
10189 out = emit_store_flag (out, code, ix86_compare_op0,
10190 ix86_compare_op1, VOIDmode, 0, -1);
10192 out = expand_simple_binop (mode, IOR,
10193 out, GEN_INT (cf),
10194 out, 1, OPTAB_DIRECT);
10195 if (out != operands[0])
10196 emit_move_insn (operands[0], out);
10198 return 1; /* DONE */
10203 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
10204 || diff == 3 || diff == 5 || diff == 9)
10205 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
10206 && (mode != DImode
10207 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
10210 * xorl dest,dest
10211 * cmpl op1,op2
10212 * setcc dest
10213 * lea cf(dest*(ct-cf)),dest
10215 * Size 14.
10217 * This also catches the degenerate setcc-only case.
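* E.g. with ct = 5 and cf = 2, diff is 3 and the lea computes
* dest = 2 + dest*3, i.e. "lea 2(dest,dest,2), dest"; this is why only
* diff values of 1, 2, 4, 8 and 3, 5, 9 are accepted above (illustrative).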
10220 rtx tmp;
10221 int nops;
10223 out = emit_store_flag (out, code, ix86_compare_op0,
10224 ix86_compare_op1, VOIDmode, 0, 1);
10226 nops = 0;
10227 /* On x86_64 the lea instruction operates on Pmode, so we need
10228 to get the arithmetic done in the proper mode to match. */
10229 if (diff == 1)
10230 tmp = copy_rtx (out);
10231 else
10233 rtx out1;
10234 out1 = copy_rtx (out);
10235 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
10236 nops++;
10237 if (diff & 1)
10239 tmp = gen_rtx_PLUS (mode, tmp, out1);
10240 nops++;
10243 if (cf != 0)
10245 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
10246 nops++;
10248 if (!rtx_equal_p (tmp, out))
10250 if (nops == 1)
10251 out = force_operand (tmp, copy_rtx (out));
10252 else
10253 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
10255 if (!rtx_equal_p (out, operands[0]))
10256 emit_move_insn (operands[0], copy_rtx (out));
10258 return 1; /* DONE */
10262 * General case: Jumpful:
10263 * xorl dest,dest cmpl op1, op2
10264 * cmpl op1, op2 movl ct, dest
10265 * setcc dest jcc 1f
10266 * decl dest movl cf, dest
10267 * andl (cf-ct),dest 1:
10268 * addl ct,dest
10270 * Size 20. Size 14.
10272 * This is reasonably steep, but branch mispredict costs are
10273 * high on modern cpus, so consider failing only if optimizing
10274 * for space.
10277 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10278 && BRANCH_COST >= 2)
10280 if (cf == 0)
10282 cf = ct;
10283 ct = 0;
10284 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10285 /* We may be reversing an unordered compare to a normal compare,
10286 which is not valid in general (we may convert a non-trapping
10287 condition to a trapping one); however, on i386 we currently
10288 emit all comparisons unordered. */
10289 code = reverse_condition_maybe_unordered (code);
10290 else
10292 code = reverse_condition (code);
10293 if (compare_code != UNKNOWN)
10294 compare_code = reverse_condition (compare_code);
10298 if (compare_code != UNKNOWN)
10300 /* notl op1 (if needed)
10301 sarl $31, op1
10302 andl (cf-ct), op1
10303 addl ct, op1
10305 For x < 0 (resp. x <= -1) there will be no notl,
10306 so if possible swap the constants to get rid of the
10307 complement.
10308 True/false will be -1/0 while code below (store flag
10309 followed by decrement) is 0/-1, so the constants need
10310 to be exchanged once more. */
10312 if (compare_code == GE || !cf)
10314 code = reverse_condition (code);
10315 compare_code = LT;
10317 else
10319 HOST_WIDE_INT tmp = cf;
10320 cf = ct;
10321 ct = tmp;
10324 out = emit_store_flag (out, code, ix86_compare_op0,
10325 ix86_compare_op1, VOIDmode, 0, -1);
10327 else
10329 out = emit_store_flag (out, code, ix86_compare_op0,
10330 ix86_compare_op1, VOIDmode, 0, 1);
10332 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10333 copy_rtx (out), 1, OPTAB_DIRECT);
10336 out = expand_simple_binop (mode, AND, copy_rtx (out),
10337 gen_int_mode (cf - ct, mode),
10338 copy_rtx (out), 1, OPTAB_DIRECT);
10339 if (ct)
10340 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10341 copy_rtx (out), 1, OPTAB_DIRECT);
10342 if (!rtx_equal_p (out, operands[0]))
10343 emit_move_insn (operands[0], copy_rtx (out));
10345 return 1; /* DONE */
10349 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10351 /* Try a few things more with specific constants and a variable. */
10353 optab op;
10354 rtx var, orig_out, out, tmp;
10356 if (BRANCH_COST <= 2)
10357 return 0; /* FAIL */
10359 /* If one of the two operands is an interesting constant, load a
10360 constant via the code above and mask the variable in with a logical operation. */
10362 if (GET_CODE (operands[2]) == CONST_INT)
10364 var = operands[3];
10365 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10366 operands[3] = constm1_rtx, op = and_optab;
10367 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10368 operands[3] = const0_rtx, op = ior_optab;
10369 else
10370 return 0; /* FAIL */
10372 else if (GET_CODE (operands[3]) == CONST_INT)
10374 var = operands[2];
10375 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10376 operands[2] = constm1_rtx, op = and_optab;
10377 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
10378 operands[2] = const0_rtx, op = ior_optab;
10379 else
10380 return 0; /* FAIL */
10382 else
10383 return 0; /* FAIL */
10385 orig_out = operands[0];
10386 tmp = gen_reg_rtx (mode);
10387 operands[0] = tmp;
10389 /* Recurse to get the constant loaded. */
10390 if (ix86_expand_int_movcc (operands) == 0)
10391 return 0; /* FAIL */
10393 /* Mask in the interesting variable. */
10394 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10395 OPTAB_WIDEN);
10396 if (!rtx_equal_p (out, orig_out))
10397 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10399 return 1; /* DONE */
10403 * For comparison with above,
10405 * movl cf,dest
10406 * movl ct,tmp
10407 * cmpl op1,op2
10408 * cmovcc tmp,dest
10410 * Size 15.
10413 if (! nonimmediate_operand (operands[2], mode))
10414 operands[2] = force_reg (mode, operands[2]);
10415 if (! nonimmediate_operand (operands[3], mode))
10416 operands[3] = force_reg (mode, operands[3]);
10418 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10420 rtx tmp = gen_reg_rtx (mode);
10421 emit_move_insn (tmp, operands[3]);
10422 operands[3] = tmp;
10424 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10426 rtx tmp = gen_reg_rtx (mode);
10427 emit_move_insn (tmp, operands[2]);
10428 operands[2] = tmp;
10431 if (! register_operand (operands[2], VOIDmode)
10432 && (mode == QImode
10433 || ! register_operand (operands[3], VOIDmode)))
10434 operands[2] = force_reg (mode, operands[2]);
10436 if (mode == QImode
10437 && ! register_operand (operands[3], VOIDmode))
10438 operands[3] = force_reg (mode, operands[3]);
10440 emit_insn (compare_seq);
10441 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10442 gen_rtx_IF_THEN_ELSE (mode,
10443 compare_op, operands[2],
10444 operands[3])));
10445 if (bypass_test)
10446 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10447 gen_rtx_IF_THEN_ELSE (mode,
10448 bypass_test,
10449 copy_rtx (operands[3]),
10450 copy_rtx (operands[0]))));
10451 if (second_test)
10452 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10453 gen_rtx_IF_THEN_ELSE (mode,
10454 second_test,
10455 copy_rtx (operands[2]),
10456 copy_rtx (operands[0]))));
10458 return 1; /* DONE */
10461 /* Swap, force into registers, or otherwise massage the two operands
10462 to an sse comparison with a mask result. Thus we differ a bit from
10463 ix86_prepare_fp_compare_args which expects to produce a flags result.
10465 The DEST operand exists to help determine whether to commute commutative
10466 operators. The POP0/POP1 operands are updated in place. The new
10467 comparison code is returned, or UNKNOWN if not implementable. */
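/* For example (informal): GT is not one of the directly supported SSE
   compares, so the switch below swaps the operands and uses LT instead;
   for the commutative codes (EQ, NE, ORDERED, UNORDERED) we only try to
   make DEST the first operand to help reload.  */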
10469 static enum rtx_code
10470 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10471 rtx *pop0, rtx *pop1)
10473 rtx tmp;
10475 switch (code)
10477 case LTGT:
10478 case UNEQ:
10479 /* We have no LTGT as an operator. We could implement it with
10480 NE & ORDERED, but this requires an extra temporary. It's
10481 not clear that it's worth it. */
10482 return UNKNOWN;
10484 case LT:
10485 case LE:
10486 case UNGT:
10487 case UNGE:
10488 /* These are supported directly. */
10489 break;
10491 case EQ:
10492 case NE:
10493 case UNORDERED:
10494 case ORDERED:
10495 /* For commutative operators, try to canonicalize the destination
10496 operand to be first in the comparison - this helps reload to
10497 avoid extra moves. */
10498 if (!dest || !rtx_equal_p (dest, *pop1))
10499 break;
10500 /* FALLTHRU */
10502 case GE:
10503 case GT:
10504 case UNLE:
10505 case UNLT:
10506 /* These are not supported directly. Swap the comparison operands
10507 to transform into something that is supported. */
10508 tmp = *pop0;
10509 *pop0 = *pop1;
10510 *pop1 = tmp;
10511 code = swap_condition (code);
10512 break;
10514 default:
10515 gcc_unreachable ();
10518 return code;
10521 /* Detect conditional moves that exactly match min/max operational
10522 semantics. Note that this is IEEE safe, as long as we don't
10523 interchange the operands.
10525 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10526 and TRUE if the operation is successful and instructions are emitted. */
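/* Informally: "a < b ? a : b" matches a min and "a < b ? b : a" matches a
   max; the UNGE form is reduced to the LT form by swapping IF_TRUE and
   IF_FALSE before the operands are matched (sketch of the logic below).  */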
10528 static bool
10529 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10530 rtx cmp_op1, rtx if_true, rtx if_false)
10532 enum machine_mode mode;
10533 bool is_min;
10534 rtx tmp;
10536 if (code == LT)
10538 else if (code == UNGE)
10540 tmp = if_true;
10541 if_true = if_false;
10542 if_false = tmp;
10544 else
10545 return false;
10547 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10548 is_min = true;
10549 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10550 is_min = false;
10551 else
10552 return false;
10554 mode = GET_MODE (dest);
10556 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10557 but MODE may be a vector mode and thus not appropriate. */
10558 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10560 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10561 rtvec v;
10563 if_true = force_reg (mode, if_true);
10564 v = gen_rtvec (2, if_true, if_false);
10565 tmp = gen_rtx_UNSPEC (mode, v, u);
10567 else
10569 code = is_min ? SMIN : SMAX;
10570 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10573 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
10574 return true;
10577 /* Expand an sse vector comparison. Return the register with the result. */
10579 static rtx
10580 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10581 rtx op_true, rtx op_false)
10583 enum machine_mode mode = GET_MODE (dest);
10584 rtx x;
10586 cmp_op0 = force_reg (mode, cmp_op0);
10587 if (!nonimmediate_operand (cmp_op1, mode))
10588 cmp_op1 = force_reg (mode, cmp_op1);
10590 if (optimize
10591 || reg_overlap_mentioned_p (dest, op_true)
10592 || reg_overlap_mentioned_p (dest, op_false))
10593 dest = gen_reg_rtx (mode);
10595 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10596 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10598 return dest;
10601 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10602 operations. This is used for both scalar and vector conditional moves. */
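/* The general shape of the expansion is
	dest = (cmp & op_true) | (~cmp & op_false)
   with the single-AND special cases below used when one arm is zero
   (informal summary of the code that follows).  */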
10604 static void
10605 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10607 enum machine_mode mode = GET_MODE (dest);
10608 rtx t2, t3, x;
10610 if (op_false == CONST0_RTX (mode))
10612 op_true = force_reg (mode, op_true);
10613 x = gen_rtx_AND (mode, cmp, op_true);
10614 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10616 else if (op_true == CONST0_RTX (mode))
10618 op_false = force_reg (mode, op_false);
10619 x = gen_rtx_NOT (mode, cmp);
10620 x = gen_rtx_AND (mode, x, op_false);
10621 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10623 else
10625 op_true = force_reg (mode, op_true);
10626 op_false = force_reg (mode, op_false);
10628 t2 = gen_reg_rtx (mode);
10629 if (optimize)
10630 t3 = gen_reg_rtx (mode);
10631 else
10632 t3 = dest;
10634 x = gen_rtx_AND (mode, op_true, cmp);
10635 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10637 x = gen_rtx_NOT (mode, cmp);
10638 x = gen_rtx_AND (mode, x, op_false);
10639 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
10641 x = gen_rtx_IOR (mode, t3, t2);
10642 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10646 /* Expand a floating-point conditional move. Return true if successful. */
10649 ix86_expand_fp_movcc (rtx operands[])
10651 enum machine_mode mode = GET_MODE (operands[0]);
10652 enum rtx_code code = GET_CODE (operands[1]);
10653 rtx tmp, compare_op, second_test, bypass_test;
10655 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
10657 enum machine_mode cmode;
10659 /* Since we've no cmove for sse registers, don't force bad register
10660 allocation just to gain access to it. Deny movcc when the
10661 comparison mode doesn't match the move mode. */
10662 cmode = GET_MODE (ix86_compare_op0);
10663 if (cmode == VOIDmode)
10664 cmode = GET_MODE (ix86_compare_op1);
10665 if (cmode != mode)
10666 return 0;
10668 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10669 &ix86_compare_op0,
10670 &ix86_compare_op1);
10671 if (code == UNKNOWN)
10672 return 0;
10674 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
10675 ix86_compare_op1, operands[2],
10676 operands[3]))
10677 return 1;
10679 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
10680 ix86_compare_op1, operands[2], operands[3]);
10681 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
10682 return 1;
10685 /* The floating point conditional move instructions don't directly
10686 support conditions resulting from a signed integer comparison. */
10688 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10693 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10695 gcc_assert (!second_test && !bypass_test);
10696 tmp = gen_reg_rtx (QImode);
10697 ix86_expand_setcc (code, tmp);
10698 code = NE;
10699 ix86_compare_op0 = tmp;
10700 ix86_compare_op1 = const0_rtx;
10701 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10703 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10705 tmp = gen_reg_rtx (mode);
10706 emit_move_insn (tmp, operands[3]);
10707 operands[3] = tmp;
10709 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10711 tmp = gen_reg_rtx (mode);
10712 emit_move_insn (tmp, operands[2]);
10713 operands[2] = tmp;
10716 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10717 gen_rtx_IF_THEN_ELSE (mode, compare_op,
10718 operands[2], operands[3])));
10719 if (bypass_test)
10720 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10721 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
10722 operands[3], operands[0])));
10723 if (second_test)
10724 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10725 gen_rtx_IF_THEN_ELSE (mode, second_test,
10726 operands[2], operands[0])));
10728 return 1;
10731 /* Expand a floating-point vector conditional move; a vcond operation
10732 rather than a movcc operation. */
10734 bool
10735 ix86_expand_fp_vcond (rtx operands[])
10737 enum rtx_code code = GET_CODE (operands[3]);
10738 rtx cmp;
10740 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10741 &operands[4], &operands[5]);
10742 if (code == UNKNOWN)
10743 return false;
10745 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
10746 operands[5], operands[1], operands[2]))
10747 return true;
10749 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10750 operands[1], operands[2]);
10751 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10752 return true;
10755 /* Expand a signed integral vector conditional move. */
10757 bool
10758 ix86_expand_int_vcond (rtx operands[])
10760 enum machine_mode mode = GET_MODE (operands[0]);
10761 enum rtx_code code = GET_CODE (operands[3]);
10762 bool negate = false;
10763 rtx x, cop0, cop1;
10765 cop0 = operands[4];
10766 cop1 = operands[5];
10768 /* Canonicalize the comparison to EQ, GT, GTU. */
10769 switch (code)
10771 case EQ:
10772 case GT:
10773 case GTU:
10774 break;
10776 case NE:
10777 case LE:
10778 case LEU:
10779 code = reverse_condition (code);
10780 negate = true;
10781 break;
10783 case GE:
10784 case GEU:
10785 code = reverse_condition (code);
10786 negate = true;
10787 /* FALLTHRU */
10789 case LT:
10790 case LTU:
10791 code = swap_condition (code);
10792 x = cop0, cop0 = cop1, cop1 = x;
10793 break;
10795 default:
10796 gcc_unreachable ();
10799 /* Unsigned parallel compare is not supported by the hardware. Play some
10800 tricks to turn this into a signed comparison against 0. */
10801 if (code == GTU)
10803 switch (mode)
10805 case V4SImode:
10807 rtx t1, t2, mask;
10809 /* Perform a parallel modulo subtraction. */
10810 t1 = gen_reg_rtx (mode);
10811 emit_insn (gen_subv4si3 (t1, cop0, cop1));
10813 /* Extract the original sign bit of op0. */
10814 mask = GEN_INT (-0x80000000);
10815 mask = gen_rtx_CONST_VECTOR (mode,
10816 gen_rtvec (4, mask, mask, mask, mask));
10817 mask = force_reg (mode, mask);
10818 t2 = gen_reg_rtx (mode);
10819 emit_insn (gen_andv4si3 (t2, cop0, mask));
10821 /* XOR it back into the result of the subtraction. This results
10822 in the sign bit set iff we saw unsigned underflow. */
10823 x = gen_reg_rtx (mode);
10824 emit_insn (gen_xorv4si3 (x, t1, t2));
10826 code = GT;
10828 break;
10830 case V16QImode:
10831 case V8HImode:
10832 /* Perform a parallel unsigned saturating subtraction. */
10833 x = gen_reg_rtx (mode);
10834 emit_insn (gen_rtx_SET (VOIDmode, x,
10835 gen_rtx_US_MINUS (mode, cop0, cop1)));
10837 code = EQ;
10838 negate = !negate;
10839 break;
10841 default:
10842 gcc_unreachable ();
10845 cop0 = x;
10846 cop1 = CONST0_RTX (mode);
10849 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
10850 operands[1+negate], operands[2-negate]);
10852 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
10853 operands[2-negate]);
10854 return true;
10857 /* Expand conditional increment or decrement using adc/sbb instructions.
10858 The default case using setcc followed by the conditional move can be
10859 done by generic code. */
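/* A rough example of the intent: for "x = (a < b) ? y + 1 : y" the
   comparison is turned into a carry-flag test and the result is built with
   an adc/sbb-style pattern, conceptually "cmpl a,b; adcl $0,y", via the
   add*3_carry/sub*3_carry insns used below (sketch only).  */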
10861 ix86_expand_int_addcc (rtx operands[])
10863 enum rtx_code code = GET_CODE (operands[1]);
10864 rtx compare_op;
10865 rtx val = const0_rtx;
10866 bool fpcmp = false;
10867 enum machine_mode mode = GET_MODE (operands[0]);
10869 if (operands[3] != const1_rtx
10870 && operands[3] != constm1_rtx)
10871 return 0;
10872 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10873 ix86_compare_op1, &compare_op))
10874 return 0;
10875 code = GET_CODE (compare_op);
10877 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10878 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10880 fpcmp = true;
10881 code = ix86_fp_compare_code_to_integer (code);
10884 if (code != LTU)
10886 val = constm1_rtx;
10887 if (fpcmp)
10888 PUT_CODE (compare_op,
10889 reverse_condition_maybe_unordered
10890 (GET_CODE (compare_op)));
10891 else
10892 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10894 PUT_MODE (compare_op, mode);
10896 /* Construct either adc or sbb insn. */
10897 if ((code == LTU) == (operands[3] == constm1_rtx))
10899 switch (GET_MODE (operands[0]))
10901 case QImode:
10902 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
10903 break;
10904 case HImode:
10905 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
10906 break;
10907 case SImode:
10908 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
10909 break;
10910 case DImode:
10911 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10912 break;
10913 default:
10914 gcc_unreachable ();
10917 else
10919 switch (GET_MODE (operands[0]))
10921 case QImode:
10922 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
10923 break;
10924 case HImode:
10925 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
10926 break;
10927 case SImode:
10928 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
10929 break;
10930 case DImode:
10931 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10932 break;
10933 default:
10934 gcc_unreachable ();
10937 return 1; /* DONE */
10941 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
10942 works for floating point parameters and non-offsettable memories.
10943 For pushes, it returns just stack offsets; the values will be saved
10944 in the right order. At most three parts are generated. */
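/* For example, on a 32-bit target a DFmode value is split into two SImode
   parts and an XFmode value into three, while on a 64-bit target XFmode and
   TFmode are split into a DImode part plus one SImode or DImode part
   (informal summary of the size computation below).  */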
10946 static int
10947 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
10949 int size;
10951 if (!TARGET_64BIT)
10952 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
10953 else
10954 size = (GET_MODE_SIZE (mode) + 4) / 8;
10956 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
10957 gcc_assert (size >= 2 && size <= 3);
10959 /* Optimize constant pool references to immediates. This is used by fp
10960 moves, which force all constants to memory to allow combining. */
10961 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
10963 rtx tmp = maybe_get_pool_constant (operand);
10964 if (tmp)
10965 operand = tmp;
10968 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
10970 /* The only non-offsettable memories we handle are pushes. */
10971 int ok = push_operand (operand, VOIDmode);
10973 gcc_assert (ok);
10975 operand = copy_rtx (operand);
10976 PUT_MODE (operand, Pmode);
10977 parts[0] = parts[1] = parts[2] = operand;
10978 return size;
10981 if (GET_CODE (operand) == CONST_VECTOR)
10983 enum machine_mode imode = int_mode_for_mode (mode);
10984 /* Caution: if we looked through a constant pool memory above,
10985 the operand may actually have a different mode now. That's
10986 ok, since we want to pun this all the way back to an integer. */
10987 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
10988 gcc_assert (operand != NULL);
10989 mode = imode;
10992 if (!TARGET_64BIT)
10994 if (mode == DImode)
10995 split_di (&operand, 1, &parts[0], &parts[1]);
10996 else
10998 if (REG_P (operand))
11000 gcc_assert (reload_completed);
11001 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
11002 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
11003 if (size == 3)
11004 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
11006 else if (offsettable_memref_p (operand))
11008 operand = adjust_address (operand, SImode, 0);
11009 parts[0] = operand;
11010 parts[1] = adjust_address (operand, SImode, 4);
11011 if (size == 3)
11012 parts[2] = adjust_address (operand, SImode, 8);
11014 else if (GET_CODE (operand) == CONST_DOUBLE)
11016 REAL_VALUE_TYPE r;
11017 long l[4];
11019 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11020 switch (mode)
11022 case XFmode:
11023 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
11024 parts[2] = gen_int_mode (l[2], SImode);
11025 break;
11026 case DFmode:
11027 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
11028 break;
11029 default:
11030 gcc_unreachable ();
11032 parts[1] = gen_int_mode (l[1], SImode);
11033 parts[0] = gen_int_mode (l[0], SImode);
11035 else
11036 gcc_unreachable ();
11039 else
11041 if (mode == TImode)
11042 split_ti (&operand, 1, &parts[0], &parts[1]);
11043 if (mode == XFmode || mode == TFmode)
11045 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
11046 if (REG_P (operand))
11048 gcc_assert (reload_completed);
11049 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
11050 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
11052 else if (offsettable_memref_p (operand))
11054 operand = adjust_address (operand, DImode, 0);
11055 parts[0] = operand;
11056 parts[1] = adjust_address (operand, upper_mode, 8);
11058 else if (GET_CODE (operand) == CONST_DOUBLE)
11060 REAL_VALUE_TYPE r;
11061 long l[4];
11063 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11064 real_to_target (l, &r, mode);
11066 /* Do not use shift by 32 to avoid warning on 32bit systems. */
11067 if (HOST_BITS_PER_WIDE_INT >= 64)
11068 parts[0]
11069 = gen_int_mode
11070 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
11071 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
11072 DImode);
11073 else
11074 parts[0] = immed_double_const (l[0], l[1], DImode);
11076 if (upper_mode == SImode)
11077 parts[1] = gen_int_mode (l[2], SImode);
11078 else if (HOST_BITS_PER_WIDE_INT >= 64)
11079 parts[1]
11080 = gen_int_mode
11081 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
11082 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
11083 DImode);
11084 else
11085 parts[1] = immed_double_const (l[2], l[3], DImode);
11087 else
11088 gcc_unreachable ();
11092 return size;
11095 /* Emit insns to perform a move or push of DI, DF, and XF values.
11096 Return false when normal moves are needed; true when all required
11097 insns have been emitted. Operands 2-4 contain the input values
11098 in the correct order; operands 5-7 contain the output values. */
11100 void
11101 ix86_split_long_move (rtx operands[])
11103 rtx part[2][3];
11104 int nparts;
11105 int push = 0;
11106 int collisions = 0;
11107 enum machine_mode mode = GET_MODE (operands[0]);
11109 /* The DFmode expanders may ask us to move a double.
11110 For a 64-bit target this is a single move. By hiding that fact
11111 here we simplify the i386.md splitters. */
11112 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
11114 /* Optimize constant pool references to immediates. This is used by
11115 fp moves, which force all constants to memory to allow combining. */
11117 if (GET_CODE (operands[1]) == MEM
11118 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11119 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
11120 operands[1] = get_pool_constant (XEXP (operands[1], 0));
11121 if (push_operand (operands[0], VOIDmode))
11123 operands[0] = copy_rtx (operands[0]);
11124 PUT_MODE (operands[0], Pmode);
11126 else
11127 operands[0] = gen_lowpart (DImode, operands[0]);
11128 operands[1] = gen_lowpart (DImode, operands[1]);
11129 emit_move_insn (operands[0], operands[1]);
11130 return;
11133 /* The only non-offsettable memory we handle is a push. */
11134 if (push_operand (operands[0], VOIDmode))
11135 push = 1;
11136 else
11137 gcc_assert (GET_CODE (operands[0]) != MEM
11138 || offsettable_memref_p (operands[0]));
11140 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
11141 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
11143 /* When emitting a push, take care of source operands on the stack. */
11144 if (push && GET_CODE (operands[1]) == MEM
11145 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
11147 if (nparts == 3)
11148 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
11149 XEXP (part[1][2], 0));
11150 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
11151 XEXP (part[1][1], 0));
11154 /* We need to do the copy in the right order in case an address register
11155 of the source overlaps the destination. */
11156 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
11158 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
11159 collisions++;
11160 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11161 collisions++;
11162 if (nparts == 3
11163 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
11164 collisions++;
11166 /* Collision in the middle part can be handled by reordering. */
11167 if (collisions == 1 && nparts == 3
11168 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11170 rtx tmp;
11171 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
11172 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
11175 /* If there are more collisions, we can't handle them by reordering.
11176 Do an lea to the last part and use only one colliding move. */
11177 else if (collisions > 1)
11179 rtx base;
11181 collisions = 1;
11183 base = part[0][nparts - 1];
11185 /* Handle the case when the last part isn't valid for lea.
11186 Happens in 64-bit mode storing the 12-byte XFmode. */
11187 if (GET_MODE (base) != Pmode)
11188 base = gen_rtx_REG (Pmode, REGNO (base));
11190 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
11191 part[1][0] = replace_equiv_address (part[1][0], base);
11192 part[1][1] = replace_equiv_address (part[1][1],
11193 plus_constant (base, UNITS_PER_WORD));
11194 if (nparts == 3)
11195 part[1][2] = replace_equiv_address (part[1][2],
11196 plus_constant (base, 8));
11200 if (push)
11202 if (!TARGET_64BIT)
11204 if (nparts == 3)
11206 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
11207 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
11208 emit_move_insn (part[0][2], part[1][2]);
11211 else
11213 /* In 64-bit mode we don't have a 32-bit push available. In case this is a
11214 register, it is OK - we will just use the larger counterpart. We also
11215 retype memory - these come from an attempt to avoid a REX prefix on
11216 moving the second half of a TFmode value. */
11217 if (GET_MODE (part[1][1]) == SImode)
11219 switch (GET_CODE (part[1][1]))
11221 case MEM:
11222 part[1][1] = adjust_address (part[1][1], DImode, 0);
11223 break;
11225 case REG:
11226 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
11227 break;
11229 default:
11230 gcc_unreachable ();
11233 if (GET_MODE (part[1][0]) == SImode)
11234 part[1][0] = part[1][1];
11237 emit_move_insn (part[0][1], part[1][1]);
11238 emit_move_insn (part[0][0], part[1][0]);
11239 return;
11242 /* Choose the correct order so we do not overwrite the source before it is copied. */
11243 if ((REG_P (part[0][0])
11244 && REG_P (part[1][1])
11245 && (REGNO (part[0][0]) == REGNO (part[1][1])
11246 || (nparts == 3
11247 && REGNO (part[0][0]) == REGNO (part[1][2]))))
11248 || (collisions > 0
11249 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
11251 if (nparts == 3)
11253 operands[2] = part[0][2];
11254 operands[3] = part[0][1];
11255 operands[4] = part[0][0];
11256 operands[5] = part[1][2];
11257 operands[6] = part[1][1];
11258 operands[7] = part[1][0];
11260 else
11262 operands[2] = part[0][1];
11263 operands[3] = part[0][0];
11264 operands[5] = part[1][1];
11265 operands[6] = part[1][0];
11268 else
11270 if (nparts == 3)
11272 operands[2] = part[0][0];
11273 operands[3] = part[0][1];
11274 operands[4] = part[0][2];
11275 operands[5] = part[1][0];
11276 operands[6] = part[1][1];
11277 operands[7] = part[1][2];
11279 else
11281 operands[2] = part[0][0];
11282 operands[3] = part[0][1];
11283 operands[5] = part[1][0];
11284 operands[6] = part[1][1];
11288 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11289 if (optimize_size)
11291 if (GET_CODE (operands[5]) == CONST_INT
11292 && operands[5] != const0_rtx
11293 && REG_P (operands[2]))
11295 if (GET_CODE (operands[6]) == CONST_INT
11296 && INTVAL (operands[6]) == INTVAL (operands[5]))
11297 operands[6] = operands[2];
11299 if (nparts == 3
11300 && GET_CODE (operands[7]) == CONST_INT
11301 && INTVAL (operands[7]) == INTVAL (operands[5]))
11302 operands[7] = operands[2];
11305 if (nparts == 3
11306 && GET_CODE (operands[6]) == CONST_INT
11307 && operands[6] != const0_rtx
11308 && REG_P (operands[3])
11309 && GET_CODE (operands[7]) == CONST_INT
11310 && INTVAL (operands[7]) == INTVAL (operands[6]))
11311 operands[7] = operands[3];
11314 emit_move_insn (operands[2], operands[5]);
11315 emit_move_insn (operands[3], operands[6]);
11316 if (nparts == 3)
11317 emit_move_insn (operands[4], operands[7]);
11319 return;
11322 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
11323 left shift by a constant, either using a single shift or
11324 a sequence of add instructions. */
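/* For instance, with !optimize_size a constant shift by 2 may become two
   self-adds when 2 * ix86_cost->add <= ix86_cost->shift_const; this is a
   heuristic based on the processor cost tables, not an exact size rule.  */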
11326 static void
11327 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
11329 if (count == 1)
11331 emit_insn ((mode == DImode
11332 ? gen_addsi3
11333 : gen_adddi3) (operand, operand, operand));
11335 else if (!optimize_size
11336 && count * ix86_cost->add <= ix86_cost->shift_const)
11338 int i;
11339 for (i=0; i<count; i++)
11341 emit_insn ((mode == DImode
11342 ? gen_addsi3
11343 : gen_adddi3) (operand, operand, operand));
11346 else
11347 emit_insn ((mode == DImode
11348 ? gen_ashlsi3
11349 : gen_ashldi3) (operand, operand, GEN_INT (count)));
11352 void
11353 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
11355 rtx low[2], high[2];
11356 int count;
11357 const int single_width = mode == DImode ? 32 : 64;
11359 if (GET_CODE (operands[2]) == CONST_INT)
11361 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11362 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11364 if (count >= single_width)
11366 emit_move_insn (high[0], low[1]);
11367 emit_move_insn (low[0], const0_rtx);
11369 if (count > single_width)
11370 ix86_expand_ashl_const (high[0], count - single_width, mode);
11372 else
11374 if (!rtx_equal_p (operands[0], operands[1]))
11375 emit_move_insn (operands[0], operands[1]);
11376 emit_insn ((mode == DImode
11377 ? gen_x86_shld_1
11378 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
11379 ix86_expand_ashl_const (low[0], count, mode);
11381 return;
11384 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11386 if (operands[1] == const1_rtx)
11388 /* Assuming we've chosen QImode-capable registers, 1 << N
11389 can be done with two 32/64-bit shifts, no branches, no cmoves. */
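/* Roughly: clear both halves, use bit 5 (bit 6 for TImode) of the count to
   set either the low or the high half to 1, then shift both halves by the
   count; the hardware only uses the count modulo 32 (or 64), so exactly one
   half ends up holding 1 << (count % width) (informal description).  */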
11390 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11392 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11394 ix86_expand_clear (low[0]);
11395 ix86_expand_clear (high[0]);
11396 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
11398 d = gen_lowpart (QImode, low[0]);
11399 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11400 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11401 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11403 d = gen_lowpart (QImode, high[0]);
11404 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11405 s = gen_rtx_NE (QImode, flags, const0_rtx);
11406 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11409 /* Otherwise, we can get the same results by manually performing
11410 a bit extract operation on bit 5/6, and then performing the two
11411 shifts. The two methods of getting 0/1 into low/high are exactly
11412 the same size. Avoiding the shift in the bit extract case helps
11413 pentium4 a bit; no one else seems to care much either way. */
11414 else
11416 rtx x;
11418 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11419 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
11420 else
11421 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
11422 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11424 emit_insn ((mode == DImode
11425 ? gen_lshrsi3
11426 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
11427 emit_insn ((mode == DImode
11428 ? gen_andsi3
11429 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
11430 emit_move_insn (low[0], high[0]);
11431 emit_insn ((mode == DImode
11432 ? gen_xorsi3
11433 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
11436 emit_insn ((mode == DImode
11437 ? gen_ashlsi3
11438 : gen_ashldi3) (low[0], low[0], operands[2]));
11439 emit_insn ((mode == DImode
11440 ? gen_ashlsi3
11441 : gen_ashldi3) (high[0], high[0], operands[2]));
11442 return;
11445 if (operands[1] == constm1_rtx)
11447 /* For -1 << N, we can avoid the shld instruction, because we
11448 know that we're shifting 0...31/63 ones into a -1. */
11449 emit_move_insn (low[0], constm1_rtx);
11450 if (optimize_size)
11451 emit_move_insn (high[0], low[0]);
11452 else
11453 emit_move_insn (high[0], constm1_rtx);
11455 else
11457 if (!rtx_equal_p (operands[0], operands[1]))
11458 emit_move_insn (operands[0], operands[1]);
11460 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11461 emit_insn ((mode == DImode
11462 ? gen_x86_shld_1
11463 : gen_x86_64_shld) (high[0], low[0], operands[2]));
11466 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
11468 if (TARGET_CMOVE && scratch)
11470 ix86_expand_clear (scratch);
11471 emit_insn ((mode == DImode
11472 ? gen_x86_shift_adj_1
11473 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
11475 else
11476 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
11479 void
11480 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
11482 rtx low[2], high[2];
11483 int count;
11484 const int single_width = mode == DImode ? 32 : 64;
11486 if (GET_CODE (operands[2]) == CONST_INT)
11488 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11489 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11491 if (count == single_width * 2 - 1)
11493 emit_move_insn (high[0], high[1]);
11494 emit_insn ((mode == DImode
11495 ? gen_ashrsi3
11496 : gen_ashrdi3) (high[0], high[0],
11497 GEN_INT (single_width - 1)));
11498 emit_move_insn (low[0], high[0]);
11501 else if (count >= single_width)
11503 emit_move_insn (low[0], high[1]);
11504 emit_move_insn (high[0], low[0]);
11505 emit_insn ((mode == DImode
11506 ? gen_ashrsi3
11507 : gen_ashrdi3) (high[0], high[0],
11508 GEN_INT (single_width - 1)));
11509 if (count > single_width)
11510 emit_insn ((mode == DImode
11511 ? gen_ashrsi3
11512 : gen_ashrdi3) (low[0], low[0],
11513 GEN_INT (count - single_width)));
11515 else
11517 if (!rtx_equal_p (operands[0], operands[1]))
11518 emit_move_insn (operands[0], operands[1]);
11519 emit_insn ((mode == DImode
11520 ? gen_x86_shrd_1
11521 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11522 emit_insn ((mode == DImode
11523 ? gen_ashrsi3
11524 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
11527 else
11529 if (!rtx_equal_p (operands[0], operands[1]))
11530 emit_move_insn (operands[0], operands[1]);
11532 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11534 emit_insn ((mode == DImode
11535 ? gen_x86_shrd_1
11536 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11537 emit_insn ((mode == DImode
11538 ? gen_ashrsi3
11539 : gen_ashrdi3) (high[0], high[0], operands[2]));
11541 if (TARGET_CMOVE && scratch)
11543 emit_move_insn (scratch, high[0]);
11544 emit_insn ((mode == DImode
11545 ? gen_ashrsi3
11546 : gen_ashrdi3) (scratch, scratch,
11547 GEN_INT (single_width - 1)));
11548 emit_insn ((mode == DImode
11549 ? gen_x86_shift_adj_1
11550 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11551 scratch));
11553 else
11554 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
11558 void
11559 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
11561 rtx low[2], high[2];
11562 int count;
11563 const int single_width = mode == DImode ? 32 : 64;
11565 if (GET_CODE (operands[2]) == CONST_INT)
11567 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11568 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11570 if (count >= single_width)
11572 emit_move_insn (low[0], high[1]);
11573 ix86_expand_clear (high[0]);
11575 if (count > single_width)
11576 emit_insn ((mode == DImode
11577 ? gen_lshrsi3
11578 : gen_lshrdi3) (low[0], low[0],
11579 GEN_INT (count - single_width)));
11581 else
11583 if (!rtx_equal_p (operands[0], operands[1]))
11584 emit_move_insn (operands[0], operands[1]);
11585 emit_insn ((mode == DImode
11586 ? gen_x86_shrd_1
11587 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11588 emit_insn ((mode == DImode
11589 ? gen_lshrsi3
11590 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
11593 else
11595 if (!rtx_equal_p (operands[0], operands[1]))
11596 emit_move_insn (operands[0], operands[1]);
11598 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11600 emit_insn ((mode == DImode
11601 ? gen_x86_shrd_1
11602 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11603 emit_insn ((mode == DImode
11604 ? gen_lshrsi3
11605 : gen_lshrdi3) (high[0], high[0], operands[2]));
11607 /* Heh. By reversing the arguments, we can reuse this pattern. */
11608 if (TARGET_CMOVE && scratch)
11610 ix86_expand_clear (scratch);
11611 emit_insn ((mode == DImode
11612 ? gen_x86_shift_adj_1
11613 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11614 scratch));
11616 else
11617 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
11621 /* Helper function for the string operations below. Test VARIABLE for whether
11622 it is aligned to VALUE bytes. If so, jump to the returned label. */
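/* Typical usage pattern (sketch): label = ix86_expand_aligntest (destreg, 1);
   emit the one-byte fix-up copy; emit_label (label); so the fix-up is
   skipped when the address is already aligned, as done in the movmem
   expander below.  */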
11623 static rtx
11624 ix86_expand_aligntest (rtx variable, int value)
11626 rtx label = gen_label_rtx ();
11627 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11628 if (GET_MODE (variable) == DImode)
11629 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11630 else
11631 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11632 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
11633 1, label);
11634 return label;
11637 /* Adjust COUNTER by the VALUE. */
11638 static void
11639 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
11641 if (GET_MODE (countreg) == DImode)
11642 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
11643 else
11644 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
11647 /* Zero extend a possibly SImode EXP to a Pmode register. */
11649 ix86_zero_extend_to_Pmode (rtx exp)
11651 rtx r;
11652 if (GET_MODE (exp) == VOIDmode)
11653 return force_reg (Pmode, exp);
11654 if (GET_MODE (exp) == Pmode)
11655 return copy_to_mode_reg (Pmode, exp);
11656 r = gen_reg_rtx (Pmode);
11657 emit_insn (gen_zero_extendsidi2 (r, exp));
11658 return r;
11661 /* Expand string move (memcpy) operation. Use i386 string operations when
11662 profitable. expand_clrmem contains similar code. */
11664 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
11666 rtx srcreg, destreg, countreg, srcexp, destexp;
11667 enum machine_mode counter_mode;
11668 HOST_WIDE_INT align = 0;
11669 unsigned HOST_WIDE_INT count = 0;
11671 if (GET_CODE (align_exp) == CONST_INT)
11672 align = INTVAL (align_exp);
11674 /* Can't use any of this if the user has appropriated esi or edi. */
11675 if (global_regs[4] || global_regs[5])
11676 return 0;
11678 /* This simple hack avoids all inlining code and simplifies code below. */
11679 if (!TARGET_ALIGN_STRINGOPS)
11680 align = 64;
11682 if (GET_CODE (count_exp) == CONST_INT)
11684 count = INTVAL (count_exp);
11685 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11686 return 0;
11689 /* Figure out the proper mode for the counter. For 32 bits it is always SImode;
11690 for 64 bits use SImode when possible, otherwise DImode.
11691 Set count to the number of bytes copied when known at compile time. */
11692 if (!TARGET_64BIT
11693 || GET_MODE (count_exp) == SImode
11694 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11695 counter_mode = SImode;
11696 else
11697 counter_mode = DImode;
11699 gcc_assert (counter_mode == SImode || counter_mode == DImode);
11701 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11702 if (destreg != XEXP (dst, 0))
11703 dst = replace_equiv_address_nv (dst, destreg);
11704 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11705 if (srcreg != XEXP (src, 0))
11706 src = replace_equiv_address_nv (src, srcreg);
11708 /* When optimizing for size emit simple rep ; movsb instruction for
11709 counts not divisible by 4, except when (movsl;)*(movsw;)?(movsb;)?
11710 sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
11711 Size of the (movsl;)*(movsw;)?(movsb;)? sequence is
11712 count / 4 + (count & 3), the other sequence is either 4 or 7 bytes,
11713 but we don't know whether the upper 24 (resp. 56) bits of %ecx will be
11714 known to be zero or not. The rep; movsb sequence causes higher
11715 register pressure though, so take that into account. */
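/* Illustrative arithmetic: for a compile-time count of 7 the unrolled form
   costs 7/4 + (7 & 3) = 4 string moves, so the "(count & 3) + count / 4 > 6"
   test below keeps such small residues out of the rep movsb path when
   optimizing for size.  */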
11717 if ((!optimize || optimize_size)
11718 && (count == 0
11719 || ((count & 0x03)
11720 && (!optimize_size
11721 || count > 5 * 4
11722 || (count & 3) + count / 4 > 6))))
11724 emit_insn (gen_cld ());
11725 countreg = ix86_zero_extend_to_Pmode (count_exp);
11726 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11727 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
11728 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
11729 destexp, srcexp));
11732 /* For constant aligned (or small unaligned) copies use rep movsl
11733 followed by code copying the rest. For PentiumPro ensure 8 byte
11734 alignment to allow rep movsl acceleration. */
11736 else if (count != 0
11737 && (align >= 8
11738 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11739 || optimize_size || count < (unsigned int) 64))
11741 unsigned HOST_WIDE_INT offset = 0;
11742 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11743 rtx srcmem, dstmem;
11745 emit_insn (gen_cld ());
11746 if (count & ~(size - 1))
11748 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
11750 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
11752 while (offset < (count & ~(size - 1)))
11754 srcmem = adjust_automodify_address_nv (src, movs_mode,
11755 srcreg, offset);
11756 dstmem = adjust_automodify_address_nv (dst, movs_mode,
11757 destreg, offset);
11758 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11759 offset += size;
11762 else
11764 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
11765 & (TARGET_64BIT ? -1 : 0x3fffffff));
11766 countreg = copy_to_mode_reg (counter_mode, countreg);
11767 countreg = ix86_zero_extend_to_Pmode (countreg);
11769 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11770 GEN_INT (size == 4 ? 2 : 3));
11771 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11772 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11774 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11775 countreg, destexp, srcexp));
11776 offset = count & ~(size - 1);
11779 if (size == 8 && (count & 0x04))
11781 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
11782 offset);
11783 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
11784 offset);
11785 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11786 offset += 4;
11788 if (count & 0x02)
11790 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
11791 offset);
11792 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
11793 offset);
11794 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11795 offset += 2;
11797 if (count & 0x01)
11799 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
11800 offset);
11801 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
11802 offset);
11803 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11806 /* The generic code based on the glibc implementation:
11807 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
11808 allowing accelerated copying there)
11809 - copy the data using rep movsl
11810 - copy the rest. */
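/* Editorial outline of the code that follows (a reading aid only): when
   COUNT is unknown, compare it against the desired alignment and skip the
   alignment fix-ups for tiny copies; use single byte/word/dword moves plus
   ix86_adjust_counter to align the destination; shift the counter right by
   2 (or 3 on 64-bit) and emit rep; movsl (movsq); finally copy the
   remaining 4/2/1 bytes, either unconditionally when COUNT is known or
   behind ix86_expand_aligntest otherwise. */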
11811 else
11813 rtx countreg2;
11814 rtx label = NULL;
11815 rtx srcmem, dstmem;
11816 int desired_alignment = (TARGET_PENTIUMPRO
11817 && (count == 0 || count >= (unsigned int) 260)
11818 ? 8 : UNITS_PER_WORD);
11819 /* Get rid of MEM_OFFSETs, they won't be accurate. */
11820 dst = change_address (dst, BLKmode, destreg);
11821 src = change_address (src, BLKmode, srcreg);
11823 /* In case we don't know anything about the alignment, default to
11824 the library version, since it is usually equally fast and results in
11825 shorter code.
11827 Also emit call when we know that the count is large and call overhead
11828 will not be important. */
11829 if (!TARGET_INLINE_ALL_STRINGOPS
11830 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11831 return 0;
11833 if (TARGET_SINGLE_STRINGOP)
11834 emit_insn (gen_cld ());
11836 countreg2 = gen_reg_rtx (Pmode);
11837 countreg = copy_to_mode_reg (counter_mode, count_exp);
11839 /* We don't use loops to align destination and to copy parts smaller
11840 than 4 bytes, because gcc is able to optimize such code better (in
11841 the case the destination or the count really is aligned, gcc is often
11842 able to predict the branches) and also it is friendlier to the
11843 hardware branch prediction.
11845 Using loops is beneficial for the generic case, because we can
11846 handle small counts using the loops. Many CPUs (such as Athlon)
11847 have large REP prefix setup costs.
11849 This is quite costly. Maybe we can revisit this decision later or
11850 add some customizability to this code. */
11852 if (count == 0 && align < desired_alignment)
11854 label = gen_label_rtx ();
11855 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11856 LEU, 0, counter_mode, 1, label);
11858 if (align <= 1)
11860 rtx label = ix86_expand_aligntest (destreg, 1);
11861 srcmem = change_address (src, QImode, srcreg);
11862 dstmem = change_address (dst, QImode, destreg);
11863 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11864 ix86_adjust_counter (countreg, 1);
11865 emit_label (label);
11866 LABEL_NUSES (label) = 1;
11868 if (align <= 2)
11870 rtx label = ix86_expand_aligntest (destreg, 2);
11871 srcmem = change_address (src, HImode, srcreg);
11872 dstmem = change_address (dst, HImode, destreg);
11873 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11874 ix86_adjust_counter (countreg, 2);
11875 emit_label (label);
11876 LABEL_NUSES (label) = 1;
11878 if (align <= 4 && desired_alignment > 4)
11880 rtx label = ix86_expand_aligntest (destreg, 4);
11881 srcmem = change_address (src, SImode, srcreg);
11882 dstmem = change_address (dst, SImode, destreg);
11883 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11884 ix86_adjust_counter (countreg, 4);
11885 emit_label (label);
11886 LABEL_NUSES (label) = 1;
11889 if (label && desired_alignment > 4 && !TARGET_64BIT)
11891 emit_label (label);
11892 LABEL_NUSES (label) = 1;
11893 label = NULL_RTX;
11895 if (!TARGET_SINGLE_STRINGOP)
11896 emit_insn (gen_cld ());
11897 if (TARGET_64BIT)
11899 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11900 GEN_INT (3)));
11901 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11903 else
11905 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11906 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11908 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11909 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11910 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11911 countreg2, destexp, srcexp));
11913 if (label)
11915 emit_label (label);
11916 LABEL_NUSES (label) = 1;
11918 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11920 srcmem = change_address (src, SImode, srcreg);
11921 dstmem = change_address (dst, SImode, destreg);
11922 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11924 if ((align <= 4 || count == 0) && TARGET_64BIT)
11926 rtx label = ix86_expand_aligntest (countreg, 4);
11927 srcmem = change_address (src, SImode, srcreg);
11928 dstmem = change_address (dst, SImode, destreg);
11929 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11930 emit_label (label);
11931 LABEL_NUSES (label) = 1;
11933 if (align > 2 && count != 0 && (count & 2))
11935 srcmem = change_address (src, HImode, srcreg);
11936 dstmem = change_address (dst, HImode, destreg);
11937 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11939 if (align <= 2 || count == 0)
11941 rtx label = ix86_expand_aligntest (countreg, 2);
11942 srcmem = change_address (src, HImode, srcreg);
11943 dstmem = change_address (dst, HImode, destreg);
11944 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11945 emit_label (label);
11946 LABEL_NUSES (label) = 1;
11948 if (align > 1 && count != 0 && (count & 1))
11950 srcmem = change_address (src, QImode, srcreg);
11951 dstmem = change_address (dst, QImode, destreg);
11952 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11954 if (align <= 1 || count == 0)
11956 rtx label = ix86_expand_aligntest (countreg, 1);
11957 srcmem = change_address (src, QImode, srcreg);
11958 dstmem = change_address (dst, QImode, destreg);
11959 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11960 emit_label (label);
11961 LABEL_NUSES (label) = 1;
11965 return 1;
11968 /* Expand string clear operation (bzero). Use i386 string operations when
11969 profitable. expand_movmem contains similar code. */
11971 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
11973 rtx destreg, zeroreg, countreg, destexp;
11974 enum machine_mode counter_mode;
11975 HOST_WIDE_INT align = 0;
11976 unsigned HOST_WIDE_INT count = 0;
11978 if (GET_CODE (align_exp) == CONST_INT)
11979 align = INTVAL (align_exp);
11981 /* Can't use any of this if the user has appropriated esi. */
11982 if (global_regs[4])
11983 return 0;
11985 /* This simple hack avoids all inlining code and simplifies code below. */
11986 if (!TARGET_ALIGN_STRINGOPS)
11987 align = 32;
11989 if (GET_CODE (count_exp) == CONST_INT)
11991 count = INTVAL (count_exp);
11992 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11993 return 0;
11995 /* Figure out the proper mode for the counter. For 32 bits it is always SImode,
11996 for 64 bits use SImode when possible, otherwise DImode.
11997 Set count to number of bytes copied when known at compile time. */
11998 if (!TARGET_64BIT
11999 || GET_MODE (count_exp) == SImode
12000 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
12001 counter_mode = SImode;
12002 else
12003 counter_mode = DImode;
12005 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
12006 if (destreg != XEXP (dst, 0))
12007 dst = replace_equiv_address_nv (dst, destreg);
12010 /* When optimizing for size emit simple rep ; stosb instruction for
12011 counts not divisible by 4. The movl $N, %ecx; rep; stosb
12012 sequence is 7 bytes long, so if optimizing for size and count is
12013 small enough that some stosl, stosw and stosb instructions without
12014 rep are shorter, fall back into the next if. */
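/* Editorial worked example (assumed counts, for the size test below): the
   rep; stosb form costs 5 bytes for mov $count, %ecx plus 2 bytes for
   rep; stosb.  The test (count & 3) + (count >> 2) > 7 roughly counts the
   competing stosl/stosw/stosb instructions, so count = 7 gives 3 + 1 = 4
   and stays inline, while count = 30 gives 2 + 7 = 9 and takes the
   rep; stosb path. */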
12016 if ((!optimize || optimize_size)
12017 && (count == 0
12018 || ((count & 0x03)
12019 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
12021 emit_insn (gen_cld ());
12023 countreg = ix86_zero_extend_to_Pmode (count_exp);
12024 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
12025 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
12026 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
12028 else if (count != 0
12029 && (align >= 8
12030 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12031 || optimize_size || count < (unsigned int) 64))
12033 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12034 unsigned HOST_WIDE_INT offset = 0;
12036 emit_insn (gen_cld ());
12038 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
12039 if (count & ~(size - 1))
12041 unsigned HOST_WIDE_INT repcount;
12042 unsigned int max_nonrep;
12044 repcount = count >> (size == 4 ? 2 : 3);
12045 if (!TARGET_64BIT)
12046 repcount &= 0x3fffffff;
12048 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
12049 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
12050 bytes. In both cases the latter seems to be faster for small
12051 values of N. */
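/* Editorial note (assumed byte counts, consistent with the comment above):
   mov $N, %ecx is 5 bytes, rep; stosl adds 2 and rep; stosq adds 3, while
   each inline stosl is 1 byte and each inline stosq is 2 (REX prefix).
   Hence max_nonrep of 7 for stosl and 4 for stosq below keeps the inline
   form whenever it is no larger, except on Pentium 4 / Nocona where the
   cap drops to 3 when not optimizing for size. */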
12052 max_nonrep = size == 4 ? 7 : 4;
12053 if (!optimize_size)
12054 switch (ix86_tune)
12056 case PROCESSOR_PENTIUM4:
12057 case PROCESSOR_NOCONA:
12058 max_nonrep = 3;
12059 break;
12060 default:
12061 break;
12064 if (repcount <= max_nonrep)
12065 while (repcount-- > 0)
12067 rtx mem = adjust_automodify_address_nv (dst,
12068 GET_MODE (zeroreg),
12069 destreg, offset);
12070 emit_insn (gen_strset (destreg, mem, zeroreg));
12071 offset += size;
12073 else
12075 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
12076 countreg = ix86_zero_extend_to_Pmode (countreg);
12077 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12078 GEN_INT (size == 4 ? 2 : 3));
12079 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12080 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
12081 destexp));
12082 offset = count & ~(size - 1);
12085 if (size == 8 && (count & 0x04))
12087 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
12088 offset);
12089 emit_insn (gen_strset (destreg, mem,
12090 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12091 offset += 4;
12093 if (count & 0x02)
12095 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
12096 offset);
12097 emit_insn (gen_strset (destreg, mem,
12098 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12099 offset += 2;
12101 if (count & 0x01)
12103 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
12104 offset);
12105 emit_insn (gen_strset (destreg, mem,
12106 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12109 else
12111 rtx countreg2;
12112 rtx label = NULL;
12113 /* Compute desired alignment of the string operation. */
12114 int desired_alignment = (TARGET_PENTIUMPRO
12115 && (count == 0 || count >= (unsigned int) 260)
12116 ? 8 : UNITS_PER_WORD);
12118 /* In case we don't know anything about the alignment, default to
12119 the library version, since it is usually equally fast and results in
12120 shorter code.
12122 Also emit call when we know that the count is large and call overhead
12123 will not be important. */
12124 if (!TARGET_INLINE_ALL_STRINGOPS
12125 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12126 return 0;
12128 if (TARGET_SINGLE_STRINGOP)
12129 emit_insn (gen_cld ());
12131 countreg2 = gen_reg_rtx (Pmode);
12132 countreg = copy_to_mode_reg (counter_mode, count_exp);
12133 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
12134 /* Get rid of MEM_OFFSET, it won't be accurate. */
12135 dst = change_address (dst, BLKmode, destreg);
12137 if (count == 0 && align < desired_alignment)
12139 label = gen_label_rtx ();
12140 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12141 LEU, 0, counter_mode, 1, label);
12143 if (align <= 1)
12145 rtx label = ix86_expand_aligntest (destreg, 1);
12146 emit_insn (gen_strset (destreg, dst,
12147 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12148 ix86_adjust_counter (countreg, 1);
12149 emit_label (label);
12150 LABEL_NUSES (label) = 1;
12152 if (align <= 2)
12154 rtx label = ix86_expand_aligntest (destreg, 2);
12155 emit_insn (gen_strset (destreg, dst,
12156 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12157 ix86_adjust_counter (countreg, 2);
12158 emit_label (label);
12159 LABEL_NUSES (label) = 1;
12161 if (align <= 4 && desired_alignment > 4)
12163 rtx label = ix86_expand_aligntest (destreg, 4);
12164 emit_insn (gen_strset (destreg, dst,
12165 (TARGET_64BIT
12166 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
12167 : zeroreg)));
12168 ix86_adjust_counter (countreg, 4);
12169 emit_label (label);
12170 LABEL_NUSES (label) = 1;
12173 if (label && desired_alignment > 4 && !TARGET_64BIT)
12175 emit_label (label);
12176 LABEL_NUSES (label) = 1;
12177 label = NULL_RTX;
12180 if (!TARGET_SINGLE_STRINGOP)
12181 emit_insn (gen_cld ());
12182 if (TARGET_64BIT)
12184 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12185 GEN_INT (3)));
12186 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12188 else
12190 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12191 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12193 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12194 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
12196 if (label)
12198 emit_label (label);
12199 LABEL_NUSES (label) = 1;
12202 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12203 emit_insn (gen_strset (destreg, dst,
12204 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12205 if (TARGET_64BIT && (align <= 4 || count == 0))
12207 rtx label = ix86_expand_aligntest (countreg, 4);
12208 emit_insn (gen_strset (destreg, dst,
12209 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12210 emit_label (label);
12211 LABEL_NUSES (label) = 1;
12213 if (align > 2 && count != 0 && (count & 2))
12214 emit_insn (gen_strset (destreg, dst,
12215 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12216 if (align <= 2 || count == 0)
12218 rtx label = ix86_expand_aligntest (countreg, 2);
12219 emit_insn (gen_strset (destreg, dst,
12220 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12221 emit_label (label);
12222 LABEL_NUSES (label) = 1;
12224 if (align > 1 && count != 0 && (count & 1))
12225 emit_insn (gen_strset (destreg, dst,
12226 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12227 if (align <= 1 || count == 0)
12229 rtx label = ix86_expand_aligntest (countreg, 1);
12230 emit_insn (gen_strset (destreg, dst,
12231 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12232 emit_label (label);
12233 LABEL_NUSES (label) = 1;
12236 return 1;
12239 /* Expand strlen. */
12241 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
12243 rtx addr, scratch1, scratch2, scratch3, scratch4;
12245 /* The generic case of the strlen expander is long. Avoid its
12246 expansion unless TARGET_INLINE_ALL_STRINGOPS.
12248 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12249 && !TARGET_INLINE_ALL_STRINGOPS
12250 && !optimize_size
12251 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
12252 return 0;
12254 addr = force_reg (Pmode, XEXP (src, 0));
12255 scratch1 = gen_reg_rtx (Pmode);
12257 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12258 && !optimize_size)
12260 /* Well it seems that some optimizer does not combine a call like
12261 foo(strlen(bar), strlen(bar));
12262 when the move and the subtraction are done here. It does calculate
12263 the length just once when these instructions are done inside of
12264 output_strlen_unroll(). But I think since &bar[strlen(bar)] is
12265 often used and I use one fewer register for the lifetime of
12266 output_strlen_unroll(), this is better. */
12268 emit_move_insn (out, addr);
12270 ix86_expand_strlensi_unroll_1 (out, src, align);
12272 /* strlensi_unroll_1 returns the address of the zero at the end of
12273 the string, like memchr(), so compute the length by subtracting
12274 the start address. */
12275 if (TARGET_64BIT)
12276 emit_insn (gen_subdi3 (out, out, addr));
12277 else
12278 emit_insn (gen_subsi3 (out, out, addr));
12280 else
12282 rtx unspec;
12283 scratch2 = gen_reg_rtx (Pmode);
12284 scratch3 = gen_reg_rtx (Pmode);
12285 scratch4 = force_reg (Pmode, constm1_rtx);
12287 emit_move_insn (scratch3, addr);
12288 eoschar = force_reg (QImode, eoschar);
12290 emit_insn (gen_cld ());
12291 src = replace_equiv_address_nv (src, scratch3);
12293 /* If .md starts supporting :P, this can be done in .md. */
12294 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
12295 scratch4), UNSPEC_SCAS);
12296 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
12297 if (TARGET_64BIT)
12299 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
12300 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
12302 else
12304 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
12305 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
12308 return 1;
12311 /* Expand the appropriate insns for doing strlen if not just doing
12312 repnz; scasb
12314 out = result, initialized with the start address
12315 align_rtx = alignment of the address.
12316 scratch = scratch register, initialized with the start address when
12317 not aligned, otherwise undefined
12319 This is just the body. It needs the initializations mentioned above and
12320 some address computing at the end. These things are done in i386.md. */
12322 static void
12323 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
12325 int align;
12326 rtx tmp;
12327 rtx align_2_label = NULL_RTX;
12328 rtx align_3_label = NULL_RTX;
12329 rtx align_4_label = gen_label_rtx ();
12330 rtx end_0_label = gen_label_rtx ();
12331 rtx mem;
12332 rtx tmpreg = gen_reg_rtx (SImode);
12333 rtx scratch = gen_reg_rtx (SImode);
12334 rtx cmp;
12336 align = 0;
12337 if (GET_CODE (align_rtx) == CONST_INT)
12338 align = INTVAL (align_rtx);
12340 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12342 /* Is there a known alignment and is it less than 4? */
12343 if (align < 4)
12345 rtx scratch1 = gen_reg_rtx (Pmode);
12346 emit_move_insn (scratch1, out);
12347 /* Is there a known alignment and is it not 2? */
12348 if (align != 2)
12350 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
12351 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
12353 /* Leave just the 3 lower bits. */
12354 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12355 NULL_RTX, 0, OPTAB_WIDEN);
12357 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12358 Pmode, 1, align_4_label);
12359 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12360 Pmode, 1, align_2_label);
12361 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12362 Pmode, 1, align_3_label);
12364 else
12366 /* Since the alignment is 2, we have to check 2 or 0 bytes;
12367 check whether it is aligned to a 4-byte boundary. */
12369 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12370 NULL_RTX, 0, OPTAB_WIDEN);
12372 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12373 Pmode, 1, align_4_label);
12376 mem = change_address (src, QImode, out);
12378 /* Now compare the bytes. */
12380 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
12381 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12382 QImode, 1, end_0_label);
12384 /* Increment the address. */
12385 if (TARGET_64BIT)
12386 emit_insn (gen_adddi3 (out, out, const1_rtx));
12387 else
12388 emit_insn (gen_addsi3 (out, out, const1_rtx));
12390 /* Not needed with an alignment of 2 */
12391 if (align != 2)
12393 emit_label (align_2_label);
12395 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12396 end_0_label);
12398 if (TARGET_64BIT)
12399 emit_insn (gen_adddi3 (out, out, const1_rtx));
12400 else
12401 emit_insn (gen_addsi3 (out, out, const1_rtx));
12403 emit_label (align_3_label);
12406 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12407 end_0_label);
12409 if (TARGET_64BIT)
12410 emit_insn (gen_adddi3 (out, out, const1_rtx));
12411 else
12412 emit_insn (gen_addsi3 (out, out, const1_rtx));
12415 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
12416 align this loop; doing so only enlarges the program and does not
12417 speed it up. */
12418 emit_label (align_4_label);
12420 mem = change_address (src, SImode, out);
12421 emit_move_insn (scratch, mem);
12422 if (TARGET_64BIT)
12423 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12424 else
12425 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
12427 /* This formula yields a nonzero result iff one of the bytes is zero.
12428 This saves three branches inside the loop and many cycles. */
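/* Editorial worked example (assumed word value, to make the bit trick
   concrete): the insns below compute
       ((X - 0x01010101) & ~X) & 0x80808080
   for the word X just loaded.  With X = 0x41004242 the subtraction turns
   the zero byte into 0xff, ~X keeps its top bit set, and the masks leave
   0x00800000, so the code falls through to the fix-up sequence; with no
   zero byte the result is 0 and the jump returns to align_4_label. */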
12430 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12431 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12432 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12433 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12434 gen_int_mode (0x80808080, SImode)));
12435 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12436 align_4_label);
12438 if (TARGET_CMOVE)
12440 rtx reg = gen_reg_rtx (SImode);
12441 rtx reg2 = gen_reg_rtx (Pmode);
12442 emit_move_insn (reg, tmpreg);
12443 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12445 /* If zero is not in the first two bytes, move two bytes forward. */
12446 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12447 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12448 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12449 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12450 gen_rtx_IF_THEN_ELSE (SImode, tmp,
12451 reg,
12452 tmpreg)));
12453 /* Emit lea manually to avoid clobbering of flags. */
12454 emit_insn (gen_rtx_SET (SImode, reg2,
12455 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12457 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12458 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12459 emit_insn (gen_rtx_SET (VOIDmode, out,
12460 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12461 reg2,
12462 out)));
12465 else
12467 rtx end_2_label = gen_label_rtx ();
12468 /* Is zero in the first two bytes? */
12470 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12471 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12472 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12473 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12474 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12475 pc_rtx);
12476 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12477 JUMP_LABEL (tmp) = end_2_label;
12479 /* Not in the first two. Move two bytes forward. */
12480 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12481 if (TARGET_64BIT)
12482 emit_insn (gen_adddi3 (out, out, const2_rtx));
12483 else
12484 emit_insn (gen_addsi3 (out, out, const2_rtx));
12486 emit_label (end_2_label);
12490 /* Avoid branch in fixing the byte. */
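/* Editorial note (assumed reading of the sequence below): at this point
   OUT is four bytes past the two-byte group that holds the terminator,
   and bit 7 of the low byte of TMPREG is set exactly when the first byte
   of that group is the zero.  Adding the byte to itself copies that bit
   into the carry flag, so the subtract-with-borrow computes OUT - 3 - CF
   and lands on the zero byte without any branch. */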
12491 tmpreg = gen_lowpart (QImode, tmpreg);
12492 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
12493 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
12494 if (TARGET_64BIT)
12495 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12496 else
12497 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
12499 emit_label (end_0_label);
12502 void
12503 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12504 rtx callarg2 ATTRIBUTE_UNUSED,
12505 rtx pop, int sibcall)
12507 rtx use = NULL, call;
12509 if (pop == const0_rtx)
12510 pop = NULL;
12511 gcc_assert (!TARGET_64BIT || !pop);
12513 #if TARGET_MACHO
12514 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12515 fnaddr = machopic_indirect_call_target (fnaddr);
12516 #else
12517 /* Static functions and indirect calls don't need the pic register. */
12518 if (! TARGET_64BIT && flag_pic
12519 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12520 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12521 use_reg (&use, pic_offset_table_rtx);
12523 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12525 rtx al = gen_rtx_REG (QImode, 0);
12526 emit_move_insn (al, callarg2);
12527 use_reg (&use, al);
12529 #endif /* TARGET_MACHO */
12531 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12533 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12534 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12536 if (sibcall && TARGET_64BIT
12537 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12539 rtx addr;
12540 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12541 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12542 emit_move_insn (fnaddr, addr);
12543 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12546 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12547 if (retval)
12548 call = gen_rtx_SET (VOIDmode, retval, call);
12549 if (pop)
12551 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12552 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12553 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
12556 call = emit_call_insn (call);
12557 if (use)
12558 CALL_INSN_FUNCTION_USAGE (call) = use;
12562 /* Clear stack slot assignments remembered from previous functions.
12563 This is called from INIT_EXPANDERS once before RTL is emitted for each
12564 function. */
12566 static struct machine_function *
12567 ix86_init_machine_status (void)
12569 struct machine_function *f;
12571 f = ggc_alloc_cleared (sizeof (struct machine_function));
12572 f->use_fast_prologue_epilogue_nregs = -1;
12574 return f;
12577 /* Return a MEM corresponding to a stack slot with mode MODE.
12578 Allocate a new slot if necessary.
12580 The RTL for a function can have several slots available: N is
12581 which slot to use. */
12584 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12586 struct stack_local_entry *s;
12588 gcc_assert (n < MAX_386_STACK_LOCALS);
12590 for (s = ix86_stack_locals; s; s = s->next)
12591 if (s->mode == mode && s->n == n)
12592 return s->rtl;
12594 s = (struct stack_local_entry *)
12595 ggc_alloc (sizeof (struct stack_local_entry));
12596 s->n = n;
12597 s->mode = mode;
12598 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12600 s->next = ix86_stack_locals;
12601 ix86_stack_locals = s;
12602 return s->rtl;
12605 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12607 static GTY(()) rtx ix86_tls_symbol;
12609 ix86_tls_get_addr (void)
12612 if (!ix86_tls_symbol)
12614 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12615 (TARGET_GNU_TLS && !TARGET_64BIT)
12616 ? "___tls_get_addr"
12617 : "__tls_get_addr");
12620 return ix86_tls_symbol;
12623 /* Calculate the length of the memory address in the instruction
12624 encoding. Does not include the one-byte modrm, opcode, or prefix. */
12627 memory_address_length (rtx addr)
12629 struct ix86_address parts;
12630 rtx base, index, disp;
12631 int len;
12632 int ok;
12634 if (GET_CODE (addr) == PRE_DEC
12635 || GET_CODE (addr) == POST_INC
12636 || GET_CODE (addr) == PRE_MODIFY
12637 || GET_CODE (addr) == POST_MODIFY)
12638 return 0;
12640 ok = ix86_decompose_address (addr, &parts);
12641 gcc_assert (ok);
12643 if (parts.base && GET_CODE (parts.base) == SUBREG)
12644 parts.base = SUBREG_REG (parts.base);
12645 if (parts.index && GET_CODE (parts.index) == SUBREG)
12646 parts.index = SUBREG_REG (parts.index);
12648 base = parts.base;
12649 index = parts.index;
12650 disp = parts.disp;
12651 len = 0;
12653 /* Rule of thumb:
12654 - esp as the base always wants an index,
12655 - ebp as the base always wants a displacement. */
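/* Editorial examples (assumed encodings; the lengths are the extra bytes
   on top of the one-byte modrm counted by this function): (%eax) adds 0,
   (%esp) adds a SIB byte (1), 8(%ebp) adds a disp8 (1), a bare symbol
   address adds a disp32 (4), and 8(%eax,%ebx,4) adds SIB + disp8 (2). */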
12657 /* Register Indirect. */
12658 if (base && !index && !disp)
12660 /* esp (for its index) and ebp (for its displacement) need
12661 the two-byte modrm form. */
12662 if (addr == stack_pointer_rtx
12663 || addr == arg_pointer_rtx
12664 || addr == frame_pointer_rtx
12665 || addr == hard_frame_pointer_rtx)
12666 len = 1;
12669 /* Direct Addressing. */
12670 else if (disp && !base && !index)
12671 len = 4;
12673 else
12675 /* Find the length of the displacement constant. */
12676 if (disp)
12678 if (GET_CODE (disp) == CONST_INT
12679 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
12680 && base)
12681 len = 1;
12682 else
12683 len = 4;
12685 /* ebp always wants a displacement. */
12686 else if (base == hard_frame_pointer_rtx)
12687 len = 1;
12689 /* An index requires the two-byte modrm form.... */
12690 if (index
12691 /* ...like esp, which always wants an index. */
12692 || base == stack_pointer_rtx
12693 || base == arg_pointer_rtx
12694 || base == frame_pointer_rtx)
12695 len += 1;
12698 return len;
12701 /* Compute default value for "length_immediate" attribute. When SHORTFORM
12702 is set, expect that the insn has an 8-bit immediate alternative. */
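/* Editorial example (assumed instances): with SHORTFORM, addl $3, %eax
   counts 1 byte for the sign-extended imm8 form, while addl $1000, %eax
   counts 4; without a short form the length follows the insn mode, e.g.
   2 bytes for MODE_HI and 4 for MODE_SI or MODE_DI. */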
12704 ix86_attr_length_immediate_default (rtx insn, int shortform)
12706 int len = 0;
12707 int i;
12708 extract_insn_cached (insn);
12709 for (i = recog_data.n_operands - 1; i >= 0; --i)
12710 if (CONSTANT_P (recog_data.operand[i]))
12712 gcc_assert (!len);
12713 if (shortform
12714 && GET_CODE (recog_data.operand[i]) == CONST_INT
12715 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
12716 len = 1;
12717 else
12719 switch (get_attr_mode (insn))
12721 case MODE_QI:
12722 len+=1;
12723 break;
12724 case MODE_HI:
12725 len+=2;
12726 break;
12727 case MODE_SI:
12728 len+=4;
12729 break;
12730 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
12731 case MODE_DI:
12732 len+=4;
12733 break;
12734 default:
12735 fatal_insn ("unknown insn mode", insn);
12739 return len;
12741 /* Compute default value for "length_address" attribute. */
12743 ix86_attr_length_address_default (rtx insn)
12745 int i;
12747 if (get_attr_type (insn) == TYPE_LEA)
12749 rtx set = PATTERN (insn);
12751 if (GET_CODE (set) == PARALLEL)
12752 set = XVECEXP (set, 0, 0);
12754 gcc_assert (GET_CODE (set) == SET);
12756 return memory_address_length (SET_SRC (set));
12759 extract_insn_cached (insn);
12760 for (i = recog_data.n_operands - 1; i >= 0; --i)
12761 if (GET_CODE (recog_data.operand[i]) == MEM)
12763 return memory_address_length (XEXP (recog_data.operand[i], 0));
12764 break;
12766 return 0;
12769 /* Return the maximum number of instructions a cpu can issue. */
12771 static int
12772 ix86_issue_rate (void)
12774 switch (ix86_tune)
12776 case PROCESSOR_PENTIUM:
12777 case PROCESSOR_K6:
12778 return 2;
12780 case PROCESSOR_PENTIUMPRO:
12781 case PROCESSOR_PENTIUM4:
12782 case PROCESSOR_ATHLON:
12783 case PROCESSOR_K8:
12784 case PROCESSOR_NOCONA:
12785 return 3;
12787 default:
12788 return 1;
12792 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
12793 by DEP_INSN and no other register set by DEP_INSN. */
12795 static int
12796 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12798 rtx set, set2;
12800 /* Simplify the test for uninteresting insns. */
12801 if (insn_type != TYPE_SETCC
12802 && insn_type != TYPE_ICMOV
12803 && insn_type != TYPE_FCMOV
12804 && insn_type != TYPE_IBR)
12805 return 0;
12807 if ((set = single_set (dep_insn)) != 0)
12809 set = SET_DEST (set);
12810 set2 = NULL_RTX;
12812 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
12813 && XVECLEN (PATTERN (dep_insn), 0) == 2
12814 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
12815 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
12817 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
12818 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
12820 else
12821 return 0;
12823 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
12824 return 0;
12826 /* This test is true if the dependent insn reads the flags but
12827 not any other potentially set register. */
12828 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
12829 return 0;
12831 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
12832 return 0;
12834 return 1;
12837 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
12838 address with operands set by DEP_INSN. */
12840 static int
12841 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12843 rtx addr;
12845 if (insn_type == TYPE_LEA
12846 && TARGET_PENTIUM)
12848 addr = PATTERN (insn);
12850 if (GET_CODE (addr) == PARALLEL)
12851 addr = XVECEXP (addr, 0, 0);
12853 gcc_assert (GET_CODE (addr) == SET);
12855 addr = SET_SRC (addr);
12857 else
12859 int i;
12860 extract_insn_cached (insn);
12861 for (i = recog_data.n_operands - 1; i >= 0; --i)
12862 if (GET_CODE (recog_data.operand[i]) == MEM)
12864 addr = XEXP (recog_data.operand[i], 0);
12865 goto found;
12867 return 0;
12868 found:;
12871 return modified_in_p (addr, dep_insn);
12874 static int
12875 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
12877 enum attr_type insn_type, dep_insn_type;
12878 enum attr_memory memory;
12879 rtx set, set2;
12880 int dep_insn_code_number;
12882 /* Anti and output dependencies have zero cost on all CPUs. */
12883 if (REG_NOTE_KIND (link) != 0)
12884 return 0;
12886 dep_insn_code_number = recog_memoized (dep_insn);
12888 /* If we can't recognize the insns, we can't really do anything. */
12889 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
12890 return cost;
12892 insn_type = get_attr_type (insn);
12893 dep_insn_type = get_attr_type (dep_insn);
12895 switch (ix86_tune)
12897 case PROCESSOR_PENTIUM:
12898 /* Address Generation Interlock adds a cycle of latency. */
12899 if (ix86_agi_dependant (insn, dep_insn, insn_type))
12900 cost += 1;
12902 /* ??? Compares pair with jump/setcc. */
12903 if (ix86_flags_dependant (insn, dep_insn, insn_type))
12904 cost = 0;
12906 /* Floating point stores require the value to be ready one cycle earlier. */
12907 if (insn_type == TYPE_FMOV
12908 && get_attr_memory (insn) == MEMORY_STORE
12909 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12910 cost += 1;
12911 break;
12913 case PROCESSOR_PENTIUMPRO:
12914 memory = get_attr_memory (insn);
12916 /* INT->FP conversion is expensive. */
12917 if (get_attr_fp_int_src (dep_insn))
12918 cost += 5;
12920 /* There is one cycle extra latency between an FP op and a store. */
12921 if (insn_type == TYPE_FMOV
12922 && (set = single_set (dep_insn)) != NULL_RTX
12923 && (set2 = single_set (insn)) != NULL_RTX
12924 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
12925 && GET_CODE (SET_DEST (set2)) == MEM)
12926 cost += 1;
12928 /* Show the ability of the reorder buffer to hide the latency of a load
12929 by executing it in parallel with the previous instruction, in case the
12930 previous instruction is not needed to compute the address. */
12931 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12932 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12934 /* Claim moves to take one cycle, as the core can issue one load
12935 at a time and the next load can start a cycle later. */
12936 if (dep_insn_type == TYPE_IMOV
12937 || dep_insn_type == TYPE_FMOV)
12938 cost = 1;
12939 else if (cost > 1)
12940 cost--;
12942 break;
12944 case PROCESSOR_K6:
12945 memory = get_attr_memory (insn);
12947 /* The esp dependency is resolved before the instruction is really
12948 finished. */
12949 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
12950 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
12951 return 1;
12953 /* INT->FP conversion is expensive. */
12954 if (get_attr_fp_int_src (dep_insn))
12955 cost += 5;
12957 /* Show the ability of the reorder buffer to hide the latency of a load
12958 by executing it in parallel with the previous instruction, in case the
12959 previous instruction is not needed to compute the address. */
12960 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12961 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12963 /* Claim moves to take one cycle, as the core can issue one load
12964 at a time and the next load can start a cycle later. */
12965 if (dep_insn_type == TYPE_IMOV
12966 || dep_insn_type == TYPE_FMOV)
12967 cost = 1;
12968 else if (cost > 2)
12969 cost -= 2;
12970 else
12971 cost = 1;
12973 break;
12975 case PROCESSOR_ATHLON:
12976 case PROCESSOR_K8:
12977 memory = get_attr_memory (insn);
12979 /* Show the ability of the reorder buffer to hide the latency of a load
12980 by executing it in parallel with the previous instruction, in case the
12981 previous instruction is not needed to compute the address. */
12982 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12983 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12985 enum attr_unit unit = get_attr_unit (insn);
12986 int loadcost = 3;
12988 /* Because of the difference between the length of integer and
12989 floating unit pipeline preparation stages, the memory operands
12990 for floating point are cheaper.
12992 ??? For Athlon the difference is most probably 2. */
12993 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
12994 loadcost = 3;
12995 else
12996 loadcost = TARGET_ATHLON ? 2 : 0;
12998 if (cost >= loadcost)
12999 cost -= loadcost;
13000 else
13001 cost = 0;
13004 default:
13005 break;
13008 return cost;
13011 /* How many alternative schedules to try. This should be as wide as the
13012 scheduling freedom in the DFA, but no wider. Making this value too
13013 large results in extra work for the scheduler. */
13015 static int
13016 ia32_multipass_dfa_lookahead (void)
13018 if (ix86_tune == PROCESSOR_PENTIUM)
13019 return 2;
13021 if (ix86_tune == PROCESSOR_PENTIUMPRO
13022 || ix86_tune == PROCESSOR_K6)
13023 return 1;
13025 else
13026 return 0;
13030 /* Compute the alignment given to a constant that is being placed in memory.
13031 EXP is the constant and ALIGN is the alignment that the object would
13032 ordinarily have.
13033 The value of this function is used instead of that alignment to align
13034 the object. */
13037 ix86_constant_alignment (tree exp, int align)
13039 if (TREE_CODE (exp) == REAL_CST)
13041 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
13042 return 64;
13043 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
13044 return 128;
13046 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
13047 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
13048 return BITS_PER_WORD;
13050 return align;
13053 /* Compute the alignment for a static variable.
13054 TYPE is the data type, and ALIGN is the alignment that
13055 the object would ordinarily have. The value of this function is used
13056 instead of that alignment to align the object. */
13059 ix86_data_alignment (tree type, int align)
13061 if (AGGREGATE_TYPE_P (type)
13062 && TYPE_SIZE (type)
13063 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13064 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
13065 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
13066 return 256;
13068 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
13069 to a 16-byte boundary. */
13070 if (TARGET_64BIT)
13072 if (AGGREGATE_TYPE_P (type)
13073 && TYPE_SIZE (type)
13074 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13075 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
13076 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13077 return 128;
13080 if (TREE_CODE (type) == ARRAY_TYPE)
13082 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13083 return 64;
13084 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13085 return 128;
13087 else if (TREE_CODE (type) == COMPLEX_TYPE)
13090 if (TYPE_MODE (type) == DCmode && align < 64)
13091 return 64;
13092 if (TYPE_MODE (type) == XCmode && align < 128)
13093 return 128;
13095 else if ((TREE_CODE (type) == RECORD_TYPE
13096 || TREE_CODE (type) == UNION_TYPE
13097 || TREE_CODE (type) == QUAL_UNION_TYPE)
13098 && TYPE_FIELDS (type))
13100 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13101 return 64;
13102 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13103 return 128;
13105 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13106 || TREE_CODE (type) == INTEGER_TYPE)
13108 if (TYPE_MODE (type) == DFmode && align < 64)
13109 return 64;
13110 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13111 return 128;
13114 return align;
13117 /* Compute the alignment for a local variable.
13118 TYPE is the data type, and ALIGN is the alignment that
13119 the object would ordinarily have. The value of this function is used
13120 instead of that alignment to align the object. */
13123 ix86_local_alignment (tree type, int align)
13125 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
13126 to a 16-byte boundary. */
13127 if (TARGET_64BIT)
13129 if (AGGREGATE_TYPE_P (type)
13130 && TYPE_SIZE (type)
13131 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13132 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
13133 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13134 return 128;
13136 if (TREE_CODE (type) == ARRAY_TYPE)
13138 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13139 return 64;
13140 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13141 return 128;
13143 else if (TREE_CODE (type) == COMPLEX_TYPE)
13145 if (TYPE_MODE (type) == DCmode && align < 64)
13146 return 64;
13147 if (TYPE_MODE (type) == XCmode && align < 128)
13148 return 128;
13150 else if ((TREE_CODE (type) == RECORD_TYPE
13151 || TREE_CODE (type) == UNION_TYPE
13152 || TREE_CODE (type) == QUAL_UNION_TYPE)
13153 && TYPE_FIELDS (type))
13155 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13156 return 64;
13157 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13158 return 128;
13160 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13161 || TREE_CODE (type) == INTEGER_TYPE)
13164 if (TYPE_MODE (type) == DFmode && align < 64)
13165 return 64;
13166 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13167 return 128;
13169 return align;
13172 /* Emit RTL insns to initialize the variable parts of a trampoline.
13173 FNADDR is an RTX for the address of the function's pure code.
13174 CXT is an RTX for the static chain value for the function. */
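/* Editorial layout sketch (assumed byte stream, matching the stores below):
   the 32-bit trampoline is 0xb9 <cxt32> 0xe9 <rel32>, i.e.
   movl $CXT, %ecx; jmp FNADDR with rel32 = FNADDR - (TRAMP + 10).
   The 64-bit variant is 41 bb <imm32> (or 49 bb <imm64>) to load FNADDR
   into %r11, 49 ba <imm64> to load CXT into %r10, and 49 ff e3 for
   jmp *%r11. */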
13175 void
13176 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
13178 if (!TARGET_64BIT)
13180 /* Compute offset from the end of the jmp to the target function. */
13181 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
13182 plus_constant (tramp, 10),
13183 NULL_RTX, 1, OPTAB_DIRECT);
13184 emit_move_insn (gen_rtx_MEM (QImode, tramp),
13185 gen_int_mode (0xb9, QImode));
13186 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
13187 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
13188 gen_int_mode (0xe9, QImode));
13189 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
13191 else
13193 int offset = 0;
13194 /* Try to load the address using the shorter movl instead of movabs.
13195 We may want to support movq for kernel mode, but the kernel does not use
13196 trampolines at the moment. */
13197 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
13199 fnaddr = copy_to_mode_reg (DImode, fnaddr);
13200 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13201 gen_int_mode (0xbb41, HImode));
13202 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
13203 gen_lowpart (SImode, fnaddr));
13204 offset += 6;
13206 else
13208 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13209 gen_int_mode (0xbb49, HImode));
13210 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13211 fnaddr);
13212 offset += 10;
13214 /* Load static chain using movabs to r10. */
13215 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13216 gen_int_mode (0xba49, HImode));
13217 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13218 cxt);
13219 offset += 10;
13220 /* Jump to r11. */
13221 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13222 gen_int_mode (0xff49, HImode));
13223 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
13224 gen_int_mode (0xe3, QImode));
13225 offset += 3;
13226 gcc_assert (offset <= TRAMPOLINE_SIZE);
13229 #ifdef ENABLE_EXECUTE_STACK
13230 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
13231 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
13232 #endif
13235 /* Codes for all the SSE/MMX builtins. */
13236 enum ix86_builtins
13238 IX86_BUILTIN_ADDPS,
13239 IX86_BUILTIN_ADDSS,
13240 IX86_BUILTIN_DIVPS,
13241 IX86_BUILTIN_DIVSS,
13242 IX86_BUILTIN_MULPS,
13243 IX86_BUILTIN_MULSS,
13244 IX86_BUILTIN_SUBPS,
13245 IX86_BUILTIN_SUBSS,
13247 IX86_BUILTIN_CMPEQPS,
13248 IX86_BUILTIN_CMPLTPS,
13249 IX86_BUILTIN_CMPLEPS,
13250 IX86_BUILTIN_CMPGTPS,
13251 IX86_BUILTIN_CMPGEPS,
13252 IX86_BUILTIN_CMPNEQPS,
13253 IX86_BUILTIN_CMPNLTPS,
13254 IX86_BUILTIN_CMPNLEPS,
13255 IX86_BUILTIN_CMPNGTPS,
13256 IX86_BUILTIN_CMPNGEPS,
13257 IX86_BUILTIN_CMPORDPS,
13258 IX86_BUILTIN_CMPUNORDPS,
13259 IX86_BUILTIN_CMPNEPS,
13260 IX86_BUILTIN_CMPEQSS,
13261 IX86_BUILTIN_CMPLTSS,
13262 IX86_BUILTIN_CMPLESS,
13263 IX86_BUILTIN_CMPNEQSS,
13264 IX86_BUILTIN_CMPNLTSS,
13265 IX86_BUILTIN_CMPNLESS,
13266 IX86_BUILTIN_CMPNGTSS,
13267 IX86_BUILTIN_CMPNGESS,
13268 IX86_BUILTIN_CMPORDSS,
13269 IX86_BUILTIN_CMPUNORDSS,
13270 IX86_BUILTIN_CMPNESS,
13272 IX86_BUILTIN_COMIEQSS,
13273 IX86_BUILTIN_COMILTSS,
13274 IX86_BUILTIN_COMILESS,
13275 IX86_BUILTIN_COMIGTSS,
13276 IX86_BUILTIN_COMIGESS,
13277 IX86_BUILTIN_COMINEQSS,
13278 IX86_BUILTIN_UCOMIEQSS,
13279 IX86_BUILTIN_UCOMILTSS,
13280 IX86_BUILTIN_UCOMILESS,
13281 IX86_BUILTIN_UCOMIGTSS,
13282 IX86_BUILTIN_UCOMIGESS,
13283 IX86_BUILTIN_UCOMINEQSS,
13285 IX86_BUILTIN_CVTPI2PS,
13286 IX86_BUILTIN_CVTPS2PI,
13287 IX86_BUILTIN_CVTSI2SS,
13288 IX86_BUILTIN_CVTSI642SS,
13289 IX86_BUILTIN_CVTSS2SI,
13290 IX86_BUILTIN_CVTSS2SI64,
13291 IX86_BUILTIN_CVTTPS2PI,
13292 IX86_BUILTIN_CVTTSS2SI,
13293 IX86_BUILTIN_CVTTSS2SI64,
13295 IX86_BUILTIN_MAXPS,
13296 IX86_BUILTIN_MAXSS,
13297 IX86_BUILTIN_MINPS,
13298 IX86_BUILTIN_MINSS,
13300 IX86_BUILTIN_LOADUPS,
13301 IX86_BUILTIN_STOREUPS,
13302 IX86_BUILTIN_MOVSS,
13304 IX86_BUILTIN_MOVHLPS,
13305 IX86_BUILTIN_MOVLHPS,
13306 IX86_BUILTIN_LOADHPS,
13307 IX86_BUILTIN_LOADLPS,
13308 IX86_BUILTIN_STOREHPS,
13309 IX86_BUILTIN_STORELPS,
13311 IX86_BUILTIN_MASKMOVQ,
13312 IX86_BUILTIN_MOVMSKPS,
13313 IX86_BUILTIN_PMOVMSKB,
13315 IX86_BUILTIN_MOVNTPS,
13316 IX86_BUILTIN_MOVNTQ,
13318 IX86_BUILTIN_LOADDQU,
13319 IX86_BUILTIN_STOREDQU,
13321 IX86_BUILTIN_PACKSSWB,
13322 IX86_BUILTIN_PACKSSDW,
13323 IX86_BUILTIN_PACKUSWB,
13325 IX86_BUILTIN_PADDB,
13326 IX86_BUILTIN_PADDW,
13327 IX86_BUILTIN_PADDD,
13328 IX86_BUILTIN_PADDQ,
13329 IX86_BUILTIN_PADDSB,
13330 IX86_BUILTIN_PADDSW,
13331 IX86_BUILTIN_PADDUSB,
13332 IX86_BUILTIN_PADDUSW,
13333 IX86_BUILTIN_PSUBB,
13334 IX86_BUILTIN_PSUBW,
13335 IX86_BUILTIN_PSUBD,
13336 IX86_BUILTIN_PSUBQ,
13337 IX86_BUILTIN_PSUBSB,
13338 IX86_BUILTIN_PSUBSW,
13339 IX86_BUILTIN_PSUBUSB,
13340 IX86_BUILTIN_PSUBUSW,
13342 IX86_BUILTIN_PAND,
13343 IX86_BUILTIN_PANDN,
13344 IX86_BUILTIN_POR,
13345 IX86_BUILTIN_PXOR,
13347 IX86_BUILTIN_PAVGB,
13348 IX86_BUILTIN_PAVGW,
13350 IX86_BUILTIN_PCMPEQB,
13351 IX86_BUILTIN_PCMPEQW,
13352 IX86_BUILTIN_PCMPEQD,
13353 IX86_BUILTIN_PCMPGTB,
13354 IX86_BUILTIN_PCMPGTW,
13355 IX86_BUILTIN_PCMPGTD,
13357 IX86_BUILTIN_PMADDWD,
13359 IX86_BUILTIN_PMAXSW,
13360 IX86_BUILTIN_PMAXUB,
13361 IX86_BUILTIN_PMINSW,
13362 IX86_BUILTIN_PMINUB,
13364 IX86_BUILTIN_PMULHUW,
13365 IX86_BUILTIN_PMULHW,
13366 IX86_BUILTIN_PMULLW,
13368 IX86_BUILTIN_PSADBW,
13369 IX86_BUILTIN_PSHUFW,
13371 IX86_BUILTIN_PSLLW,
13372 IX86_BUILTIN_PSLLD,
13373 IX86_BUILTIN_PSLLQ,
13374 IX86_BUILTIN_PSRAW,
13375 IX86_BUILTIN_PSRAD,
13376 IX86_BUILTIN_PSRLW,
13377 IX86_BUILTIN_PSRLD,
13378 IX86_BUILTIN_PSRLQ,
13379 IX86_BUILTIN_PSLLWI,
13380 IX86_BUILTIN_PSLLDI,
13381 IX86_BUILTIN_PSLLQI,
13382 IX86_BUILTIN_PSRAWI,
13383 IX86_BUILTIN_PSRADI,
13384 IX86_BUILTIN_PSRLWI,
13385 IX86_BUILTIN_PSRLDI,
13386 IX86_BUILTIN_PSRLQI,
13388 IX86_BUILTIN_PUNPCKHBW,
13389 IX86_BUILTIN_PUNPCKHWD,
13390 IX86_BUILTIN_PUNPCKHDQ,
13391 IX86_BUILTIN_PUNPCKLBW,
13392 IX86_BUILTIN_PUNPCKLWD,
13393 IX86_BUILTIN_PUNPCKLDQ,
13395 IX86_BUILTIN_SHUFPS,
13397 IX86_BUILTIN_RCPPS,
13398 IX86_BUILTIN_RCPSS,
13399 IX86_BUILTIN_RSQRTPS,
13400 IX86_BUILTIN_RSQRTSS,
13401 IX86_BUILTIN_SQRTPS,
13402 IX86_BUILTIN_SQRTSS,
13404 IX86_BUILTIN_UNPCKHPS,
13405 IX86_BUILTIN_UNPCKLPS,
13407 IX86_BUILTIN_ANDPS,
13408 IX86_BUILTIN_ANDNPS,
13409 IX86_BUILTIN_ORPS,
13410 IX86_BUILTIN_XORPS,
13412 IX86_BUILTIN_EMMS,
13413 IX86_BUILTIN_LDMXCSR,
13414 IX86_BUILTIN_STMXCSR,
13415 IX86_BUILTIN_SFENCE,
13417 /* 3DNow! Original */
13418 IX86_BUILTIN_FEMMS,
13419 IX86_BUILTIN_PAVGUSB,
13420 IX86_BUILTIN_PF2ID,
13421 IX86_BUILTIN_PFACC,
13422 IX86_BUILTIN_PFADD,
13423 IX86_BUILTIN_PFCMPEQ,
13424 IX86_BUILTIN_PFCMPGE,
13425 IX86_BUILTIN_PFCMPGT,
13426 IX86_BUILTIN_PFMAX,
13427 IX86_BUILTIN_PFMIN,
13428 IX86_BUILTIN_PFMUL,
13429 IX86_BUILTIN_PFRCP,
13430 IX86_BUILTIN_PFRCPIT1,
13431 IX86_BUILTIN_PFRCPIT2,
13432 IX86_BUILTIN_PFRSQIT1,
13433 IX86_BUILTIN_PFRSQRT,
13434 IX86_BUILTIN_PFSUB,
13435 IX86_BUILTIN_PFSUBR,
13436 IX86_BUILTIN_PI2FD,
13437 IX86_BUILTIN_PMULHRW,
13439 /* 3DNow! Athlon Extensions */
13440 IX86_BUILTIN_PF2IW,
13441 IX86_BUILTIN_PFNACC,
13442 IX86_BUILTIN_PFPNACC,
13443 IX86_BUILTIN_PI2FW,
13444 IX86_BUILTIN_PSWAPDSI,
13445 IX86_BUILTIN_PSWAPDSF,
13447 /* SSE2 */
13448 IX86_BUILTIN_ADDPD,
13449 IX86_BUILTIN_ADDSD,
13450 IX86_BUILTIN_DIVPD,
13451 IX86_BUILTIN_DIVSD,
13452 IX86_BUILTIN_MULPD,
13453 IX86_BUILTIN_MULSD,
13454 IX86_BUILTIN_SUBPD,
13455 IX86_BUILTIN_SUBSD,
13457 IX86_BUILTIN_CMPEQPD,
13458 IX86_BUILTIN_CMPLTPD,
13459 IX86_BUILTIN_CMPLEPD,
13460 IX86_BUILTIN_CMPGTPD,
13461 IX86_BUILTIN_CMPGEPD,
13462 IX86_BUILTIN_CMPNEQPD,
13463 IX86_BUILTIN_CMPNLTPD,
13464 IX86_BUILTIN_CMPNLEPD,
13465 IX86_BUILTIN_CMPNGTPD,
13466 IX86_BUILTIN_CMPNGEPD,
13467 IX86_BUILTIN_CMPORDPD,
13468 IX86_BUILTIN_CMPUNORDPD,
13469 IX86_BUILTIN_CMPNEPD,
13470 IX86_BUILTIN_CMPEQSD,
13471 IX86_BUILTIN_CMPLTSD,
13472 IX86_BUILTIN_CMPLESD,
13473 IX86_BUILTIN_CMPNEQSD,
13474 IX86_BUILTIN_CMPNLTSD,
13475 IX86_BUILTIN_CMPNLESD,
13476 IX86_BUILTIN_CMPORDSD,
13477 IX86_BUILTIN_CMPUNORDSD,
13478 IX86_BUILTIN_CMPNESD,
13480 IX86_BUILTIN_COMIEQSD,
13481 IX86_BUILTIN_COMILTSD,
13482 IX86_BUILTIN_COMILESD,
13483 IX86_BUILTIN_COMIGTSD,
13484 IX86_BUILTIN_COMIGESD,
13485 IX86_BUILTIN_COMINEQSD,
13486 IX86_BUILTIN_UCOMIEQSD,
13487 IX86_BUILTIN_UCOMILTSD,
13488 IX86_BUILTIN_UCOMILESD,
13489 IX86_BUILTIN_UCOMIGTSD,
13490 IX86_BUILTIN_UCOMIGESD,
13491 IX86_BUILTIN_UCOMINEQSD,
13493 IX86_BUILTIN_MAXPD,
13494 IX86_BUILTIN_MAXSD,
13495 IX86_BUILTIN_MINPD,
13496 IX86_BUILTIN_MINSD,
13498 IX86_BUILTIN_ANDPD,
13499 IX86_BUILTIN_ANDNPD,
13500 IX86_BUILTIN_ORPD,
13501 IX86_BUILTIN_XORPD,
13503 IX86_BUILTIN_SQRTPD,
13504 IX86_BUILTIN_SQRTSD,
13506 IX86_BUILTIN_UNPCKHPD,
13507 IX86_BUILTIN_UNPCKLPD,
13509 IX86_BUILTIN_SHUFPD,
13511 IX86_BUILTIN_LOADUPD,
13512 IX86_BUILTIN_STOREUPD,
13513 IX86_BUILTIN_MOVSD,
13515 IX86_BUILTIN_LOADHPD,
13516 IX86_BUILTIN_LOADLPD,
13518 IX86_BUILTIN_CVTDQ2PD,
13519 IX86_BUILTIN_CVTDQ2PS,
13521 IX86_BUILTIN_CVTPD2DQ,
13522 IX86_BUILTIN_CVTPD2PI,
13523 IX86_BUILTIN_CVTPD2PS,
13524 IX86_BUILTIN_CVTTPD2DQ,
13525 IX86_BUILTIN_CVTTPD2PI,
13527 IX86_BUILTIN_CVTPI2PD,
13528 IX86_BUILTIN_CVTSI2SD,
13529 IX86_BUILTIN_CVTSI642SD,
13531 IX86_BUILTIN_CVTSD2SI,
13532 IX86_BUILTIN_CVTSD2SI64,
13533 IX86_BUILTIN_CVTSD2SS,
13534 IX86_BUILTIN_CVTSS2SD,
13535 IX86_BUILTIN_CVTTSD2SI,
13536 IX86_BUILTIN_CVTTSD2SI64,
13538 IX86_BUILTIN_CVTPS2DQ,
13539 IX86_BUILTIN_CVTPS2PD,
13540 IX86_BUILTIN_CVTTPS2DQ,
13542 IX86_BUILTIN_MOVNTI,
13543 IX86_BUILTIN_MOVNTPD,
13544 IX86_BUILTIN_MOVNTDQ,
13546 /* SSE2 MMX */
13547 IX86_BUILTIN_MASKMOVDQU,
13548 IX86_BUILTIN_MOVMSKPD,
13549 IX86_BUILTIN_PMOVMSKB128,
13551 IX86_BUILTIN_PACKSSWB128,
13552 IX86_BUILTIN_PACKSSDW128,
13553 IX86_BUILTIN_PACKUSWB128,
13555 IX86_BUILTIN_PADDB128,
13556 IX86_BUILTIN_PADDW128,
13557 IX86_BUILTIN_PADDD128,
13558 IX86_BUILTIN_PADDQ128,
13559 IX86_BUILTIN_PADDSB128,
13560 IX86_BUILTIN_PADDSW128,
13561 IX86_BUILTIN_PADDUSB128,
13562 IX86_BUILTIN_PADDUSW128,
13563 IX86_BUILTIN_PSUBB128,
13564 IX86_BUILTIN_PSUBW128,
13565 IX86_BUILTIN_PSUBD128,
13566 IX86_BUILTIN_PSUBQ128,
13567 IX86_BUILTIN_PSUBSB128,
13568 IX86_BUILTIN_PSUBSW128,
13569 IX86_BUILTIN_PSUBUSB128,
13570 IX86_BUILTIN_PSUBUSW128,
13572 IX86_BUILTIN_PAND128,
13573 IX86_BUILTIN_PANDN128,
13574 IX86_BUILTIN_POR128,
13575 IX86_BUILTIN_PXOR128,
13577 IX86_BUILTIN_PAVGB128,
13578 IX86_BUILTIN_PAVGW128,
13580 IX86_BUILTIN_PCMPEQB128,
13581 IX86_BUILTIN_PCMPEQW128,
13582 IX86_BUILTIN_PCMPEQD128,
13583 IX86_BUILTIN_PCMPGTB128,
13584 IX86_BUILTIN_PCMPGTW128,
13585 IX86_BUILTIN_PCMPGTD128,
13587 IX86_BUILTIN_PMADDWD128,
13589 IX86_BUILTIN_PMAXSW128,
13590 IX86_BUILTIN_PMAXUB128,
13591 IX86_BUILTIN_PMINSW128,
13592 IX86_BUILTIN_PMINUB128,
13594 IX86_BUILTIN_PMULUDQ,
13595 IX86_BUILTIN_PMULUDQ128,
13596 IX86_BUILTIN_PMULHUW128,
13597 IX86_BUILTIN_PMULHW128,
13598 IX86_BUILTIN_PMULLW128,
13600 IX86_BUILTIN_PSADBW128,
13601 IX86_BUILTIN_PSHUFHW,
13602 IX86_BUILTIN_PSHUFLW,
13603 IX86_BUILTIN_PSHUFD,
13605 IX86_BUILTIN_PSLLW128,
13606 IX86_BUILTIN_PSLLD128,
13607 IX86_BUILTIN_PSLLQ128,
13608 IX86_BUILTIN_PSRAW128,
13609 IX86_BUILTIN_PSRAD128,
13610 IX86_BUILTIN_PSRLW128,
13611 IX86_BUILTIN_PSRLD128,
13612 IX86_BUILTIN_PSRLQ128,
13613 IX86_BUILTIN_PSLLDQI128,
13614 IX86_BUILTIN_PSLLWI128,
13615 IX86_BUILTIN_PSLLDI128,
13616 IX86_BUILTIN_PSLLQI128,
13617 IX86_BUILTIN_PSRAWI128,
13618 IX86_BUILTIN_PSRADI128,
13619 IX86_BUILTIN_PSRLDQI128,
13620 IX86_BUILTIN_PSRLWI128,
13621 IX86_BUILTIN_PSRLDI128,
13622 IX86_BUILTIN_PSRLQI128,
13624 IX86_BUILTIN_PUNPCKHBW128,
13625 IX86_BUILTIN_PUNPCKHWD128,
13626 IX86_BUILTIN_PUNPCKHDQ128,
13627 IX86_BUILTIN_PUNPCKHQDQ128,
13628 IX86_BUILTIN_PUNPCKLBW128,
13629 IX86_BUILTIN_PUNPCKLWD128,
13630 IX86_BUILTIN_PUNPCKLDQ128,
13631 IX86_BUILTIN_PUNPCKLQDQ128,
13633 IX86_BUILTIN_CLFLUSH,
13634 IX86_BUILTIN_MFENCE,
13635 IX86_BUILTIN_LFENCE,
13637 /* Prescott New Instructions. */
13638 IX86_BUILTIN_ADDSUBPS,
13639 IX86_BUILTIN_HADDPS,
13640 IX86_BUILTIN_HSUBPS,
13641 IX86_BUILTIN_MOVSHDUP,
13642 IX86_BUILTIN_MOVSLDUP,
13643 IX86_BUILTIN_ADDSUBPD,
13644 IX86_BUILTIN_HADDPD,
13645 IX86_BUILTIN_HSUBPD,
13646 IX86_BUILTIN_LDDQU,
13648 IX86_BUILTIN_MONITOR,
13649 IX86_BUILTIN_MWAIT,
13651 IX86_BUILTIN_VEC_INIT_V2SI,
13652 IX86_BUILTIN_VEC_INIT_V4HI,
13653 IX86_BUILTIN_VEC_INIT_V8QI,
13654 IX86_BUILTIN_VEC_EXT_V2DF,
13655 IX86_BUILTIN_VEC_EXT_V2DI,
13656 IX86_BUILTIN_VEC_EXT_V4SF,
13657 IX86_BUILTIN_VEC_EXT_V4SI,
13658 IX86_BUILTIN_VEC_EXT_V8HI,
13659 IX86_BUILTIN_VEC_EXT_V2SI,
13660 IX86_BUILTIN_VEC_EXT_V4HI,
13661 IX86_BUILTIN_VEC_SET_V8HI,
13662 IX86_BUILTIN_VEC_SET_V4HI,
13664 IX86_BUILTIN_MAX
13667 #define def_builtin(MASK, NAME, TYPE, CODE) \
13668 do { \
13669 if ((MASK) & target_flags \
13670 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
13671 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
13672 NULL, NULL_TREE); \
13673 } while (0)
13675 /* Bits for builtin_description.flag. */
13677 /* Set when we don't support the comparison natively, and should
13678 swap_comparison in order to support it. */
13679 #define BUILTIN_DESC_SWAP_OPERANDS 1
13681 struct builtin_description
13683 const unsigned int mask;
13684 const enum insn_code icode;
13685 const char *const name;
13686 const enum ix86_builtins code;
13687 const enum rtx_code comparison;
13688 const unsigned int flag;
13691 static const struct builtin_description bdesc_comi[] =
13693 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
13694 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
13695 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
13696 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
13697 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
13698 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
13699 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
13700 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
13701 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
13702 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
13703 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
13704 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
13705 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
13706 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
13707 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
13708 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
13709 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
13710 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
13711 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
13712 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
13713 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
13714 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
13715 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
13716 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
13717 };
13719 static const struct builtin_description bdesc_2arg[] =
13720 {
13721 /* SSE */
13722 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
13723 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
13724 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
13725 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
13726 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
13727 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
13728 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
13729 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
13731 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
13732 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
13733 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
13734 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
13735 BUILTIN_DESC_SWAP_OPERANDS },
13736 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
13737 BUILTIN_DESC_SWAP_OPERANDS },
13738 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
13739 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
13740 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
13741 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
13742 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
13743 BUILTIN_DESC_SWAP_OPERANDS },
13744 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
13745 BUILTIN_DESC_SWAP_OPERANDS },
13746 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
13747 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
13748 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
13749 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
13750 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
13751 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
13752 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
13753 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
13754 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
13755 BUILTIN_DESC_SWAP_OPERANDS },
13756 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
13757 BUILTIN_DESC_SWAP_OPERANDS },
13758 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
13760 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
13761 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
13762 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
13763 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
13765 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
13766 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
13767 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
13768 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
13770 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
13771 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
13772 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
13773 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
13774 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
13776 /* MMX */
13777 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
13778 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
13779 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
13780 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
13781 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
13782 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
13783 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
13784 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
13786 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
13787 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
13788 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
13789 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
13790 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
13791 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
13792 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
13793 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
13795 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
13796 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
13797 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
13799 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
13800 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
13801 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
13802 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
13804 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
13805 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
13807 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
13808 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
13809 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
13810 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
13811 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
13812 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
13814 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
13815 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
13816 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
13817 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
13819 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
13820 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
13821 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
13822 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
13823 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
13824 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
13826 /* Special. */
13827 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
13828 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
13829 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
13831 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
13832 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
13833 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
13835 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
13836 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
13837 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
13838 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
13839 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
13840 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
13842 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
13843 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
13844 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
13845 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
13846 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
13847 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
13849 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
13850 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
13851 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
13852 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
13854 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
13855 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
13857 /* SSE2 */
13858 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
13859 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
13860 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
13861 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
13862 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
13863 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
13864 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
13865 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
13867 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
13868 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
13869 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
13870 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
13871 BUILTIN_DESC_SWAP_OPERANDS },
13872 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
13873 BUILTIN_DESC_SWAP_OPERANDS },
13874 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
13875 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
13876 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
13877 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
13878 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
13879 BUILTIN_DESC_SWAP_OPERANDS },
13880 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
13881 BUILTIN_DESC_SWAP_OPERANDS },
13882 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
13883 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
13884 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
13885 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
13886 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
13887 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
13888 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
13889 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
13890 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
13892 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
13893 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
13894 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
13895 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
13897 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
13898 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
13899 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
13900 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
13902 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
13903 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
13904 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
13906 /* SSE2 MMX */
13907 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
13908 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
13909 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
13910 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
13911 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
13912 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
13913 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
13914 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
13916 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
13917 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
13918 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
13919 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
13920 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
13921 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
13922 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
13923 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
13925 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
13926 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
13928 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
13929 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
13930 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
13931 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
13933 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
13934 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
13936 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
13937 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
13938 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
13939 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
13940 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
13941 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
13943 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
13944 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
13945 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
13946 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
13948 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
13949 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
13950 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
13951 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
13952 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
13953 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
13954 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
13955 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
13957 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
13958 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
13959 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
13961 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
13962 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
13964 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
13965 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
13967 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
13968 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
13969 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
13971 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
13972 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
13973 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
13975 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
13976 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
13978 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
13980 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
13981 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
13982 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
13983 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
13985 /* SSE3 MMX */
13986 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
13987 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
13988 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
13989 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
13990 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
13991 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
13992 };
13994 static const struct builtin_description bdesc_1arg[] =
13995 {
13996 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
13997 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
13999 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
14000 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
14001 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
14003 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
14004 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
14005 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
14006 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
14007 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
14008 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
14010 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
14011 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
14013 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
14015 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
14016 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
14018 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
14019 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
14020 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
14021 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
14022 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
14024 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
14026 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
14027 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
14028 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
14029 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
14031 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
14032 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
14033 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
14035 /* SSE3 */
14036 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
14037 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
14038 };
14040 static void
14041 ix86_init_builtins (void)
14042 {
14043 if (TARGET_MMX)
14044 ix86_init_mmx_sse_builtins ();
14045 }
14047 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
14048 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
14049 builtins. */
14050 static void
14051 ix86_init_mmx_sse_builtins (void)
14052 {
14053 const struct builtin_description * d;
14054 size_t i;
14056 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
14057 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
14058 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
14059 tree V2DI_type_node
14060 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
14061 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
14062 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
14063 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
14064 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
14065 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
14066 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
14068 tree pchar_type_node = build_pointer_type (char_type_node);
14069 tree pcchar_type_node = build_pointer_type (
14070 build_type_variant (char_type_node, 1, 0));
14071 tree pfloat_type_node = build_pointer_type (float_type_node);
14072 tree pcfloat_type_node = build_pointer_type (
14073 build_type_variant (float_type_node, 1, 0));
14074 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
14075 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
14076 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
14078 /* Comparisons. */
14079 tree int_ftype_v4sf_v4sf
14080 = build_function_type_list (integer_type_node,
14081 V4SF_type_node, V4SF_type_node, NULL_TREE);
14082 tree v4si_ftype_v4sf_v4sf
14083 = build_function_type_list (V4SI_type_node,
14084 V4SF_type_node, V4SF_type_node, NULL_TREE);
14085 /* MMX/SSE/integer conversions. */
14086 tree int_ftype_v4sf
14087 = build_function_type_list (integer_type_node,
14088 V4SF_type_node, NULL_TREE);
14089 tree int64_ftype_v4sf
14090 = build_function_type_list (long_long_integer_type_node,
14091 V4SF_type_node, NULL_TREE);
14092 tree int_ftype_v8qi
14093 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
14094 tree v4sf_ftype_v4sf_int
14095 = build_function_type_list (V4SF_type_node,
14096 V4SF_type_node, integer_type_node, NULL_TREE);
14097 tree v4sf_ftype_v4sf_int64
14098 = build_function_type_list (V4SF_type_node,
14099 V4SF_type_node, long_long_integer_type_node,
14100 NULL_TREE);
14101 tree v4sf_ftype_v4sf_v2si
14102 = build_function_type_list (V4SF_type_node,
14103 V4SF_type_node, V2SI_type_node, NULL_TREE);
14105 /* Miscellaneous. */
14106 tree v8qi_ftype_v4hi_v4hi
14107 = build_function_type_list (V8QI_type_node,
14108 V4HI_type_node, V4HI_type_node, NULL_TREE);
14109 tree v4hi_ftype_v2si_v2si
14110 = build_function_type_list (V4HI_type_node,
14111 V2SI_type_node, V2SI_type_node, NULL_TREE);
14112 tree v4sf_ftype_v4sf_v4sf_int
14113 = build_function_type_list (V4SF_type_node,
14114 V4SF_type_node, V4SF_type_node,
14115 integer_type_node, NULL_TREE);
14116 tree v2si_ftype_v4hi_v4hi
14117 = build_function_type_list (V2SI_type_node,
14118 V4HI_type_node, V4HI_type_node, NULL_TREE);
14119 tree v4hi_ftype_v4hi_int
14120 = build_function_type_list (V4HI_type_node,
14121 V4HI_type_node, integer_type_node, NULL_TREE);
14122 tree v4hi_ftype_v4hi_di
14123 = build_function_type_list (V4HI_type_node,
14124 V4HI_type_node, long_long_unsigned_type_node,
14125 NULL_TREE);
14126 tree v2si_ftype_v2si_di
14127 = build_function_type_list (V2SI_type_node,
14128 V2SI_type_node, long_long_unsigned_type_node,
14129 NULL_TREE);
14130 tree void_ftype_void
14131 = build_function_type (void_type_node, void_list_node);
14132 tree void_ftype_unsigned
14133 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
14134 tree void_ftype_unsigned_unsigned
14135 = build_function_type_list (void_type_node, unsigned_type_node,
14136 unsigned_type_node, NULL_TREE);
14137 tree void_ftype_pcvoid_unsigned_unsigned
14138 = build_function_type_list (void_type_node, const_ptr_type_node,
14139 unsigned_type_node, unsigned_type_node,
14140 NULL_TREE);
14141 tree unsigned_ftype_void
14142 = build_function_type (unsigned_type_node, void_list_node);
14143 tree v2si_ftype_v4sf
14144 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
14145 /* Loads/stores. */
14146 tree void_ftype_v8qi_v8qi_pchar
14147 = build_function_type_list (void_type_node,
14148 V8QI_type_node, V8QI_type_node,
14149 pchar_type_node, NULL_TREE);
14150 tree v4sf_ftype_pcfloat
14151 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
14152 /* @@@ the type is bogus */
14153 tree v4sf_ftype_v4sf_pv2si
14154 = build_function_type_list (V4SF_type_node,
14155 V4SF_type_node, pv2si_type_node, NULL_TREE);
14156 tree void_ftype_pv2si_v4sf
14157 = build_function_type_list (void_type_node,
14158 pv2si_type_node, V4SF_type_node, NULL_TREE);
14159 tree void_ftype_pfloat_v4sf
14160 = build_function_type_list (void_type_node,
14161 pfloat_type_node, V4SF_type_node, NULL_TREE);
14162 tree void_ftype_pdi_di
14163 = build_function_type_list (void_type_node,
14164 pdi_type_node, long_long_unsigned_type_node,
14165 NULL_TREE);
14166 tree void_ftype_pv2di_v2di
14167 = build_function_type_list (void_type_node,
14168 pv2di_type_node, V2DI_type_node, NULL_TREE);
14169 /* Normal vector unops. */
14170 tree v4sf_ftype_v4sf
14171 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14173 /* Normal vector binops. */
14174 tree v4sf_ftype_v4sf_v4sf
14175 = build_function_type_list (V4SF_type_node,
14176 V4SF_type_node, V4SF_type_node, NULL_TREE);
14177 tree v8qi_ftype_v8qi_v8qi
14178 = build_function_type_list (V8QI_type_node,
14179 V8QI_type_node, V8QI_type_node, NULL_TREE);
14180 tree v4hi_ftype_v4hi_v4hi
14181 = build_function_type_list (V4HI_type_node,
14182 V4HI_type_node, V4HI_type_node, NULL_TREE);
14183 tree v2si_ftype_v2si_v2si
14184 = build_function_type_list (V2SI_type_node,
14185 V2SI_type_node, V2SI_type_node, NULL_TREE);
14186 tree di_ftype_di_di
14187 = build_function_type_list (long_long_unsigned_type_node,
14188 long_long_unsigned_type_node,
14189 long_long_unsigned_type_node, NULL_TREE);
14191 tree v2si_ftype_v2sf
14192 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
14193 tree v2sf_ftype_v2si
14194 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
14195 tree v2si_ftype_v2si
14196 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
14197 tree v2sf_ftype_v2sf
14198 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
14199 tree v2sf_ftype_v2sf_v2sf
14200 = build_function_type_list (V2SF_type_node,
14201 V2SF_type_node, V2SF_type_node, NULL_TREE);
14202 tree v2si_ftype_v2sf_v2sf
14203 = build_function_type_list (V2SI_type_node,
14204 V2SF_type_node, V2SF_type_node, NULL_TREE);
14205 tree pint_type_node = build_pointer_type (integer_type_node);
14206 tree pdouble_type_node = build_pointer_type (double_type_node);
14207 tree pcdouble_type_node = build_pointer_type (
14208 build_type_variant (double_type_node, 1, 0));
14209 tree int_ftype_v2df_v2df
14210 = build_function_type_list (integer_type_node,
14211 V2DF_type_node, V2DF_type_node, NULL_TREE);
14213 tree ti_ftype_ti_ti
14214 = build_function_type_list (intTI_type_node,
14215 intTI_type_node, intTI_type_node, NULL_TREE);
14216 tree void_ftype_pcvoid
14217 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
14218 tree v4sf_ftype_v4si
14219 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
14220 tree v4si_ftype_v4sf
14221 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
14222 tree v2df_ftype_v4si
14223 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
14224 tree v4si_ftype_v2df
14225 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
14226 tree v2si_ftype_v2df
14227 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
14228 tree v4sf_ftype_v2df
14229 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
14230 tree v2df_ftype_v2si
14231 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
14232 tree v2df_ftype_v4sf
14233 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
14234 tree int_ftype_v2df
14235 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
14236 tree int64_ftype_v2df
14237 = build_function_type_list (long_long_integer_type_node,
14238 V2DF_type_node, NULL_TREE);
14239 tree v2df_ftype_v2df_int
14240 = build_function_type_list (V2DF_type_node,
14241 V2DF_type_node, integer_type_node, NULL_TREE);
14242 tree v2df_ftype_v2df_int64
14243 = build_function_type_list (V2DF_type_node,
14244 V2DF_type_node, long_long_integer_type_node,
14245 NULL_TREE);
14246 tree v4sf_ftype_v4sf_v2df
14247 = build_function_type_list (V4SF_type_node,
14248 V4SF_type_node, V2DF_type_node, NULL_TREE);
14249 tree v2df_ftype_v2df_v4sf
14250 = build_function_type_list (V2DF_type_node,
14251 V2DF_type_node, V4SF_type_node, NULL_TREE);
14252 tree v2df_ftype_v2df_v2df_int
14253 = build_function_type_list (V2DF_type_node,
14254 V2DF_type_node, V2DF_type_node,
14255 integer_type_node,
14256 NULL_TREE);
14257 tree v2df_ftype_v2df_pcdouble
14258 = build_function_type_list (V2DF_type_node,
14259 V2DF_type_node, pcdouble_type_node, NULL_TREE);
14260 tree void_ftype_pdouble_v2df
14261 = build_function_type_list (void_type_node,
14262 pdouble_type_node, V2DF_type_node, NULL_TREE);
14263 tree void_ftype_pint_int
14264 = build_function_type_list (void_type_node,
14265 pint_type_node, integer_type_node, NULL_TREE);
14266 tree void_ftype_v16qi_v16qi_pchar
14267 = build_function_type_list (void_type_node,
14268 V16QI_type_node, V16QI_type_node,
14269 pchar_type_node, NULL_TREE);
14270 tree v2df_ftype_pcdouble
14271 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
14272 tree v2df_ftype_v2df_v2df
14273 = build_function_type_list (V2DF_type_node,
14274 V2DF_type_node, V2DF_type_node, NULL_TREE);
14275 tree v16qi_ftype_v16qi_v16qi
14276 = build_function_type_list (V16QI_type_node,
14277 V16QI_type_node, V16QI_type_node, NULL_TREE);
14278 tree v8hi_ftype_v8hi_v8hi
14279 = build_function_type_list (V8HI_type_node,
14280 V8HI_type_node, V8HI_type_node, NULL_TREE);
14281 tree v4si_ftype_v4si_v4si
14282 = build_function_type_list (V4SI_type_node,
14283 V4SI_type_node, V4SI_type_node, NULL_TREE);
14284 tree v2di_ftype_v2di_v2di
14285 = build_function_type_list (V2DI_type_node,
14286 V2DI_type_node, V2DI_type_node, NULL_TREE);
14287 tree v2di_ftype_v2df_v2df
14288 = build_function_type_list (V2DI_type_node,
14289 V2DF_type_node, V2DF_type_node, NULL_TREE);
14290 tree v2df_ftype_v2df
14291 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14292 tree v2di_ftype_v2di_int
14293 = build_function_type_list (V2DI_type_node,
14294 V2DI_type_node, integer_type_node, NULL_TREE);
14295 tree v4si_ftype_v4si_int
14296 = build_function_type_list (V4SI_type_node,
14297 V4SI_type_node, integer_type_node, NULL_TREE);
14298 tree v8hi_ftype_v8hi_int
14299 = build_function_type_list (V8HI_type_node,
14300 V8HI_type_node, integer_type_node, NULL_TREE);
14301 tree v8hi_ftype_v8hi_v2di
14302 = build_function_type_list (V8HI_type_node,
14303 V8HI_type_node, V2DI_type_node, NULL_TREE);
14304 tree v4si_ftype_v4si_v2di
14305 = build_function_type_list (V4SI_type_node,
14306 V4SI_type_node, V2DI_type_node, NULL_TREE);
14307 tree v4si_ftype_v8hi_v8hi
14308 = build_function_type_list (V4SI_type_node,
14309 V8HI_type_node, V8HI_type_node, NULL_TREE);
14310 tree di_ftype_v8qi_v8qi
14311 = build_function_type_list (long_long_unsigned_type_node,
14312 V8QI_type_node, V8QI_type_node, NULL_TREE);
14313 tree di_ftype_v2si_v2si
14314 = build_function_type_list (long_long_unsigned_type_node,
14315 V2SI_type_node, V2SI_type_node, NULL_TREE);
14316 tree v2di_ftype_v16qi_v16qi
14317 = build_function_type_list (V2DI_type_node,
14318 V16QI_type_node, V16QI_type_node, NULL_TREE);
14319 tree v2di_ftype_v4si_v4si
14320 = build_function_type_list (V2DI_type_node,
14321 V4SI_type_node, V4SI_type_node, NULL_TREE);
14322 tree int_ftype_v16qi
14323 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
14324 tree v16qi_ftype_pcchar
14325 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
14326 tree void_ftype_pchar_v16qi
14327 = build_function_type_list (void_type_node,
14328 pchar_type_node, V16QI_type_node, NULL_TREE);
14330 tree float80_type;
14331 tree float128_type;
14332 tree ftype;
14334 /* The __float80 type. */
14335 if (TYPE_MODE (long_double_type_node) == XFmode)
14336 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
14337 "__float80");
14338 else
14339 {
14340 /* The __float80 type. */
14341 float80_type = make_node (REAL_TYPE);
14342 TYPE_PRECISION (float80_type) = 80;
14343 layout_type (float80_type);
14344 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
14345 }
14347 float128_type = make_node (REAL_TYPE);
14348 TYPE_PRECISION (float128_type) = 128;
14349 layout_type (float128_type);
14350 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
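/* A minimal user-level sketch (assuming the C front end; the declarations
   below are illustrative and not part of this file): once registered,
   the names are usable as type specifiers, e.g.

       __float80  e;
       __float128 q;

   where __float80 is the 80-bit extended type and __float128 the 128-bit
   type laid out above.  */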
14352 /* Add all builtins that are more or less simple operations on two
14353 operands. */
14354 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14355 {
14356 /* Use one of the operands; the target can have a different mode for
14357 mask-generating compares. */
14358 enum machine_mode mode;
14359 tree type;
14361 if (d->name == 0)
14362 continue;
14363 mode = insn_data[d->icode].operand[1].mode;
14365 switch (mode)
14366 {
14367 case V16QImode:
14368 type = v16qi_ftype_v16qi_v16qi;
14369 break;
14370 case V8HImode:
14371 type = v8hi_ftype_v8hi_v8hi;
14372 break;
14373 case V4SImode:
14374 type = v4si_ftype_v4si_v4si;
14375 break;
14376 case V2DImode:
14377 type = v2di_ftype_v2di_v2di;
14378 break;
14379 case V2DFmode:
14380 type = v2df_ftype_v2df_v2df;
14381 break;
14382 case TImode:
14383 type = ti_ftype_ti_ti;
14384 break;
14385 case V4SFmode:
14386 type = v4sf_ftype_v4sf_v4sf;
14387 break;
14388 case V8QImode:
14389 type = v8qi_ftype_v8qi_v8qi;
14390 break;
14391 case V4HImode:
14392 type = v4hi_ftype_v4hi_v4hi;
14393 break;
14394 case V2SImode:
14395 type = v2si_ftype_v2si_v2si;
14396 break;
14397 case DImode:
14398 type = di_ftype_di_di;
14399 break;
14401 default:
14402 gcc_unreachable ();
14403 }
14405 /* Override for comparisons. */
14406 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14407 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
14408 type = v4si_ftype_v4sf_v4sf;
14410 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
14411 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14412 type = v2di_ftype_v2df_v2df;
14414 def_builtin (d->mask, d->name, type, d->code);
14415 }
14417 /* Add the remaining MMX insns with somewhat more complicated types. */
14418 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
14419 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
14420 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
14421 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
14423 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
14424 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
14425 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
14427 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
14428 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
14430 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
14431 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
14433 /* comi/ucomi insns. */
14434 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14435 if (d->mask == MASK_SSE2)
14436 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
14437 else
14438 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
14440 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
14441 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
14442 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
14444 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
14445 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
14446 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
14447 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
14448 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
14449 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
14450 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
14451 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
14452 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
14453 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
14454 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
14456 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
14458 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
14459 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
14461 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
14462 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
14463 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
14464 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
14466 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
14467 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
14468 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
14469 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
14471 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
14473 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
14475 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
14476 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
14477 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
14478 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
14479 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
14480 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
14482 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
14484 /* Original 3DNow! */
14485 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
14486 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
14487 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
14488 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
14489 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
14490 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
14491 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
14492 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
14493 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
14494 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
14495 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
14496 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
14497 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
14498 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
14499 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
14500 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
14501 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
14502 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
14503 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
14504 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
14506 /* 3DNow! extension as used in the Athlon CPU. */
14507 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
14508 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
14509 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
14510 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
14511 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
14512 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
14514 /* SSE2 */
14515 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
14517 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
14518 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
14520 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
14521 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
14523 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
14524 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
14525 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
14526 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
14527 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
14529 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
14530 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
14531 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
14532 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
14534 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
14535 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
14537 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
14539 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
14540 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
14542 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
14543 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
14544 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
14545 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
14546 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
14548 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
14550 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
14551 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
14552 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
14553 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
14555 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
14556 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
14557 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
14559 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
14560 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
14561 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
14562 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
14564 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
14565 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
14566 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
14568 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
14569 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
14571 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
14572 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
14574 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
14575 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
14576 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
14578 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
14579 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
14580 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
14582 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
14583 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
14585 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
14586 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
14587 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
14588 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
14590 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
14591 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
14592 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
14593 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
14595 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
14596 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
14598 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
14600 /* Prescott New Instructions. */
14601 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
14602 void_ftype_pcvoid_unsigned_unsigned,
14603 IX86_BUILTIN_MONITOR);
14604 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
14605 void_ftype_unsigned_unsigned,
14606 IX86_BUILTIN_MWAIT);
14607 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
14608 v4sf_ftype_v4sf,
14609 IX86_BUILTIN_MOVSHDUP);
14610 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
14611 v4sf_ftype_v4sf,
14612 IX86_BUILTIN_MOVSLDUP);
14613 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
14614 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
14616 /* Access to the vec_init patterns. */
14617 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
14618 integer_type_node, NULL_TREE);
14619 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
14620 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
14622 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
14623 short_integer_type_node,
14624 short_integer_type_node,
14625 short_integer_type_node, NULL_TREE);
14626 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
14627 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
14629 ftype = build_function_type_list (V8QI_type_node, char_type_node,
14630 char_type_node, char_type_node,
14631 char_type_node, char_type_node,
14632 char_type_node, char_type_node,
14633 char_type_node, NULL_TREE);
14634 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
14635 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
14637 /* Access to the vec_extract patterns. */
14638 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14639 integer_type_node, NULL_TREE);
14640 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
14641 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
14643 ftype = build_function_type_list (long_long_integer_type_node,
14644 V2DI_type_node, integer_type_node,
14645 NULL_TREE);
14646 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
14647 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
14649 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14650 integer_type_node, NULL_TREE);
14651 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
14652 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
14654 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14655 integer_type_node, NULL_TREE);
14656 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
14657 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
14659 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14660 integer_type_node, NULL_TREE);
14661 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
14662 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
14664 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
14665 integer_type_node, NULL_TREE);
14666 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
14667 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
14669 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
14670 integer_type_node, NULL_TREE);
14671 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
14672 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
14674 /* Access to the vec_set patterns. */
14675 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14676 intHI_type_node,
14677 integer_type_node, NULL_TREE);
14678 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
14679 ftype, IX86_BUILTIN_VEC_SET_V8HI);
14681 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
14682 intHI_type_node,
14683 integer_type_node, NULL_TREE);
14684 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
14685 ftype, IX86_BUILTIN_VEC_SET_V4HI);
14686 }
14688 /* Errors in the source file can cause expand_expr to return const0_rtx
14689 where we expect a vector. To avoid crashing, use one of the vector
14690 clear instructions. */
14691 static rtx
14692 safe_vector_operand (rtx x, enum machine_mode mode)
14693 {
14694 if (x == const0_rtx)
14695 x = CONST0_RTX (mode);
14696 return x;
14697 }
14699 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
14701 static rtx
14702 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
14704 rtx pat, xops[3];
14705 tree arg0 = TREE_VALUE (arglist);
14706 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14707 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14708 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14709 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14710 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14711 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
14713 if (VECTOR_MODE_P (mode0))
14714 op0 = safe_vector_operand (op0, mode0);
14715 if (VECTOR_MODE_P (mode1))
14716 op1 = safe_vector_operand (op1, mode1);
14718 if (optimize || !target
14719 || GET_MODE (target) != tmode
14720 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14721 target = gen_reg_rtx (tmode);
14723 if (GET_MODE (op1) == SImode && mode1 == TImode)
14725 rtx x = gen_reg_rtx (V4SImode);
14726 emit_insn (gen_sse2_loadd (x, op1));
14727 op1 = gen_lowpart (TImode, x);
14730 /* The insn must want input operands in the same modes as the
14731 result. */
14732 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
14733 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
14735 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
14736 op0 = copy_to_mode_reg (mode0, op0);
14737 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
14738 op1 = copy_to_mode_reg (mode1, op1);
14740 /* ??? Using ix86_fixup_binary_operands is problematic when
14741 we've got mismatched modes. Fake it. */
14743 xops[0] = target;
14744 xops[1] = op0;
14745 xops[2] = op1;
14747 if (tmode == mode0 && tmode == mode1)
14749 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
14750 op0 = xops[1];
14751 op1 = xops[2];
14753 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
14755 op0 = force_reg (mode0, op0);
14756 op1 = force_reg (mode1, op1);
14757 target = gen_reg_rtx (tmode);
14760 pat = GEN_FCN (icode) (target, op0, op1);
14761 if (! pat)
14762 return 0;
14763 emit_insn (pat);
14764 return target;
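/* Illustrative sketch (assuming the usual GCC 4.x <xmmintrin.h> mapping of
   _mm_add_ps onto __builtin_ia32_addps): a two-operand builtin such as the
   one below reaches ix86_expand_binop_builtin through the bdesc_2arg table,
   which copies the operands into registers as the insn predicates require
   and emits the corresponding named pattern.

     #include <xmmintrin.h>

     __m128
     add4 (__m128 a, __m128 b)
     {
       return _mm_add_ps (a, b);
     }
*/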
14767 /* Subroutine of ix86_expand_builtin to take care of stores. */
14769 static rtx
14770 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
14772 rtx pat;
14773 tree arg0 = TREE_VALUE (arglist);
14774 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14775 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14776 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14777 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
14778 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
14780 if (VECTOR_MODE_P (mode1))
14781 op1 = safe_vector_operand (op1, mode1);
14783 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14784 op1 = copy_to_mode_reg (mode1, op1);
14786 pat = GEN_FCN (icode) (op0, op1);
14787 if (pat)
14788 emit_insn (pat);
14789 return 0;
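/* Usage sketch (assuming _mm_storeu_ps expands to __builtin_ia32_storeups as
   in the GCC 4.x headers): store builtins take a pointer and a value, so
   ix86_expand_store_builtin wraps the first operand in a MEM and forces the
   second into a register before emitting the store pattern.

     #include <xmmintrin.h>

     void
     store_unaligned (float *p, __m128 v)
     {
       _mm_storeu_ps (p, v);
     }
*/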
14792 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
14794 static rtx
14795 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
14796 rtx target, int do_load)
14798 rtx pat;
14799 tree arg0 = TREE_VALUE (arglist);
14800 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14801 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14802 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14804 if (optimize || !target
14805 || GET_MODE (target) != tmode
14806 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14807 target = gen_reg_rtx (tmode);
14808 if (do_load)
14809 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14810 else
14812 if (VECTOR_MODE_P (mode0))
14813 op0 = safe_vector_operand (op0, mode0);
14815 if ((optimize && !register_operand (op0, mode0))
14816 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14817 op0 = copy_to_mode_reg (mode0, op0);
14820 pat = GEN_FCN (icode) (target, op0);
14821 if (! pat)
14822 return 0;
14823 emit_insn (pat);
14824 return target;
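/* Usage sketch (assuming _mm_loadu_ps expands to __builtin_ia32_loadups as in
   the GCC 4.x headers): this is a unop builtin expanded with DO_LOAD set, so
   the single pointer argument is turned into a MEM of the input mode instead
   of being copied into a register.

     #include <xmmintrin.h>

     __m128
     load_unaligned (const float *p)
     {
       return _mm_loadu_ps (p);
     }
*/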
14827 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
14828 sqrtss, rsqrtss, rcpss. */
14830 static rtx
14831 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
14833 rtx pat;
14834 tree arg0 = TREE_VALUE (arglist);
14835 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14836 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14837 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14839 if (optimize || !target
14840 || GET_MODE (target) != tmode
14841 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14842 target = gen_reg_rtx (tmode);
14844 if (VECTOR_MODE_P (mode0))
14845 op0 = safe_vector_operand (op0, mode0);
14847 if ((optimize && !register_operand (op0, mode0))
14848 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14849 op0 = copy_to_mode_reg (mode0, op0);
14851 op1 = op0;
14852 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
14853 op1 = copy_to_mode_reg (mode0, op1);
14855 pat = GEN_FCN (icode) (target, op0, op1);
14856 if (! pat)
14857 return 0;
14858 emit_insn (pat);
14859 return target;
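/* Usage sketch (assuming _mm_sqrt_ss expands to __builtin_ia32_sqrtss as in
   the GCC 4.x headers): the scalar vm* patterns take the source twice, once
   for the low element being computed and once for the upper elements that
   pass through unchanged, which is why op1 is set to op0 above.

     #include <xmmintrin.h>

     __m128
     scalar_sqrt (__m128 a)
     {
       return _mm_sqrt_ss (a);
     }
*/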
14862 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
14864 static rtx
14865 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
14866 rtx target)
14868 rtx pat;
14869 tree arg0 = TREE_VALUE (arglist);
14870 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14871 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14872 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14873 rtx op2;
14874 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
14875 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
14876 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
14877 enum rtx_code comparison = d->comparison;
14879 if (VECTOR_MODE_P (mode0))
14880 op0 = safe_vector_operand (op0, mode0);
14881 if (VECTOR_MODE_P (mode1))
14882 op1 = safe_vector_operand (op1, mode1);
14884 /* Swap operands if we have a comparison that isn't available in
14885 hardware. */
14886 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14888 rtx tmp = gen_reg_rtx (mode1);
14889 emit_move_insn (tmp, op1);
14890 op1 = op0;
14891 op0 = tmp;
14894 if (optimize || !target
14895 || GET_MODE (target) != tmode
14896 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
14897 target = gen_reg_rtx (tmode);
14899 if ((optimize && !register_operand (op0, mode0))
14900 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
14901 op0 = copy_to_mode_reg (mode0, op0);
14902 if ((optimize && !register_operand (op1, mode1))
14903 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
14904 op1 = copy_to_mode_reg (mode1, op1);
14906 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
14907 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
14908 if (! pat)
14909 return 0;
14910 emit_insn (pat);
14911 return target;
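/* Usage sketch (assuming _mm_cmpgt_ps expands to __builtin_ia32_cmpgtps and
   that its table entry carries BUILTIN_DESC_SWAP_OPERANDS, as in the GCC 4.x
   sources): SSE only encodes the "less than" style comparisons, so a
   greater-than builtin like the one below is emitted by the code above as a
   cmplt-style comparison with the operands exchanged.

     #include <xmmintrin.h>

     __m128
     greater (__m128 a, __m128 b)
     {
       return _mm_cmpgt_ps (a, b);
     }
*/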
14914 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
14916 static rtx
14917 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
14918 rtx target)
14920 rtx pat;
14921 tree arg0 = TREE_VALUE (arglist);
14922 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14923 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14924 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14925 rtx op2;
14926 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
14927 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
14928 enum rtx_code comparison = d->comparison;
14930 if (VECTOR_MODE_P (mode0))
14931 op0 = safe_vector_operand (op0, mode0);
14932 if (VECTOR_MODE_P (mode1))
14933 op1 = safe_vector_operand (op1, mode1);
14935 /* Swap operands if we have a comparison that isn't available in
14936 hardware. */
14937 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14939 rtx tmp = op1;
14940 op1 = op0;
14941 op0 = tmp;
14944 target = gen_reg_rtx (SImode);
14945 emit_move_insn (target, const0_rtx);
14946 target = gen_rtx_SUBREG (QImode, target, 0);
14948 if ((optimize && !register_operand (op0, mode0))
14949 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14950 op0 = copy_to_mode_reg (mode0, op0);
14951 if ((optimize && !register_operand (op1, mode1))
14952 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14953 op1 = copy_to_mode_reg (mode1, op1);
14955 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
14956 pat = GEN_FCN (d->icode) (op0, op1);
14957 if (! pat)
14958 return 0;
14959 emit_insn (pat);
14960 emit_insn (gen_rtx_SET (VOIDmode,
14961 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
14962 gen_rtx_fmt_ee (comparison, QImode,
14963 SET_DEST (pat),
14964 const0_rtx)));
14966 return SUBREG_REG (target);
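/* Usage sketch (assuming _mm_comieq_ss expands to __builtin_ia32_comieq as in
   the GCC 4.x headers): the comi expander emits the flag-setting comparison
   and then materializes the result into the QImode low part of an SImode
   pseudo, so the builtin yields an int.

     #include <xmmintrin.h>

     int
     first_elements_equal (__m128 a, __m128 b)
     {
       return _mm_comieq_ss (a, b);
     }
*/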
14969 /* Return the integer constant in ARG. Constrain it to be in the range
14970 of the subparts of VEC_TYPE; issue an error if not. */
14972 static int
14973 get_element_number (tree vec_type, tree arg)
14975 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14977 if (!host_integerp (arg, 1)
14978 || (elt = tree_low_cst (arg, 1), elt > max))
14980 error ("selector must be an integer constant in the range 0..%wi", max);
14981 return 0;
14984 return elt;
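/* Illustration (assuming _mm_extract_pi16 maps onto
   __builtin_ia32_vec_ext_v4hi as in the GCC 4.x <xmmintrin.h>): the selector
   must be a compile-time constant no larger than the number of subparts
   minus one, so for a V4HI vector only 0..3 are accepted and the call below
   draws the "selector must be an integer constant" error, with element 0
   used as the fallback.

     #include <xmmintrin.h>

     int
     bad_selector (__m64 v)
     {
       return _mm_extract_pi16 (v, 7);
     }
*/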
14987 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14988 ix86_expand_vector_init. We DO have language-level syntax for this, in
14989 the form of (type){ init-list }. Except that since we can't place emms
14990 instructions from inside the compiler, we can't allow the use of MMX
14991 registers unless the user explicitly asks for it. So we do *not* define
14992 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
14993 we have builtins, invoked from mmintrin.h, that give us license to emit

14994 these sorts of instructions. */
14996 static rtx
14997 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
14999 enum machine_mode tmode = TYPE_MODE (type);
15000 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
15001 int i, n_elt = GET_MODE_NUNITS (tmode);
15002 rtvec v = rtvec_alloc (n_elt);
15004 gcc_assert (VECTOR_MODE_P (tmode));
15006 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
15008 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15009 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15012 gcc_assert (arglist == NULL);
15014 if (!target || !register_operand (target, tmode))
15015 target = gen_reg_rtx (tmode);
15017 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
15018 return target;
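/* Usage sketch (assuming _mm_set_pi32 is implemented with
   __builtin_ia32_vec_init_v2si, as in the GCC 4.x <mmintrin.h>): this is the
   sanctioned way to construct an MMX vector without exposing vec_init
   patterns for MMX modes, per the comment above
   ix86_expand_vec_init_builtin.

     #include <mmintrin.h>

     __m64
     make_v2si (int a, int b)
     {
       return _mm_set_pi32 (a, b);
     }
*/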
15021 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15022 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
15023 had a language-level syntax for referencing vector elements. */
15025 static rtx
15026 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
15028 enum machine_mode tmode, mode0;
15029 tree arg0, arg1;
15030 int elt;
15031 rtx op0;
15033 arg0 = TREE_VALUE (arglist);
15034 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15036 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15037 elt = get_element_number (TREE_TYPE (arg0), arg1);
15039 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15040 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15041 gcc_assert (VECTOR_MODE_P (mode0));
15043 op0 = force_reg (mode0, op0);
15045 if (optimize || !target || !register_operand (target, tmode))
15046 target = gen_reg_rtx (tmode);
15048 ix86_expand_vector_extract (true, target, op0, elt);
15050 return target;
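/* Usage sketch (assuming _mm_extract_epi16 is implemented with
   __builtin_ia32_vec_ext_v8hi, as in the GCC 4.x <emmintrin.h>): the vector
   operand is forced into a register and ix86_expand_vector_extract emits the
   appropriate extraction sequence for the constant element number.

     #include <emmintrin.h>

     int
     third_halfword (__m128i v)
     {
       return _mm_extract_epi16 (v, 2);
     }
*/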
15053 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15054 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
15055 a language-level syntax for referencing vector elements. */
15057 static rtx
15058 ix86_expand_vec_set_builtin (tree arglist)
15060 enum machine_mode tmode, mode1;
15061 tree arg0, arg1, arg2;
15062 int elt;
15063 rtx op0, op1;
15065 arg0 = TREE_VALUE (arglist);
15066 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15067 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15069 tmode = TYPE_MODE (TREE_TYPE (arg0));
15070 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15071 gcc_assert (VECTOR_MODE_P (tmode));
15073 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
15074 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
15075 elt = get_element_number (TREE_TYPE (arg0), arg2);
15077 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15078 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15080 op0 = force_reg (tmode, op0);
15081 op1 = force_reg (mode1, op1);
15083 ix86_expand_vector_set (true, op0, op1, elt);
15085 return op0;
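/* Usage sketch (assuming _mm_insert_epi16 is implemented with
   __builtin_ia32_vec_set_v8hi, as in the GCC 4.x <emmintrin.h>): the new
   element value is converted to the inner mode if necessary and
   ix86_expand_vector_set rewrites the chosen element in place, returning the
   updated vector.

     #include <emmintrin.h>

     __m128i
     set_low_halfword (__m128i v, short d)
     {
       return _mm_insert_epi16 (v, d, 0);
     }
*/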
15088 /* Expand an expression EXP that calls a built-in function,
15089 with result going to TARGET if that's convenient
15090 (and in mode MODE if that's convenient).
15091 SUBTARGET may be used as the target for computing one of EXP's operands.
15092 IGNORE is nonzero if the value is to be ignored. */
15094 static rtx
15095 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15096 enum machine_mode mode ATTRIBUTE_UNUSED,
15097 int ignore ATTRIBUTE_UNUSED)
15099 const struct builtin_description *d;
15100 size_t i;
15101 enum insn_code icode;
15102 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
15103 tree arglist = TREE_OPERAND (exp, 1);
15104 tree arg0, arg1, arg2;
15105 rtx op0, op1, op2, pat;
15106 enum machine_mode tmode, mode0, mode1, mode2;
15107 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15109 switch (fcode)
15111 case IX86_BUILTIN_EMMS:
15112 emit_insn (gen_mmx_emms ());
15113 return 0;
15115 case IX86_BUILTIN_SFENCE:
15116 emit_insn (gen_sse_sfence ());
15117 return 0;
15119 case IX86_BUILTIN_MASKMOVQ:
15120 case IX86_BUILTIN_MASKMOVDQU:
15121 icode = (fcode == IX86_BUILTIN_MASKMOVQ
15122 ? CODE_FOR_mmx_maskmovq
15123 : CODE_FOR_sse2_maskmovdqu);
15124 /* Note the arg order is different from the operand order. */
15125 arg1 = TREE_VALUE (arglist);
15126 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
15127 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15128 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15129 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15130 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15131 mode0 = insn_data[icode].operand[0].mode;
15132 mode1 = insn_data[icode].operand[1].mode;
15133 mode2 = insn_data[icode].operand[2].mode;
15135 op0 = force_reg (Pmode, op0);
15136 op0 = gen_rtx_MEM (mode1, op0);
15138 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15139 op0 = copy_to_mode_reg (mode0, op0);
15140 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15141 op1 = copy_to_mode_reg (mode1, op1);
15142 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
15143 op2 = copy_to_mode_reg (mode2, op2);
15144 pat = GEN_FCN (icode) (op0, op1, op2);
15145 if (! pat)
15146 return 0;
15147 emit_insn (pat);
15148 return 0;
15150 case IX86_BUILTIN_SQRTSS:
15151 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
15152 case IX86_BUILTIN_RSQRTSS:
15153 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
15154 case IX86_BUILTIN_RCPSS:
15155 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
15157 case IX86_BUILTIN_LOADUPS:
15158 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
15160 case IX86_BUILTIN_STOREUPS:
15161 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
15163 case IX86_BUILTIN_LOADHPS:
15164 case IX86_BUILTIN_LOADLPS:
15165 case IX86_BUILTIN_LOADHPD:
15166 case IX86_BUILTIN_LOADLPD:
15167 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
15168 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
15169 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
15170 : CODE_FOR_sse2_loadlpd);
15171 arg0 = TREE_VALUE (arglist);
15172 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15173 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15174 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15175 tmode = insn_data[icode].operand[0].mode;
15176 mode0 = insn_data[icode].operand[1].mode;
15177 mode1 = insn_data[icode].operand[2].mode;
15179 op0 = force_reg (mode0, op0);
15180 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
15181 if (optimize || target == 0
15182 || GET_MODE (target) != tmode
15183 || !register_operand (target, tmode))
15184 target = gen_reg_rtx (tmode);
15185 pat = GEN_FCN (icode) (target, op0, op1);
15186 if (! pat)
15187 return 0;
15188 emit_insn (pat);
15189 return target;
15191 case IX86_BUILTIN_STOREHPS:
15192 case IX86_BUILTIN_STORELPS:
15193 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
15194 : CODE_FOR_sse_storelps);
15195 arg0 = TREE_VALUE (arglist);
15196 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15197 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15198 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15199 mode0 = insn_data[icode].operand[0].mode;
15200 mode1 = insn_data[icode].operand[1].mode;
15202 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15203 op1 = force_reg (mode1, op1);
15205 pat = GEN_FCN (icode) (op0, op1);
15206 if (! pat)
15207 return 0;
15208 emit_insn (pat);
15209 return const0_rtx;
15211 case IX86_BUILTIN_MOVNTPS:
15212 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
15213 case IX86_BUILTIN_MOVNTQ:
15214 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
15216 case IX86_BUILTIN_LDMXCSR:
15217 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15218 target = assign_386_stack_local (SImode, SLOT_TEMP);
15219 emit_move_insn (target, op0);
15220 emit_insn (gen_sse_ldmxcsr (target));
15221 return 0;
15223 case IX86_BUILTIN_STMXCSR:
15224 target = assign_386_stack_local (SImode, SLOT_TEMP);
15225 emit_insn (gen_sse_stmxcsr (target));
15226 return copy_to_mode_reg (SImode, target);
15228 case IX86_BUILTIN_SHUFPS:
15229 case IX86_BUILTIN_SHUFPD:
15230 icode = (fcode == IX86_BUILTIN_SHUFPS
15231 ? CODE_FOR_sse_shufps
15232 : CODE_FOR_sse2_shufpd);
15233 arg0 = TREE_VALUE (arglist);
15234 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15235 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15236 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15237 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15238 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15239 tmode = insn_data[icode].operand[0].mode;
15240 mode0 = insn_data[icode].operand[1].mode;
15241 mode1 = insn_data[icode].operand[2].mode;
15242 mode2 = insn_data[icode].operand[3].mode;
15244 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15245 op0 = copy_to_mode_reg (mode0, op0);
15246 if ((optimize && !register_operand (op1, mode1))
15247 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
15248 op1 = copy_to_mode_reg (mode1, op1);
15249 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15251 /* @@@ better error message */
15252 error ("mask must be an immediate");
15253 return gen_reg_rtx (tmode);
15255 if (optimize || target == 0
15256 || GET_MODE (target) != tmode
15257 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15258 target = gen_reg_rtx (tmode);
15259 pat = GEN_FCN (icode) (target, op0, op1, op2);
15260 if (! pat)
15261 return 0;
15262 emit_insn (pat);
15263 return target;
15265 case IX86_BUILTIN_PSHUFW:
15266 case IX86_BUILTIN_PSHUFD:
15267 case IX86_BUILTIN_PSHUFHW:
15268 case IX86_BUILTIN_PSHUFLW:
15269 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
15270 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
15271 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
15272 : CODE_FOR_mmx_pshufw);
15273 arg0 = TREE_VALUE (arglist);
15274 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15275 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15276 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15277 tmode = insn_data[icode].operand[0].mode;
15278 mode1 = insn_data[icode].operand[1].mode;
15279 mode2 = insn_data[icode].operand[2].mode;
15281 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15282 op0 = copy_to_mode_reg (mode1, op0);
15283 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15285 /* @@@ better error message */
15286 error ("mask must be an immediate");
15287 return const0_rtx;
15289 if (target == 0
15290 || GET_MODE (target) != tmode
15291 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15292 target = gen_reg_rtx (tmode);
15293 pat = GEN_FCN (icode) (target, op0, op1);
15294 if (! pat)
15295 return 0;
15296 emit_insn (pat);
15297 return target;
15299 case IX86_BUILTIN_PSLLDQI128:
15300 case IX86_BUILTIN_PSRLDQI128:
15301 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
15302 : CODE_FOR_sse2_lshrti3);
15303 arg0 = TREE_VALUE (arglist);
15304 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15305 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15306 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15307 tmode = insn_data[icode].operand[0].mode;
15308 mode1 = insn_data[icode].operand[1].mode;
15309 mode2 = insn_data[icode].operand[2].mode;
15311 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15313 op0 = copy_to_reg (op0);
15314 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
15316 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15318 error ("shift must be an immediate");
15319 return const0_rtx;
15321 target = gen_reg_rtx (V2DImode);
15322 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
15323 if (! pat)
15324 return 0;
15325 emit_insn (pat);
15326 return target;
15328 case IX86_BUILTIN_FEMMS:
15329 emit_insn (gen_mmx_femms ());
15330 return NULL_RTX;
15332 case IX86_BUILTIN_PAVGUSB:
15333 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15335 case IX86_BUILTIN_PF2ID:
15336 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15338 case IX86_BUILTIN_PFACC:
15339 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15341 case IX86_BUILTIN_PFADD:
15342 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15344 case IX86_BUILTIN_PFCMPEQ:
15345 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15347 case IX86_BUILTIN_PFCMPGE:
15348 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15350 case IX86_BUILTIN_PFCMPGT:
15351 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15353 case IX86_BUILTIN_PFMAX:
15354 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15356 case IX86_BUILTIN_PFMIN:
15357 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15359 case IX86_BUILTIN_PFMUL:
15360 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15362 case IX86_BUILTIN_PFRCP:
15363 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15365 case IX86_BUILTIN_PFRCPIT1:
15366 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15368 case IX86_BUILTIN_PFRCPIT2:
15369 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15371 case IX86_BUILTIN_PFRSQIT1:
15372 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15374 case IX86_BUILTIN_PFRSQRT:
15375 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15377 case IX86_BUILTIN_PFSUB:
15378 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15380 case IX86_BUILTIN_PFSUBR:
15381 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15383 case IX86_BUILTIN_PI2FD:
15384 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15386 case IX86_BUILTIN_PMULHRW:
15387 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15389 case IX86_BUILTIN_PF2IW:
15390 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15392 case IX86_BUILTIN_PFNACC:
15393 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15395 case IX86_BUILTIN_PFPNACC:
15396 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15398 case IX86_BUILTIN_PI2FW:
15399 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15401 case IX86_BUILTIN_PSWAPDSI:
15402 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15404 case IX86_BUILTIN_PSWAPDSF:
15405 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15407 case IX86_BUILTIN_SQRTSD:
15408 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15409 case IX86_BUILTIN_LOADUPD:
15410 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15411 case IX86_BUILTIN_STOREUPD:
15412 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15414 case IX86_BUILTIN_MFENCE:
15415 emit_insn (gen_sse2_mfence ());
15416 return 0;
15417 case IX86_BUILTIN_LFENCE:
15418 emit_insn (gen_sse2_lfence ());
15419 return 0;
15421 case IX86_BUILTIN_CLFLUSH:
15422 arg0 = TREE_VALUE (arglist);
15423 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15424 icode = CODE_FOR_sse2_clflush;
15425 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15426 op0 = copy_to_mode_reg (Pmode, op0);
15428 emit_insn (gen_sse2_clflush (op0));
15429 return 0;
15431 case IX86_BUILTIN_MOVNTPD:
15432 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15433 case IX86_BUILTIN_MOVNTDQ:
15434 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15435 case IX86_BUILTIN_MOVNTI:
15436 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15438 case IX86_BUILTIN_LOADDQU:
15439 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15440 case IX86_BUILTIN_STOREDQU:
15441 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15443 case IX86_BUILTIN_MONITOR:
15444 arg0 = TREE_VALUE (arglist);
15445 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15446 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15447 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15448 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15449 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15450 if (!REG_P (op0))
15451 op0 = copy_to_mode_reg (SImode, op0);
15452 if (!REG_P (op1))
15453 op1 = copy_to_mode_reg (SImode, op1);
15454 if (!REG_P (op2))
15455 op2 = copy_to_mode_reg (SImode, op2);
15456 emit_insn (gen_sse3_monitor (op0, op1, op2));
15457 return 0;
15459 case IX86_BUILTIN_MWAIT:
15460 arg0 = TREE_VALUE (arglist);
15461 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15462 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15463 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15464 if (!REG_P (op0))
15465 op0 = copy_to_mode_reg (SImode, op0);
15466 if (!REG_P (op1))
15467 op1 = copy_to_mode_reg (SImode, op1);
15468 emit_insn (gen_sse3_mwait (op0, op1));
15469 return 0;
15471 case IX86_BUILTIN_LDDQU:
15472 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15473 target, 1);
15475 case IX86_BUILTIN_VEC_INIT_V2SI:
15476 case IX86_BUILTIN_VEC_INIT_V4HI:
15477 case IX86_BUILTIN_VEC_INIT_V8QI:
15478 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15480 case IX86_BUILTIN_VEC_EXT_V2DF:
15481 case IX86_BUILTIN_VEC_EXT_V2DI:
15482 case IX86_BUILTIN_VEC_EXT_V4SF:
15483 case IX86_BUILTIN_VEC_EXT_V4SI:
15484 case IX86_BUILTIN_VEC_EXT_V8HI:
15485 case IX86_BUILTIN_VEC_EXT_V2SI:
15486 case IX86_BUILTIN_VEC_EXT_V4HI:
15487 return ix86_expand_vec_ext_builtin (arglist, target);
15489 case IX86_BUILTIN_VEC_SET_V8HI:
15490 case IX86_BUILTIN_VEC_SET_V4HI:
15491 return ix86_expand_vec_set_builtin (arglist);
15493 default:
15494 break;
15497 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15498 if (d->code == fcode)
15500 /* Compares are treated specially. */
15501 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15502 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15503 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15504 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15505 return ix86_expand_sse_compare (d, arglist, target);
15507 return ix86_expand_binop_builtin (d->icode, arglist, target);
15510 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15511 if (d->code == fcode)
15512 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15514 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15515 if (d->code == fcode)
15516 return ix86_expand_sse_comi (d, arglist, target);
15518 gcc_unreachable ();
15521 /* Store OPERAND to memory after reload is completed. This means
15522 that we can't easily use assign_stack_local. */
15523 rtx
15524 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15526 rtx result;
15528 gcc_assert (reload_completed);
15529 if (TARGET_RED_ZONE)
15531 result = gen_rtx_MEM (mode,
15532 gen_rtx_PLUS (Pmode,
15533 stack_pointer_rtx,
15534 GEN_INT (-RED_ZONE_SIZE)));
15535 emit_move_insn (result, operand);
15537 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15539 switch (mode)
15541 case HImode:
15542 case SImode:
15543 operand = gen_lowpart (DImode, operand);
15544 /* FALLTHRU */
15545 case DImode:
15546 emit_insn (
15547 gen_rtx_SET (VOIDmode,
15548 gen_rtx_MEM (DImode,
15549 gen_rtx_PRE_DEC (DImode,
15550 stack_pointer_rtx)),
15551 operand));
15552 break;
15553 default:
15554 gcc_unreachable ();
15556 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15558 else
15560 switch (mode)
15562 case DImode:
15564 rtx operands[2];
15565 split_di (&operand, 1, operands, operands + 1);
15566 emit_insn (
15567 gen_rtx_SET (VOIDmode,
15568 gen_rtx_MEM (SImode,
15569 gen_rtx_PRE_DEC (Pmode,
15570 stack_pointer_rtx)),
15571 operands[1]));
15572 emit_insn (
15573 gen_rtx_SET (VOIDmode,
15574 gen_rtx_MEM (SImode,
15575 gen_rtx_PRE_DEC (Pmode,
15576 stack_pointer_rtx)),
15577 operands[0]));
15579 break;
15580 case HImode:
15581 /* It is better to store HImodes as SImodes. */
15582 if (!TARGET_PARTIAL_REG_STALL)
15583 operand = gen_lowpart (SImode, operand);
15584 /* FALLTHRU */
15585 case SImode:
15586 emit_insn (
15587 gen_rtx_SET (VOIDmode,
15588 gen_rtx_MEM (GET_MODE (operand),
15589 gen_rtx_PRE_DEC (SImode,
15590 stack_pointer_rtx)),
15591 operand));
15592 break;
15593 default:
15594 gcc_unreachable ();
15596 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15598 return result;
15601 /* Free the operand from memory. */
15602 void
15603 ix86_free_from_memory (enum machine_mode mode)
15605 if (!TARGET_RED_ZONE)
15607 int size;
15609 if (mode == DImode || TARGET_64BIT)
15610 size = 8;
15611 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
15612 size = 2;
15613 else
15614 size = 4;
15615 /* Use LEA to deallocate stack space. In peephole2 it will be converted
15616 to a pop or add instruction if registers are available. */
15617 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15618 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
15619 GEN_INT (size))));
15623 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
15624 QImode must go into class Q_REGS.
15625 Narrow ALL_REGS to GENERAL_REGS. This allows movsf and
15626 movdf to do mem-to-mem moves through integer regs. */
15627 enum reg_class
15628 ix86_preferred_reload_class (rtx x, enum reg_class class)
15630 /* We're only allowed to return a subclass of CLASS. Many of the
15631 following checks fail for NO_REGS, so eliminate that early. */
15632 if (class == NO_REGS)
15633 return NO_REGS;
15635 /* All classes can load zeros. */
15636 if (x == CONST0_RTX (GET_MODE (x)))
15637 return class;
15639 /* Floating-point constants need more complex checks. */
15640 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
15642 /* General regs can load everything. */
15643 if (reg_class_subset_p (class, GENERAL_REGS))
15644 return class;
15646 /* Floats can load 0 and 1 plus some others. Note that we eliminated
15647 zero above. We only want to wind up preferring 80387 registers if
15648 we plan on doing computation with them. */
15649 if (TARGET_80387
15650 && (TARGET_MIX_SSE_I387
15651 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
15652 && standard_80387_constant_p (x))
15654 /* Limit class to non-sse. */
15655 if (class == FLOAT_SSE_REGS)
15656 return FLOAT_REGS;
15657 if (class == FP_TOP_SSE_REGS)
15658 return FP_TOP_REG;
15659 if (class == FP_SECOND_SSE_REGS)
15660 return FP_SECOND_REG;
15661 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
15662 return class;
15665 return NO_REGS;
15667 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
15668 return NO_REGS;
15669 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
15670 return NO_REGS;
15672 /* Generally when we see PLUS here, it's the function invariant
15673 (plus soft-fp const_int), which can only be computed into general
15674 regs. */
15675 if (GET_CODE (x) == PLUS)
15676 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
15678 /* QImode constants are easy to load, but non-constant QImode data
15679 must go into Q_REGS. */
15680 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
15682 if (reg_class_subset_p (class, Q_REGS))
15683 return class;
15684 if (reg_class_subset_p (Q_REGS, class))
15685 return Q_REGS;
15686 return NO_REGS;
15689 return class;
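/* Worked example (a sketch of how the checks above play out): returning a
   zero constant can use any class, and returning 1.0 with x87 math can keep
   FLOAT_REGS because standard_80387_constant_p recognizes fld1; an arbitrary
   constant such as the one below gets NO_REGS for the FP classes, so it is
   left in the constant pool and loaded from memory.

     double
     pi_ish (void)
     {
       return 3.14159;
     }
*/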
15692 /* If we are copying between general and FP registers, we need a memory
15693 location. The same is true for SSE and MMX registers.
15695 The macro can't work reliably when one of the CLASSES is a class containing
15696 registers from multiple units (SSE, MMX, integer). We avoid this by never
15697 combining those units in a single alternative in the machine description.
15698 Ensure that this constraint holds to avoid unexpected surprises.
15700 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
15701 enforce these sanity checks. */
15703 int
15704 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
15705 enum machine_mode mode, int strict)
15707 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
15708 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
15709 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
15710 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
15711 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
15712 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
15714 gcc_assert (!strict);
15715 return true;
15718 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
15719 return true;
15721 /* ??? This is a lie. We do have moves between mmx/general, and between
15722 mmx/sse2. But by saying we need secondary memory we discourage the
15723 register allocator from using the mmx registers unless needed. */
15724 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
15725 return true;
15727 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15729 /* SSE1 doesn't have any direct moves from other classes. */
15730 if (!TARGET_SSE2)
15731 return true;
15733 /* If the target says that inter-unit moves are more expensive
15734 than moving through memory, then don't generate them. */
15735 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
15736 return true;
15738 /* Between SSE and general, we have moves no larger than word size. */
15739 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
15740 return true;
15742 /* ??? For the cost of one register reformat penalty, we could use
15743 the same instructions to move SFmode and DFmode data, but the
15744 relevant move patterns don't support those alternatives. */
15745 if (mode == SFmode || mode == DFmode)
15746 return true;
15749 return false;
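/* Worked instance (a sketch of how the tests above combine): on a 32-bit
   target UNITS_PER_WORD is 4, so copying a V4SF or TImode value between
   SSE_REGS and GENERAL_REGS fails the word-size test and the function
   answers true; reload then routes the copy through a stack slot as a store
   followed by a load instead of attempting a direct move. */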
15752 /* Return true if the registers in CLASS cannot represent the change from
15753 modes FROM to TO. */
15755 bool
15756 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
15757 enum reg_class class)
15759 if (from == to)
15760 return false;
15762 /* x87 registers can't do subreg at all, as all values are reformatted
15763 to extended precision. */
15764 if (MAYBE_FLOAT_CLASS_P (class))
15765 return true;
15767 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
15769 /* Vector registers do not support QI or HImode loads. If we don't
15770 disallow a change to these modes, reload will assume it's ok to
15771 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
15772 the vec_dupv4hi pattern. */
15773 if (GET_MODE_SIZE (from) < 4)
15774 return true;
15776 /* Vector registers do not support subreg with nonzero offsets, which
15777 are otherwise valid for integer registers. Since we can't see
15778 whether we have a nonzero offset from here, prohibit all
15779 nonparadoxical subregs changing size. */
15780 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
15781 return true;
15784 return false;
15787 /* Return the cost of moving data from a register in class CLASS1 to
15788 one in class CLASS2.
15790 It is not required that the cost always equal 2 when FROM is the same as TO;
15791 on some machines it is expensive to move between registers if they are not
15792 general registers. */
15794 int
15795 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
15796 enum reg_class class2)
15798 /* In case we require secondary memory, compute the cost of the store
15799 followed by the load. In order to avoid bad register allocation choices,
15800 we need this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
15802 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
15804 int cost = 1;
15806 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
15807 MEMORY_MOVE_COST (mode, class1, 1));
15808 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
15809 MEMORY_MOVE_COST (mode, class2, 1));
15811 /* In the case of copying from a general purpose register we may emit
15812 multiple stores followed by a single load, causing a memory size
15813 mismatch stall. Count this as an arbitrarily high cost of 20. */
15814 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
15815 cost += 20;
15817 /* In the case of FP/MMX moves, the registers actually overlap, and we
15818 have to switch modes in order to treat them differently. */
15819 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
15820 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
15821 cost += 20;
15823 return cost;
15826 /* Moves between SSE/MMX and integer unit are expensive. */
15827 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
15828 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15829 return ix86_cost->mmxsse_to_integer;
15830 if (MAYBE_FLOAT_CLASS_P (class1))
15831 return ix86_cost->fp_move;
15832 if (MAYBE_SSE_CLASS_P (class1))
15833 return ix86_cost->sse_move;
15834 if (MAYBE_MMX_CLASS_P (class1))
15835 return ix86_cost->mmx_move;
15836 return 2;
15839 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
15841 bool
15842 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
15844 /* Flags and only flags can only hold CCmode values. */
15845 if (CC_REGNO_P (regno))
15846 return GET_MODE_CLASS (mode) == MODE_CC;
15847 if (GET_MODE_CLASS (mode) == MODE_CC
15848 || GET_MODE_CLASS (mode) == MODE_RANDOM
15849 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
15850 return 0;
15851 if (FP_REGNO_P (regno))
15852 return VALID_FP_MODE_P (mode);
15853 if (SSE_REGNO_P (regno))
15855 /* We implement the move patterns for all vector modes into and
15856 out of SSE registers, even when no operation instructions
15857 are available. */
15858 return (VALID_SSE_REG_MODE (mode)
15859 || VALID_SSE2_REG_MODE (mode)
15860 || VALID_MMX_REG_MODE (mode)
15861 || VALID_MMX_REG_MODE_3DNOW (mode));
15863 if (MMX_REGNO_P (regno))
15865 /* We implement the move patterns for 3DNOW modes even in MMX mode,
15866 so if the register is available at all, then we can move data of
15867 the given mode into or out of it. */
15868 return (VALID_MMX_REG_MODE (mode)
15869 || VALID_MMX_REG_MODE_3DNOW (mode));
15872 if (mode == QImode)
15874 /* Take care with QImode values: they can be in non-QI regs,
15875 but then they do cause partial register stalls. */
15876 if (regno < 4 || TARGET_64BIT)
15877 return 1;
15878 if (!TARGET_PARTIAL_REG_STALL)
15879 return 1;
15880 return reload_in_progress || reload_completed;
15882 /* We handle both integers and floats in the general purpose registers. */
15883 else if (VALID_INT_MODE_P (mode))
15884 return 1;
15885 else if (VALID_FP_MODE_P (mode))
15886 return 1;
15887 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
15888 on to use that value in smaller contexts, this can easily force a
15889 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
15890 supporting DImode, allow it. */
15891 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
15892 return 1;
15894 return 0;
15897 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
15898 tieable integer mode. */
15900 static bool
15901 ix86_tieable_integer_mode_p (enum machine_mode mode)
15903 switch (mode)
15905 case HImode:
15906 case SImode:
15907 return true;
15909 case QImode:
15910 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
15912 case DImode:
15913 return TARGET_64BIT;
15915 default:
15916 return false;
15920 /* Return true if MODE1 is accessible in a register that can hold MODE2
15921 without copying. That is, all register classes that can hold MODE2
15922 can also hold MODE1. */
15924 bool
15925 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
15927 if (mode1 == mode2)
15928 return true;
15930 if (ix86_tieable_integer_mode_p (mode1)
15931 && ix86_tieable_integer_mode_p (mode2))
15932 return true;
15934 /* MODE2 being XFmode implies fp stack or general regs, which means we
15935 can tie any smaller floating point modes to it. Note that we do not
15936 tie this with TFmode. */
15937 if (mode2 == XFmode)
15938 return mode1 == SFmode || mode1 == DFmode;
15940 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
15941 that we can tie it with SFmode. */
15942 if (mode2 == DFmode)
15943 return mode1 == SFmode;
15945 /* If MODE2 is only appropriate for an SSE register, then tie with
15946 any other mode acceptable to SSE registers. */
15947 if (GET_MODE_SIZE (mode2) >= 8
15948 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
15949 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
15951 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
15952 with any other mode acceptable to MMX registers. */
15953 if (GET_MODE_SIZE (mode2) == 8
15954 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
15955 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
15957 return false;
15960 /* Return the cost of moving data of mode M between a
15961 register and memory. A value of 2 is the default; this cost is
15962 relative to those in `REGISTER_MOVE_COST'.
15964 If moving between registers and memory is more expensive than
15965 between two registers, you should define this macro to express the
15966 relative cost.
15968 Also model the increased cost of moving QImode registers in non
15969 Q_REGS classes.
15970 */
15971 int
15972 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
15974 if (FLOAT_CLASS_P (class))
15976 int index;
15977 switch (mode)
15979 case SFmode:
15980 index = 0;
15981 break;
15982 case DFmode:
15983 index = 1;
15984 break;
15985 case XFmode:
15986 index = 2;
15987 break;
15988 default:
15989 return 100;
15991 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
15993 if (SSE_CLASS_P (class))
15995 int index;
15996 switch (GET_MODE_SIZE (mode))
15998 case 4:
15999 index = 0;
16000 break;
16001 case 8:
16002 index = 1;
16003 break;
16004 case 16:
16005 index = 2;
16006 break;
16007 default:
16008 return 100;
16010 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
16012 if (MMX_CLASS_P (class))
16014 int index;
16015 switch (GET_MODE_SIZE (mode))
16017 case 4:
16018 index = 0;
16019 break;
16020 case 8:
16021 index = 1;
16022 break;
16023 default:
16024 return 100;
16026 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
16028 switch (GET_MODE_SIZE (mode))
16030 case 1:
16031 if (in)
16032 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
16033 : ix86_cost->movzbl_load);
16034 else
16035 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
16036 : ix86_cost->int_store[0] + 4);
16037 break;
16038 case 2:
16039 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
16040 default:
16041 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
16042 if (mode == TFmode)
16043 mode = XFmode;
16044 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
16045 * (((int) GET_MODE_SIZE (mode)
16046 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
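/* Worked example (a sketch using the cost tables above): loading a DFmode
   value into FLOAT_REGS costs fp_load[1], loading a 16-byte vector into
   SSE_REGS costs sse_load[2], and an integer-class load wider than a word is
   charged one int_load[2] per 32-bit word, so a 16-byte value on a 32-bit
   target costs 4 * int_load[2]. */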
16050 /* Compute a (partial) cost for rtx X. Return true if the complete
16051 cost has been computed, and false if subexpressions should be
16052 scanned. In either case, *TOTAL contains the cost result. */
16054 static bool
16055 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
16057 enum machine_mode mode = GET_MODE (x);
16059 switch (code)
16061 case CONST_INT:
16062 case CONST:
16063 case LABEL_REF:
16064 case SYMBOL_REF:
16065 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
16066 *total = 3;
16067 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
16068 *total = 2;
16069 else if (flag_pic && SYMBOLIC_CONST (x)
16070 && (!TARGET_64BIT
16071 || (GET_CODE (x) != LABEL_REF
16072 && (GET_CODE (x) != SYMBOL_REF
16073 || !SYMBOL_REF_LOCAL_P (x)))))
16074 *total = 1;
16075 else
16076 *total = 0;
16077 return true;
16079 case CONST_DOUBLE:
16080 if (mode == VOIDmode)
16081 *total = 0;
16082 else
16083 switch (standard_80387_constant_p (x))
16085 case 1: /* 0.0 */
16086 *total = 1;
16087 break;
16088 default: /* Other constants */
16089 *total = 2;
16090 break;
16091 case 0:
16092 case -1:
16093 /* Start with (MEM (SYMBOL_REF)), since that's where
16094 it'll probably end up. Add a penalty for size. */
16095 *total = (COSTS_N_INSNS (1)
16096 + (flag_pic != 0 && !TARGET_64BIT)
16097 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
16098 break;
16100 return true;
16102 case ZERO_EXTEND:
16103 /* The zero extension is often completely free on x86_64, so make
16104 it as cheap as possible. */
16105 if (TARGET_64BIT && mode == DImode
16106 && GET_MODE (XEXP (x, 0)) == SImode)
16107 *total = 1;
16108 else if (TARGET_ZERO_EXTEND_WITH_AND)
16109 *total = COSTS_N_INSNS (ix86_cost->add);
16110 else
16111 *total = COSTS_N_INSNS (ix86_cost->movzx);
16112 return false;
16114 case SIGN_EXTEND:
16115 *total = COSTS_N_INSNS (ix86_cost->movsx);
16116 return false;
16118 case ASHIFT:
16119 if (GET_CODE (XEXP (x, 1)) == CONST_INT
16120 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
16122 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16123 if (value == 1)
16125 *total = COSTS_N_INSNS (ix86_cost->add);
16126 return false;
16128 if ((value == 2 || value == 3)
16129 && ix86_cost->lea <= ix86_cost->shift_const)
16131 *total = COSTS_N_INSNS (ix86_cost->lea);
16132 return false;
16135 /* FALLTHRU */
16137 case ROTATE:
16138 case ASHIFTRT:
16139 case LSHIFTRT:
16140 case ROTATERT:
16141 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
16143 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16145 if (INTVAL (XEXP (x, 1)) > 32)
16146 *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
16147 else
16148 *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
16150 else
16152 if (GET_CODE (XEXP (x, 1)) == AND)
16153 *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
16154 else
16155 *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
16158 else
16160 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16161 *total = COSTS_N_INSNS (ix86_cost->shift_const);
16162 else
16163 *total = COSTS_N_INSNS (ix86_cost->shift_var);
16165 return false;
16167 case MULT:
16168 if (FLOAT_MODE_P (mode))
16170 *total = COSTS_N_INSNS (ix86_cost->fmul);
16171 return false;
16173 else
16175 rtx op0 = XEXP (x, 0);
16176 rtx op1 = XEXP (x, 1);
16177 int nbits;
16178 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16180 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16181 for (nbits = 0; value != 0; value &= value - 1)
16182 nbits++;
16184 else
16185 /* This is arbitrary. */
16186 nbits = 7;
16188 /* Compute costs correctly for widening multiplication. */
16189 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
16190 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
16191 == GET_MODE_SIZE (mode))
16193 int is_mulwiden = 0;
16194 enum machine_mode inner_mode = GET_MODE (op0);
16196 if (GET_CODE (op0) == GET_CODE (op1))
16197 is_mulwiden = 1, op1 = XEXP (op1, 0);
16198 else if (GET_CODE (op1) == CONST_INT)
16200 if (GET_CODE (op0) == SIGN_EXTEND)
16201 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
16202 == INTVAL (op1);
16203 else
16204 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
16207 if (is_mulwiden)
16208 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
16211 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
16212 + nbits * ix86_cost->mult_bit)
16213 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
16215 return true;
16218 case DIV:
16219 case UDIV:
16220 case MOD:
16221 case UMOD:
16222 if (FLOAT_MODE_P (mode))
16223 *total = COSTS_N_INSNS (ix86_cost->fdiv);
16224 else
16225 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
16226 return false;
16228 case PLUS:
16229 if (FLOAT_MODE_P (mode))
16230 *total = COSTS_N_INSNS (ix86_cost->fadd);
16231 else if (GET_MODE_CLASS (mode) == MODE_INT
16232 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
16234 if (GET_CODE (XEXP (x, 0)) == PLUS
16235 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
16236 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
16237 && CONSTANT_P (XEXP (x, 1)))
16239 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
16240 if (val == 2 || val == 4 || val == 8)
16242 *total = COSTS_N_INSNS (ix86_cost->lea);
16243 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16244 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
16245 outer_code);
16246 *total += rtx_cost (XEXP (x, 1), outer_code);
16247 return true;
16250 else if (GET_CODE (XEXP (x, 0)) == MULT
16251 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
16253 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
16254 if (val == 2 || val == 4 || val == 8)
16256 *total = COSTS_N_INSNS (ix86_cost->lea);
16257 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16258 *total += rtx_cost (XEXP (x, 1), outer_code);
16259 return true;
16262 else if (GET_CODE (XEXP (x, 0)) == PLUS)
16264 *total = COSTS_N_INSNS (ix86_cost->lea);
16265 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16266 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16267 *total += rtx_cost (XEXP (x, 1), outer_code);
16268 return true;
16271 /* FALLTHRU */
16273 case MINUS:
16274 if (FLOAT_MODE_P (mode))
16276 *total = COSTS_N_INSNS (ix86_cost->fadd);
16277 return false;
16279 /* FALLTHRU */
16281 case AND:
16282 case IOR:
16283 case XOR:
16284 if (!TARGET_64BIT && mode == DImode)
16286 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
16287 + (rtx_cost (XEXP (x, 0), outer_code)
16288 << (GET_MODE (XEXP (x, 0)) != DImode))
16289 + (rtx_cost (XEXP (x, 1), outer_code)
16290 << (GET_MODE (XEXP (x, 1)) != DImode)));
16291 return true;
16293 /* FALLTHRU */
16295 case NEG:
16296 if (FLOAT_MODE_P (mode))
16298 *total = COSTS_N_INSNS (ix86_cost->fchs);
16299 return false;
16301 /* FALLTHRU */
16303 case NOT:
16304 if (!TARGET_64BIT && mode == DImode)
16305 *total = COSTS_N_INSNS (ix86_cost->add * 2);
16306 else
16307 *total = COSTS_N_INSNS (ix86_cost->add);
16308 return false;
16310 case COMPARE:
16311 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
16312 && XEXP (XEXP (x, 0), 1) == const1_rtx
16313 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
16314 && XEXP (x, 1) == const0_rtx)
16316 /* This kind of construct is implemented using test[bwl].
16317 Treat it as if we had an AND. */
16318 *total = (COSTS_N_INSNS (ix86_cost->add)
16319 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
16320 + rtx_cost (const1_rtx, outer_code));
16321 return true;
16323 return false;
16325 case FLOAT_EXTEND:
16326 if (!TARGET_SSE_MATH
16327 || mode == XFmode
16328 || (mode == DFmode && !TARGET_SSE2))
16329 *total = 0;
16330 return false;
16332 case ABS:
16333 if (FLOAT_MODE_P (mode))
16334 *total = COSTS_N_INSNS (ix86_cost->fabs);
16335 return false;
16337 case SQRT:
16338 if (FLOAT_MODE_P (mode))
16339 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
16340 return false;
16342 case UNSPEC:
16343 if (XINT (x, 1) == UNSPEC_TP)
16344 *total = 0;
16345 return false;
16347 default:
16348 return false;
16352 #if TARGET_MACHO
16354 static int current_machopic_label_num;
16356 /* Given a symbol name and its associated stub, write out the
16357 definition of the stub. */
16359 void
16360 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16362 unsigned int length;
16363 char *binder_name, *symbol_name, lazy_ptr_name[32];
16364 int label = ++current_machopic_label_num;
16366 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16367 symb = (*targetm.strip_name_encoding) (symb);
16369 length = strlen (stub);
16370 binder_name = alloca (length + 32);
16371 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16373 length = strlen (symb);
16374 symbol_name = alloca (length + 32);
16375 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16377 sprintf (lazy_ptr_name, "L%d$lz", label);
16379 if (MACHOPIC_PURE)
16380 machopic_picsymbol_stub_section ();
16381 else
16382 machopic_symbol_stub_section ();
16384 fprintf (file, "%s:\n", stub);
16385 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16387 if (MACHOPIC_PURE)
16389 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16390 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16391 fprintf (file, "\tjmp %%edx\n");
16393 else
16394 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16396 fprintf (file, "%s:\n", binder_name);
16398 if (MACHOPIC_PURE)
16400 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16401 fprintf (file, "\tpushl %%eax\n");
16403 else
16404 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
16406 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16408 machopic_lazy_symbol_ptr_section ();
16409 fprintf (file, "%s:\n", lazy_ptr_name);
16410 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16411 fprintf (file, "\t.long %s\n", binder_name);
16413 #endif /* TARGET_MACHO */
16415 /* Order the registers for the register allocator. */
16417 void
16418 x86_order_regs_for_local_alloc (void)
16420 int pos = 0;
16421 int i;
16423 /* First allocate the local general purpose registers. */
16424 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16425 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16426 reg_alloc_order [pos++] = i;
16428 /* Global general purpose registers. */
16429 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16430 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16431 reg_alloc_order [pos++] = i;
16433 /* x87 registers come first in case we are doing FP math
16434 using them. */
16435 if (!TARGET_SSE_MATH)
16436 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16437 reg_alloc_order [pos++] = i;
16439 /* SSE registers. */
16440 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16441 reg_alloc_order [pos++] = i;
16442 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16443 reg_alloc_order [pos++] = i;
16445 /* x87 registers. */
16446 if (TARGET_SSE_MATH)
16447 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16448 reg_alloc_order [pos++] = i;
16450 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16451 reg_alloc_order [pos++] = i;
16453 /* Initialize the rest of the array, as we do not allocate some registers
16454 at all. */
16455 while (pos < FIRST_PSEUDO_REGISTER)
16456 reg_alloc_order [pos++] = 0;
16459 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16460 struct attribute_spec.handler. */
16461 static tree
16462 ix86_handle_struct_attribute (tree *node, tree name,
16463 tree args ATTRIBUTE_UNUSED,
16464 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16466 tree *type = NULL;
16467 if (DECL_P (*node))
16469 if (TREE_CODE (*node) == TYPE_DECL)
16470 type = &TREE_TYPE (*node);
16472 else
16473 type = node;
16475 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16476 || TREE_CODE (*type) == UNION_TYPE)))
16478 warning (OPT_Wattributes, "%qs attribute ignored",
16479 IDENTIFIER_POINTER (name));
16480 *no_add_attrs = true;
16483 else if ((is_attribute_p ("ms_struct", name)
16484 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16485 || ((is_attribute_p ("gcc_struct", name)
16486 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16488 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16489 IDENTIFIER_POINTER (name));
16490 *no_add_attrs = true;
16493 return NULL_TREE;
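/* Illustration of what the handler above accepts: "ms_struct" (or
   "gcc_struct") may be placed on struct and union types, as in the
   declaration below; combining the two attributes on one type draws the
   "incompatible attribute ignored" warning, and applying either to something
   that is not a struct or union draws "attribute ignored".

     struct __attribute__ ((ms_struct)) packed_like_msvc
     {
       char c;
       long long l;
     };
*/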
16496 static bool
16497 ix86_ms_bitfield_layout_p (tree record_type)
16499 return (TARGET_MS_BITFIELD_LAYOUT &&
16500 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
16501 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
16504 /* Returns an expression indicating where the this parameter is
16505 located on entry to the FUNCTION. */
16507 static rtx
16508 x86_this_parameter (tree function)
16510 tree type = TREE_TYPE (function);
16512 if (TARGET_64BIT)
16514 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16515 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16518 if (ix86_function_regparm (type, function) > 0)
16520 tree parm;
16522 parm = TYPE_ARG_TYPES (type);
16523 /* Figure out whether or not the function has a variable number of
16524 arguments. */
16525 for (; parm; parm = TREE_CHAIN (parm))
16526 if (TREE_VALUE (parm) == void_type_node)
16527 break;
16528 /* If not variadic, the this parameter is passed in the first argument register. */
16529 if (parm)
16531 int regno = 0;
16532 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16533 regno = 2;
16534 return gen_rtx_REG (SImode, regno);
16538 if (aggregate_value_p (TREE_TYPE (type), type))
16539 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16540 else
16541 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
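/* Summary of the cases above (illustrative, using the usual hard register
   numbering where register 0 is %eax/%rax and register 2 is %ecx):
     - 64-bit: `this' arrives in %rdi, or in %rsi when the return value is
       passed by invisible reference (aggregate_value_p).
     - 32-bit with regparm and a fixed argument list: `this' arrives in
       %eax, or in %ecx for fastcall functions.
     - otherwise `this' lives on the stack: at 4(%esp) on entry, or at
       8(%esp) when a hidden aggregate-return pointer is pushed first.  */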
16544 /* Determine whether x86_output_mi_thunk can succeed. */
16546 static bool
16547 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16548 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16549 HOST_WIDE_INT vcall_offset, tree function)
16551 /* 64-bit can handle anything. */
16552 if (TARGET_64BIT)
16553 return true;
16555 /* For 32-bit, everything's fine if we have one free register. */
16556 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16557 return true;
16559 /* Need a free register for vcall_offset. */
16560 if (vcall_offset)
16561 return false;
16563 /* Need a free register for GOT references. */
16564 if (flag_pic && !(*targetm.binds_local_p) (function))
16565 return false;
16567 /* Otherwise ok. */
16568 return true;
16571 /* Output the assembler code for a thunk function. THUNK_DECL is the
16572 declaration for the thunk function itself, FUNCTION is the decl for
16573 the target function. DELTA is an immediate constant offset to be
16574 added to THIS. If VCALL_OFFSET is nonzero, the word at
16575 *(*this + vcall_offset) should be added to THIS. */
16577 static void
16578 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16579 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16580 HOST_WIDE_INT vcall_offset, tree function)
16582 rtx xops[3];
16583 rtx this = x86_this_parameter (function);
16584 rtx this_reg, tmp;
16586 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16587 pull it in now and let DELTA benefit. */
16588 if (REG_P (this))
16589 this_reg = this;
16590 else if (vcall_offset)
16592 /* Put the this parameter into %eax. */
16593 xops[0] = this;
16594 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16595 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16597 else
16598 this_reg = NULL_RTX;
16600 /* Adjust the this parameter by a fixed constant. */
16601 if (delta)
16603 xops[0] = GEN_INT (delta);
16604 xops[1] = this_reg ? this_reg : this;
16605 if (TARGET_64BIT)
16607 if (!x86_64_general_operand (xops[0], DImode))
16609 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16610 xops[1] = tmp;
16611 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16612 xops[0] = tmp;
16613 xops[1] = this;
16615 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16617 else
16618 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16621 /* Adjust the this parameter by a value stored in the vtable. */
16622 if (vcall_offset)
16624 if (TARGET_64BIT)
16625 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16626 else
16628 int tmp_regno = 2 /* ECX */;
16629 if (lookup_attribute ("fastcall",
16630 TYPE_ATTRIBUTES (TREE_TYPE (function))))
16631 tmp_regno = 0 /* EAX */;
16632 tmp = gen_rtx_REG (SImode, tmp_regno);
16635 xops[0] = gen_rtx_MEM (Pmode, this_reg);
16636 xops[1] = tmp;
16637 if (TARGET_64BIT)
16638 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16639 else
16640 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16642 /* Adjust the this parameter. */
16643 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
16644 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
16646 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
16647 xops[0] = GEN_INT (vcall_offset);
16648 xops[1] = tmp2;
16649 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16650 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
16652 xops[1] = this_reg;
16653 if (TARGET_64BIT)
16654 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16655 else
16656 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16659 /* If necessary, drop THIS back to its stack slot. */
16660 if (this_reg && this_reg != this)
16662 xops[0] = this_reg;
16663 xops[1] = this;
16664 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16667 xops[0] = XEXP (DECL_RTL (function), 0);
16668 if (TARGET_64BIT)
16670 if (!flag_pic || (*targetm.binds_local_p) (function))
16671 output_asm_insn ("jmp\t%P0", xops);
16672 else
16674 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
16675 tmp = gen_rtx_CONST (Pmode, tmp);
16676 tmp = gen_rtx_MEM (QImode, tmp);
16677 xops[0] = tmp;
16678 output_asm_insn ("jmp\t%A0", xops);
16681 else
16683 if (!flag_pic || (*targetm.binds_local_p) (function))
16684 output_asm_insn ("jmp\t%P0", xops);
16685 else
16686 #if TARGET_MACHO
16687 if (TARGET_MACHO)
16689 rtx sym_ref = XEXP (DECL_RTL (function), 0);
16690 tmp = (gen_rtx_SYMBOL_REF
16691 (Pmode,
16692 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
16693 tmp = gen_rtx_MEM (QImode, tmp);
16694 xops[0] = tmp;
16695 output_asm_insn ("jmp\t%0", xops);
16697 else
16698 #endif /* TARGET_MACHO */
16700 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
16701 output_set_got (tmp);
16703 xops[1] = tmp;
16704 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
16705 output_asm_insn ("jmp\t{*}%1", xops);
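/* Illustrative sketch of the output for the simplest case, a 32-bit non-PIC
   thunk with `this' on the stack, DELTA == 16 and no VCALL_OFFSET (the
   target symbol name is hypothetical):

	addl	$16, 4(%esp)
	jmp	_ZN3Foo3barEv

   When VCALL_OFFSET is nonzero, the thunk additionally loads `this' into
   %eax, reads the vtable pointer through a scratch register (%ecx, or %eax
   for fastcall targets), adds the vtable slot, and stores `this' back to
   its stack slot before the tail jump.  */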
16710 static void
16711 x86_file_start (void)
16713 default_file_start ();
16714 if (X86_FILE_START_VERSION_DIRECTIVE)
16715 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
16716 if (X86_FILE_START_FLTUSED)
16717 fputs ("\t.global\t__fltused\n", asm_out_file);
16718 if (ix86_asm_dialect == ASM_INTEL)
16719 fputs ("\t.intel_syntax\n", asm_out_file);
16722 int
16723 x86_field_alignment (tree field, int computed)
16725 enum machine_mode mode;
16726 tree type = TREE_TYPE (field);
16728 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
16729 return computed;
16730 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
16731 ? get_inner_array_type (type) : type);
16732 if (mode == DFmode || mode == DCmode
16733 || GET_MODE_CLASS (mode) == MODE_INT
16734 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
16735 return MIN (32, computed);
16736 return computed;
16739 /* Output assembler code to FILE to increment profiler label # LABELNO
16740 for profiling a function entry. */
16741 void
16742 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
16744 if (TARGET_64BIT)
16745 if (flag_pic)
16747 #ifndef NO_PROFILE_COUNTERS
16748 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
16749 #endif
16750 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
16752 else
16754 #ifndef NO_PROFILE_COUNTERS
16755 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
16756 #endif
16757 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16759 else if (flag_pic)
16761 #ifndef NO_PROFILE_COUNTERS
16762 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
16763 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
16764 #endif
16765 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
16767 else
16769 #ifndef NO_PROFILE_COUNTERS
16770 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
16771 PROFILE_COUNT_REGISTER);
16772 #endif
16773 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
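/* For example (illustrative only), with profile counters enabled the
   non-PIC 32-bit branch above emits something like

	movl	$.LP7,%edx
	call	mcount

   where the label prefix, counter register and mcount symbol actually come
   from LPREFIX, PROFILE_COUNT_REGISTER and MCOUNT_NAME, so the exact
   spelling varies between targets.  */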
16777 /* We don't have exact information about the insn sizes, but we may assume
16778 quite safely that we are informed about all 1-byte insns and about memory
16779 address sizes. This is enough to eliminate unnecessary padding in
16780 99% of cases. */
16782 static int
16783 min_insn_size (rtx insn)
16785 int l = 0;
16787 if (!INSN_P (insn) || !active_insn_p (insn))
16788 return 0;
16790 /* Discard alignments we've emitted, and jump table data. */
16791 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
16792 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
16793 return 0;
16794 if (GET_CODE (insn) == JUMP_INSN
16795 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
16796 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
16797 return 0;
16799 /* Important case - calls are always 5 bytes.
16800 It is common to have many calls in a row. */
16801 if (GET_CODE (insn) == CALL_INSN
16802 && symbolic_reference_mentioned_p (PATTERN (insn))
16803 && !SIBLING_CALL_P (insn))
16804 return 5;
16805 if (get_attr_length (insn) <= 1)
16806 return 1;
16808 /* For normal instructions we may rely on the sizes of addresses
16809 and the presence of a symbol reference to require 4 bytes of encoding.
16810 This is not the case for jumps, where references are PC relative. */
16811 if (GET_CODE (insn) != JUMP_INSN)
16813 l = get_attr_length_address (insn);
16814 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
16815 l = 4;
16817 if (l)
16818 return 1+l;
16819 else
16820 return 2;
16823 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
16824 16-byte window. */
16826 static void
16827 ix86_avoid_jump_misspredicts (void)
16829 rtx insn, start = get_insns ();
16830 int nbytes = 0, njumps = 0;
16831 int isjump = 0;
16833 /* Look for all minimal intervals of instructions containing 4 jumps.
16834 The intervals are bounded by START and INSN. NBYTES is the total
16835 size of the instructions in the interval, including INSN and not
16836 including START. When NBYTES is smaller than 16, the end of START
16837 and the end of INSN may fall into the same 16-byte window.
16839 The smallest offset within that window at which INSN can start occurs
16840 when START ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
16841 We emit a p2align to the 16-byte boundary with maxskip 15 - NBYTES + sizeof (INSN).
16843 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
16846 nbytes += min_insn_size (insn);
16847 if (dump_file)
16848 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
16849 INSN_UID (insn), min_insn_size (insn));
16850 if ((GET_CODE (insn) == JUMP_INSN
16851 && GET_CODE (PATTERN (insn)) != ADDR_VEC
16852 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
16853 || GET_CODE (insn) == CALL_INSN)
16854 njumps++;
16855 else
16856 continue;
16858 while (njumps > 3)
16860 start = NEXT_INSN (start);
16861 if ((GET_CODE (start) == JUMP_INSN
16862 && GET_CODE (PATTERN (start)) != ADDR_VEC
16863 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
16864 || GET_CODE (start) == CALL_INSN)
16865 njumps--, isjump = 1;
16866 else
16867 isjump = 0;
16868 nbytes -= min_insn_size (start);
16870 gcc_assert (njumps >= 0);
16871 if (dump_file)
16872 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
16873 INSN_UID (start), INSN_UID (insn), nbytes);
16875 if (njumps == 3 && isjump && nbytes < 16)
16877 int padsize = 15 - nbytes + min_insn_size (insn);
16879 if (dump_file)
16880 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
16881 INSN_UID (insn), padsize);
16882 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
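/* Worked example (illustrative): if the interval START..INSN contains four
   branches in nbytes == 12 and min_insn_size (insn) == 2, then padsize is
   15 - 12 + 2 = 5, so the alignment emitted just before INSN may skip at
   most 5 bytes. This is intended to realign INSN to the next 16-byte
   boundary only when the four branches could otherwise end up in a single
   16-byte window.  */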
16887 /* The AMD Athlon works faster
16888 when RET is not the destination of a conditional jump or directly preceded
16889 by another jump instruction. We avoid the penalty by inserting a NOP just
16890 before such RET instructions. */
16891 static void
16892 ix86_pad_returns (void)
16894 edge e;
16895 edge_iterator ei;
16897 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
16899 basic_block bb = e->src;
16900 rtx ret = BB_END (bb);
16901 rtx prev;
16902 bool replace = false;
16904 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
16905 || !maybe_hot_bb_p (bb))
16906 continue;
16907 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
16908 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
16909 break;
16910 if (prev && GET_CODE (prev) == CODE_LABEL)
16912 edge e;
16913 edge_iterator ei;
16915 FOR_EACH_EDGE (e, ei, bb->preds)
16916 if (EDGE_FREQUENCY (e) && e->src->index >= 0
16917 && !(e->flags & EDGE_FALLTHRU))
16918 replace = true;
16920 if (!replace)
16922 prev = prev_active_insn (ret);
16923 if (prev
16924 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
16925 || GET_CODE (prev) == CALL_INSN))
16926 replace = true;
16927 /* Empty functions get a branch mispredict even when the jump destination
16928 is not visible to us. */
16929 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
16930 replace = true;
16932 if (replace)
16934 emit_insn_before (gen_return_internal_long (), ret);
16935 delete_insn (ret);
16940 /* Implement machine specific optimizations. We implement padding of returns
16941 for K8 CPUs and a pass to avoid placing 4 jumps in a single 16-byte window. */
16942 static void
16943 ix86_reorg (void)
16945 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
16946 ix86_pad_returns ();
16947 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
16948 ix86_avoid_jump_misspredicts ();
16951 /* Return nonzero when a QImode register that must be represented via a REX
16952 prefix is used. */
16953 bool
16954 x86_extended_QIreg_mentioned_p (rtx insn)
16956 int i;
16957 extract_insn_cached (insn);
16958 for (i = 0; i < recog_data.n_operands; i++)
16959 if (REG_P (recog_data.operand[i])
16960 && REGNO (recog_data.operand[i]) >= 4)
16961 return true;
16962 return false;
16965 /* Return nonzero when P points to a register encoded via a REX prefix.
16966 Called via for_each_rtx. */
16967 static int
16968 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
16970 unsigned int regno;
16971 if (!REG_P (*p))
16972 return 0;
16973 regno = REGNO (*p);
16974 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
16977 /* Return true when INSN mentions a register that must be encoded using a
16978 REX prefix. */
16979 bool
16980 x86_extended_reg_mentioned_p (rtx insn)
16982 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
16985 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
16986 optabs would emit if we didn't have TFmode patterns. */
16988 void
16989 x86_emit_floatuns (rtx operands[2])
16991 rtx neglab, donelab, i0, i1, f0, in, out;
16992 enum machine_mode mode, inmode;
16994 inmode = GET_MODE (operands[1]);
16995 gcc_assert (inmode == SImode || inmode == DImode);
16997 out = operands[0];
16998 in = force_reg (inmode, operands[1]);
16999 mode = GET_MODE (out);
17000 neglab = gen_label_rtx ();
17001 donelab = gen_label_rtx ();
17002 i1 = gen_reg_rtx (Pmode);
17003 f0 = gen_reg_rtx (mode);
17005 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
17007 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
17008 emit_jump_insn (gen_jump (donelab));
17009 emit_barrier ();
17011 emit_label (neglab);
17013 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17014 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17015 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
17016 expand_float (f0, i0, 0);
17017 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
17019 emit_label (donelab);
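/* The negative path above uses the usual unsigned-to-float trick: shift the
   value right by one, OR the discarded low bit back in so no information
   that matters for rounding is lost, convert the halved value, and double
   the result. E.g. (illustrative) for the 64-bit input 0x8000000000000003
   we convert (0x8000000000000003 >> 1) | 1 == 0x4000000000000001 and then
   add the converted value to itself.  */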
17022 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17023 with all elements equal to VAL. Return true if successful. */
17025 static bool
17026 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
17027 rtx target, rtx val)
17029 enum machine_mode smode, wsmode, wvmode;
17030 rtx x;
17032 switch (mode)
17034 case V2SImode:
17035 case V2SFmode:
17036 if (!mmx_ok && !TARGET_SSE)
17037 return false;
17038 /* FALLTHRU */
17040 case V2DFmode:
17041 case V2DImode:
17042 case V4SFmode:
17043 case V4SImode:
17044 val = force_reg (GET_MODE_INNER (mode), val);
17045 x = gen_rtx_VEC_DUPLICATE (mode, val);
17046 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17047 return true;
17049 case V4HImode:
17050 if (!mmx_ok)
17051 return false;
17052 if (TARGET_SSE || TARGET_3DNOW_A)
17054 val = gen_lowpart (SImode, val);
17055 x = gen_rtx_TRUNCATE (HImode, val);
17056 x = gen_rtx_VEC_DUPLICATE (mode, x);
17057 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17058 return true;
17060 else
17062 smode = HImode;
17063 wsmode = SImode;
17064 wvmode = V2SImode;
17065 goto widen;
17068 case V8QImode:
17069 if (!mmx_ok)
17070 return false;
17071 smode = QImode;
17072 wsmode = HImode;
17073 wvmode = V4HImode;
17074 goto widen;
17075 case V8HImode:
17076 smode = HImode;
17077 wsmode = SImode;
17078 wvmode = V4SImode;
17079 goto widen;
17080 case V16QImode:
17081 smode = QImode;
17082 wsmode = HImode;
17083 wvmode = V8HImode;
17084 goto widen;
17085 widen:
17086 /* Replicate the value once into the next wider mode and recurse. */
17087 val = convert_modes (wsmode, smode, val, true);
17088 x = expand_simple_binop (wsmode, ASHIFT, val,
17089 GEN_INT (GET_MODE_BITSIZE (smode)),
17090 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17091 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
17093 x = gen_reg_rtx (wvmode);
17094 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
17095 gcc_unreachable ();
17096 emit_move_insn (target, gen_lowpart (mode, x));
17097 return true;
17099 default:
17100 return false;
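/* Example of the widening path above (illustrative): to splat the QImode
   value 0xAB across a V16QImode register, the value is first replicated
   into an HImode scalar, 0xAB | (0xAB << 8) == 0xABAB, the function then
   recurses to build a V8HImode vector of 0xABAB, and the result is simply
   reinterpreted (gen_lowpart) as the requested V16QImode vector.  */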
17104 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17105 whose low element is VAR, and other elements are zero. Return true
17106 if successful. */
17108 static bool
17109 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
17110 rtx target, rtx var)
17112 enum machine_mode vsimode;
17113 rtx x;
17115 switch (mode)
17117 case V2SFmode:
17118 case V2SImode:
17119 if (!mmx_ok && !TARGET_SSE)
17120 return false;
17121 /* FALLTHRU */
17123 case V2DFmode:
17124 case V2DImode:
17125 var = force_reg (GET_MODE_INNER (mode), var);
17126 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
17127 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17128 return true;
17130 case V4SFmode:
17131 case V4SImode:
17132 var = force_reg (GET_MODE_INNER (mode), var);
17133 x = gen_rtx_VEC_DUPLICATE (mode, var);
17134 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
17135 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17136 return true;
17138 case V8HImode:
17139 case V16QImode:
17140 vsimode = V4SImode;
17141 goto widen;
17142 case V4HImode:
17143 case V8QImode:
17144 if (!mmx_ok)
17145 return false;
17146 vsimode = V2SImode;
17147 goto widen;
17148 widen:
17149 /* Zero extend the variable element to SImode and recurse. */
17150 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
17152 x = gen_reg_rtx (vsimode);
17153 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
17154 gcc_unreachable ();
17156 emit_move_insn (target, gen_lowpart (mode, x));
17157 return true;
17159 default:
17160 return false;
17164 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17165 consisting of the values in VALS. It is known that all elements
17166 except ONE_VAR are constants. Return true if successful. */
17168 static bool
17169 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
17170 rtx target, rtx vals, int one_var)
17172 rtx var = XVECEXP (vals, 0, one_var);
17173 enum machine_mode wmode;
17174 rtx const_vec, x;
17176 XVECEXP (vals, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
17177 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
17179 switch (mode)
17181 case V2DFmode:
17182 case V2DImode:
17183 case V2SFmode:
17184 case V2SImode:
17185 /* For the two element vectors, it's just as easy to use
17186 the general case. */
17187 return false;
17189 case V4SFmode:
17190 case V4SImode:
17191 case V8HImode:
17192 case V4HImode:
17193 break;
17195 case V16QImode:
17196 wmode = V8HImode;
17197 goto widen;
17198 case V8QImode:
17199 wmode = V4HImode;
17200 goto widen;
17201 widen:
17202 /* There's no way to set one QImode entry easily. Combine
17203 the variable value with its adjacent constant value, and
17204 promote to an HImode set. */
17205 x = XVECEXP (vals, 0, one_var ^ 1);
17206 if (one_var & 1)
17208 var = convert_modes (HImode, QImode, var, true);
17209 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
17210 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17211 x = GEN_INT (INTVAL (x) & 0xff);
17213 else
17215 var = convert_modes (HImode, QImode, var, true);
17216 x = gen_int_mode (INTVAL (x) << 8, HImode);
17218 if (x != const0_rtx)
17219 var = expand_simple_binop (HImode, IOR, var, x, var,
17220 1, OPTAB_LIB_WIDEN);
17222 x = gen_reg_rtx (wmode);
17223 emit_move_insn (x, gen_lowpart (wmode, const_vec));
17224 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
17226 emit_move_insn (target, gen_lowpart (mode, x));
17227 return true;
17229 default:
17230 return false;
17233 emit_move_insn (target, const_vec);
17234 ix86_expand_vector_set (mmx_ok, target, var, one_var);
17235 return true;
17238 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
17239 all values variable, and none identical. */
17241 static void
17242 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
17243 rtx target, rtx vals)
17245 enum machine_mode half_mode = GET_MODE_INNER (mode);
17246 rtx op0 = NULL, op1 = NULL;
17247 bool use_vec_concat = false;
17249 switch (mode)
17251 case V2SFmode:
17252 case V2SImode:
17253 if (!mmx_ok && !TARGET_SSE)
17254 break;
17255 /* FALLTHRU */
17257 case V2DFmode:
17258 case V2DImode:
17259 /* For the two element vectors, we always implement VEC_CONCAT. */
17260 op0 = XVECEXP (vals, 0, 0);
17261 op1 = XVECEXP (vals, 0, 1);
17262 use_vec_concat = true;
17263 break;
17265 case V4SFmode:
17266 half_mode = V2SFmode;
17267 goto half;
17268 case V4SImode:
17269 half_mode = V2SImode;
17270 goto half;
17271 half:
17273 rtvec v;
17275 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
17276 Recurse to load the two halves. */
17278 op0 = gen_reg_rtx (half_mode);
17279 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
17280 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
17282 op1 = gen_reg_rtx (half_mode);
17283 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
17284 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
17286 use_vec_concat = true;
17288 break;
17290 case V8HImode:
17291 case V16QImode:
17292 case V4HImode:
17293 case V8QImode:
17294 break;
17296 default:
17297 gcc_unreachable ();
17300 if (use_vec_concat)
17302 if (!register_operand (op0, half_mode))
17303 op0 = force_reg (half_mode, op0);
17304 if (!register_operand (op1, half_mode))
17305 op1 = force_reg (half_mode, op1);
17307 emit_insn (gen_rtx_SET (VOIDmode, target,
17308 gen_rtx_VEC_CONCAT (mode, op0, op1)));
17310 else
17312 int i, j, n_elts, n_words, n_elt_per_word;
17313 enum machine_mode inner_mode;
17314 rtx words[4], shift;
17316 inner_mode = GET_MODE_INNER (mode);
17317 n_elts = GET_MODE_NUNITS (mode);
17318 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
17319 n_elt_per_word = n_elts / n_words;
17320 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
17322 for (i = 0; i < n_words; ++i)
17324 rtx word = NULL_RTX;
17326 for (j = 0; j < n_elt_per_word; ++j)
17328 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
17329 elt = convert_modes (word_mode, inner_mode, elt, true);
17331 if (j == 0)
17332 word = elt;
17333 else
17335 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
17336 word, 1, OPTAB_LIB_WIDEN);
17337 word = expand_simple_binop (word_mode, IOR, word, elt,
17338 word, 1, OPTAB_LIB_WIDEN);
17342 words[i] = word;
17345 if (n_words == 1)
17346 emit_move_insn (target, gen_lowpart (mode, words[0]));
17347 else if (n_words == 2)
17349 rtx tmp = gen_reg_rtx (mode);
17350 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
17351 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
17352 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
17353 emit_move_insn (target, tmp);
17355 else if (n_words == 4)
17357 rtx tmp = gen_reg_rtx (V4SImode);
17358 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
17359 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
17360 emit_move_insn (target, gen_lowpart (mode, tmp));
17362 else
17363 gcc_unreachable ();
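/* Worked example of the scalar fallback above (illustrative): initializing
   a V8HImode vector of variable elements on a 32-bit target, word_mode is
   SImode, so n_words == 4 and n_elt_per_word == 2. Each SImode word is
   assembled as (elt[2*i + 1] << 16) | elt[2*i], the four words are combined
   into a V4SImode vector by the recursive call, and the result is
   reinterpreted as V8HImode.  */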
17367 /* Initialize vector TARGET via VALS. Suppress the use of MMX
17368 instructions unless MMX_OK is true. */
17370 void
17371 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
17373 enum machine_mode mode = GET_MODE (target);
17374 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17375 int n_elts = GET_MODE_NUNITS (mode);
17376 int n_var = 0, one_var = -1;
17377 bool all_same = true, all_const_zero = true;
17378 int i;
17379 rtx x;
17381 for (i = 0; i < n_elts; ++i)
17383 x = XVECEXP (vals, 0, i);
17384 if (!CONSTANT_P (x))
17385 n_var++, one_var = i;
17386 else if (x != CONST0_RTX (inner_mode))
17387 all_const_zero = false;
17388 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17389 all_same = false;
17392 /* Constants are best loaded from the constant pool. */
17393 if (n_var == 0)
17395 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
17396 return;
17399 /* If all values are identical, broadcast the value. */
17400 if (all_same
17401 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
17402 XVECEXP (vals, 0, 0)))
17403 return;
17405 /* Values where only one field is non-constant are best loaded from
17406 the pool and overwritten via move later. */
17407 if (n_var == 1)
17409 if (all_const_zero && one_var == 0
17410 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
17411 XVECEXP (vals, 0, 0)))
17412 return;
17414 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
17415 return;
17418 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
17421 void
17422 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
17424 enum machine_mode mode = GET_MODE (target);
17425 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17426 bool use_vec_merge = false;
17427 rtx tmp;
17429 switch (mode)
17431 case V2SFmode:
17432 case V2SImode:
17433 if (mmx_ok)
17435 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
17436 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
17437 if (elt == 0)
17438 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
17439 else
17440 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
17441 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17442 return;
17444 break;
17446 case V2DFmode:
17447 case V2DImode:
17449 rtx op0, op1;
17451 /* For the two element vectors, we implement a VEC_CONCAT with
17452 the extraction of the other element. */
17454 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
17455 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
17457 if (elt == 0)
17458 op0 = val, op1 = tmp;
17459 else
17460 op0 = tmp, op1 = val;
17462 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
17463 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17465 return;
17467 case V4SFmode:
17468 switch (elt)
17470 case 0:
17471 use_vec_merge = true;
17472 break;
17474 case 1:
17475 /* tmp = target = A B C D */
17476 tmp = copy_to_reg (target);
17477 /* target = A A B B */
17478 emit_insn (gen_sse_unpcklps (target, target, target));
17479 /* target = X A B B */
17480 ix86_expand_vector_set (false, target, val, 0);
17481 /* target = A X C D */
17482 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17483 GEN_INT (1), GEN_INT (0),
17484 GEN_INT (2+4), GEN_INT (3+4)));
17485 return;
17487 case 2:
17488 /* tmp = target = A B C D */
17489 tmp = copy_to_reg (target);
17490 /* tmp = X B C D */
17491 ix86_expand_vector_set (false, tmp, val, 0);
17492 /* target = A B X D */
17493 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17494 GEN_INT (0), GEN_INT (1),
17495 GEN_INT (0+4), GEN_INT (3+4)));
17496 return;
17498 case 3:
17499 /* tmp = target = A B C D */
17500 tmp = copy_to_reg (target);
17501 /* tmp = X B C D */
17502 ix86_expand_vector_set (false, tmp, val, 0);
17503 /* target = A B C X */
17504 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17505 GEN_INT (0), GEN_INT (1),
17506 GEN_INT (2+4), GEN_INT (0+4)));
17507 return;
17509 default:
17510 gcc_unreachable ();
17512 break;
17514 case V4SImode:
17515 /* Element 0 handled by vec_merge below. */
17516 if (elt == 0)
17518 use_vec_merge = true;
17519 break;
17522 if (TARGET_SSE2)
17524 /* With SSE2, use integer shuffles to swap element 0 and ELT,
17525 store into element 0, then shuffle them back. */
17527 rtx order[4];
17529 order[0] = GEN_INT (elt);
17530 order[1] = const1_rtx;
17531 order[2] = const2_rtx;
17532 order[3] = GEN_INT (3);
17533 order[elt] = const0_rtx;
17535 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17536 order[1], order[2], order[3]));
17538 ix86_expand_vector_set (false, target, val, 0);
17540 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17541 order[1], order[2], order[3]));
17543 else
17545 /* For SSE1, we have to reuse the V4SF code. */
17546 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
17547 gen_lowpart (SFmode, val), elt);
17549 return;
17551 case V8HImode:
17552 use_vec_merge = TARGET_SSE2;
17553 break;
17554 case V4HImode:
17555 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17556 break;
17558 case V16QImode:
17559 case V8QImode:
17560 default:
17561 break;
17564 if (use_vec_merge)
17566 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
17567 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
17568 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17570 else
17572 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17574 emit_move_insn (mem, target);
17576 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17577 emit_move_insn (tmp, val);
17579 emit_move_insn (target, mem);
17583 void
17584 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
17586 enum machine_mode mode = GET_MODE (vec);
17587 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17588 bool use_vec_extr = false;
17589 rtx tmp;
17591 switch (mode)
17593 case V2SImode:
17594 case V2SFmode:
17595 if (!mmx_ok)
17596 break;
17597 /* FALLTHRU */
17599 case V2DFmode:
17600 case V2DImode:
17601 use_vec_extr = true;
17602 break;
17604 case V4SFmode:
17605 switch (elt)
17607 case 0:
17608 tmp = vec;
17609 break;
17611 case 1:
17612 case 3:
17613 tmp = gen_reg_rtx (mode);
17614 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
17615 GEN_INT (elt), GEN_INT (elt),
17616 GEN_INT (elt+4), GEN_INT (elt+4)));
17617 break;
17619 case 2:
17620 tmp = gen_reg_rtx (mode);
17621 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
17622 break;
17624 default:
17625 gcc_unreachable ();
17627 vec = tmp;
17628 use_vec_extr = true;
17629 elt = 0;
17630 break;
17632 case V4SImode:
17633 if (TARGET_SSE2)
17635 switch (elt)
17637 case 0:
17638 tmp = vec;
17639 break;
17641 case 1:
17642 case 3:
17643 tmp = gen_reg_rtx (mode);
17644 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
17645 GEN_INT (elt), GEN_INT (elt),
17646 GEN_INT (elt), GEN_INT (elt)));
17647 break;
17649 case 2:
17650 tmp = gen_reg_rtx (mode);
17651 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
17652 break;
17654 default:
17655 gcc_unreachable ();
17657 vec = tmp;
17658 use_vec_extr = true;
17659 elt = 0;
17661 else
17663 /* For SSE1, we have to reuse the V4SF code. */
17664 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
17665 gen_lowpart (V4SFmode, vec), elt);
17666 return;
17668 break;
17670 case V8HImode:
17671 use_vec_extr = TARGET_SSE2;
17672 break;
17673 case V4HImode:
17674 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17675 break;
17677 case V16QImode:
17678 case V8QImode:
17679 /* ??? Could extract the appropriate HImode element and shift. */
17680 default:
17681 break;
17684 if (use_vec_extr)
17686 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
17687 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
17689 /* Let the rtl optimizers know about the zero extension performed. */
17690 if (inner_mode == HImode)
17692 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
17693 target = gen_lowpart (SImode, target);
17696 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17698 else
17700 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17702 emit_move_insn (mem, vec);
17704 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17705 emit_move_insn (target, tmp);
17709 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
17710 pattern to reduce; DEST is the destination; IN is the input vector. */
17712 void
17713 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
17715 rtx tmp1, tmp2, tmp3;
17717 tmp1 = gen_reg_rtx (V4SFmode);
17718 tmp2 = gen_reg_rtx (V4SFmode);
17719 tmp3 = gen_reg_rtx (V4SFmode);
17721 emit_insn (gen_sse_movhlps (tmp1, in, in));
17722 emit_insn (fn (tmp2, tmp1, in));
17724 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
17725 GEN_INT (1), GEN_INT (1),
17726 GEN_INT (1+4), GEN_INT (1+4)));
17727 emit_insn (fn (dest, tmp2, tmp3));
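/* Data flow of the reduction above, written out for clarity (illustrative;
   elements are listed from lowest to highest and "op" is the operation FN
   expands to):
	in   = [ a        b        c  d ]
	tmp1 = [ c        d        c  d ]	movhlps in, in
	tmp2 = [ a op c   b op d   .  . ]	fn (tmp1, in)
	tmp3 = [ b op d   b op d   .  . ]	broadcast element 1 of tmp2
	dest low element = (a op c) op (b op d), the reduction of all four.  */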
17730 /* Implements target hook vector_mode_supported_p. */
17731 static bool
17732 ix86_vector_mode_supported_p (enum machine_mode mode)
17734 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
17735 return true;
17736 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
17737 return true;
17738 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
17739 return true;
17740 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
17741 return true;
17742 return false;
17745 /* Worker function for TARGET_MD_ASM_CLOBBERS.
17747 We do this in the new i386 backend to maintain source compatibility
17748 with the old cc0-based compiler. */
17750 static tree
17751 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
17752 tree inputs ATTRIBUTE_UNUSED,
17753 tree clobbers)
17755 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
17756 clobbers);
17757 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
17758 clobbers);
17759 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
17760 clobbers);
17761 return clobbers;
17764 /* Return true if this goes in large data/bss. */
17766 static bool
17767 ix86_in_large_data_p (tree exp)
17769 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
17770 return false;
17772 /* Functions are never large data. */
17773 if (TREE_CODE (exp) == FUNCTION_DECL)
17774 return false;
17776 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
17778 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
17779 if (strcmp (section, ".ldata") == 0
17780 || strcmp (section, ".lbss") == 0)
17781 return true;
17782 return false;
17784 else
17786 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
17788 /* If this is an incomplete type with size 0, then we can't put it
17789 in data because it might be too big when completed. */
17790 if (!size || size > ix86_section_threshold)
17791 return true;
17794 return false;
17796 static void
17797 ix86_encode_section_info (tree decl, rtx rtl, int first)
17799 default_encode_section_info (decl, rtl, first);
17801 if (TREE_CODE (decl) == VAR_DECL
17802 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
17803 && ix86_in_large_data_p (decl))
17804 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
17807 /* Worker function for REVERSE_CONDITION. */
17809 enum rtx_code
17810 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
17812 return (mode != CCFPmode && mode != CCFPUmode
17813 ? reverse_condition (code)
17814 : reverse_condition_maybe_unordered (code));
17817 /* Output code to perform an x87 FP register move, from OPERANDS[1]
17818 to OPERANDS[0]. */
17820 const char *
17821 output_387_reg_move (rtx insn, rtx *operands)
17823 if (REG_P (operands[1])
17824 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
17826 if (REGNO (operands[0]) == FIRST_STACK_REG
17827 && TARGET_USE_FFREEP)
17828 return "ffreep\t%y0";
17829 return "fstp\t%y0";
17831 if (STACK_TOP_P (operands[0]))
17832 return "fld%z1\t%y1";
17833 return "fst\t%y0";
17836 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
17837 the FP status register is set. */
17839 void
17840 ix86_emit_fp_unordered_jump (rtx label)
17842 rtx reg = gen_reg_rtx (HImode);
17843 rtx temp;
17845 emit_insn (gen_x86_fnstsw_1 (reg));
17847 if (TARGET_USE_SAHF)
17849 emit_insn (gen_x86_sahf_1 (reg));
17851 temp = gen_rtx_REG (CCmode, FLAGS_REG);
17852 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
17854 else
17856 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
17858 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17859 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
17862 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
17863 gen_rtx_LABEL_REF (VOIDmode, label),
17864 pc_rtx);
17865 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
17866 emit_jump_insn (temp);
17869 /* Output code to perform a log1p XFmode calculation. */
17871 void ix86_emit_i387_log1p (rtx op0, rtx op1)
17873 rtx label1 = gen_label_rtx ();
17874 rtx label2 = gen_label_rtx ();
17876 rtx tmp = gen_reg_rtx (XFmode);
17877 rtx tmp2 = gen_reg_rtx (XFmode);
17879 emit_insn (gen_absxf2 (tmp, op1));
17880 emit_insn (gen_cmpxf (tmp,
17881 CONST_DOUBLE_FROM_REAL_VALUE (
17882 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
17883 XFmode)));
17884 emit_jump_insn (gen_bge (label1));
17886 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
17887 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
17888 emit_jump (label2);
17890 emit_label (label1);
17891 emit_move_insn (tmp, CONST1_RTX (XFmode));
17892 emit_insn (gen_addxf3 (tmp, op1, tmp));
17893 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
17894 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
17896 emit_label (label2);
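/* The constant compared against above, 1 - sqrt(2)/2 ~= 0.29289, is roughly
   the bound up to which the x87 fyl2xp1 instruction accepts its argument.
   Within that range log1p(x) is computed directly as ln(2) * log2(1 + x)
   via fyl2xp1; otherwise 1 + x is formed explicitly and fyl2x is used
   instead. In both branches the ln(2) factor comes from the fldln2
   constant, standard_80387_constant_rtx (4).  */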
17899 /* Solaris named-section hook. Parameters are as for
17900 named_section_real. */
17902 static void
17903 i386_solaris_elf_named_section (const char *name, unsigned int flags,
17904 tree decl)
17906 /* With Binutils 2.15, the "@unwind" marker must be specified on
17907 every occurrence of the ".eh_frame" section, not just the first
17908 one. */
17909 if (TARGET_64BIT
17910 && strcmp (name, ".eh_frame") == 0)
17912 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
17913 flags & SECTION_WRITE ? "aw" : "a");
17914 return;
17916 default_elf_asm_named_section (name, flags, decl);
17919 /* Return the mangling of TYPE if it is an extended fundamental type. */
17921 static const char *
17922 ix86_mangle_fundamental_type (tree type)
17924 switch (TYPE_MODE (type))
17926 case TFmode:
17927 /* __float128 is "g". */
17928 return "g";
17929 case XFmode:
17930 /* "long double" or __float80 is "e". */
17931 return "e";
17932 default:
17933 return NULL;
17937 /* For 32-bit code we can save the PIC register setup by using the
17938 __stack_chk_fail_local hidden function instead of calling
17939 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
17940 register, so it is better to call __stack_chk_fail directly. */
17942 static tree
17943 ix86_stack_protect_fail (void)
17945 return TARGET_64BIT
17946 ? default_external_stack_protect_fail ()
17947 : default_hidden_stack_protect_fail ();
17950 #include "gt-i386.h"