PR target/22585
gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
53 #ifndef CHECK_STACK_LIMIT
54 #define CHECK_STACK_LIMIT (-1)
55 #endif
57 /* Return index of given mode in mult and division cost tables. */
58 #define MODE_INDEX(mode) \
59 ((mode) == QImode ? 0 \
60 : (mode) == HImode ? 1 \
61 : (mode) == SImode ? 2 \
62 : (mode) == DImode ? 3 \
63 : 4)
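/* Illustrative sketch only (not part of the build): the 5-entry cost arrays
   below are indexed by MODE_INDEX, so looking up the SImode multiply start-up
   cost is roughly ix86_cost->mult_init[MODE_INDEX (SImode)], i.e. column 2 of
   the table.  The field name "mult_init" is assumed here for the example; the
   real field names of struct processor_costs are declared in i386.h.  */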
65 /* Processor costs (relative to an add) */
66 static const
67 struct processor_costs size_cost = { /* costs for tuning for size */
68 2, /* cost of an add instruction */
69 3, /* cost of a lea instruction */
70 2, /* variable shift costs */
71 3, /* constant shift costs */
72 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
73 0, /* cost of multiply per each bit set */
74 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
75 3, /* cost of movsx */
76 3, /* cost of movzx */
77 0, /* "large" insn */
78 2, /* MOVE_RATIO */
79 2, /* cost for loading QImode using movzbl */
80 {2, 2, 2}, /* cost of loading integer registers
81 in QImode, HImode and SImode.
82 Relative to reg-reg move (2). */
83 {2, 2, 2}, /* cost of storing integer registers */
84 2, /* cost of reg,reg fld/fst */
85 {2, 2, 2}, /* cost of loading fp registers
86 in SFmode, DFmode and XFmode */
87 {2, 2, 2}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
88 3, /* cost of moving MMX register */
89 {3, 3}, /* cost of loading MMX registers
90 in SImode and DImode */
91 {3, 3}, /* cost of storing MMX registers
92 in SImode and DImode */
93 3, /* cost of moving SSE register */
94 {3, 3, 3}, /* cost of loading SSE registers
95 in SImode, DImode and TImode */
96 {3, 3, 3}, /* cost of storing SSE registers
97 in SImode, DImode and TImode */
98 3, /* MMX or SSE register to integer */
99 0, /* size of prefetch block */
100 0, /* number of parallel prefetches */
101 1, /* Branch cost */
102 2, /* cost of FADD and FSUB insns. */
103 2, /* cost of FMUL instruction. */
104 2, /* cost of FDIV instruction. */
105 2, /* cost of FABS instruction. */
106 2, /* cost of FCHS instruction. */
107 2, /* cost of FSQRT instruction. */
110 /* Processor costs (relative to an add) */
111 static const
112 struct processor_costs i386_cost = { /* 386 specific costs */
113 1, /* cost of an add instruction */
114 1, /* cost of a lea instruction */
115 3, /* variable shift costs */
116 2, /* constant shift costs */
117 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
118 1, /* cost of multiply per each bit set */
119 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
120 3, /* cost of movsx */
121 2, /* cost of movzx */
122 15, /* "large" insn */
123 3, /* MOVE_RATIO */
124 4, /* cost for loading QImode using movzbl */
125 {2, 4, 2}, /* cost of loading integer registers
126 in QImode, HImode and SImode.
127 Relative to reg-reg move (2). */
128 {2, 4, 2}, /* cost of storing integer registers */
129 2, /* cost of reg,reg fld/fst */
130 {8, 8, 8}, /* cost of loading fp registers
131 in SFmode, DFmode and XFmode */
132 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
133 2, /* cost of moving MMX register */
134 {4, 8}, /* cost of loading MMX registers
135 in SImode and DImode */
136 {4, 8}, /* cost of storing MMX registers
137 in SImode and DImode */
138 2, /* cost of moving SSE register */
139 {4, 8, 16}, /* cost of loading SSE registers
140 in SImode, DImode and TImode */
141 {4, 8, 16}, /* cost of storing SSE registers
142 in SImode, DImode and TImode */
143 3, /* MMX or SSE register to integer */
144 0, /* size of prefetch block */
145 0, /* number of parallel prefetches */
146 1, /* Branch cost */
147 23, /* cost of FADD and FSUB insns. */
148 27, /* cost of FMUL instruction. */
149 88, /* cost of FDIV instruction. */
150 22, /* cost of FABS instruction. */
151 24, /* cost of FCHS instruction. */
152 122, /* cost of FSQRT instruction. */
155 static const
156 struct processor_costs i486_cost = { /* 486 specific costs */
157 1, /* cost of an add instruction */
158 1, /* cost of a lea instruction */
159 3, /* variable shift costs */
160 2, /* constant shift costs */
161 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
162 1, /* cost of multiply per each bit set */
163 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
164 3, /* cost of movsx */
165 2, /* cost of movzx */
166 15, /* "large" insn */
167 3, /* MOVE_RATIO */
168 4, /* cost for loading QImode using movzbl */
169 {2, 4, 2}, /* cost of loading integer registers
170 in QImode, HImode and SImode.
171 Relative to reg-reg move (2). */
172 {2, 4, 2}, /* cost of storing integer registers */
173 2, /* cost of reg,reg fld/fst */
174 {8, 8, 8}, /* cost of loading fp registers
175 in SFmode, DFmode and XFmode */
176 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
177 2, /* cost of moving MMX register */
178 {4, 8}, /* cost of loading MMX registers
179 in SImode and DImode */
180 {4, 8}, /* cost of storing MMX registers
181 in SImode and DImode */
182 2, /* cost of moving SSE register */
183 {4, 8, 16}, /* cost of loading SSE registers
184 in SImode, DImode and TImode */
185 {4, 8, 16}, /* cost of storing SSE registers
186 in SImode, DImode and TImode */
187 3, /* MMX or SSE register to integer */
188 0, /* size of prefetch block */
189 0, /* number of parallel prefetches */
190 1, /* Branch cost */
191 8, /* cost of FADD and FSUB insns. */
192 16, /* cost of FMUL instruction. */
193 73, /* cost of FDIV instruction. */
194 3, /* cost of FABS instruction. */
195 3, /* cost of FCHS instruction. */
196 83, /* cost of FSQRT instruction. */
199 static const
200 struct processor_costs pentium_cost = {
201 1, /* cost of an add instruction */
202 1, /* cost of a lea instruction */
203 4, /* variable shift costs */
204 1, /* constant shift costs */
205 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
206 0, /* cost of multiply per each bit set */
207 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
208 3, /* cost of movsx */
209 2, /* cost of movzx */
210 8, /* "large" insn */
211 6, /* MOVE_RATIO */
212 6, /* cost for loading QImode using movzbl */
213 {2, 4, 2}, /* cost of loading integer registers
214 in QImode, HImode and SImode.
215 Relative to reg-reg move (2). */
216 {2, 4, 2}, /* cost of storing integer registers */
217 2, /* cost of reg,reg fld/fst */
218 {2, 2, 6}, /* cost of loading fp registers
219 in SFmode, DFmode and XFmode */
220 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
221 8, /* cost of moving MMX register */
222 {8, 8}, /* cost of loading MMX registers
223 in SImode and DImode */
224 {8, 8}, /* cost of storing MMX registers
225 in SImode and DImode */
226 2, /* cost of moving SSE register */
227 {4, 8, 16}, /* cost of loading SSE registers
228 in SImode, DImode and TImode */
229 {4, 8, 16}, /* cost of storing SSE registers
230 in SImode, DImode and TImode */
231 3, /* MMX or SSE register to integer */
232 0, /* size of prefetch block */
233 0, /* number of parallel prefetches */
234 2, /* Branch cost */
235 3, /* cost of FADD and FSUB insns. */
236 3, /* cost of FMUL instruction. */
237 39, /* cost of FDIV instruction. */
238 1, /* cost of FABS instruction. */
239 1, /* cost of FCHS instruction. */
240 70, /* cost of FSQRT instruction. */
243 static const
244 struct processor_costs pentiumpro_cost = {
245 1, /* cost of an add instruction */
246 1, /* cost of a lea instruction */
247 1, /* variable shift costs */
248 1, /* constant shift costs */
249 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
250 0, /* cost of multiply per each bit set */
251 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
252 1, /* cost of movsx */
253 1, /* cost of movzx */
254 8, /* "large" insn */
255 6, /* MOVE_RATIO */
256 2, /* cost for loading QImode using movzbl */
257 {4, 4, 4}, /* cost of loading integer registers
258 in QImode, HImode and SImode.
259 Relative to reg-reg move (2). */
260 {2, 2, 2}, /* cost of storing integer registers */
261 2, /* cost of reg,reg fld/fst */
262 {2, 2, 6}, /* cost of loading fp registers
263 in SFmode, DFmode and XFmode */
264 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
265 2, /* cost of moving MMX register */
266 {2, 2}, /* cost of loading MMX registers
267 in SImode and DImode */
268 {2, 2}, /* cost of storing MMX registers
269 in SImode and DImode */
270 2, /* cost of moving SSE register */
271 {2, 2, 8}, /* cost of loading SSE registers
272 in SImode, DImode and TImode */
273 {2, 2, 8}, /* cost of storing SSE registers
274 in SImode, DImode and TImode */
275 3, /* MMX or SSE register to integer */
276 32, /* size of prefetch block */
277 6, /* number of parallel prefetches */
278 2, /* Branch cost */
279 3, /* cost of FADD and FSUB insns. */
280 5, /* cost of FMUL instruction. */
281 56, /* cost of FDIV instruction. */
282 2, /* cost of FABS instruction. */
283 2, /* cost of FCHS instruction. */
284 56, /* cost of FSQRT instruction. */
287 static const
288 struct processor_costs k6_cost = {
289 1, /* cost of an add instruction */
290 2, /* cost of a lea instruction */
291 1, /* variable shift costs */
292 1, /* constant shift costs */
293 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
294 0, /* cost of multiply per each bit set */
295 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
296 2, /* cost of movsx */
297 2, /* cost of movzx */
298 8, /* "large" insn */
299 4, /* MOVE_RATIO */
300 3, /* cost for loading QImode using movzbl */
301 {4, 5, 4}, /* cost of loading integer registers
302 in QImode, HImode and SImode.
303 Relative to reg-reg move (2). */
304 {2, 3, 2}, /* cost of storing integer registers */
305 4, /* cost of reg,reg fld/fst */
306 {6, 6, 6}, /* cost of loading fp registers
307 in SFmode, DFmode and XFmode */
308 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
309 2, /* cost of moving MMX register */
310 {2, 2}, /* cost of loading MMX registers
311 in SImode and DImode */
312 {2, 2}, /* cost of storing MMX registers
313 in SImode and DImode */
314 2, /* cost of moving SSE register */
315 {2, 2, 8}, /* cost of loading SSE registers
316 in SImode, DImode and TImode */
317 {2, 2, 8}, /* cost of storing SSE registers
318 in SImode, DImode and TImode */
319 6, /* MMX or SSE register to integer */
320 32, /* size of prefetch block */
321 1, /* number of parallel prefetches */
322 1, /* Branch cost */
323 2, /* cost of FADD and FSUB insns. */
324 2, /* cost of FMUL instruction. */
325 56, /* cost of FDIV instruction. */
326 2, /* cost of FABS instruction. */
327 2, /* cost of FCHS instruction. */
328 56, /* cost of FSQRT instruction. */
331 static const
332 struct processor_costs athlon_cost = {
333 1, /* cost of an add instruction */
334 2, /* cost of a lea instruction */
335 1, /* variable shift costs */
336 1, /* constant shift costs */
337 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
338 0, /* cost of multiply per each bit set */
339 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
340 1, /* cost of movsx */
341 1, /* cost of movzx */
342 8, /* "large" insn */
343 9, /* MOVE_RATIO */
344 4, /* cost for loading QImode using movzbl */
345 {3, 4, 3}, /* cost of loading integer registers
346 in QImode, HImode and SImode.
347 Relative to reg-reg move (2). */
348 {3, 4, 3}, /* cost of storing integer registers */
349 4, /* cost of reg,reg fld/fst */
350 {4, 4, 12}, /* cost of loading fp registers
351 in SFmode, DFmode and XFmode */
352 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
353 2, /* cost of moving MMX register */
354 {4, 4}, /* cost of loading MMX registers
355 in SImode and DImode */
356 {4, 4}, /* cost of storing MMX registers
357 in SImode and DImode */
358 2, /* cost of moving SSE register */
359 {4, 4, 6}, /* cost of loading SSE registers
360 in SImode, DImode and TImode */
361 {4, 4, 5}, /* cost of storing SSE registers
362 in SImode, DImode and TImode */
363 5, /* MMX or SSE register to integer */
364 64, /* size of prefetch block */
365 6, /* number of parallel prefetches */
366 5, /* Branch cost */
367 4, /* cost of FADD and FSUB insns. */
368 4, /* cost of FMUL instruction. */
369 24, /* cost of FDIV instruction. */
370 2, /* cost of FABS instruction. */
371 2, /* cost of FCHS instruction. */
372 35, /* cost of FSQRT instruction. */
375 static const
376 struct processor_costs k8_cost = {
377 1, /* cost of an add instruction */
378 2, /* cost of a lea instruction */
379 1, /* variable shift costs */
380 1, /* constant shift costs */
381 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
382 0, /* cost of multiply per each bit set */
383 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
384 1, /* cost of movsx */
385 1, /* cost of movzx */
386 8, /* "large" insn */
387 9, /* MOVE_RATIO */
388 4, /* cost for loading QImode using movzbl */
389 {3, 4, 3}, /* cost of loading integer registers
390 in QImode, HImode and SImode.
391 Relative to reg-reg move (2). */
392 {3, 4, 3}, /* cost of storing integer registers */
393 4, /* cost of reg,reg fld/fst */
394 {4, 4, 12}, /* cost of loading fp registers
395 in SFmode, DFmode and XFmode */
396 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
397 2, /* cost of moving MMX register */
398 {3, 3}, /* cost of loading MMX registers
399 in SImode and DImode */
400 {4, 4}, /* cost of storing MMX registers
401 in SImode and DImode */
402 2, /* cost of moving SSE register */
403 {4, 3, 6}, /* cost of loading SSE registers
404 in SImode, DImode and TImode */
405 {4, 4, 5}, /* cost of storing SSE registers
406 in SImode, DImode and TImode */
407 5, /* MMX or SSE register to integer */
408 64, /* size of prefetch block */
409 6, /* number of parallel prefetches */
410 5, /* Branch cost */
411 4, /* cost of FADD and FSUB insns. */
412 4, /* cost of FMUL instruction. */
413 19, /* cost of FDIV instruction. */
414 2, /* cost of FABS instruction. */
415 2, /* cost of FCHS instruction. */
416 35, /* cost of FSQRT instruction. */
419 static const
420 struct processor_costs pentium4_cost = {
421 1, /* cost of an add instruction */
422 3, /* cost of a lea instruction */
423 4, /* variable shift costs */
424 4, /* constant shift costs */
425 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
426 0, /* cost of multiply per each bit set */
427 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
428 1, /* cost of movsx */
429 1, /* cost of movzx */
430 16, /* "large" insn */
431 6, /* MOVE_RATIO */
432 2, /* cost for loading QImode using movzbl */
433 {4, 5, 4}, /* cost of loading integer registers
434 in QImode, HImode and SImode.
435 Relative to reg-reg move (2). */
436 {2, 3, 2}, /* cost of storing integer registers */
437 2, /* cost of reg,reg fld/fst */
438 {2, 2, 6}, /* cost of loading fp registers
439 in SFmode, DFmode and XFmode */
440 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
441 2, /* cost of moving MMX register */
442 {2, 2}, /* cost of loading MMX registers
443 in SImode and DImode */
444 {2, 2}, /* cost of storing MMX registers
445 in SImode and DImode */
446 12, /* cost of moving SSE register */
447 {12, 12, 12}, /* cost of loading SSE registers
448 in SImode, DImode and TImode */
449 {2, 2, 8}, /* cost of storing SSE registers
450 in SImode, DImode and TImode */
451 10, /* MMX or SSE register to integer */
452 64, /* size of prefetch block */
453 6, /* number of parallel prefetches */
454 2, /* Branch cost */
455 5, /* cost of FADD and FSUB insns. */
456 7, /* cost of FMUL instruction. */
457 43, /* cost of FDIV instruction. */
458 2, /* cost of FABS instruction. */
459 2, /* cost of FCHS instruction. */
460 43, /* cost of FSQRT instruction. */
463 static const
464 struct processor_costs nocona_cost = {
465 1, /* cost of an add instruction */
466 1, /* cost of a lea instruction */
467 1, /* variable shift costs */
468 1, /* constant shift costs */
469 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
470 0, /* cost of multiply per each bit set */
471 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
472 1, /* cost of movsx */
473 1, /* cost of movzx */
474 16, /* "large" insn */
475 17, /* MOVE_RATIO */
476 4, /* cost for loading QImode using movzbl */
477 {4, 4, 4}, /* cost of loading integer registers
478 in QImode, HImode and SImode.
479 Relative to reg-reg move (2). */
480 {4, 4, 4}, /* cost of storing integer registers */
481 3, /* cost of reg,reg fld/fst */
482 {12, 12, 12}, /* cost of loading fp registers
483 in SFmode, DFmode and XFmode */
484 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
485 6, /* cost of moving MMX register */
486 {12, 12}, /* cost of loading MMX registers
487 in SImode and DImode */
488 {12, 12}, /* cost of storing MMX registers
489 in SImode and DImode */
490 6, /* cost of moving SSE register */
491 {12, 12, 12}, /* cost of loading SSE registers
492 in SImode, DImode and TImode */
493 {12, 12, 12}, /* cost of storing SSE registers
494 in SImode, DImode and TImode */
495 8, /* MMX or SSE register to integer */
496 128, /* size of prefetch block */
497 8, /* number of parallel prefetches */
498 1, /* Branch cost */
499 6, /* cost of FADD and FSUB insns. */
500 8, /* cost of FMUL instruction. */
501 40, /* cost of FDIV instruction. */
502 3, /* cost of FABS instruction. */
503 3, /* cost of FCHS instruction. */
504 44, /* cost of FSQRT instruction. */
507 const struct processor_costs *ix86_cost = &pentium_cost;
509 /* Processor feature/optimization bitmasks. */
510 #define m_386 (1<<PROCESSOR_I386)
511 #define m_486 (1<<PROCESSOR_I486)
512 #define m_PENT (1<<PROCESSOR_PENTIUM)
513 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
514 #define m_K6 (1<<PROCESSOR_K6)
515 #define m_ATHLON (1<<PROCESSOR_ATHLON)
516 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
517 #define m_K8 (1<<PROCESSOR_K8)
518 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
519 #define m_NOCONA (1<<PROCESSOR_NOCONA)
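/* Hedged sketch of how these masks are consumed: i386.h is expected to wrap
   each tuning flag below in a TARGET_* macro along the lines of

     #define TARGET_USE_LEAVE (x86_use_leave & (1 << ix86_tune))

   so every "const int x86_..." that follows is just a bitmask of the
   processors (m_386, m_K6, ...) for which that optimization is enabled.  The
   exact macro spelling above is an assumption; see i386.h for the real one.  */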
521 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
522 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
523 const int x86_zero_extend_with_and = m_486 | m_PENT;
524 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
525 const int x86_double_with_add = ~m_386;
526 const int x86_use_bit_test = m_386;
527 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
528 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
529 const int x86_fisttp = m_NOCONA;
530 const int x86_3dnow_a = m_ATHLON_K8;
531 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
532 /* Branch hints were put in P4 based on simulation result. But
533 after P4 was made, no performance benefit was observed with
534 branch hints. It also increases the code size. As a result,
535 icc never generates branch hints. */
536 const int x86_branch_hints = 0;
537 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
538 const int x86_partial_reg_stall = m_PPRO;
539 const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
540 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
541 const int x86_use_mov0 = m_K6;
542 const int x86_use_cltd = ~(m_PENT | m_K6);
543 const int x86_read_modify_write = ~m_PENT;
544 const int x86_read_modify = ~(m_PENT | m_PPRO);
545 const int x86_split_long_moves = m_PPRO;
546 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
547 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
548 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
549 const int x86_qimode_math = ~(0);
550 const int x86_promote_qi_regs = 0;
551 const int x86_himode_math = ~(m_PPRO);
552 const int x86_promote_hi_regs = m_PPRO;
553 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
554 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
556 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
557 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
558 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
559 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
560 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
561 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
562 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
563 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
564 const int x86_shift1 = ~m_486;
565 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
566 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
567 /* Set for machines where the type and dependencies are resolved on SSE
568 register parts instead of whole registers, so we may maintain just
569 lower part of scalar values in proper format leaving the upper part
570 undefined. */
571 const int x86_sse_split_regs = m_ATHLON_K8;
572 const int x86_sse_typeless_stores = m_ATHLON_K8;
573 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
574 const int x86_use_ffreep = m_ATHLON_K8;
575 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
577 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
578 integer data in xmm registers, which results in pretty abysmal code. */
579 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
581 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
582 /* Some CPU cores are not able to predict more than 4 branch instructions in
583 the 16-byte window. */
584 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
585 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
586 const int x86_use_bt = m_ATHLON_K8;
587 /* Compare and exchange was added for 80486. */
588 const int x86_cmpxchg = ~m_386;
589 /* Exchange and add was added for 80486. */
590 const int x86_xadd = ~m_386;
592 /* If the average insn count for a single function invocation is
593 lower than this constant, emit fast (but longer) prologue and
594 epilogue code. */
595 #define FAST_PROLOGUE_INSN_COUNT 20
597 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
598 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
599 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
600 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
602 /* Array of the smallest class containing reg number REGNO, indexed by
603 REGNO. Used by REGNO_REG_CLASS in i386.h. */
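/* For illustration only: REGNO_REG_CLASS (REGNO) in i386.h is expected to
   expand to something like regclass_map[(REGNO)], so regno 0 (%eax) yields
   AREG and regno 7 (%esp) yields NON_Q_REGS per the table below.  The exact
   macro text is an assumption; the table itself is the authoritative data.  */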
605 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
607 /* ax, dx, cx, bx */
608 AREG, DREG, CREG, BREG,
609 /* si, di, bp, sp */
610 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
611 /* FP registers */
612 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
613 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
614 /* arg pointer */
615 NON_Q_REGS,
616 /* flags, fpsr, dirflag, frame */
617 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
618 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
619 SSE_REGS, SSE_REGS,
620 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
621 MMX_REGS, MMX_REGS,
622 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
623 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
624 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
625 SSE_REGS, SSE_REGS,
628 /* The "default" register map used in 32bit mode. */
630 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
632 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
633 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
634 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
635 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
636 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
637 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
638 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
641 static int const x86_64_int_parameter_registers[6] =
643 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
644 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
647 static int const x86_64_int_return_registers[4] =
649 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
652 /* The "default" register map used in 64bit mode. */
653 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
655 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
656 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
657 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
658 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
659 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
660 8,9,10,11,12,13,14,15, /* extended integer registers */
661 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
664 /* Define the register numbers to be used in Dwarf debugging information.
665 The SVR4 reference port C compiler uses the following register numbers
666 in its Dwarf output code:
667 0 for %eax (gcc regno = 0)
668 1 for %ecx (gcc regno = 2)
669 2 for %edx (gcc regno = 1)
670 3 for %ebx (gcc regno = 3)
671 4 for %esp (gcc regno = 7)
672 5 for %ebp (gcc regno = 6)
673 6 for %esi (gcc regno = 4)
674 7 for %edi (gcc regno = 5)
675 The following three DWARF register numbers are never generated by
676 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
677 believes these numbers have these meanings.
678 8 for %eip (no gcc equivalent)
679 9 for %eflags (gcc regno = 17)
680 10 for %trapno (no gcc equivalent)
681 It is not at all clear how we should number the FP stack registers
682 for the x86 architecture. If the version of SDB on x86/svr4 were
683 a bit less brain dead with respect to floating-point then we would
684 have a precedent to follow with respect to DWARF register numbers
685 for x86 FP registers, but the SDB on x86/svr4 is so completely
686 broken with respect to FP registers that it is hardly worth thinking
687 of it as something to strive for compatibility with.
688 The version of x86/svr4 SDB I have at the moment does (partially)
689 seem to believe that DWARF register number 11 is associated with
690 the x86 register %st(0), but that's about all. Higher DWARF
691 register numbers don't seem to be associated with anything in
692 particular, and even for DWARF regno 11, SDB only seems to under-
693 stand that it should say that a variable lives in %st(0) (when
694 asked via an `=' command) if we said it was in DWARF regno 11,
695 but SDB still prints garbage when asked for the value of the
696 variable in question (via a `/' command).
697 (Also note that the labels SDB prints for various FP stack regs
698 when doing an `x' command are all wrong.)
699 Note that these problems generally don't affect the native SVR4
700 C compiler because it doesn't allow the use of -O with -g and
701 because when it is *not* optimizing, it allocates a memory
702 location for each floating-point variable, and the memory
703 location is what gets described in the DWARF AT_location
704 attribute for the variable in question.
705 Regardless of the severe mental illness of the x86/svr4 SDB, we
706 do something sensible here and we use the following DWARF
707 register numbers. Note that these are all stack-top-relative
708 numbers.
709 11 for %st(0) (gcc regno = 8)
710 12 for %st(1) (gcc regno = 9)
711 13 for %st(2) (gcc regno = 10)
712 14 for %st(3) (gcc regno = 11)
713 15 for %st(4) (gcc regno = 12)
714 16 for %st(5) (gcc regno = 13)
715 17 for %st(6) (gcc regno = 14)
716 18 for %st(7) (gcc regno = 15)
718 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
720 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
721 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
722 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
723 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
724 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
725 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
726 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
729 /* Test and compare insns in i386.md store the information needed to
730 generate branch and scc insns here. */
732 rtx ix86_compare_op0 = NULL_RTX;
733 rtx ix86_compare_op1 = NULL_RTX;
734 rtx ix86_compare_emitted = NULL_RTX;
736 /* Size of the register save area. */
737 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
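/* Worked example, assuming the usual 64-bit values REGPARM_MAX = 6 and
   SSE_REGPARM_MAX = 8 with UNITS_PER_WORD = 8:
   6 * 8 = 48 bytes of integer registers plus 8 * 16 = 128 bytes of SSE
   registers, i.e. a 176-byte register save area for va_arg.  */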
739 /* Define the structure for the machine field in struct function. */
741 struct stack_local_entry GTY(())
743 unsigned short mode;
744 unsigned short n;
745 rtx rtl;
746 struct stack_local_entry *next;
749 /* Structure describing stack frame layout.
750 Stack grows downward:
752 [arguments]
753 <- ARG_POINTER
754 saved pc
756 saved frame pointer if frame_pointer_needed
757 <- HARD_FRAME_POINTER
758 [saved regs]
760 [padding1] \
762 [va_arg registers] (
763 > to_allocate <- FRAME_POINTER
764 [frame] (
766 [padding2] /
768 struct ix86_frame
770 int nregs;
771 int padding1;
772 int va_arg_size;
773 HOST_WIDE_INT frame;
774 int padding2;
775 int outgoing_arguments_size;
776 int red_zone_size;
778 HOST_WIDE_INT to_allocate;
779 /* The offsets relative to ARG_POINTER. */
780 HOST_WIDE_INT frame_pointer_offset;
781 HOST_WIDE_INT hard_frame_pointer_offset;
782 HOST_WIDE_INT stack_pointer_offset;
784 /* When save_regs_using_mov is set, emit prologue using
785 move instead of push instructions. */
786 bool save_regs_using_mov;
789 /* Code model option. */
790 enum cmodel ix86_cmodel;
791 /* Asm dialect. */
792 enum asm_dialect ix86_asm_dialect = ASM_ATT;
793 /* TLS dialect. */
794 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
796 /* Which unit we are generating floating point math for. */
797 enum fpmath_unit ix86_fpmath;
799 /* Which cpu are we scheduling for. */
800 enum processor_type ix86_tune;
801 /* Which instruction set architecture to use. */
802 enum processor_type ix86_arch;
804 /* True if the SSE prefetch instruction is not a NOP. */
805 int x86_prefetch_sse;
807 /* ix86_regparm_string as a number */
808 static int ix86_regparm;
810 /* Preferred alignment for stack boundary in bits. */
811 unsigned int ix86_preferred_stack_boundary;
813 /* Values 1-5: see jump.c */
814 int ix86_branch_cost;
816 /* Variables which are this size or smaller are put in the data/bss
817 or ldata/lbss sections. */
819 int ix86_section_threshold = 65536;
821 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
822 char internal_label_prefix[16];
823 int internal_label_prefix_len;
825 static bool ix86_handle_option (size_t, const char *, int);
826 static void output_pic_addr_const (FILE *, rtx, int);
827 static void put_condition_code (enum rtx_code, enum machine_mode,
828 int, int, FILE *);
829 static const char *get_some_local_dynamic_name (void);
830 static int get_some_local_dynamic_name_1 (rtx *, void *);
831 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
832 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
833 rtx *);
834 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
835 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
836 enum machine_mode);
837 static rtx get_thread_pointer (int);
838 static rtx legitimize_tls_address (rtx, enum tls_model, int);
839 static void get_pc_thunk_name (char [32], unsigned int);
840 static rtx gen_push (rtx);
841 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
842 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
843 static struct machine_function * ix86_init_machine_status (void);
844 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
845 static int ix86_nsaved_regs (void);
846 static void ix86_emit_save_regs (void);
847 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
848 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
849 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
850 static HOST_WIDE_INT ix86_GOT_alias_set (void);
851 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
852 static rtx ix86_expand_aligntest (rtx, int);
853 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
854 static int ix86_issue_rate (void);
855 static int ix86_adjust_cost (rtx, rtx, rtx, int);
856 static int ia32_multipass_dfa_lookahead (void);
857 static void ix86_init_mmx_sse_builtins (void);
858 static rtx x86_this_parameter (tree);
859 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
860 HOST_WIDE_INT, tree);
861 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
862 static void x86_file_start (void);
863 static void ix86_reorg (void);
864 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
865 static tree ix86_build_builtin_va_list (void);
866 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
867 tree, int *, int);
868 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
869 static bool ix86_vector_mode_supported_p (enum machine_mode);
871 static int ix86_address_cost (rtx);
872 static bool ix86_cannot_force_const_mem (rtx);
873 static rtx ix86_delegitimize_address (rtx);
875 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
877 struct builtin_description;
878 static rtx ix86_expand_sse_comi (const struct builtin_description *,
879 tree, rtx);
880 static rtx ix86_expand_sse_compare (const struct builtin_description *,
881 tree, rtx);
882 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
883 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
884 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
885 static rtx ix86_expand_store_builtin (enum insn_code, tree);
886 static rtx safe_vector_operand (rtx, enum machine_mode);
887 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
888 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
889 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
890 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
891 static int ix86_fp_comparison_cost (enum rtx_code code);
892 static unsigned int ix86_select_alt_pic_regnum (void);
893 static int ix86_save_reg (unsigned int, int);
894 static void ix86_compute_frame_layout (struct ix86_frame *);
895 static int ix86_comp_type_attributes (tree, tree);
896 static int ix86_function_regparm (tree, tree);
897 const struct attribute_spec ix86_attribute_table[];
898 static bool ix86_function_ok_for_sibcall (tree, tree);
899 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
900 static int ix86_value_regno (enum machine_mode, tree, tree);
901 static bool contains_128bit_aligned_vector_p (tree);
902 static rtx ix86_struct_value_rtx (tree, int);
903 static bool ix86_ms_bitfield_layout_p (tree);
904 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
905 static int extended_reg_mentioned_1 (rtx *, void *);
906 static bool ix86_rtx_costs (rtx, int, int, int *);
907 static int min_insn_size (rtx);
908 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
909 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
910 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
911 tree, bool);
912 static void ix86_init_builtins (void);
913 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
914 static const char *ix86_mangle_fundamental_type (tree);
915 static tree ix86_stack_protect_fail (void);
917 /* This function is only used on Solaris. */
918 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
919 ATTRIBUTE_UNUSED;
921 /* Register class used for passing given 64bit part of the argument.
922 These represent classes as documented by the psABI, with the exception
923 of the SSESF and SSEDF classes, which are basically the SSE class, except
924 that gcc will use SF or DFmode moves instead of DImode to avoid reformatting penalties.
926 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
927 whenever possible (the upper half does contain padding). */
929 enum x86_64_reg_class
931 X86_64_NO_CLASS,
932 X86_64_INTEGER_CLASS,
933 X86_64_INTEGERSI_CLASS,
934 X86_64_SSE_CLASS,
935 X86_64_SSESF_CLASS,
936 X86_64_SSEDF_CLASS,
937 X86_64_SSEUP_CLASS,
938 X86_64_X87_CLASS,
939 X86_64_X87UP_CLASS,
940 X86_64_COMPLEX_X87_CLASS,
941 X86_64_MEMORY_CLASS
943 static const char * const x86_64_reg_class_name[] = {
944 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
945 "sseup", "x87", "x87up", "cplx87", "no"
948 #define MAX_CLASSES 4
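/* Classification sketch (illustrative, following the psABI rules these
   classes model): a 16-byte struct { double d; int i; } passed by value is
   split into two eightbytes classified as { X86_64_SSEDF_CLASS,
   X86_64_INTEGERSI_CLASS }, so the double travels in an SSE register and the
   int in a general-purpose register.  */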
950 /* Table of constants used by fldpi, fldln2, etc.... */
951 static REAL_VALUE_TYPE ext_80387_constants_table [5];
952 static bool ext_80387_constants_init = 0;
953 static void init_ext_80387_constants (void);
954 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
955 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
956 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
957 static void x86_64_elf_select_section (tree decl, int reloc,
958 unsigned HOST_WIDE_INT align)
959 ATTRIBUTE_UNUSED;
961 /* Initialize the GCC target structure. */
962 #undef TARGET_ATTRIBUTE_TABLE
963 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
964 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
965 # undef TARGET_MERGE_DECL_ATTRIBUTES
966 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
967 #endif
969 #undef TARGET_COMP_TYPE_ATTRIBUTES
970 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
972 #undef TARGET_INIT_BUILTINS
973 #define TARGET_INIT_BUILTINS ix86_init_builtins
974 #undef TARGET_EXPAND_BUILTIN
975 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
977 #undef TARGET_ASM_FUNCTION_EPILOGUE
978 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
980 #undef TARGET_ENCODE_SECTION_INFO
981 #ifndef SUBTARGET_ENCODE_SECTION_INFO
982 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
983 #else
984 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
985 #endif
987 #undef TARGET_ASM_OPEN_PAREN
988 #define TARGET_ASM_OPEN_PAREN ""
989 #undef TARGET_ASM_CLOSE_PAREN
990 #define TARGET_ASM_CLOSE_PAREN ""
992 #undef TARGET_ASM_ALIGNED_HI_OP
993 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
994 #undef TARGET_ASM_ALIGNED_SI_OP
995 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
996 #ifdef ASM_QUAD
997 #undef TARGET_ASM_ALIGNED_DI_OP
998 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
999 #endif
1001 #undef TARGET_ASM_UNALIGNED_HI_OP
1002 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1003 #undef TARGET_ASM_UNALIGNED_SI_OP
1004 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1005 #undef TARGET_ASM_UNALIGNED_DI_OP
1006 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1008 #undef TARGET_SCHED_ADJUST_COST
1009 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1010 #undef TARGET_SCHED_ISSUE_RATE
1011 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1012 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1013 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1014 ia32_multipass_dfa_lookahead
1016 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1017 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1019 #ifdef HAVE_AS_TLS
1020 #undef TARGET_HAVE_TLS
1021 #define TARGET_HAVE_TLS true
1022 #endif
1023 #undef TARGET_CANNOT_FORCE_CONST_MEM
1024 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1026 #undef TARGET_DELEGITIMIZE_ADDRESS
1027 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1029 #undef TARGET_MS_BITFIELD_LAYOUT_P
1030 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1032 #if TARGET_MACHO
1033 #undef TARGET_BINDS_LOCAL_P
1034 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1035 #endif
1037 #undef TARGET_ASM_OUTPUT_MI_THUNK
1038 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1039 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1040 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1042 #undef TARGET_ASM_FILE_START
1043 #define TARGET_ASM_FILE_START x86_file_start
1045 #undef TARGET_DEFAULT_TARGET_FLAGS
1046 #define TARGET_DEFAULT_TARGET_FLAGS \
1047 (TARGET_DEFAULT \
1048 | TARGET_64BIT_DEFAULT \
1049 | TARGET_SUBTARGET_DEFAULT \
1050 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1052 #undef TARGET_HANDLE_OPTION
1053 #define TARGET_HANDLE_OPTION ix86_handle_option
1055 #undef TARGET_RTX_COSTS
1056 #define TARGET_RTX_COSTS ix86_rtx_costs
1057 #undef TARGET_ADDRESS_COST
1058 #define TARGET_ADDRESS_COST ix86_address_cost
1060 #undef TARGET_FIXED_CONDITION_CODE_REGS
1061 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1062 #undef TARGET_CC_MODES_COMPATIBLE
1063 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1065 #undef TARGET_MACHINE_DEPENDENT_REORG
1066 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1068 #undef TARGET_BUILD_BUILTIN_VA_LIST
1069 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1071 #undef TARGET_MD_ASM_CLOBBERS
1072 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1074 #undef TARGET_PROMOTE_PROTOTYPES
1075 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1076 #undef TARGET_STRUCT_VALUE_RTX
1077 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1078 #undef TARGET_SETUP_INCOMING_VARARGS
1079 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1080 #undef TARGET_MUST_PASS_IN_STACK
1081 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1082 #undef TARGET_PASS_BY_REFERENCE
1083 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1085 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1086 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1088 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1089 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1091 #ifdef HAVE_AS_TLS
1092 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1093 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1094 #endif
1096 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1097 #undef TARGET_INSERT_ATTRIBUTES
1098 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1099 #endif
1101 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1102 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1104 #undef TARGET_STACK_PROTECT_FAIL
1105 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1107 #undef TARGET_FUNCTION_VALUE
1108 #define TARGET_FUNCTION_VALUE ix86_function_value
1110 struct gcc_target targetm = TARGET_INITIALIZER;
1113 /* The svr4 ABI for the i386 says that records and unions are returned
1114 in memory. */
1115 #ifndef DEFAULT_PCC_STRUCT_RETURN
1116 #define DEFAULT_PCC_STRUCT_RETURN 1
1117 #endif
1119 /* Implement TARGET_HANDLE_OPTION. */
1121 static bool
1122 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1124 switch (code)
1126 case OPT_m3dnow:
1127 if (!value)
1129 target_flags &= ~MASK_3DNOW_A;
1130 target_flags_explicit |= MASK_3DNOW_A;
1132 return true;
1134 case OPT_mmmx:
1135 if (!value)
1137 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1138 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1140 return true;
1142 case OPT_msse:
1143 if (!value)
1145 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1146 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1148 return true;
1150 case OPT_msse2:
1151 if (!value)
1153 target_flags &= ~MASK_SSE3;
1154 target_flags_explicit |= MASK_SSE3;
1156 return true;
1158 default:
1159 return true;
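/* Illustrative consequence of the handling above: with "-msse2 -mno-sse" the
   later -mno-sse clears MASK_SSE2 and MASK_SSE3 here (the generic option
   machinery clears MASK_SSE itself), so no SSE code generation remains
   enabled.  Enabling an option never switches on the features that depend on
   it in this function; that cascade lives in override_options below.  */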
1163 /* Sometimes certain combinations of command options do not make
1164 sense on a particular target machine. You can define a macro
1165 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1166 defined, is executed once just after all the command options have
1167 been parsed.
1169 Don't use this macro to turn on various extra optimizations for
1170 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1172 void
1173 override_options (void)
1175 int i;
1176 int ix86_tune_defaulted = 0;
1178 /* Comes from final.c -- no real reason to change it. */
1179 #define MAX_CODE_ALIGN 16
1181 static struct ptt
1183 const struct processor_costs *cost; /* Processor costs */
1184 const int target_enable; /* Target flags to enable. */
1185 const int target_disable; /* Target flags to disable. */
1186 const int align_loop; /* Default alignments. */
1187 const int align_loop_max_skip;
1188 const int align_jump;
1189 const int align_jump_max_skip;
1190 const int align_func;
1192 const processor_target_table[PROCESSOR_max] =
1194 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1195 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1196 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1197 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1198 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1199 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1200 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1201 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1202 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1205 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1206 static struct pta
1208 const char *const name; /* processor name or nickname. */
1209 const enum processor_type processor;
1210 const enum pta_flags
1212 PTA_SSE = 1,
1213 PTA_SSE2 = 2,
1214 PTA_SSE3 = 4,
1215 PTA_MMX = 8,
1216 PTA_PREFETCH_SSE = 16,
1217 PTA_3DNOW = 32,
1218 PTA_3DNOW_A = 64,
1219 PTA_64BIT = 128
1220 } flags;
1222 const processor_alias_table[] =
1224 {"i386", PROCESSOR_I386, 0},
1225 {"i486", PROCESSOR_I486, 0},
1226 {"i586", PROCESSOR_PENTIUM, 0},
1227 {"pentium", PROCESSOR_PENTIUM, 0},
1228 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1229 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1230 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1231 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1232 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1233 {"i686", PROCESSOR_PENTIUMPRO, 0},
1234 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1235 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1236 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1237 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1238 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1239 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1240 | PTA_MMX | PTA_PREFETCH_SSE},
1241 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1242 | PTA_MMX | PTA_PREFETCH_SSE},
1243 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1244 | PTA_MMX | PTA_PREFETCH_SSE},
1245 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1246 | PTA_MMX | PTA_PREFETCH_SSE},
1247 {"k6", PROCESSOR_K6, PTA_MMX},
1248 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1249 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1250 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1251 | PTA_3DNOW_A},
1252 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1253 | PTA_3DNOW | PTA_3DNOW_A},
1254 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1255 | PTA_3DNOW_A | PTA_SSE},
1256 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1257 | PTA_3DNOW_A | PTA_SSE},
1258 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1259 | PTA_3DNOW_A | PTA_SSE},
1260 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1261 | PTA_SSE | PTA_SSE2 },
1262 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1263 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1264 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1265 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1266 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1267 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1268 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1269 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1272 int const pta_size = ARRAY_SIZE (processor_alias_table);
1274 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1275 SUBTARGET_OVERRIDE_OPTIONS;
1276 #endif
1278 /* Set the default values for switches whose default depends on TARGET_64BIT
1279 in case they weren't overwritten by command line options. */
1280 if (TARGET_64BIT)
1282 if (flag_omit_frame_pointer == 2)
1283 flag_omit_frame_pointer = 1;
1284 if (flag_asynchronous_unwind_tables == 2)
1285 flag_asynchronous_unwind_tables = 1;
1286 if (flag_pcc_struct_return == 2)
1287 flag_pcc_struct_return = 0;
1289 else
1291 if (flag_omit_frame_pointer == 2)
1292 flag_omit_frame_pointer = 0;
1293 if (flag_asynchronous_unwind_tables == 2)
1294 flag_asynchronous_unwind_tables = 0;
1295 if (flag_pcc_struct_return == 2)
1296 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1299 if (!ix86_tune_string && ix86_arch_string)
1300 ix86_tune_string = ix86_arch_string;
1301 if (!ix86_tune_string)
1303 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1304 ix86_tune_defaulted = 1;
1306 if (!ix86_arch_string)
1307 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1309 if (ix86_cmodel_string != 0)
1311 if (!strcmp (ix86_cmodel_string, "small"))
1312 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1313 else if (!strcmp (ix86_cmodel_string, "medium"))
1314 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1315 else if (flag_pic)
1316 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1317 else if (!strcmp (ix86_cmodel_string, "32"))
1318 ix86_cmodel = CM_32;
1319 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1320 ix86_cmodel = CM_KERNEL;
1321 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1322 ix86_cmodel = CM_LARGE;
1323 else
1324 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1326 else
1328 ix86_cmodel = CM_32;
1329 if (TARGET_64BIT)
1330 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1332 if (ix86_asm_string != 0)
1334 if (!strcmp (ix86_asm_string, "intel"))
1335 ix86_asm_dialect = ASM_INTEL;
1336 else if (!strcmp (ix86_asm_string, "att"))
1337 ix86_asm_dialect = ASM_ATT;
1338 else
1339 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1341 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1342 error ("code model %qs not supported in the %s bit mode",
1343 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1344 if (ix86_cmodel == CM_LARGE)
1345 sorry ("code model %<large%> not supported yet");
1346 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1347 sorry ("%i-bit mode not compiled in",
1348 (target_flags & MASK_64BIT) ? 64 : 32);
1350 for (i = 0; i < pta_size; i++)
1351 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1353 ix86_arch = processor_alias_table[i].processor;
1354 /* Default cpu tuning to the architecture. */
1355 ix86_tune = ix86_arch;
1356 if (processor_alias_table[i].flags & PTA_MMX
1357 && !(target_flags_explicit & MASK_MMX))
1358 target_flags |= MASK_MMX;
1359 if (processor_alias_table[i].flags & PTA_3DNOW
1360 && !(target_flags_explicit & MASK_3DNOW))
1361 target_flags |= MASK_3DNOW;
1362 if (processor_alias_table[i].flags & PTA_3DNOW_A
1363 && !(target_flags_explicit & MASK_3DNOW_A))
1364 target_flags |= MASK_3DNOW_A;
1365 if (processor_alias_table[i].flags & PTA_SSE
1366 && !(target_flags_explicit & MASK_SSE))
1367 target_flags |= MASK_SSE;
1368 if (processor_alias_table[i].flags & PTA_SSE2
1369 && !(target_flags_explicit & MASK_SSE2))
1370 target_flags |= MASK_SSE2;
1371 if (processor_alias_table[i].flags & PTA_SSE3
1372 && !(target_flags_explicit & MASK_SSE3))
1373 target_flags |= MASK_SSE3;
1374 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1375 x86_prefetch_sse = true;
1376 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1377 error ("CPU you selected does not support x86-64 "
1378 "instruction set");
1379 break;
1382 if (i == pta_size)
1383 error ("bad value (%s) for -march= switch", ix86_arch_string);
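/* Example of the -march loop above (illustrative): -march=k8 selects
   PROCESSOR_K8 and, unless the user explicitly said otherwise, turns on
   MASK_MMX, MASK_3DNOW, MASK_3DNOW_A, MASK_SSE and MASK_SSE2 and marks SSE
   prefetch as usable, mirroring the PTA_* flags listed for "k8" in
   processor_alias_table.  */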
1385 for (i = 0; i < pta_size; i++)
1386 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1388 ix86_tune = processor_alias_table[i].processor;
1389 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1391 if (ix86_tune_defaulted)
1393 ix86_tune_string = "x86-64";
1394 for (i = 0; i < pta_size; i++)
1395 if (! strcmp (ix86_tune_string,
1396 processor_alias_table[i].name))
1397 break;
1398 ix86_tune = processor_alias_table[i].processor;
1400 else
1401 error ("CPU you selected does not support x86-64 "
1402 "instruction set");
1404 /* Intel CPUs have always interpreted SSE prefetch instructions as
1405 NOPs; so, we can enable SSE prefetch instructions even when
1406 -mtune (rather than -march) points us to a processor that has them.
1407 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1408 higher processors. */
1409 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1410 x86_prefetch_sse = true;
1411 break;
1413 if (i == pta_size)
1414 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1416 if (optimize_size)
1417 ix86_cost = &size_cost;
1418 else
1419 ix86_cost = processor_target_table[ix86_tune].cost;
1420 target_flags |= processor_target_table[ix86_tune].target_enable;
1421 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1423 /* Arrange to set up i386_stack_locals for all functions. */
1424 init_machine_status = ix86_init_machine_status;
1426 /* Validate -mregparm= value. */
1427 if (ix86_regparm_string)
1429 i = atoi (ix86_regparm_string);
1430 if (i < 0 || i > REGPARM_MAX)
1431 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1432 else
1433 ix86_regparm = i;
1435 else
1436 if (TARGET_64BIT)
1437 ix86_regparm = REGPARM_MAX;
1439 /* If the user has provided any of the -malign-* options,
1440 warn and use that value only if -falign-* is not set.
1441 Remove this code in GCC 3.2 or later. */
1442 if (ix86_align_loops_string)
1444 warning (0, "-malign-loops is obsolete, use -falign-loops");
1445 if (align_loops == 0)
1447 i = atoi (ix86_align_loops_string);
1448 if (i < 0 || i > MAX_CODE_ALIGN)
1449 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1450 else
1451 align_loops = 1 << i;
1455 if (ix86_align_jumps_string)
1457 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
1458 if (align_jumps == 0)
1460 i = atoi (ix86_align_jumps_string);
1461 if (i < 0 || i > MAX_CODE_ALIGN)
1462 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1463 else
1464 align_jumps = 1 << i;
1468 if (ix86_align_funcs_string)
1470 warning (0, "-malign-functions is obsolete, use -falign-functions");
1471 if (align_functions == 0)
1473 i = atoi (ix86_align_funcs_string);
1474 if (i < 0 || i > MAX_CODE_ALIGN)
1475 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1476 else
1477 align_functions = 1 << i;
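/* Worked example for the obsolete -malign-* switches handled above:
   -malign-loops=4 behaves like -falign-loops=16, since the stored alignment
   is 1 << 4 bytes.  */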
1481 /* Default align_* from the processor table. */
1482 if (align_loops == 0)
1484 align_loops = processor_target_table[ix86_tune].align_loop;
1485 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1487 if (align_jumps == 0)
1489 align_jumps = processor_target_table[ix86_tune].align_jump;
1490 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1492 if (align_functions == 0)
1494 align_functions = processor_target_table[ix86_tune].align_func;
1497 /* Validate -mpreferred-stack-boundary= value, or provide default.
1498 The default of 128 bits is for Pentium III's SSE __m128, but we
1499 don't want additional code to keep the stack aligned when
1500 optimizing for code size. */
1501 ix86_preferred_stack_boundary = (optimize_size
1502 ? TARGET_64BIT ? 128 : 32
1503 : 128);
1504 if (ix86_preferred_stack_boundary_string)
1506 i = atoi (ix86_preferred_stack_boundary_string);
1507 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1508 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1509 TARGET_64BIT ? 4 : 2);
1510 else
1511 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
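/* Worked example: an in-range -mpreferred-stack-boundary=4 gives
   (1 << 4) * BITS_PER_UNIT = 128 bits, i.e. a 16-byte stack boundary, which
   matches the default chosen above when not optimizing for size. */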
1514 /* Validate -mbranch-cost= value, or provide default. */
1515 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1516 if (ix86_branch_cost_string)
1518 i = atoi (ix86_branch_cost_string);
1519 if (i < 0 || i > 5)
1520 error ("-mbranch-cost=%d is not between 0 and 5", i);
1521 else
1522 ix86_branch_cost = i;
1524 if (ix86_section_threshold_string)
1526 i = atoi (ix86_section_threshold_string);
1527 if (i < 0)
1528 error ("-mlarge-data-threshold=%d is negative", i);
1529 else
1530 ix86_section_threshold = i;
1533 if (ix86_tls_dialect_string)
1535 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1536 ix86_tls_dialect = TLS_DIALECT_GNU;
1537 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1538 ix86_tls_dialect = TLS_DIALECT_SUN;
1539 else
1540 error ("bad value (%s) for -mtls-dialect= switch",
1541 ix86_tls_dialect_string);
1544 /* Keep nonleaf frame pointers. */
1545 if (flag_omit_frame_pointer)
1546 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1547 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1548 flag_omit_frame_pointer = 1;
1550 /* If we're doing fast math, we don't care about comparison order
1551 wrt NaNs. This lets us use a shorter comparison sequence. */
1552 if (flag_unsafe_math_optimizations)
1553 target_flags &= ~MASK_IEEE_FP;
1555 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1556 since the insns won't need emulation. */
1557 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1558 target_flags &= ~MASK_NO_FANCY_MATH_387;
1560 /* Likewise, if the target doesn't have a 387, or we've specified
1561 software floating point, don't use 387 inline intrinsics. */
1562 if (!TARGET_80387)
1563 target_flags |= MASK_NO_FANCY_MATH_387;
1565 /* Turn on SSE2 builtins for -msse3. */
1566 if (TARGET_SSE3)
1567 target_flags |= MASK_SSE2;
1569 /* Turn on SSE builtins for -msse2. */
1570 if (TARGET_SSE2)
1571 target_flags |= MASK_SSE;
1573 /* Turn on MMX builtins for -msse. */
1574 if (TARGET_SSE)
1576 target_flags |= MASK_MMX & ~target_flags_explicit;
1577 x86_prefetch_sse = true;
1580 /* Turn on MMX builtins for 3Dnow. */
1581 if (TARGET_3DNOW)
1582 target_flags |= MASK_MMX;
1584 if (TARGET_64BIT)
1586 if (TARGET_ALIGN_DOUBLE)
1587 error ("-malign-double makes no sense in 64-bit mode");
1588 if (TARGET_RTD)
1589 error ("-mrtd calling convention not supported in 64-bit mode");
1591 /* Enable by default the SSE and MMX builtins. Do allow the user to
1592 explicitly disable any of these. In particular, disabling SSE and
1593 MMX for kernel code is extremely useful. */
1594 target_flags
1595 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1596 & ~target_flags_explicit);
1598 else
1600 /* The i386 ABI does not specify a red zone. It still makes sense to use it
1601 when the programmer takes care to keep the stack from being destroyed. */
1602 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1603 target_flags |= MASK_NO_RED_ZONE;
1606 /* Accept -msseregparm only if at least SSE support is enabled. */
1607 if (TARGET_SSEREGPARM
1608 && ! TARGET_SSE)
1609 error ("-msseregparm used without SSE enabled");
1611 ix86_fpmath = TARGET_FPMATH_DEFAULT;
1613 if (ix86_fpmath_string != 0)
1615 if (! strcmp (ix86_fpmath_string, "387"))
1616 ix86_fpmath = FPMATH_387;
1617 else if (! strcmp (ix86_fpmath_string, "sse"))
1619 if (!TARGET_SSE)
1621 warning (0, "SSE instruction set disabled, using 387 arithmetic");
1622 ix86_fpmath = FPMATH_387;
1624 else
1625 ix86_fpmath = FPMATH_SSE;
1627 else if (! strcmp (ix86_fpmath_string, "387,sse")
1628 || ! strcmp (ix86_fpmath_string, "sse,387"))
1630 if (!TARGET_SSE)
1632 warning (0, "SSE instruction set disabled, using 387 arithmetic");
1633 ix86_fpmath = FPMATH_387;
1635 else if (!TARGET_80387)
1637 warning (0, "387 instruction set disabled, using SSE arithmetic");
1638 ix86_fpmath = FPMATH_SSE;
1640 else
1641 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1643 else
1644 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1647 /* If the i387 is disabled, then do not return values in it. */
1648 if (!TARGET_80387)
1649 target_flags &= ~MASK_FLOAT_RETURNS;
1651 if ((x86_accumulate_outgoing_args & TUNEMASK)
1652 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1653 && !optimize_size)
1654 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1656 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1658 char *p;
1659 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1660 p = strchr (internal_label_prefix, 'X');
1661 internal_label_prefix_len = p - internal_label_prefix;
1662 *p = '\0';
1665 /* When a scheduling description is not available, disable the scheduler pass
1666 so it won't slow down the compilation and make x87 code slower. */
1667 if (!TARGET_SCHEDULE)
1668 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1671 /* Switch to the appropriate section for output of DECL.
1672 DECL is either a `VAR_DECL' node or a constant of some sort.
1673 RELOC indicates whether forming the initial value of DECL requires
1674 link-time relocations. */
1676 static void
1677 x86_64_elf_select_section (tree decl, int reloc,
1678 unsigned HOST_WIDE_INT align)
1680 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1681 && ix86_in_large_data_p (decl))
1683 const char *sname = NULL;
1684 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1686 case SECCAT_DATA:
1687 sname = ".ldata";
1688 break;
1689 case SECCAT_DATA_REL:
1690 sname = ".ldata.rel";
1691 break;
1692 case SECCAT_DATA_REL_LOCAL:
1693 sname = ".ldata.rel.local";
1694 break;
1695 case SECCAT_DATA_REL_RO:
1696 sname = ".ldata.rel.ro";
1697 break;
1698 case SECCAT_DATA_REL_RO_LOCAL:
1699 sname = ".ldata.rel.ro.local";
1700 break;
1701 case SECCAT_BSS:
1702 sname = ".lbss";
1703 break;
1704 case SECCAT_RODATA:
1705 case SECCAT_RODATA_MERGE_STR:
1706 case SECCAT_RODATA_MERGE_STR_INIT:
1707 case SECCAT_RODATA_MERGE_CONST:
1708 sname = ".lrodata";
1709 break;
1710 case SECCAT_SRODATA:
1711 case SECCAT_SDATA:
1712 case SECCAT_SBSS:
1713 gcc_unreachable ();
1714 case SECCAT_TEXT:
1715 case SECCAT_TDATA:
1716 case SECCAT_TBSS:
1717 /* We don't split these for the medium model. Place them into
1718 default sections and hope for the best. */
1719 break;
1721 if (sname)
1723 named_section (decl, sname, reloc);
1724 return;
1727 default_elf_select_section (decl, reloc, align);
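/* For example: with -mcmodel=medium, an initialized global that
   ix86_in_large_data_p considers large is placed in .ldata instead of .data,
   and an uninitialized one in .lbss instead of .bss, as selected by the
   switch above; everything else falls through to the default ELF logic. */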
1730 /* Build up a unique section name, expressed as a
1731 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
1732 RELOC indicates whether the initial value of EXP requires
1733 link-time relocations. */
1735 static void
1736 x86_64_elf_unique_section (tree decl, int reloc)
1738 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1739 && ix86_in_large_data_p (decl))
1741 const char *prefix = NULL;
1742 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
1743 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
1745 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1747 case SECCAT_DATA:
1748 case SECCAT_DATA_REL:
1749 case SECCAT_DATA_REL_LOCAL:
1750 case SECCAT_DATA_REL_RO:
1751 case SECCAT_DATA_REL_RO_LOCAL:
1752 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
1753 break;
1754 case SECCAT_BSS:
1755 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
1756 break;
1757 case SECCAT_RODATA:
1758 case SECCAT_RODATA_MERGE_STR:
1759 case SECCAT_RODATA_MERGE_STR_INIT:
1760 case SECCAT_RODATA_MERGE_CONST:
1761 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
1762 break;
1763 case SECCAT_SRODATA:
1764 case SECCAT_SDATA:
1765 case SECCAT_SBSS:
1766 gcc_unreachable ();
1767 case SECCAT_TEXT:
1768 case SECCAT_TDATA:
1769 case SECCAT_TBSS:
1770 /* We don't split these for the medium model. Place them into
1771 default sections and hope for the best. */
1772 break;
1774 if (prefix)
1776 const char *name;
1777 size_t nlen, plen;
1778 char *string;
1779 plen = strlen (prefix);
1781 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1782 name = targetm.strip_name_encoding (name);
1783 nlen = strlen (name);
1785 string = alloca (nlen + plen + 1);
1786 memcpy (string, prefix, plen);
1787 memcpy (string + plen, name, nlen + 1);
1789 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
1790 return;
1793 default_unique_section (decl, reloc);
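/* For example (a sketch, "foo" being a hypothetical symbol): with
   -fdata-sections a large initialized variable foo gets the unique section
   name ".ldata.foo" built here, an uninitialized one ".lbss.foo", and
   one-only copies fall back to the ".gnu.linkonce.l*." prefixes when COMDAT
   groups are not available. */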
1796 #ifdef COMMON_ASM_OP
1797 /* This says how to output assembler code to declare an
1798 uninitialized external linkage data object.
1800 For medium model x86-64 we need to use the .largecomm directive for
1801 large objects. */
1802 void
1803 x86_elf_aligned_common (FILE *file,
1804 const char *name, unsigned HOST_WIDE_INT size,
1805 int align)
1807 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1808 && size > (unsigned int)ix86_section_threshold)
1809 fprintf (file, ".largecomm\t");
1810 else
1811 fprintf (file, "%s", COMMON_ASM_OP);
1812 assemble_name (file, name);
1813 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
1814 size, align / BITS_PER_UNIT);
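/* For example, under -mcmodel=medium a common object larger than
   -mlarge-data-threshold is announced with ".largecomm name,size,align"
   here, while smaller objects keep the usual .comm directive. */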
1817 /* Utility function for targets to use in implementing
1818 ASM_OUTPUT_ALIGNED_BSS. */
1820 void
1821 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
1822 const char *name, unsigned HOST_WIDE_INT size,
1823 int align)
1825 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1826 && size > (unsigned int)ix86_section_threshold)
1827 named_section (decl, ".lbss", 0);
1828 else
1829 bss_section ();
1830 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
1831 #ifdef ASM_DECLARE_OBJECT_NAME
1832 last_assemble_variable_decl = decl;
1833 ASM_DECLARE_OBJECT_NAME (file, name, decl);
1834 #else
1835 /* The standard thing is to just output a label for the object. */
1836 ASM_OUTPUT_LABEL (file, name);
1837 #endif /* ASM_DECLARE_OBJECT_NAME */
1838 ASM_OUTPUT_SKIP (file, size ? size : 1);
1840 #endif
1842 void
1843 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1845 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1846 make the problem with not enough registers even worse. */
1847 #ifdef INSN_SCHEDULING
1848 if (level > 1)
1849 flag_schedule_insns = 0;
1850 #endif
1852 if (TARGET_MACHO)
1853 /* The Darwin libraries never set errno, so we might as well
1854 avoid calling them when that's the only reason we would. */
1855 flag_errno_math = 0;
1857 /* The default values of these switches depend on TARGET_64BIT,
1858 which is not known at this point. Mark these values with 2 and
1859 let the user override them. If there is no command line option
1860 specifying them, we will set the defaults in override_options. */
1861 if (optimize >= 1)
1862 flag_omit_frame_pointer = 2;
1863 flag_pcc_struct_return = 2;
1864 flag_asynchronous_unwind_tables = 2;
1865 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1866 SUBTARGET_OPTIMIZATION_OPTIONS;
1867 #endif
1870 /* Table of valid machine attributes. */
1871 const struct attribute_spec ix86_attribute_table[] =
1873 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1874 /* Stdcall attribute says callee is responsible for popping arguments
1875 if they are not variable. */
1876 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1877 /* Fastcall attribute says callee is responsible for popping arguments
1878 if they are not variable. */
1879 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1880 /* Cdecl attribute says the callee is a normal C declaration */
1881 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1882 /* Regparm attribute specifies how many integer arguments are to be
1883 passed in registers. */
1884 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
1885 /* Sseregparm attribute says we are using x86_64 calling conventions
1886 for FP arguments. */
1887 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1888 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1889 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1890 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1891 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1892 #endif
1893 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1894 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1895 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1896 SUBTARGET_ATTRIBUTE_TABLE,
1897 #endif
1898 { NULL, 0, 0, false, false, false, NULL }
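/* For example (hypothetical user code accepted via the table above):
     int  __attribute__((fastcall))   f (int a, int b);        (a in %ecx, b in %edx)
     int  __attribute__((regparm(3))) g (int a, int b, int c);  (%eax, %edx, %ecx)
     void __attribute__((stdcall))    h (int a);               (callee pops its argument)
   ix86_handle_cconv_attribute below rejects incompatible combinations such
   as fastcall together with regparm. */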
1901 /* Decide whether we can make a sibling call to a function. DECL is the
1902 declaration of the function being targeted by the call and EXP is the
1903 CALL_EXPR representing the call. */
1905 static bool
1906 ix86_function_ok_for_sibcall (tree decl, tree exp)
1908 tree func;
1909 rtx a, b;
1911 /* If we are generating position-independent code, we cannot sibcall
1912 optimize any indirect call, or a direct call to a global function,
1913 as the PLT requires %ebx be live. */
1914 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1915 return false;
1917 if (decl)
1918 func = decl;
1919 else
1921 func = TREE_TYPE (TREE_OPERAND (exp, 0));
1922 if (POINTER_TYPE_P (func))
1923 func = TREE_TYPE (func);
1926 /* Check that the return value locations are the same. For example,
1927 if we are returning floats on the 80387 register stack, we cannot
1928 make a sibcall from a function that doesn't return a float to a
1929 function that does or, conversely, from a function that does return
1930 a float to a function that doesn't; the necessary stack adjustment
1931 would not be executed. This is also the place we notice
1932 differences in the return value ABI. Note that it is ok for one
1933 of the functions to have void return type as long as the return
1934 value of the other is passed in a register. */
1935 a = ix86_function_value (TREE_TYPE (exp), func, false);
1936 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
1937 cfun->decl, false);
1938 if (STACK_REG_P (a) || STACK_REG_P (b))
1940 if (!rtx_equal_p (a, b))
1941 return false;
1943 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1945 else if (!rtx_equal_p (a, b))
1946 return false;
1948 /* If this call is indirect, we'll need to be able to use a call-clobbered
1949 register for the address of the target function. Make sure that all
1950 such registers are not used for passing parameters. */
1951 if (!decl && !TARGET_64BIT)
1953 tree type;
1955 /* We're looking at the CALL_EXPR, we need the type of the function. */
1956 type = TREE_OPERAND (exp, 0); /* pointer expression */
1957 type = TREE_TYPE (type); /* pointer type */
1958 type = TREE_TYPE (type); /* function type */
1960 if (ix86_function_regparm (type, NULL) >= 3)
1962 /* ??? Need to count the actual number of registers to be used,
1963 not the possible number of registers. Fix later. */
1964 return false;
1968 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1969 /* Dllimport'd functions are also called indirectly. */
1970 if (decl && lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl))
1971 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
1972 return false;
1973 #endif
1975 /* Otherwise okay. That also includes certain types of indirect calls. */
1976 return true;
1979 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
1980 calling convention attributes;
1981 arguments as in struct attribute_spec.handler. */
1983 static tree
1984 ix86_handle_cconv_attribute (tree *node, tree name,
1985 tree args,
1986 int flags ATTRIBUTE_UNUSED,
1987 bool *no_add_attrs)
1989 if (TREE_CODE (*node) != FUNCTION_TYPE
1990 && TREE_CODE (*node) != METHOD_TYPE
1991 && TREE_CODE (*node) != FIELD_DECL
1992 && TREE_CODE (*node) != TYPE_DECL)
1994 warning (OPT_Wattributes, "%qs attribute only applies to functions",
1995 IDENTIFIER_POINTER (name));
1996 *no_add_attrs = true;
1997 return NULL_TREE;
2000 /* Can combine regparm with all attributes but fastcall. */
2001 if (is_attribute_p ("regparm", name))
2003 tree cst;
2005 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2007 error ("fastcall and regparm attributes are not compatible");
2010 cst = TREE_VALUE (args);
2011 if (TREE_CODE (cst) != INTEGER_CST)
2013 warning (OPT_Wattributes,
2014 "%qs attribute requires an integer constant argument",
2015 IDENTIFIER_POINTER (name));
2016 *no_add_attrs = true;
2018 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2020 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2021 IDENTIFIER_POINTER (name), REGPARM_MAX);
2022 *no_add_attrs = true;
2025 return NULL_TREE;
2028 if (TARGET_64BIT)
2030 warning (OPT_Wattributes, "%qs attribute ignored",
2031 IDENTIFIER_POINTER (name));
2032 *no_add_attrs = true;
2033 return NULL_TREE;
2036 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2037 if (is_attribute_p ("fastcall", name))
2039 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2041 error ("fastcall and cdecl attributes are not compatible");
2043 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2045 error ("fastcall and stdcall attributes are not compatible");
2047 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2049 error ("fastcall and regparm attributes are not compatible");
2053 /* Can combine stdcall with fastcall (redundant), regparm and
2054 sseregparm. */
2055 else if (is_attribute_p ("stdcall", name))
2057 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2059 error ("stdcall and cdecl attributes are not compatible");
2061 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2063 error ("stdcall and fastcall attributes are not compatible");
2067 /* Can combine cdecl with regparm and sseregparm. */
2068 else if (is_attribute_p ("cdecl", name))
2070 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2072 error ("stdcall and cdecl attributes are not compatible");
2074 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2076 error ("fastcall and cdecl attributes are not compatible");
2080 /* Can combine sseregparm with all attributes. */
2082 return NULL_TREE;
2085 /* Return 0 if the attributes for two types are incompatible, 1 if they
2086 are compatible, and 2 if they are nearly compatible (which causes a
2087 warning to be generated). */
2089 static int
2090 ix86_comp_type_attributes (tree type1, tree type2)
2092 /* Check for mismatch of non-default calling convention. */
2093 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2095 if (TREE_CODE (type1) != FUNCTION_TYPE)
2096 return 1;
2098 /* Check for mismatched fastcall/regparm types. */
2099 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2100 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2101 || (ix86_function_regparm (type1, NULL)
2102 != ix86_function_regparm (type2, NULL)))
2103 return 0;
2105 /* Check for mismatched sseregparm types. */
2106 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2107 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2108 return 0;
2110 /* Check for mismatched return types (cdecl vs stdcall). */
2111 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2112 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2113 return 0;
2115 return 1;
2118 /* Return the regparm value for a function with the indicated TYPE and DECL.
2119 DECL may be NULL when calling function indirectly
2120 or considering a libcall. */
2122 static int
2123 ix86_function_regparm (tree type, tree decl)
2125 tree attr;
2126 int regparm = ix86_regparm;
2127 bool user_convention = false;
2129 if (!TARGET_64BIT)
2131 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2132 if (attr)
2134 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2135 user_convention = true;
2138 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2140 regparm = 2;
2141 user_convention = true;
2144 /* Use register calling convention for local functions when possible. */
2145 if (!TARGET_64BIT && !user_convention && decl
2146 && flag_unit_at_a_time && !profile_flag)
2148 struct cgraph_local_info *i = cgraph_local_info (decl);
2149 if (i && i->local)
2151 int local_regparm, globals = 0, regno;
2153 /* Make sure no regparm register is taken by a global register
2154 variable. */
2155 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2156 if (global_regs[local_regparm])
2157 break;
2158 /* We can't use regparm(3) for nested functions, as these use the
2159 static chain pointer in the third argument. */
2160 if (local_regparm == 3
2161 && DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
2162 local_regparm = 2;
2163 /* Each global register variable increases register pressure,
2164 so the more global register variables there are, the less useful
2165 the regparm optimization is, unless explicitly requested by the user. */
2166 for (regno = 0; regno < 6; regno++)
2167 if (global_regs[regno])
2168 globals++;
2169 local_regparm
2170 = globals < local_regparm ? local_regparm - globals : 0;
2172 if (local_regparm > regparm)
2173 regparm = local_regparm;
2177 return regparm;
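/* For example (a sketch of the logic above): a static, non-nested function
   compiled with -funit-at-a-time and without profiling can be promoted to
   regparm 3 here even with no explicit attribute; global register variables
   in the low registers reduce that number, and nested functions are capped
   at 2 because %ecx carries the static chain. */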
2180 /* Return 1 or 2, if we can pass up to 8 SFmode (1) and DFmode (2) arguments
2181 in SSE registers for a function with the indicated TYPE and DECL.
2182 DECL may be NULL when calling function indirectly
2183 or considering a libcall. Otherwise return 0. */
2185 static int
2186 ix86_function_sseregparm (tree type, tree decl)
2188 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2189 by the sseregparm attribute. */
2190 if (TARGET_SSEREGPARM
2191 || (type
2192 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2194 if (!TARGET_SSE)
2196 if (decl)
2197 error ("Calling %qD with attribute sseregparm without "
2198 "SSE/SSE2 enabled", decl);
2199 else
2200 error ("Calling %qT with attribute sseregparm without "
2201 "SSE/SSE2 enabled", type);
2202 return 0;
2205 return 2;
2208 /* For local functions, pass SFmode (and DFmode for SSE2) arguments
2209 in SSE registers even for 32-bit mode and not just 3, but up to
2210 8 SSE arguments in registers. */
2211 if (!TARGET_64BIT && decl
2212 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2214 struct cgraph_local_info *i = cgraph_local_info (decl);
2215 if (i && i->local)
2216 return TARGET_SSE2 ? 2 : 1;
2219 return 0;
2222 /* Return true if EAX is live at the start of the function. Used by
2223 ix86_expand_prologue to determine if we need special help before
2224 calling allocate_stack_worker. */
2226 static bool
2227 ix86_eax_live_at_start_p (void)
2229 /* Cheat. Don't bother working forward from ix86_function_regparm
2230 to the function type to whether an actual argument is located in
2231 eax. Instead just look at cfg info, which is still close enough
2232 to correct at this point. This gives false positives for broken
2233 functions that might use uninitialized data that happens to be
2234 allocated in eax, but who cares? */
2235 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2238 /* Value is the number of bytes of arguments automatically
2239 popped when returning from a subroutine call.
2240 FUNDECL is the declaration node of the function (as a tree),
2241 FUNTYPE is the data type of the function (as a tree),
2242 or for a library call it is an identifier node for the subroutine name.
2243 SIZE is the number of bytes of arguments passed on the stack.
2245 On the 80386, the RTD insn may be used to pop them if the number
2246 of args is fixed, but if the number is variable then the caller
2247 must pop them all. RTD can't be used for library calls now
2248 because the library is compiled with the Unix compiler.
2249 Use of RTD is a selectable option, since it is incompatible with
2250 standard Unix calling sequences. If the option is not selected,
2251 the caller must always pop the args.
2253 The attribute stdcall is equivalent to RTD on a per module basis. */
2256 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2258 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2260 /* Cdecl functions override -mrtd, and never pop the stack. */
2261 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2263 /* Stdcall and fastcall functions will pop the stack if not
2264 variable args. */
2265 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2266 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2267 rtd = 1;
2269 if (rtd
2270 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2271 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2272 == void_type_node)))
2273 return size;
2276 /* Lose any fake structure return argument if it is passed on the stack. */
2277 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2278 && !TARGET_64BIT
2279 && !KEEP_AGGREGATE_RETURN_POINTER)
2281 int nregs = ix86_function_regparm (funtype, fundecl);
2283 if (!nregs)
2284 return GET_MODE_SIZE (Pmode);
2287 return 0;
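/* For example (hypothetical declaration): for
     void __attribute__((stdcall)) f (int a, int b);
   SIZE is 8 and the callee pops both arguments (the epilogue uses "ret $8"),
   so 8 is returned above; cdecl and varargs functions return 0 and leave the
   popping to the caller. */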
2290 /* Argument support functions. */
2292 /* Return true when register may be used to pass function parameters. */
2293 bool
2294 ix86_function_arg_regno_p (int regno)
2296 int i;
2297 if (!TARGET_64BIT)
2298 return (regno < REGPARM_MAX
2299 || (TARGET_MMX && MMX_REGNO_P (regno)
2300 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2301 || (TARGET_SSE && SSE_REGNO_P (regno)
2302 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2304 if (TARGET_SSE && SSE_REGNO_P (regno)
2305 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2306 return true;
2307 /* RAX is used as hidden argument to va_arg functions. */
2308 if (!regno)
2309 return true;
2310 for (i = 0; i < REGPARM_MAX; i++)
2311 if (regno == x86_64_int_parameter_registers[i])
2312 return true;
2313 return false;
2316 /* Return true if we do not know how to pass TYPE solely in registers. */
2318 static bool
2319 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2321 if (must_pass_in_stack_var_size_or_pad (mode, type))
2322 return true;
2324 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2325 The layout_type routine is crafty and tries to trick us into passing
2326 currently unsupported vector types on the stack by using TImode. */
2327 return (!TARGET_64BIT && mode == TImode
2328 && type && TREE_CODE (type) != VECTOR_TYPE);
2331 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2332 for a call to a function whose data type is FNTYPE.
2333 For a library call, FNTYPE is 0. */
2335 void
2336 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2337 tree fntype, /* tree ptr for function decl */
2338 rtx libname, /* SYMBOL_REF of library name or 0 */
2339 tree fndecl)
2341 static CUMULATIVE_ARGS zero_cum;
2342 tree param, next_param;
2344 if (TARGET_DEBUG_ARG)
2346 fprintf (stderr, "\ninit_cumulative_args (");
2347 if (fntype)
2348 fprintf (stderr, "fntype code = %s, ret code = %s",
2349 tree_code_name[(int) TREE_CODE (fntype)],
2350 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2351 else
2352 fprintf (stderr, "no fntype");
2354 if (libname)
2355 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2358 *cum = zero_cum;
2360 /* Set up the number of registers to use for passing arguments. */
2361 cum->nregs = ix86_regparm;
2362 if (TARGET_SSE)
2363 cum->sse_nregs = SSE_REGPARM_MAX;
2364 if (TARGET_MMX)
2365 cum->mmx_nregs = MMX_REGPARM_MAX;
2366 cum->warn_sse = true;
2367 cum->warn_mmx = true;
2368 cum->maybe_vaarg = false;
2370 /* Use ecx and edx registers if function has fastcall attribute,
2371 else look for regparm information. */
2372 if (fntype && !TARGET_64BIT)
2374 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2376 cum->nregs = 2;
2377 cum->fastcall = 1;
2379 else
2380 cum->nregs = ix86_function_regparm (fntype, fndecl);
2383 /* Set up the number of SSE registers used for passing SFmode
2384 and DFmode arguments. Warn for mismatching ABI. */
2385 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2387 /* Determine if this function has variable arguments. This is
2388 indicated by the last argument being 'void_type_node' if there
2389 are no variable arguments. If there are variable arguments, then
2390 we won't pass anything in registers in 32-bit mode. */
2392 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2394 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2395 param != 0; param = next_param)
2397 next_param = TREE_CHAIN (param);
2398 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2400 if (!TARGET_64BIT)
2402 cum->nregs = 0;
2403 cum->sse_nregs = 0;
2404 cum->mmx_nregs = 0;
2405 cum->warn_sse = 0;
2406 cum->warn_mmx = 0;
2407 cum->fastcall = 0;
2408 cum->float_in_sse = 0;
2410 cum->maybe_vaarg = true;
2414 if ((!fntype && !libname)
2415 || (fntype && !TYPE_ARG_TYPES (fntype)))
2416 cum->maybe_vaarg = true;
2418 if (TARGET_DEBUG_ARG)
2419 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2421 return;
2424 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2425 But in the case of vector types, it is some vector mode.
2427 When we have only some of our vector isa extensions enabled, then there
2428 are some modes for which vector_mode_supported_p is false. For these
2429 modes, the generic vector support in gcc will choose some non-vector mode
2430 in order to implement the type. By computing the natural mode, we'll
2431 select the proper ABI location for the operand and not depend on whatever
2432 the middle-end decides to do with these vector types. */
2434 static enum machine_mode
2435 type_natural_mode (tree type)
2437 enum machine_mode mode = TYPE_MODE (type);
2439 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2441 HOST_WIDE_INT size = int_size_in_bytes (type);
2442 if ((size == 8 || size == 16)
2443 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2444 && TYPE_VECTOR_SUBPARTS (type) > 1)
2446 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2448 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2449 mode = MIN_MODE_VECTOR_FLOAT;
2450 else
2451 mode = MIN_MODE_VECTOR_INT;
2453 /* Get the mode which has this inner mode and number of units. */
2454 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2455 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2456 && GET_MODE_INNER (mode) == innermode)
2457 return mode;
2459 gcc_unreachable ();
2463 return mode;
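/* For example (a sketch): given
     typedef float v4sf __attribute__((vector_size (16)));
   the middle end may have chosen a non-vector TYPE_MODE when SSE is disabled,
   but the loop above still finds V4SFmode (four SFmode units), so the ABI
   slot chosen for such an argument does not depend on which vector
   extensions happen to be enabled. */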
2466 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2467 this may not agree with the mode that the type system has chosen for the
2468 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2469 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2471 static rtx
2472 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2473 unsigned int regno)
2475 rtx tmp;
2477 if (orig_mode != BLKmode)
2478 tmp = gen_rtx_REG (orig_mode, regno);
2479 else
2481 tmp = gen_rtx_REG (mode, regno);
2482 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2483 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2486 return tmp;
2489 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
2490 of this code is to classify each 8-byte chunk of the incoming argument by register
2491 class and assign registers accordingly. */
2493 /* Return the union class of CLASS1 and CLASS2.
2494 See the x86-64 PS ABI for details. */
2496 static enum x86_64_reg_class
2497 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2499 /* Rule #1: If both classes are equal, this is the resulting class. */
2500 if (class1 == class2)
2501 return class1;
2503 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2504 the other class. */
2505 if (class1 == X86_64_NO_CLASS)
2506 return class2;
2507 if (class2 == X86_64_NO_CLASS)
2508 return class1;
2510 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2511 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2512 return X86_64_MEMORY_CLASS;
2514 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2515 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2516 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2517 return X86_64_INTEGERSI_CLASS;
2518 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2519 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2520 return X86_64_INTEGER_CLASS;
2522 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2523 MEMORY is used. */
2524 if (class1 == X86_64_X87_CLASS
2525 || class1 == X86_64_X87UP_CLASS
2526 || class1 == X86_64_COMPLEX_X87_CLASS
2527 || class2 == X86_64_X87_CLASS
2528 || class2 == X86_64_X87UP_CLASS
2529 || class2 == X86_64_COMPLEX_X87_CLASS)
2530 return X86_64_MEMORY_CLASS;
2532 /* Rule #6: Otherwise class SSE is used. */
2533 return X86_64_SSE_CLASS;
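/* For example, merge_classes (X86_64_INTEGER_CLASS, X86_64_SSESF_CLASS)
   yields X86_64_INTEGER_CLASS by rule #4, and merging X86_64_X87_CLASS with
   X86_64_SSE_CLASS yields X86_64_MEMORY_CLASS by rule #5. */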
2536 /* Classify the argument of type TYPE and mode MODE.
2537 CLASSES will be filled by the register class used to pass each word
2538 of the operand. The number of words is returned. In case the parameter
2539 should be passed in memory, 0 is returned. As a special case for zero
2540 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2542 BIT_OFFSET is used internally for handling records and specifies the
2543 offset in bits modulo 256 to avoid overflow cases.
2545 See the x86-64 PS ABI for details.
2548 static int
2549 classify_argument (enum machine_mode mode, tree type,
2550 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2552 HOST_WIDE_INT bytes =
2553 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2554 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2556 /* Variable sized entities are always passed/returned in memory. */
2557 if (bytes < 0)
2558 return 0;
2560 if (mode != VOIDmode
2561 && targetm.calls.must_pass_in_stack (mode, type))
2562 return 0;
2564 if (type && AGGREGATE_TYPE_P (type))
2566 int i;
2567 tree field;
2568 enum x86_64_reg_class subclasses[MAX_CLASSES];
2570 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2571 if (bytes > 16)
2572 return 0;
2574 for (i = 0; i < words; i++)
2575 classes[i] = X86_64_NO_CLASS;
2577 /* Zero-sized arrays or structures are NO_CLASS. We return 0 to
2578 signal the memory class, so handle this as a special case. */
2579 if (!words)
2581 classes[0] = X86_64_NO_CLASS;
2582 return 1;
2585 /* Classify each field of record and merge classes. */
2586 switch (TREE_CODE (type))
2588 case RECORD_TYPE:
2589 /* For C++ classes, first merge in the fields of the base classes. */
2590 if (TYPE_BINFO (type))
2592 tree binfo, base_binfo;
2593 int basenum;
2595 for (binfo = TYPE_BINFO (type), basenum = 0;
2596 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2598 int num;
2599 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2600 tree type = BINFO_TYPE (base_binfo);
2602 num = classify_argument (TYPE_MODE (type),
2603 type, subclasses,
2604 (offset + bit_offset) % 256);
2605 if (!num)
2606 return 0;
2607 for (i = 0; i < num; i++)
2609 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2610 classes[i + pos] =
2611 merge_classes (subclasses[i], classes[i + pos]);
2615 /* And now merge the fields of the structure. */
2616 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2618 if (TREE_CODE (field) == FIELD_DECL)
2620 int num;
2622 /* Bitfields are always classified as integer. Handle them
2623 early, since later code would consider them to be
2624 misaligned integers. */
2625 if (DECL_BIT_FIELD (field))
2627 for (i = int_bit_position (field) / 8 / 8;
2628 i < (int_bit_position (field)
2629 + tree_low_cst (DECL_SIZE (field), 0)
2630 + 63) / 8 / 8; i++)
2631 classes[i] =
2632 merge_classes (X86_64_INTEGER_CLASS,
2633 classes[i]);
2635 else
2637 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2638 TREE_TYPE (field), subclasses,
2639 (int_bit_position (field)
2640 + bit_offset) % 256);
2641 if (!num)
2642 return 0;
2643 for (i = 0; i < num; i++)
2645 int pos =
2646 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2647 classes[i + pos] =
2648 merge_classes (subclasses[i], classes[i + pos]);
2653 break;
2655 case ARRAY_TYPE:
2656 /* Arrays are handled as small records. */
2658 int num;
2659 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2660 TREE_TYPE (type), subclasses, bit_offset);
2661 if (!num)
2662 return 0;
2664 /* The partial classes are now full classes. */
2665 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2666 subclasses[0] = X86_64_SSE_CLASS;
2667 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2668 subclasses[0] = X86_64_INTEGER_CLASS;
2670 for (i = 0; i < words; i++)
2671 classes[i] = subclasses[i % num];
2673 break;
2675 case UNION_TYPE:
2676 case QUAL_UNION_TYPE:
2677 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2680 /* Unions are not derived. */
2681 gcc_assert (!TYPE_BINFO (type)
2682 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2683 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2685 if (TREE_CODE (field) == FIELD_DECL)
2687 int num;
2688 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2689 TREE_TYPE (field), subclasses,
2690 bit_offset);
2691 if (!num)
2692 return 0;
2693 for (i = 0; i < num; i++)
2694 classes[i] = merge_classes (subclasses[i], classes[i]);
2697 break;
2699 default:
2700 gcc_unreachable ();
2703 /* Final merger cleanup. */
2704 for (i = 0; i < words; i++)
2706 /* If one class is MEMORY, everything should be passed in
2707 memory. */
2708 if (classes[i] == X86_64_MEMORY_CLASS)
2709 return 0;
2711 /* The X86_64_SSEUP_CLASS should be always preceded by
2712 X86_64_SSE_CLASS. */
2713 if (classes[i] == X86_64_SSEUP_CLASS
2714 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2715 classes[i] = X86_64_SSE_CLASS;
2717 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2718 if (classes[i] == X86_64_X87UP_CLASS
2719 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2720 classes[i] = X86_64_SSE_CLASS;
2722 return words;
2725 /* Compute the alignment needed. We align all types to their natural boundaries,
2726 with the exception of XFmode, which is aligned to 64 bits. */
2727 if (mode != VOIDmode && mode != BLKmode)
2729 int mode_alignment = GET_MODE_BITSIZE (mode);
2731 if (mode == XFmode)
2732 mode_alignment = 128;
2733 else if (mode == XCmode)
2734 mode_alignment = 256;
2735 if (COMPLEX_MODE_P (mode))
2736 mode_alignment /= 2;
2737 /* Misaligned fields are always returned in memory. */
2738 if (bit_offset % mode_alignment)
2739 return 0;
2742 /* for V1xx modes, just use the base mode */
2743 if (VECTOR_MODE_P (mode)
2744 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2745 mode = GET_MODE_INNER (mode);
2747 /* Classification of atomic types. */
2748 switch (mode)
2750 case DImode:
2751 case SImode:
2752 case HImode:
2753 case QImode:
2754 case CSImode:
2755 case CHImode:
2756 case CQImode:
2757 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2758 classes[0] = X86_64_INTEGERSI_CLASS;
2759 else
2760 classes[0] = X86_64_INTEGER_CLASS;
2761 return 1;
2762 case CDImode:
2763 case TImode:
2764 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2765 return 2;
2766 case CTImode:
2767 return 0;
2768 case SFmode:
2769 if (!(bit_offset % 64))
2770 classes[0] = X86_64_SSESF_CLASS;
2771 else
2772 classes[0] = X86_64_SSE_CLASS;
2773 return 1;
2774 case DFmode:
2775 classes[0] = X86_64_SSEDF_CLASS;
2776 return 1;
2777 case XFmode:
2778 classes[0] = X86_64_X87_CLASS;
2779 classes[1] = X86_64_X87UP_CLASS;
2780 return 2;
2781 case TFmode:
2782 classes[0] = X86_64_SSE_CLASS;
2783 classes[1] = X86_64_SSEUP_CLASS;
2784 return 2;
2785 case SCmode:
2786 classes[0] = X86_64_SSE_CLASS;
2787 return 1;
2788 case DCmode:
2789 classes[0] = X86_64_SSEDF_CLASS;
2790 classes[1] = X86_64_SSEDF_CLASS;
2791 return 2;
2792 case XCmode:
2793 classes[0] = X86_64_COMPLEX_X87_CLASS;
2794 return 1;
2795 case TCmode:
2796 /* This mode is larger than 16 bytes. */
2797 return 0;
2798 case V4SFmode:
2799 case V4SImode:
2800 case V16QImode:
2801 case V8HImode:
2802 case V2DFmode:
2803 case V2DImode:
2804 classes[0] = X86_64_SSE_CLASS;
2805 classes[1] = X86_64_SSEUP_CLASS;
2806 return 2;
2807 case V2SFmode:
2808 case V2SImode:
2809 case V4HImode:
2810 case V8QImode:
2811 classes[0] = X86_64_SSE_CLASS;
2812 return 1;
2813 case BLKmode:
2814 case VOIDmode:
2815 return 0;
2816 default:
2817 gcc_assert (VECTOR_MODE_P (mode));
2819 if (bytes > 16)
2820 return 0;
2822 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2824 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2825 classes[0] = X86_64_INTEGERSI_CLASS;
2826 else
2827 classes[0] = X86_64_INTEGER_CLASS;
2828 classes[1] = X86_64_INTEGER_CLASS;
2829 return 1 + (bytes > 8);
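/* Worked example of the classification above (a sketch, "s" hypothetical):
     struct s { double d; int i; };
   The first eightbyte merges to X86_64_SSEDF_CLASS and the second to
   X86_64_INTEGER_CLASS, so 2 is returned and the struct travels in one SSE
   register plus one integer register. */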
2833 /* Examine the argument and set the number of registers required in each
2834 class. Return 0 iff the parameter should be passed in memory. */
2835 static int
2836 examine_argument (enum machine_mode mode, tree type, int in_return,
2837 int *int_nregs, int *sse_nregs)
2839 enum x86_64_reg_class class[MAX_CLASSES];
2840 int n = classify_argument (mode, type, class, 0);
2842 *int_nregs = 0;
2843 *sse_nregs = 0;
2844 if (!n)
2845 return 0;
2846 for (n--; n >= 0; n--)
2847 switch (class[n])
2849 case X86_64_INTEGER_CLASS:
2850 case X86_64_INTEGERSI_CLASS:
2851 (*int_nregs)++;
2852 break;
2853 case X86_64_SSE_CLASS:
2854 case X86_64_SSESF_CLASS:
2855 case X86_64_SSEDF_CLASS:
2856 (*sse_nregs)++;
2857 break;
2858 case X86_64_NO_CLASS:
2859 case X86_64_SSEUP_CLASS:
2860 break;
2861 case X86_64_X87_CLASS:
2862 case X86_64_X87UP_CLASS:
2863 if (!in_return)
2864 return 0;
2865 break;
2866 case X86_64_COMPLEX_X87_CLASS:
2867 return in_return ? 2 : 0;
2868 case X86_64_MEMORY_CLASS:
2869 gcc_unreachable ();
2871 return 1;
2874 /* Construct container for the argument used by GCC interface. See
2875 FUNCTION_ARG for the detailed description. */
2877 static rtx
2878 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2879 tree type, int in_return, int nintregs, int nsseregs,
2880 const int *intreg, int sse_regno)
2882 enum machine_mode tmpmode;
2883 int bytes =
2884 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2885 enum x86_64_reg_class class[MAX_CLASSES];
2886 int n;
2887 int i;
2888 int nexps = 0;
2889 int needed_sseregs, needed_intregs;
2890 rtx exp[MAX_CLASSES];
2891 rtx ret;
2893 n = classify_argument (mode, type, class, 0);
2894 if (TARGET_DEBUG_ARG)
2896 if (!n)
2897 fprintf (stderr, "Memory class\n");
2898 else
2900 fprintf (stderr, "Classes:");
2901 for (i = 0; i < n; i++)
2903 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2905 fprintf (stderr, "\n");
2908 if (!n)
2909 return NULL;
2910 if (!examine_argument (mode, type, in_return, &needed_intregs,
2911 &needed_sseregs))
2912 return NULL;
2913 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2914 return NULL;
2916 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2917 some less clueful developer tries to use floating-point anyway. */
2918 if (needed_sseregs && !TARGET_SSE)
2920 static bool issued_error;
2921 if (!issued_error)
2923 issued_error = true;
2924 if (in_return)
2925 error ("SSE register return with SSE disabled");
2926 else
2927 error ("SSE register argument with SSE disabled");
2929 return NULL;
2932 /* First construct simple cases. Avoid SCmode, since we want to use a
2933 single register to pass this type. */
2934 if (n == 1 && mode != SCmode)
2935 switch (class[0])
2937 case X86_64_INTEGER_CLASS:
2938 case X86_64_INTEGERSI_CLASS:
2939 return gen_rtx_REG (mode, intreg[0]);
2940 case X86_64_SSE_CLASS:
2941 case X86_64_SSESF_CLASS:
2942 case X86_64_SSEDF_CLASS:
2943 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2944 case X86_64_X87_CLASS:
2945 case X86_64_COMPLEX_X87_CLASS:
2946 return gen_rtx_REG (mode, FIRST_STACK_REG);
2947 case X86_64_NO_CLASS:
2948 /* Zero sized array, struct or class. */
2949 return NULL;
2950 default:
2951 gcc_unreachable ();
2953 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2954 && mode != BLKmode)
2955 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2956 if (n == 2
2957 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2958 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2959 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2960 && class[1] == X86_64_INTEGER_CLASS
2961 && (mode == CDImode || mode == TImode || mode == TFmode)
2962 && intreg[0] + 1 == intreg[1])
2963 return gen_rtx_REG (mode, intreg[0]);
2965 /* Otherwise figure out the entries of the PARALLEL. */
2966 for (i = 0; i < n; i++)
2968 switch (class[i])
2970 case X86_64_NO_CLASS:
2971 break;
2972 case X86_64_INTEGER_CLASS:
2973 case X86_64_INTEGERSI_CLASS:
2974 /* Merge TImodes on aligned occasions here too. */
2975 if (i * 8 + 8 > bytes)
2976 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2977 else if (class[i] == X86_64_INTEGERSI_CLASS)
2978 tmpmode = SImode;
2979 else
2980 tmpmode = DImode;
2981 /* We've requested 24 bytes for which we don't have a mode. Use DImode. */
2982 if (tmpmode == BLKmode)
2983 tmpmode = DImode;
2984 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2985 gen_rtx_REG (tmpmode, *intreg),
2986 GEN_INT (i*8));
2987 intreg++;
2988 break;
2989 case X86_64_SSESF_CLASS:
2990 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2991 gen_rtx_REG (SFmode,
2992 SSE_REGNO (sse_regno)),
2993 GEN_INT (i*8));
2994 sse_regno++;
2995 break;
2996 case X86_64_SSEDF_CLASS:
2997 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2998 gen_rtx_REG (DFmode,
2999 SSE_REGNO (sse_regno)),
3000 GEN_INT (i*8));
3001 sse_regno++;
3002 break;
3003 case X86_64_SSE_CLASS:
3004 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3005 tmpmode = TImode;
3006 else
3007 tmpmode = DImode;
3008 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3009 gen_rtx_REG (tmpmode,
3010 SSE_REGNO (sse_regno)),
3011 GEN_INT (i*8));
3012 if (tmpmode == TImode)
3013 i++;
3014 sse_regno++;
3015 break;
3016 default:
3017 gcc_unreachable ();
3021 /* Empty aligned struct, union or class. */
3022 if (nexps == 0)
3023 return NULL;
3025 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3026 for (i = 0; i < nexps; i++)
3027 XVECEXP (ret, 0, i) = exp [i];
3028 return ret;
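/* Continuing the classify_argument example (a sketch): for the hypothetical
     struct s { double d; int i; };
   the loop above builds a PARALLEL whose first element is a DFmode SSE
   register at byte offset 0 and whose second is a DImode integer register at
   byte offset 8. */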
3031 /* Update the data in CUM to advance over an argument
3032 of mode MODE and data type TYPE.
3033 (TYPE is null for libcalls where that information may not be available.) */
3035 void
3036 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3037 tree type, int named)
3039 int bytes =
3040 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3041 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3043 if (type)
3044 mode = type_natural_mode (type);
3046 if (TARGET_DEBUG_ARG)
3047 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3048 "mode=%s, named=%d)\n\n",
3049 words, cum->words, cum->nregs, cum->sse_nregs,
3050 GET_MODE_NAME (mode), named);
3052 if (TARGET_64BIT)
3054 int int_nregs, sse_nregs;
3055 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3056 cum->words += words;
3057 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3059 cum->nregs -= int_nregs;
3060 cum->sse_nregs -= sse_nregs;
3061 cum->regno += int_nregs;
3062 cum->sse_regno += sse_nregs;
3064 else
3065 cum->words += words;
3067 else
3069 switch (mode)
3071 default:
3072 break;
3074 case BLKmode:
3075 if (bytes < 0)
3076 break;
3077 /* FALLTHRU */
3079 case DImode:
3080 case SImode:
3081 case HImode:
3082 case QImode:
3083 cum->words += words;
3084 cum->nregs -= words;
3085 cum->regno += words;
3087 if (cum->nregs <= 0)
3089 cum->nregs = 0;
3090 cum->regno = 0;
3092 break;
3094 case DFmode:
3095 if (cum->float_in_sse < 2)
3096 break;
3097 case SFmode:
3098 if (cum->float_in_sse < 1)
3099 break;
3100 /* FALLTHRU */
3102 case TImode:
3103 case V16QImode:
3104 case V8HImode:
3105 case V4SImode:
3106 case V2DImode:
3107 case V4SFmode:
3108 case V2DFmode:
3109 if (!type || !AGGREGATE_TYPE_P (type))
3111 cum->sse_words += words;
3112 cum->sse_nregs -= 1;
3113 cum->sse_regno += 1;
3114 if (cum->sse_nregs <= 0)
3116 cum->sse_nregs = 0;
3117 cum->sse_regno = 0;
3120 break;
3122 case V8QImode:
3123 case V4HImode:
3124 case V2SImode:
3125 case V2SFmode:
3126 if (!type || !AGGREGATE_TYPE_P (type))
3128 cum->mmx_words += words;
3129 cum->mmx_nregs -= 1;
3130 cum->mmx_regno += 1;
3131 if (cum->mmx_nregs <= 0)
3133 cum->mmx_nregs = 0;
3134 cum->mmx_regno = 0;
3137 break;
3142 /* Define where to put the arguments to a function.
3143 Value is zero to push the argument on the stack,
3144 or a hard register in which to store the argument.
3146 MODE is the argument's machine mode.
3147 TYPE is the data type of the argument (as a tree).
3148 This is null for libcalls where that information may
3149 not be available.
3150 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3151 the preceding args and about the function being called.
3152 NAMED is nonzero if this argument is a named parameter
3153 (otherwise it is an extra parameter matching an ellipsis). */
3156 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3157 tree type, int named)
3159 enum machine_mode mode = orig_mode;
3160 rtx ret = NULL_RTX;
3161 int bytes =
3162 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3163 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3164 static bool warnedsse, warnedmmx;
3166 /* To simplify the code below, represent vector types with a vector mode
3167 even if MMX/SSE are not active. */
3168 if (type && TREE_CODE (type) == VECTOR_TYPE)
3169 mode = type_natural_mode (type);
3171 /* Handle a hidden AL argument containing the number of SSE registers used by
3172 varargs x86-64 functions. For the i386 ABI just return constm1_rtx to avoid
3173 any AL settings. */
3174 if (mode == VOIDmode)
3176 if (TARGET_64BIT)
3177 return GEN_INT (cum->maybe_vaarg
3178 ? (cum->sse_nregs < 0
3179 ? SSE_REGPARM_MAX
3180 : cum->sse_regno)
3181 : -1);
3182 else
3183 return constm1_rtx;
3185 if (TARGET_64BIT)
3186 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3187 cum->sse_nregs,
3188 &x86_64_int_parameter_registers [cum->regno],
3189 cum->sse_regno);
3190 else
3191 switch (mode)
3193 /* For now, pass fp/complex values on the stack. */
3194 default:
3195 break;
3197 case BLKmode:
3198 if (bytes < 0)
3199 break;
3200 /* FALLTHRU */
3201 case DImode:
3202 case SImode:
3203 case HImode:
3204 case QImode:
3205 if (words <= cum->nregs)
3207 int regno = cum->regno;
3209 /* Fastcall allocates the first two DWORD (SImode) or
3210 smaller arguments to ECX and EDX. */
3211 if (cum->fastcall)
3213 if (mode == BLKmode || mode == DImode)
3214 break;
3216 /* ECX, not EAX, is the first allocated register. */
3217 if (regno == 0)
3218 regno = 2;
3220 ret = gen_rtx_REG (mode, regno);
3222 break;
3223 case DFmode:
3224 if (cum->float_in_sse < 2)
3225 break;
3226 case SFmode:
3227 if (cum->float_in_sse < 1)
3228 break;
3229 /* FALLTHRU */
3230 case TImode:
3231 case V16QImode:
3232 case V8HImode:
3233 case V4SImode:
3234 case V2DImode:
3235 case V4SFmode:
3236 case V2DFmode:
3237 if (!type || !AGGREGATE_TYPE_P (type))
3239 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3241 warnedsse = true;
3242 warning (0, "SSE vector argument without SSE enabled "
3243 "changes the ABI");
3245 if (cum->sse_nregs)
3246 ret = gen_reg_or_parallel (mode, orig_mode,
3247 cum->sse_regno + FIRST_SSE_REG);
3249 break;
3250 case V8QImode:
3251 case V4HImode:
3252 case V2SImode:
3253 case V2SFmode:
3254 if (!type || !AGGREGATE_TYPE_P (type))
3256 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3258 warnedmmx = true;
3259 warning (0, "MMX vector argument without MMX enabled "
3260 "changes the ABI");
3262 if (cum->mmx_nregs)
3263 ret = gen_reg_or_parallel (mode, orig_mode,
3264 cum->mmx_regno + FIRST_MMX_REG);
3266 break;
3269 if (TARGET_DEBUG_ARG)
3271 fprintf (stderr,
3272 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3273 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3275 if (ret)
3276 print_simple_rtl (stderr, ret);
3277 else
3278 fprintf (stderr, ", stack");
3280 fprintf (stderr, " )\n");
3283 return ret;
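/* For example (hypothetical user code): for
     void __attribute__((fastcall)) f (int a, int b);
   the two SImode arguments are handed back here as %ecx and %edx
   respectively, while a DImode or BLKmode argument falls through to the
   stack as handled in the fastcall check above. In 64-bit mode the work is
   done by construct_container instead. */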
3286 /* A C expression that indicates when an argument must be passed by
3287 reference. If nonzero for an argument, a copy of that argument is
3288 made in memory and a pointer to the argument is passed instead of
3289 the argument itself. The pointer is passed in whatever way is
3290 appropriate for passing a pointer to that type. */
3292 static bool
3293 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3294 enum machine_mode mode ATTRIBUTE_UNUSED,
3295 tree type, bool named ATTRIBUTE_UNUSED)
3297 if (!TARGET_64BIT)
3298 return 0;
3300 if (type && int_size_in_bytes (type) == -1)
3302 if (TARGET_DEBUG_ARG)
3303 fprintf (stderr, "function_arg_pass_by_reference\n");
3304 return 1;
3307 return 0;
3310 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3311 ABI. Only called if TARGET_SSE. */
3312 static bool
3313 contains_128bit_aligned_vector_p (tree type)
3315 enum machine_mode mode = TYPE_MODE (type);
3316 if (SSE_REG_MODE_P (mode)
3317 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3318 return true;
3319 if (TYPE_ALIGN (type) < 128)
3320 return false;
3322 if (AGGREGATE_TYPE_P (type))
3324 /* Walk the aggregates recursively. */
3325 switch (TREE_CODE (type))
3327 case RECORD_TYPE:
3328 case UNION_TYPE:
3329 case QUAL_UNION_TYPE:
3331 tree field;
3333 if (TYPE_BINFO (type))
3335 tree binfo, base_binfo;
3336 int i;
3338 for (binfo = TYPE_BINFO (type), i = 0;
3339 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3340 if (contains_128bit_aligned_vector_p
3341 (BINFO_TYPE (base_binfo)))
3342 return true;
3344 /* And now merge the fields of structure. */
3345 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3347 if (TREE_CODE (field) == FIELD_DECL
3348 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3349 return true;
3351 break;
3354 case ARRAY_TYPE:
3355 /* Just in case some languages pass arrays by value. */
3356 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3357 return true;
3358 break;
3360 default:
3361 gcc_unreachable ();
3364 return false;
3367 /* Gives the alignment boundary, in bits, of an argument with the
3368 specified mode and type. */
3371 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3373 int align;
3374 if (type)
3375 align = TYPE_ALIGN (type);
3376 else
3377 align = GET_MODE_ALIGNMENT (mode);
3378 if (align < PARM_BOUNDARY)
3379 align = PARM_BOUNDARY;
3380 if (!TARGET_64BIT)
3382 /* The i386 ABI defines all arguments to be 4-byte aligned. We have to
3383 make an exception for SSE modes since these require 128-bit
3384 alignment.
3386 The handling here differs from field_alignment. ICC aligns MMX
3387 arguments to 4 byte boundaries, while structure fields are aligned
3388 to 8 byte boundaries. */
3389 if (!TARGET_SSE)
3390 align = PARM_BOUNDARY;
3391 else if (!type)
3393 if (!SSE_REG_MODE_P (mode))
3394 align = PARM_BOUNDARY;
3396 else
3398 if (!contains_128bit_aligned_vector_p (type))
3399 align = PARM_BOUNDARY;
3402 if (align > 128)
3403 align = 128;
3404 return align;
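/* For example, in 32-bit mode a plain int argument gets PARM_BOUNDARY
   (32-bit) alignment here, while a 16-byte SSE vector such as __m128 keeps
   its 128-bit alignment provided SSE is enabled; the result is capped at
   128 bits in any case. */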
3407 /* Return true if N is a possible register number of function value. */
3408 bool
3409 ix86_function_value_regno_p (int regno)
3411 if (regno == 0
3412 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3413 || (regno == FIRST_SSE_REG && TARGET_SSE))
3414 return true;
3416 if (!TARGET_64BIT
3417 && (regno == FIRST_MMX_REG && TARGET_MMX))
3418 return true;
3420 return false;
3423 /* Define how to find the value returned by a function.
3424 VALTYPE is the data type of the value (as a tree).
3425 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3426 otherwise, FUNC is 0. */
3428 ix86_function_value (tree valtype, tree fntype_or_decl,
3429 bool outgoing ATTRIBUTE_UNUSED)
3431 enum machine_mode natmode = type_natural_mode (valtype);
3433 if (TARGET_64BIT)
3435 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3436 1, REGPARM_MAX, SSE_REGPARM_MAX,
3437 x86_64_int_return_registers, 0);
3438 /* For zero-sized structures, construct_container returns NULL, but we
3439 need to keep the rest of the compiler happy by returning a meaningful value. */
3440 if (!ret)
3441 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3442 return ret;
3444 else
3446 tree fn = NULL_TREE, fntype;
3447 if (fntype_or_decl
3448 && DECL_P (fntype_or_decl))
3449 fn = fntype_or_decl;
3450 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
3451 return gen_rtx_REG (TYPE_MODE (valtype),
3452 ix86_value_regno (natmode, fn, fntype));
3456 /* Return true iff TYPE is returned in memory. */
3458 ix86_return_in_memory (tree type)
3460 int needed_intregs, needed_sseregs, size;
3461 enum machine_mode mode = type_natural_mode (type);
3463 if (TARGET_64BIT)
3464 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3466 if (mode == BLKmode)
3467 return 1;
3469 size = int_size_in_bytes (type);
3471 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3472 return 0;
3474 if (VECTOR_MODE_P (mode) || mode == TImode)
3476 /* User-created vectors small enough to fit in EAX. */
3477 if (size < 8)
3478 return 0;
3480 /* MMX/3dNow values are returned in MM0,
3481 except when it doesn't exist. */
3482 if (size == 8)
3483 return (TARGET_MMX ? 0 : 1);
3485 /* SSE values are returned in XMM0, except when it doesn't exist. */
3486 if (size == 16)
3487 return (TARGET_SSE ? 0 : 1);
3490 if (mode == XFmode)
3491 return 0;
3493 if (size > 12)
3494 return 1;
3495 return 0;
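   /* To summarize the ia32 cases above: BLKmode values and anything larger
      than 12 bytes go to memory; aggregates of at most 8 bytes stay in
      registers when MS_AGGREGATE_RETURN is set; 8-byte vectors need MMX and
      16-byte vectors need SSE to be returned in registers; XFmode is never
      returned in memory.  */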
3498 /* When returning SSE vector types, we have a choice of either
3499 (1) being abi incompatible with a -march switch, or
3500 (2) generating an error.
3501 Given no good solution, I think the safest thing is one warning.
3502 The user won't be able to use -Werror, but....
3504 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3505 called in response to actually generating a caller or callee that
3506 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3507 via aggregate_value_p for general type probing from tree-ssa. */
3509 static rtx
3510 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3512 static bool warnedsse, warnedmmx;
3514 if (type)
3516 /* Look at the return type of the function, not the function type. */
3517 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3519 if (!TARGET_SSE && !warnedsse)
3521 if (mode == TImode
3522 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3524 warnedsse = true;
3525 warning (0, "SSE vector return without SSE enabled "
3526 "changes the ABI");
3530 if (!TARGET_MMX && !warnedmmx)
3532 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3534 warnedmmx = true;
3535 warning (0, "MMX vector return without MMX enabled "
3536 "changes the ABI");
3541 return NULL;
3544 /* Define how to find the value returned by a library function
3545 assuming the value has mode MODE. */
3547 ix86_libcall_value (enum machine_mode mode)
3549 if (TARGET_64BIT)
3551 switch (mode)
3553 case SFmode:
3554 case SCmode:
3555 case DFmode:
3556 case DCmode:
3557 case TFmode:
3558 return gen_rtx_REG (mode, FIRST_SSE_REG);
3559 case XFmode:
3560 case XCmode:
3561 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3562 case TCmode:
3563 return NULL;
3564 default:
3565 return gen_rtx_REG (mode, 0);
3568 else
3569 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
3572 /* Given a mode, return the register to use for a return value. */
3574 static int
3575 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
3577 gcc_assert (!TARGET_64BIT);
3579 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3580 we prevent this case when mmx is not available. */
3581 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3582 return FIRST_MMX_REG;
3584 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3585 we prevent this case when sse is not available. */
3586 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3587 return FIRST_SSE_REG;
3589 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3590 if (GET_MODE_CLASS (mode) != MODE_FLOAT || !TARGET_FLOAT_RETURNS_IN_80387)
3591 return 0;
3593 /* Floating point return values in %st(0), except for local functions when
3594 SSE math is enabled or for functions with sseregparm attribute. */
3595 if ((func || fntype)
3596 && (mode == SFmode || mode == DFmode))
3598 int sse_level = ix86_function_sseregparm (fntype, func);
3599 if ((sse_level >= 1 && mode == SFmode)
3600 || (sse_level == 2 && mode == DFmode))
3601 return FIRST_SSE_REG;
3604 return FIRST_FLOAT_REG;
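   /* A sketch of the common ia32 cases handled above: 8-byte vectors come
      back in %mm0, 16-byte vectors in %xmm0, integers in %eax, and floating
      point values in %st(0) unless SSE math or the sseregparm attribute
      routes SFmode/DFmode values to %xmm0.  */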
3607 /* Create the va_list data type. */
3609 static tree
3610 ix86_build_builtin_va_list (void)
3612 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3614 /* For i386 we use a plain pointer to the argument area. */
3615 if (!TARGET_64BIT)
3616 return build_pointer_type (char_type_node);
3618 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3619 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3621 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3622 unsigned_type_node);
3623 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3624 unsigned_type_node);
3625 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3626 ptr_type_node);
3627 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3628 ptr_type_node);
3630 va_list_gpr_counter_field = f_gpr;
3631 va_list_fpr_counter_field = f_fpr;
3633 DECL_FIELD_CONTEXT (f_gpr) = record;
3634 DECL_FIELD_CONTEXT (f_fpr) = record;
3635 DECL_FIELD_CONTEXT (f_ovf) = record;
3636 DECL_FIELD_CONTEXT (f_sav) = record;
3638 TREE_CHAIN (record) = type_decl;
3639 TYPE_NAME (record) = type_decl;
3640 TYPE_FIELDS (record) = f_gpr;
3641 TREE_CHAIN (f_gpr) = f_fpr;
3642 TREE_CHAIN (f_fpr) = f_ovf;
3643 TREE_CHAIN (f_ovf) = f_sav;
3645 layout_type (record);
3647 /* The correct type is an array type of one element. */
3648 return build_array_type (record, build_index_type (size_zero_node));
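   /* The record built above corresponds to the familiar x86-64 va_list
      layout, roughly:

        typedef struct __va_list_tag {
          unsigned int gp_offset;
          unsigned int fp_offset;
          void *overflow_arg_area;
          void *reg_save_area;
        } va_list[1];
   */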
3651 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3653 static void
3654 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3655 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3656 int no_rtl)
3658 CUMULATIVE_ARGS next_cum;
3659 rtx save_area = NULL_RTX, mem;
3660 rtx label;
3661 rtx label_ref;
3662 rtx tmp_reg;
3663 rtx nsse_reg;
3664 int set;
3665 tree fntype;
3666 int stdarg_p;
3667 int i;
3669 if (!TARGET_64BIT)
3670 return;
3672 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3673 return;
3675 /* Indicate to allocate space on the stack for varargs save area. */
3676 ix86_save_varrargs_registers = 1;
3678 cfun->stack_alignment_needed = 128;
3680 fntype = TREE_TYPE (current_function_decl);
3681 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3682 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3683 != void_type_node));
3685 /* For varargs, we do not want to skip the dummy va_dcl argument.
3686 For stdargs, we do want to skip the last named argument. */
3687 next_cum = *cum;
3688 if (stdarg_p)
3689 function_arg_advance (&next_cum, mode, type, 1);
3691 if (!no_rtl)
3692 save_area = frame_pointer_rtx;
3694 set = get_varargs_alias_set ();
3696 for (i = next_cum.regno;
3697 i < ix86_regparm
3698 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3699 i++)
3701 mem = gen_rtx_MEM (Pmode,
3702 plus_constant (save_area, i * UNITS_PER_WORD));
3703 MEM_NOTRAP_P (mem) = 1;
3704 set_mem_alias_set (mem, set);
3705 emit_move_insn (mem, gen_rtx_REG (Pmode,
3706 x86_64_int_parameter_registers[i]));
3709 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3711 /* Now emit code to save SSE registers. The AX parameter contains the number
3712 of SSE parameter registers used to call this function. We use the
3713 sse_prologue_save insn template that produces a computed jump across the
3714 SSE saves. We need some preparation work to get this working. */
3716 label = gen_label_rtx ();
3717 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3719 /* Compute the address to jump to:
3720 label - 4*eax + nnamed_sse_arguments*4 (each save insn is 4 bytes). */
3721 tmp_reg = gen_reg_rtx (Pmode);
3722 nsse_reg = gen_reg_rtx (Pmode);
3723 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3724 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3725 gen_rtx_MULT (Pmode, nsse_reg,
3726 GEN_INT (4))));
3727 if (next_cum.sse_regno)
3728 emit_move_insn
3729 (nsse_reg,
3730 gen_rtx_CONST (DImode,
3731 gen_rtx_PLUS (DImode,
3732 label_ref,
3733 GEN_INT (next_cum.sse_regno * 4))));
3734 else
3735 emit_move_insn (nsse_reg, label_ref);
3736 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3738 /* Compute the address of the memory block we save into. We always use a
3739 pointer pointing 127 bytes past the first byte to store - this is needed
3740 to keep each save instruction limited to 4 bytes. */
3741 tmp_reg = gen_reg_rtx (Pmode);
3742 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3743 plus_constant (save_area,
3744 8 * REGPARM_MAX + 127)));
3745 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3746 MEM_NOTRAP_P (mem) = 1;
3747 set_mem_alias_set (mem, set);
3748 set_mem_align (mem, BITS_PER_WORD);
3750 /* And finally do the dirty job! */
3751 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3752 GEN_INT (next_cum.sse_regno), label));
3757 /* Implement va_start. */
3759 void
3760 ix86_va_start (tree valist, rtx nextarg)
3762 HOST_WIDE_INT words, n_gpr, n_fpr;
3763 tree f_gpr, f_fpr, f_ovf, f_sav;
3764 tree gpr, fpr, ovf, sav, t;
3766 /* Only 64bit target needs something special. */
3767 if (!TARGET_64BIT)
3769 std_expand_builtin_va_start (valist, nextarg);
3770 return;
3773 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3774 f_fpr = TREE_CHAIN (f_gpr);
3775 f_ovf = TREE_CHAIN (f_fpr);
3776 f_sav = TREE_CHAIN (f_ovf);
3778 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3779 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3780 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3781 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3782 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3784 /* Count number of gp and fp argument registers used. */
3785 words = current_function_args_info.words;
3786 n_gpr = current_function_args_info.regno;
3787 n_fpr = current_function_args_info.sse_regno;
3789 if (TARGET_DEBUG_ARG)
3790 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3791 (int) words, (int) n_gpr, (int) n_fpr);
3793 if (cfun->va_list_gpr_size)
3795 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3796 build_int_cst (NULL_TREE, n_gpr * 8));
3797 TREE_SIDE_EFFECTS (t) = 1;
3798 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3801 if (cfun->va_list_fpr_size)
3803 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3804 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3805 TREE_SIDE_EFFECTS (t) = 1;
3806 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3809 /* Find the overflow area. */
3810 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3811 if (words != 0)
3812 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3813 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3814 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3815 TREE_SIDE_EFFECTS (t) = 1;
3816 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3818 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3820 /* Find the register save area.
3821 The function prologue saves it right above the stack frame. */
3822 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3823 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3824 TREE_SIDE_EFFECTS (t) = 1;
3825 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
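   /* After this expansion va_start has, in effect, set up:
        gp_offset         = <gp regs used by named args> * 8
        fp_offset         = REGPARM_MAX * 8 + <sse regs used by named args> * 16
        overflow_arg_area = address of the first stack-passed argument
        reg_save_area     = block saved by the prologue (see
                            ix86_setup_incoming_varargs above).  */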
3829 /* Implement va_arg. */
3831 tree
3832 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3834 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3835 tree f_gpr, f_fpr, f_ovf, f_sav;
3836 tree gpr, fpr, ovf, sav, t;
3837 int size, rsize;
3838 tree lab_false, lab_over = NULL_TREE;
3839 tree addr, t2;
3840 rtx container;
3841 int indirect_p = 0;
3842 tree ptrtype;
3843 enum machine_mode nat_mode;
3845 /* Only 64bit target needs something special. */
3846 if (!TARGET_64BIT)
3847 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3849 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3850 f_fpr = TREE_CHAIN (f_gpr);
3851 f_ovf = TREE_CHAIN (f_fpr);
3852 f_sav = TREE_CHAIN (f_ovf);
3854 valist = build_va_arg_indirect_ref (valist);
3855 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3856 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3857 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3858 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3860 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3861 if (indirect_p)
3862 type = build_pointer_type (type);
3863 size = int_size_in_bytes (type);
3864 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3866 nat_mode = type_natural_mode (type);
3867 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3868 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3870 /* Pull the value out of the saved registers. */
3872 addr = create_tmp_var (ptr_type_node, "addr");
3873 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3875 if (container)
3877 int needed_intregs, needed_sseregs;
3878 bool need_temp;
3879 tree int_addr, sse_addr;
3881 lab_false = create_artificial_label ();
3882 lab_over = create_artificial_label ();
3884 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3886 need_temp = (!REG_P (container)
3887 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3888 || TYPE_ALIGN (type) > 128));
3890 /* In case we are passing a structure, verify that it is a consecutive block
3891 on the register save area. If not, we need to do moves. */
3892 if (!need_temp && !REG_P (container))
3894 /* Verify that all registers are strictly consecutive */
3895 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3897 int i;
3899 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3901 rtx slot = XVECEXP (container, 0, i);
3902 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3903 || INTVAL (XEXP (slot, 1)) != i * 16)
3904 need_temp = 1;
3907 else
3909 int i;
3911 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3913 rtx slot = XVECEXP (container, 0, i);
3914 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3915 || INTVAL (XEXP (slot, 1)) != i * 8)
3916 need_temp = 1;
3920 if (!need_temp)
3922 int_addr = addr;
3923 sse_addr = addr;
3925 else
3927 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3928 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3929 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3930 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3933 /* First ensure that we fit completely in registers. */
3934 if (needed_intregs)
3936 t = build_int_cst (TREE_TYPE (gpr),
3937 (REGPARM_MAX - needed_intregs + 1) * 8);
3938 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3939 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3940 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3941 gimplify_and_add (t, pre_p);
3943 if (needed_sseregs)
3945 t = build_int_cst (TREE_TYPE (fpr),
3946 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3947 + REGPARM_MAX * 8);
3948 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3949 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3950 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3951 gimplify_and_add (t, pre_p);
3954 /* Compute index to start of area used for integer regs. */
3955 if (needed_intregs)
3957 /* int_addr = gpr + sav; */
3958 t = fold_convert (ptr_type_node, gpr);
3959 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3960 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3961 gimplify_and_add (t, pre_p);
3963 if (needed_sseregs)
3965 /* sse_addr = fpr + sav; */
3966 t = fold_convert (ptr_type_node, fpr);
3967 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3968 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3969 gimplify_and_add (t, pre_p);
3971 if (need_temp)
3973 int i;
3974 tree temp = create_tmp_var (type, "va_arg_tmp");
3976 /* addr = &temp; */
3977 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3978 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3979 gimplify_and_add (t, pre_p);
3981 for (i = 0; i < XVECLEN (container, 0); i++)
3983 rtx slot = XVECEXP (container, 0, i);
3984 rtx reg = XEXP (slot, 0);
3985 enum machine_mode mode = GET_MODE (reg);
3986 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3987 tree addr_type = build_pointer_type (piece_type);
3988 tree src_addr, src;
3989 int src_offset;
3990 tree dest_addr, dest;
3992 if (SSE_REGNO_P (REGNO (reg)))
3994 src_addr = sse_addr;
3995 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3997 else
3999 src_addr = int_addr;
4000 src_offset = REGNO (reg) * 8;
4002 src_addr = fold_convert (addr_type, src_addr);
4003 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4004 size_int (src_offset)));
4005 src = build_va_arg_indirect_ref (src_addr);
4007 dest_addr = fold_convert (addr_type, addr);
4008 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4009 size_int (INTVAL (XEXP (slot, 1)))));
4010 dest = build_va_arg_indirect_ref (dest_addr);
4012 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
4013 gimplify_and_add (t, pre_p);
4017 if (needed_intregs)
4019 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4020 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4021 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
4022 gimplify_and_add (t, pre_p);
4024 if (needed_sseregs)
4026 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4027 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4028 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
4029 gimplify_and_add (t, pre_p);
4032 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4033 gimplify_and_add (t, pre_p);
4035 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4036 append_to_statement_list (t, pre_p);
4039 /* ... otherwise out of the overflow area. */
4041 /* Care for on-stack alignment if needed. */
4042 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
4043 t = ovf;
4044 else
4046 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4047 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4048 build_int_cst (TREE_TYPE (ovf), align - 1));
4049 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
4050 build_int_cst (TREE_TYPE (t), -align));
4052 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4054 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
4055 gimplify_and_add (t2, pre_p);
4057 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4058 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4059 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
4060 gimplify_and_add (t, pre_p);
4062 if (container)
4064 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4065 append_to_statement_list (t, pre_p);
4068 ptrtype = build_pointer_type (type);
4069 addr = fold_convert (ptrtype, addr);
4071 if (indirect_p)
4072 addr = build_va_arg_indirect_ref (addr);
4073 return build_va_arg_indirect_ref (addr);
4076 /* Return nonzero if OPNUM's MEM should be matched
4077 in movabs* patterns. */
4080 ix86_check_movabs (rtx insn, int opnum)
4082 rtx set, mem;
4084 set = PATTERN (insn);
4085 if (GET_CODE (set) == PARALLEL)
4086 set = XVECEXP (set, 0, 0);
4087 gcc_assert (GET_CODE (set) == SET);
4088 mem = XEXP (set, opnum);
4089 while (GET_CODE (mem) == SUBREG)
4090 mem = SUBREG_REG (mem);
4091 gcc_assert (GET_CODE (mem) == MEM);
4092 return (volatile_ok || !MEM_VOLATILE_P (mem));
4095 /* Initialize the table of extra 80387 mathematical constants. */
4097 static void
4098 init_ext_80387_constants (void)
4100 static const char * cst[5] =
4102 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4103 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4104 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4105 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4106 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4108 int i;
4110 for (i = 0; i < 5; i++)
4112 real_from_string (&ext_80387_constants_table[i], cst[i]);
4113 /* Ensure each constant is rounded to XFmode precision. */
4114 real_convert (&ext_80387_constants_table[i],
4115 XFmode, &ext_80387_constants_table[i]);
4118 ext_80387_constants_init = 1;
4121 /* Return true if the constant is something that can be loaded with
4122 a special instruction. */
4125 standard_80387_constant_p (rtx x)
4127 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4128 return -1;
4130 if (x == CONST0_RTX (GET_MODE (x)))
4131 return 1;
4132 if (x == CONST1_RTX (GET_MODE (x)))
4133 return 2;
4135 /* For XFmode constants, try to find a special 80387 instruction when
4136 optimizing for size or on those CPUs that benefit from them. */
4137 if (GET_MODE (x) == XFmode
4138 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4140 REAL_VALUE_TYPE r;
4141 int i;
4143 if (! ext_80387_constants_init)
4144 init_ext_80387_constants ();
4146 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4147 for (i = 0; i < 5; i++)
4148 if (real_identical (&r, &ext_80387_constants_table[i]))
4149 return i + 3;
4152 return 0;
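   /* Return value summary: -1 means not a usable FP constant, 0 means no
      special instruction, 1 is fldz, 2 is fld1, and 3..7 select the extended
      constants in the table above (fldlg2, fldln2, fldl2e, fldl2t, fldpi);
      standard_80387_constant_opcode below maps these back to mnemonics.  */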
4155 /* Return the opcode of the special instruction to be used to load
4156 the constant X. */
4158 const char *
4159 standard_80387_constant_opcode (rtx x)
4161 switch (standard_80387_constant_p (x))
4163 case 1:
4164 return "fldz";
4165 case 2:
4166 return "fld1";
4167 case 3:
4168 return "fldlg2";
4169 case 4:
4170 return "fldln2";
4171 case 5:
4172 return "fldl2e";
4173 case 6:
4174 return "fldl2t";
4175 case 7:
4176 return "fldpi";
4177 default:
4178 gcc_unreachable ();
4182 /* Return the CONST_DOUBLE representing the 80387 constant that is
4183 loaded by the specified special instruction. The argument IDX
4184 matches the return value from standard_80387_constant_p. */
4187 standard_80387_constant_rtx (int idx)
4189 int i;
4191 if (! ext_80387_constants_init)
4192 init_ext_80387_constants ();
4194 switch (idx)
4196 case 3:
4197 case 4:
4198 case 5:
4199 case 6:
4200 case 7:
4201 i = idx - 3;
4202 break;
4204 default:
4205 gcc_unreachable ();
4208 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4209 XFmode);
4212 /* Return 1 if X is an FP constant we can load into an SSE register without using memory.
4215 standard_sse_constant_p (rtx x)
4217 if (x == const0_rtx)
4218 return 1;
4219 return (x == CONST0_RTX (GET_MODE (x)));
4222 /* Returns 1 if OP contains a symbol reference */
4225 symbolic_reference_mentioned_p (rtx op)
4227 const char *fmt;
4228 int i;
4230 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4231 return 1;
4233 fmt = GET_RTX_FORMAT (GET_CODE (op));
4234 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4236 if (fmt[i] == 'E')
4238 int j;
4240 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4241 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4242 return 1;
4245 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4246 return 1;
4249 return 0;
4252 /* Return 1 if it is appropriate to emit `ret' instructions in the
4253 body of a function. Do this only if the epilogue is simple, needing a
4254 couple of insns. Prior to reloading, we can't tell how many registers
4255 must be saved, so return 0 then. Return 0 if there is no frame
4256 marker to de-allocate. */
4259 ix86_can_use_return_insn_p (void)
4261 struct ix86_frame frame;
4263 if (! reload_completed || frame_pointer_needed)
4264 return 0;
4266 /* Don't allow more than 32k bytes of popped arguments, since that's all we
4267 can do with one instruction. */
4268 if (current_function_pops_args
4269 && current_function_args_size >= 32768)
4270 return 0;
4272 ix86_compute_frame_layout (&frame);
4273 return frame.to_allocate == 0 && frame.nregs == 0;
4276 /* Value should be nonzero if functions must have frame pointers.
4277 Zero means the frame pointer need not be set up (and parms may
4278 be accessed via the stack pointer) in functions that seem suitable. */
4281 ix86_frame_pointer_required (void)
4283 /* If we accessed previous frames, then the generated code expects
4284 to be able to access the saved ebp value in our frame. */
4285 if (cfun->machine->accesses_prev_frame)
4286 return 1;
4288 /* Several x86 OSes need a frame pointer for other reasons,
4289 usually pertaining to setjmp. */
4290 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4291 return 1;
4293 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4294 the frame pointer by default. Turn it back on now if we've not
4295 got a leaf function. */
4296 if (TARGET_OMIT_LEAF_FRAME_POINTER
4297 && (!current_function_is_leaf))
4298 return 1;
4300 if (current_function_profile)
4301 return 1;
4303 return 0;
4306 /* Record that the current function accesses previous call frames. */
4308 void
4309 ix86_setup_frame_addresses (void)
4311 cfun->machine->accesses_prev_frame = 1;
4314 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
4315 # define USE_HIDDEN_LINKONCE 1
4316 #else
4317 # define USE_HIDDEN_LINKONCE 0
4318 #endif
4320 static int pic_labels_used;
4322 /* Fills in the label name that should be used for a pc thunk for
4323 the given register. */
4325 static void
4326 get_pc_thunk_name (char name[32], unsigned int regno)
4328 if (USE_HIDDEN_LINKONCE)
4329 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4330 else
4331 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4335 /* Output the pc thunks used for -fpic: each thunk loads its register with
4336 the return address of the caller and then returns. */
4338 void
4339 ix86_file_end (void)
4341 rtx xops[2];
4342 int regno;
4344 for (regno = 0; regno < 8; ++regno)
4346 char name[32];
4348 if (! ((pic_labels_used >> regno) & 1))
4349 continue;
4351 get_pc_thunk_name (name, regno);
4353 if (USE_HIDDEN_LINKONCE)
4355 tree decl;
4357 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4358 error_mark_node);
4359 TREE_PUBLIC (decl) = 1;
4360 TREE_STATIC (decl) = 1;
4361 DECL_ONE_ONLY (decl) = 1;
4363 (*targetm.asm_out.unique_section) (decl, 0);
4364 named_section (decl, NULL, 0);
4366 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4367 fputs ("\t.hidden\t", asm_out_file);
4368 assemble_name (asm_out_file, name);
4369 fputc ('\n', asm_out_file);
4370 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4372 else
4374 text_section ();
4375 ASM_OUTPUT_LABEL (asm_out_file, name);
4378 xops[0] = gen_rtx_REG (SImode, regno);
4379 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4380 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4381 output_asm_insn ("ret", xops);
4384 if (NEED_INDICATE_EXEC_STACK)
4385 file_end_indicate_exec_stack ();
4388 /* Emit code for the SET_GOT patterns. */
4390 const char *
4391 output_set_got (rtx dest)
4393 rtx xops[3];
4395 xops[0] = dest;
4396 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4398 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4400 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4402 if (!flag_pic)
4403 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4404 else
4405 output_asm_insn ("call\t%a2", xops);
4407 #if TARGET_MACHO
4408 /* Output the "canonical" label name ("Lxx$pb") here too. This
4409 is what will be referred to by the Mach-O PIC subsystem. */
4410 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4411 #endif
4412 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4413 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4415 if (flag_pic)
4416 output_asm_insn ("pop{l}\t%0", xops);
4418 else
4420 char name[32];
4421 get_pc_thunk_name (name, REGNO (dest));
4422 pic_labels_used |= 1 << REGNO (dest);
4424 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4425 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4426 output_asm_insn ("call\t%X2", xops);
4429 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4430 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4431 else if (!TARGET_MACHO)
4432 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
4434 return "";
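   /* Schematically, the two -fpic sequences emitted above are:

        with TARGET_DEEP_BRANCH_PREDICTION:
          call  get_pc_thunk.reg
          addl  $<GOT symbol>, %reg

        without it:
          call  .L1
        .L1:
          popl  %reg
          addl  $<GOT symbol>+[.-.L1], %reg
   */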
4437 /* Generate a "push" pattern for input ARG. */
4439 static rtx
4440 gen_push (rtx arg)
4442 return gen_rtx_SET (VOIDmode,
4443 gen_rtx_MEM (Pmode,
4444 gen_rtx_PRE_DEC (Pmode,
4445 stack_pointer_rtx)),
4446 arg);
4449 /* Return the register number of an unused call-clobbered register, if one is
4450 available for the entire function, or INVALID_REGNUM otherwise. */
4452 static unsigned int
4453 ix86_select_alt_pic_regnum (void)
4455 if (current_function_is_leaf && !current_function_profile)
4457 int i;
4458 for (i = 2; i >= 0; --i)
4459 if (!regs_ever_live[i])
4460 return i;
4463 return INVALID_REGNUM;
4466 /* Return 1 if we need to save REGNO. */
4467 static int
4468 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4470 if (pic_offset_table_rtx
4471 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4472 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4473 || current_function_profile
4474 || current_function_calls_eh_return
4475 || current_function_uses_const_pool))
4477 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4478 return 0;
4479 return 1;
4482 if (current_function_calls_eh_return && maybe_eh_return)
4484 unsigned i;
4485 for (i = 0; ; i++)
4487 unsigned test = EH_RETURN_DATA_REGNO (i);
4488 if (test == INVALID_REGNUM)
4489 break;
4490 if (test == regno)
4491 return 1;
4495 return (regs_ever_live[regno]
4496 && !call_used_regs[regno]
4497 && !fixed_regs[regno]
4498 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4501 /* Return number of registers to be saved on the stack. */
4503 static int
4504 ix86_nsaved_regs (void)
4506 int nregs = 0;
4507 int regno;
4509 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4510 if (ix86_save_reg (regno, true))
4511 nregs++;
4512 return nregs;
4515 /* Return the offset between two registers, one to be eliminated, and the other
4516 its replacement, at the start of a routine. */
4518 HOST_WIDE_INT
4519 ix86_initial_elimination_offset (int from, int to)
4521 struct ix86_frame frame;
4522 ix86_compute_frame_layout (&frame);
4524 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4525 return frame.hard_frame_pointer_offset;
4526 else if (from == FRAME_POINTER_REGNUM
4527 && to == HARD_FRAME_POINTER_REGNUM)
4528 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4529 else
4531 gcc_assert (to == STACK_POINTER_REGNUM);
4533 if (from == ARG_POINTER_REGNUM)
4534 return frame.stack_pointer_offset;
4536 gcc_assert (from == FRAME_POINTER_REGNUM);
4537 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4541 /* Fill structure ix86_frame about frame of currently computed function. */
4543 static void
4544 ix86_compute_frame_layout (struct ix86_frame *frame)
4546 HOST_WIDE_INT total_size;
4547 unsigned int stack_alignment_needed;
4548 HOST_WIDE_INT offset;
4549 unsigned int preferred_alignment;
4550 HOST_WIDE_INT size = get_frame_size ();
4552 frame->nregs = ix86_nsaved_regs ();
4553 total_size = size;
4555 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4556 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4558 /* During a reload iteration the number of registers saved can change.
4559 Recompute the value as needed. Do not recompute when the number of registers
4560 didn't change, as reload makes multiple calls to this function and does not
4561 expect the decision to change within a single iteration. */
4562 if (!optimize_size
4563 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4565 int count = frame->nregs;
4567 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4568 /* The fast prologue uses move instead of push to save registers. This
4569 is significantly longer, but also executes faster as modern hardware
4570 can execute the moves in parallel, but can't do that for push/pop.
4572 Be careful about choosing what prologue to emit: when the function takes
4573 many instructions to execute we may use the slow version, as well as when
4574 the function is known to be outside a hot spot (this is known only with
4575 profile feedback). Weight the size of the function by the number of registers
4576 to save, as it is cheap to use one or two push instructions but very
4577 slow to use many of them. */
4578 if (count)
4579 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4580 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4581 || (flag_branch_probabilities
4582 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4583 cfun->machine->use_fast_prologue_epilogue = false;
4584 else
4585 cfun->machine->use_fast_prologue_epilogue
4586 = !expensive_function_p (count);
4588 if (TARGET_PROLOGUE_USING_MOVE
4589 && cfun->machine->use_fast_prologue_epilogue)
4590 frame->save_regs_using_mov = true;
4591 else
4592 frame->save_regs_using_mov = false;
4595 /* Skip return address and saved base pointer. */
4596 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4598 frame->hard_frame_pointer_offset = offset;
4600 /* Do some sanity checking of stack_alignment_needed and
4601 preferred_alignment, since the i386 port is the only one using these
4602 features, which may break easily. */
4604 gcc_assert (!size || stack_alignment_needed);
4605 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4606 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4607 gcc_assert (stack_alignment_needed
4608 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4610 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4611 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4613 /* Register save area */
4614 offset += frame->nregs * UNITS_PER_WORD;
4616 /* Va-arg area */
4617 if (ix86_save_varrargs_registers)
4619 offset += X86_64_VARARGS_SIZE;
4620 frame->va_arg_size = X86_64_VARARGS_SIZE;
4622 else
4623 frame->va_arg_size = 0;
4625 /* Align start of frame for local function. */
4626 frame->padding1 = ((offset + stack_alignment_needed - 1)
4627 & -stack_alignment_needed) - offset;
4629 offset += frame->padding1;
4631 /* Frame pointer points here. */
4632 frame->frame_pointer_offset = offset;
4634 offset += size;
4636 /* Add the outgoing arguments area. It can be skipped if we eliminated
4637 all the function calls as dead code.
4638 Skipping is however impossible when the function calls alloca, since the
4639 alloca expander assumes that the last current_function_outgoing_args_size
4640 bytes of the stack frame are unused. */
4641 if (ACCUMULATE_OUTGOING_ARGS
4642 && (!current_function_is_leaf || current_function_calls_alloca))
4644 offset += current_function_outgoing_args_size;
4645 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4647 else
4648 frame->outgoing_arguments_size = 0;
4650 /* Align stack boundary. Only needed if we're calling another function
4651 or using alloca. */
4652 if (!current_function_is_leaf || current_function_calls_alloca)
4653 frame->padding2 = ((offset + preferred_alignment - 1)
4654 & -preferred_alignment) - offset;
4655 else
4656 frame->padding2 = 0;
4658 offset += frame->padding2;
4660 /* We've reached end of stack frame. */
4661 frame->stack_pointer_offset = offset;
4663 /* Size prologue needs to allocate. */
4664 frame->to_allocate =
4665 (size + frame->padding1 + frame->padding2
4666 + frame->outgoing_arguments_size + frame->va_arg_size);
4668 if ((!frame->to_allocate && frame->nregs <= 1)
4669 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4670 frame->save_regs_using_mov = false;
4672 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4673 && current_function_is_leaf)
4675 frame->red_zone_size = frame->to_allocate;
4676 if (frame->save_regs_using_mov)
4677 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4678 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4679 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4681 else
4682 frame->red_zone_size = 0;
4683 frame->to_allocate -= frame->red_zone_size;
4684 frame->stack_pointer_offset -= frame->red_zone_size;
4685 #if 0
4686 fprintf (stderr, "nregs: %i\n", frame->nregs);
4687 fprintf (stderr, "size: %i\n", size);
4688 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4689 fprintf (stderr, "padding1: %i\n", frame->padding1);
4690 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4691 fprintf (stderr, "padding2: %i\n", frame->padding2);
4692 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4693 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4694 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4695 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4696 frame->hard_frame_pointer_offset);
4697 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4698 #endif
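   /* The offsets computed above correspond, going down the stack from the
      return address, roughly to: saved %ebp (when frame_pointer_needed),
      the register save area (frame->nregs words), the va-arg save area,
      padding1, the local variables, the outgoing argument area and padding2;
      hard_frame_pointer_offset, frame_pointer_offset and stack_pointer_offset
      record how far each reference point is from the top, and the red zone,
      when usable, is subtracted from to_allocate at the end.  */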
4701 /* Emit code to save registers in the prologue. */
4703 static void
4704 ix86_emit_save_regs (void)
4706 int regno;
4707 rtx insn;
4709 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4710 if (ix86_save_reg (regno, true))
4712 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4713 RTX_FRAME_RELATED_P (insn) = 1;
4717 /* Emit code to save registers using MOV insns. The first register
4718 is saved at POINTER + OFFSET. */
4719 static void
4720 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4722 int regno;
4723 rtx insn;
4725 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4726 if (ix86_save_reg (regno, true))
4728 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4729 Pmode, offset),
4730 gen_rtx_REG (Pmode, regno));
4731 RTX_FRAME_RELATED_P (insn) = 1;
4732 offset += UNITS_PER_WORD;
4736 /* Expand prologue or epilogue stack adjustment.
4737 The pattern exists to put a dependency on all ebp-based memory accesses.
4738 STYLE should be negative if instructions should be marked as frame related,
4739 zero if %r11 register is live and cannot be freely used and positive
4740 otherwise. */
4742 static void
4743 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4745 rtx insn;
4747 if (! TARGET_64BIT)
4748 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4749 else if (x86_64_immediate_operand (offset, DImode))
4750 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4751 else
4753 rtx r11;
4754 /* r11 is used by indirect sibcall return as well, set before the
4755 epilogue and used after the epilogue. ATM indirect sibcall
4756 shouldn't be used together with huge frame sizes in one
4757 function because of the frame_size check in sibcall.c. */
4758 gcc_assert (style);
4759 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4760 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4761 if (style < 0)
4762 RTX_FRAME_RELATED_P (insn) = 1;
4763 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4764 offset));
4766 if (style < 0)
4767 RTX_FRAME_RELATED_P (insn) = 1;
4770 /* Expand the prologue into a bunch of separate insns. */
4772 void
4773 ix86_expand_prologue (void)
4775 rtx insn;
4776 bool pic_reg_used;
4777 struct ix86_frame frame;
4778 HOST_WIDE_INT allocate;
4780 ix86_compute_frame_layout (&frame);
4782 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4783 slower on all targets. Also sdb doesn't like it. */
4785 if (frame_pointer_needed)
4787 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4788 RTX_FRAME_RELATED_P (insn) = 1;
4790 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4791 RTX_FRAME_RELATED_P (insn) = 1;
4794 allocate = frame.to_allocate;
4796 if (!frame.save_regs_using_mov)
4797 ix86_emit_save_regs ();
4798 else
4799 allocate += frame.nregs * UNITS_PER_WORD;
4801 /* When using the red zone we may start register saving before allocating
4802 the stack frame, saving one cycle of the prologue. */
4803 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4804 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4805 : stack_pointer_rtx,
4806 -frame.nregs * UNITS_PER_WORD);
4808 if (allocate == 0)
4810 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4811 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4812 GEN_INT (-allocate), -1);
4813 else
4815 /* Only valid for Win32. */
4816 rtx eax = gen_rtx_REG (SImode, 0);
4817 bool eax_live = ix86_eax_live_at_start_p ();
4818 rtx t;
4820 gcc_assert (!TARGET_64BIT);
4822 if (eax_live)
4824 emit_insn (gen_push (eax));
4825 allocate -= 4;
4828 emit_move_insn (eax, GEN_INT (allocate));
4830 insn = emit_insn (gen_allocate_stack_worker (eax));
4831 RTX_FRAME_RELATED_P (insn) = 1;
4832 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4833 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4834 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4835 t, REG_NOTES (insn));
4837 if (eax_live)
4839 if (frame_pointer_needed)
4840 t = plus_constant (hard_frame_pointer_rtx,
4841 allocate
4842 - frame.to_allocate
4843 - frame.nregs * UNITS_PER_WORD);
4844 else
4845 t = plus_constant (stack_pointer_rtx, allocate);
4846 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4850 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4852 if (!frame_pointer_needed || !frame.to_allocate)
4853 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4854 else
4855 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4856 -frame.nregs * UNITS_PER_WORD);
4859 pic_reg_used = false;
4860 if (pic_offset_table_rtx
4861 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4862 || current_function_profile))
4864 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4866 if (alt_pic_reg_used != INVALID_REGNUM)
4867 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4869 pic_reg_used = true;
4872 if (pic_reg_used)
4874 if (TARGET_64BIT)
4875 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
4876 else
4877 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4879 /* Even with accurate pre-reload life analysis, we can wind up
4880 deleting all references to the pic register after reload.
4881 Consider if cross-jumping unifies two sides of a branch
4882 controlled by a comparison vs the only read from a global.
4883 In which case, allow the set_got to be deleted, though we're
4884 too late to do anything about the ebx save in the prologue. */
4885 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4888 /* Prevent function calls from being scheduled before the call to mcount.
4889 In the pic_reg_used case, make sure that the got load isn't deleted. */
4890 if (current_function_profile)
4891 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4894 /* Emit code to restore saved registers using MOV insns. First register
4895 is restored from POINTER + OFFSET. */
4896 static void
4897 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4898 int maybe_eh_return)
4900 int regno;
4901 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4903 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4904 if (ix86_save_reg (regno, maybe_eh_return))
4906 /* Ensure that adjust_address won't be forced to produce pointer
4907 out of range allowed by x86-64 instruction set. */
4908 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4910 rtx r11;
4912 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4913 emit_move_insn (r11, GEN_INT (offset));
4914 emit_insn (gen_adddi3 (r11, r11, pointer));
4915 base_address = gen_rtx_MEM (Pmode, r11);
4916 offset = 0;
4918 emit_move_insn (gen_rtx_REG (Pmode, regno),
4919 adjust_address (base_address, Pmode, offset));
4920 offset += UNITS_PER_WORD;
4924 /* Restore function stack, frame, and registers. */
4926 void
4927 ix86_expand_epilogue (int style)
4929 int regno;
4930 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4931 struct ix86_frame frame;
4932 HOST_WIDE_INT offset;
4934 ix86_compute_frame_layout (&frame);
4936 /* Calculate start of saved registers relative to ebp. Special care
4937 must be taken for the normal return case of a function using
4938 eh_return: the eax and edx registers are marked as saved, but not
4939 restored along this path. */
4940 offset = frame.nregs;
4941 if (current_function_calls_eh_return && style != 2)
4942 offset -= 2;
4943 offset *= -UNITS_PER_WORD;
4945 /* If we're only restoring one register and sp is not valid then
4946 use a move instruction to restore the register, since it's
4947 less work than reloading sp and popping the register.
4949 The default code results in a stack adjustment using an add/lea instruction,
4950 while this code results in a LEAVE instruction (or discrete equivalent),
4951 so it is profitable in some other cases as well, especially when there
4952 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4953 and there is exactly one register to pop. This heuristic may need some
4954 tuning in the future. */
4955 if ((!sp_valid && frame.nregs <= 1)
4956 || (TARGET_EPILOGUE_USING_MOVE
4957 && cfun->machine->use_fast_prologue_epilogue
4958 && (frame.nregs > 1 || frame.to_allocate))
4959 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4960 || (frame_pointer_needed && TARGET_USE_LEAVE
4961 && cfun->machine->use_fast_prologue_epilogue
4962 && frame.nregs == 1)
4963 || current_function_calls_eh_return)
4965 /* Restore registers. We can use ebp or esp to address the memory
4966 locations. If both are available, default to ebp, since offsets
4967 are known to be small. The only exception is esp pointing directly to the
4968 end of the block of saved registers, where we may simplify the addressing
4969 mode. */
4971 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4972 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4973 frame.to_allocate, style == 2);
4974 else
4975 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4976 offset, style == 2);
4978 /* eh_return epilogues need %ecx added to the stack pointer. */
4979 if (style == 2)
4981 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4983 if (frame_pointer_needed)
4985 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4986 tmp = plus_constant (tmp, UNITS_PER_WORD);
4987 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4989 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4990 emit_move_insn (hard_frame_pointer_rtx, tmp);
4992 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4993 const0_rtx, style);
4995 else
4997 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4998 tmp = plus_constant (tmp, (frame.to_allocate
4999 + frame.nregs * UNITS_PER_WORD));
5000 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5003 else if (!frame_pointer_needed)
5004 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5005 GEN_INT (frame.to_allocate
5006 + frame.nregs * UNITS_PER_WORD),
5007 style);
5008 /* If not an i386, mov & pop is faster than "leave". */
5009 else if (TARGET_USE_LEAVE || optimize_size
5010 || !cfun->machine->use_fast_prologue_epilogue)
5011 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5012 else
5014 pro_epilogue_adjust_stack (stack_pointer_rtx,
5015 hard_frame_pointer_rtx,
5016 const0_rtx, style);
5017 if (TARGET_64BIT)
5018 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5019 else
5020 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5023 else
5025 /* First step is to deallocate the stack frame so that we can
5026 pop the registers. */
5027 if (!sp_valid)
5029 gcc_assert (frame_pointer_needed);
5030 pro_epilogue_adjust_stack (stack_pointer_rtx,
5031 hard_frame_pointer_rtx,
5032 GEN_INT (offset), style);
5034 else if (frame.to_allocate)
5035 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5036 GEN_INT (frame.to_allocate), style);
5038 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5039 if (ix86_save_reg (regno, false))
5041 if (TARGET_64BIT)
5042 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5043 else
5044 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5046 if (frame_pointer_needed)
5048 /* Leave results in shorter dependency chains on CPUs that are
5049 able to grok it fast. */
5050 if (TARGET_USE_LEAVE)
5051 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5052 else if (TARGET_64BIT)
5053 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5054 else
5055 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5059 /* Sibcall epilogues don't want a return instruction. */
5060 if (style == 0)
5061 return;
5063 if (current_function_pops_args && current_function_args_size)
5065 rtx popc = GEN_INT (current_function_pops_args);
5067 /* i386 can only pop 64K bytes. If asked to pop more, pop the
5068 return address, do an explicit add, and jump indirectly to the
5069 caller. */
5071 if (current_function_pops_args >= 65536)
5073 rtx ecx = gen_rtx_REG (SImode, 2);
5075 /* There is no "pascal" calling convention in 64bit ABI. */
5076 gcc_assert (!TARGET_64BIT);
5078 emit_insn (gen_popsi1 (ecx));
5079 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5080 emit_jump_insn (gen_return_indirect_internal (ecx));
5082 else
5083 emit_jump_insn (gen_return_pop_internal (popc));
5085 else
5086 emit_jump_insn (gen_return_internal ());
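   /* For example, a callee-pops (stdcall-style) function with 12 bytes of
      arguments ends in "ret $12"; if more than 64K would have to be popped,
      the code above instead pops the return address into %ecx, adjusts %esp
      explicitly, and does an indirect jump through %ecx.  */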
5089 /* Reset from the function's potential modifications. */
5091 static void
5092 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5093 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5095 if (pic_offset_table_rtx)
5096 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5099 /* Extract the parts of an RTL expression that is a valid memory address
5100 for an instruction. Return 0 if the structure of the address is
5101 grossly off. Return -1 if the address contains ASHIFT, so it is not
5102 strictly valid, but is still used for computing the length of the lea instruction.
5105 ix86_decompose_address (rtx addr, struct ix86_address *out)
5107 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5108 rtx base_reg, index_reg;
5109 HOST_WIDE_INT scale = 1;
5110 rtx scale_rtx = NULL_RTX;
5111 int retval = 1;
5112 enum ix86_address_seg seg = SEG_DEFAULT;
5114 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5115 base = addr;
5116 else if (GET_CODE (addr) == PLUS)
5118 rtx addends[4], op;
5119 int n = 0, i;
5121 op = addr;
5124 if (n >= 4)
5125 return 0;
5126 addends[n++] = XEXP (op, 1);
5127 op = XEXP (op, 0);
5129 while (GET_CODE (op) == PLUS);
5130 if (n >= 4)
5131 return 0;
5132 addends[n] = op;
5134 for (i = n; i >= 0; --i)
5136 op = addends[i];
5137 switch (GET_CODE (op))
5139 case MULT:
5140 if (index)
5141 return 0;
5142 index = XEXP (op, 0);
5143 scale_rtx = XEXP (op, 1);
5144 break;
5146 case UNSPEC:
5147 if (XINT (op, 1) == UNSPEC_TP
5148 && TARGET_TLS_DIRECT_SEG_REFS
5149 && seg == SEG_DEFAULT)
5150 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5151 else
5152 return 0;
5153 break;
5155 case REG:
5156 case SUBREG:
5157 if (!base)
5158 base = op;
5159 else if (!index)
5160 index = op;
5161 else
5162 return 0;
5163 break;
5165 case CONST:
5166 case CONST_INT:
5167 case SYMBOL_REF:
5168 case LABEL_REF:
5169 if (disp)
5170 return 0;
5171 disp = op;
5172 break;
5174 default:
5175 return 0;
5179 else if (GET_CODE (addr) == MULT)
5181 index = XEXP (addr, 0); /* index*scale */
5182 scale_rtx = XEXP (addr, 1);
5184 else if (GET_CODE (addr) == ASHIFT)
5186 rtx tmp;
5188 /* We're called for lea too, which implements ashift on occasion. */
5189 index = XEXP (addr, 0);
5190 tmp = XEXP (addr, 1);
5191 if (GET_CODE (tmp) != CONST_INT)
5192 return 0;
5193 scale = INTVAL (tmp);
5194 if ((unsigned HOST_WIDE_INT) scale > 3)
5195 return 0;
5196 scale = 1 << scale;
5197 retval = -1;
5199 else
5200 disp = addr; /* displacement */
5202 /* Extract the integral value of scale. */
5203 if (scale_rtx)
5205 if (GET_CODE (scale_rtx) != CONST_INT)
5206 return 0;
5207 scale = INTVAL (scale_rtx);
5210 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
5211 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
5213 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
5214 if (base_reg && index_reg && scale == 1
5215 && (index_reg == arg_pointer_rtx
5216 || index_reg == frame_pointer_rtx
5217 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
5219 rtx tmp;
5220 tmp = base, base = index, index = tmp;
5221 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
5224 /* Special case: %ebp cannot be encoded as a base without a displacement. */
5225 if ((base_reg == hard_frame_pointer_rtx
5226 || base_reg == frame_pointer_rtx
5227 || base_reg == arg_pointer_rtx) && !disp)
5228 disp = const0_rtx;
5230 /* Special case: on K6, [%esi] makes the instruction vector decoded.
5231 Avoid this by transforming to [%esi+0]. */
5232 if (ix86_tune == PROCESSOR_K6 && !optimize_size
5233 && base_reg && !index_reg && !disp
5234 && REG_P (base_reg)
5235 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
5236 disp = const0_rtx;
5238 /* Special case: encode reg+reg instead of reg*2. */
5239 if (!base && index && scale && scale == 2)
5240 base = index, base_reg = index_reg, scale = 1;
5242 /* Special case: scaling cannot be encoded without base or displacement. */
5243 if (!base && !disp && index && scale != 1)
5244 disp = const0_rtx;
5246 out->base = base;
5247 out->index = index;
5248 out->disp = disp;
5249 out->scale = scale;
5250 out->seg = seg;
5252 return retval;
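   /* Example of the decomposition performed above: the ia32 address
      12(%ebx,%esi,4), i.e. (plus (plus (reg %ebx) (mult (reg %esi)
      (const_int 4))) (const_int 12)), yields base = %ebx, index = %esi,
      scale = 4, disp = 12, seg = SEG_DEFAULT.  */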
5255 /* Return cost of the memory address x.
5256 For i386, it is better to use a complex address than let gcc copy
5257 the address into a reg and make a new pseudo. But not if the address
5258 requires two regs - that would mean more pseudos with longer
5259 lifetimes. */
5260 static int
5261 ix86_address_cost (rtx x)
5263 struct ix86_address parts;
5264 int cost = 1;
5265 int ok = ix86_decompose_address (x, &parts);
5267 gcc_assert (ok);
5269 if (parts.base && GET_CODE (parts.base) == SUBREG)
5270 parts.base = SUBREG_REG (parts.base);
5271 if (parts.index && GET_CODE (parts.index) == SUBREG)
5272 parts.index = SUBREG_REG (parts.index);
5274 /* More complex memory references are better. */
5275 if (parts.disp && parts.disp != const0_rtx)
5276 cost--;
5277 if (parts.seg != SEG_DEFAULT)
5278 cost--;
5280 /* Attempt to minimize number of registers in the address. */
5281 if ((parts.base
5282 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5283 || (parts.index
5284 && (!REG_P (parts.index)
5285 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5286 cost++;
5288 if (parts.base
5289 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5290 && parts.index
5291 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5292 && parts.base != parts.index)
5293 cost++;
5295 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
5296 since its predecode logic can't detect the length of such instructions
5297 and decoding degenerates to vector decoded. Increase the cost of such
5298 addresses here. The penalty is at least 2 cycles. It may be worthwhile
5299 to split such addresses or even to refuse such addresses at all.
5301 The following addressing modes are affected:
5302 [base+scale*index]
5303 [scale*index+disp]
5304 [base+index]
5306 The first and last cases may be avoidable by explicitly coding the zero in
5307 the memory address, but I don't have an AMD-K6 machine handy to check this
5308 theory. */
5310 if (TARGET_K6
5311 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5312 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5313 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5314 cost += 10;
5316 return cost;
5319 /* If X is a machine specific address (i.e. a symbol or label being
5320 referenced as a displacement from the GOT implemented using an
5321 UNSPEC), then return the base term. Otherwise return X. */
5324 ix86_find_base_term (rtx x)
5326 rtx term;
5328 if (TARGET_64BIT)
5330 if (GET_CODE (x) != CONST)
5331 return x;
5332 term = XEXP (x, 0);
5333 if (GET_CODE (term) == PLUS
5334 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5335 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5336 term = XEXP (term, 0);
5337 if (GET_CODE (term) != UNSPEC
5338 || XINT (term, 1) != UNSPEC_GOTPCREL)
5339 return x;
5341 term = XVECEXP (term, 0, 0);
5343 if (GET_CODE (term) != SYMBOL_REF
5344 && GET_CODE (term) != LABEL_REF)
5345 return x;
5347 return term;
5350 term = ix86_delegitimize_address (x);
5352 if (GET_CODE (term) != SYMBOL_REF
5353 && GET_CODE (term) != LABEL_REF)
5354 return x;
5356 return term;
5359 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
5360 this is used to form addresses to local data when -fPIC is in
5361 use. */
5363 static bool
5364 darwin_local_data_pic (rtx disp)
5366 if (GET_CODE (disp) == MINUS)
5368 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5369 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5370 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5372 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5373 if (! strcmp (sym_name, "<pic base>"))
5374 return true;
5378 return false;
5381 /* Determine if a given RTX is a valid constant. We already know this
5382 satisfies CONSTANT_P. */
5384 bool
5385 legitimate_constant_p (rtx x)
5387 switch (GET_CODE (x))
5389 case CONST:
5390 x = XEXP (x, 0);
5392 if (GET_CODE (x) == PLUS)
5394 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5395 return false;
5396 x = XEXP (x, 0);
5399 if (TARGET_MACHO && darwin_local_data_pic (x))
5400 return true;
5402 /* Only some unspecs are valid as "constants". */
5403 if (GET_CODE (x) == UNSPEC)
5404 switch (XINT (x, 1))
5406 case UNSPEC_GOTOFF:
5407 return TARGET_64BIT;
5408 case UNSPEC_TPOFF:
5409 case UNSPEC_NTPOFF:
5410 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5411 case UNSPEC_DTPOFF:
5412 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5413 default:
5414 return false;
5417 /* We must have drilled down to a symbol. */
5418 if (!symbolic_operand (x, Pmode))
5419 return false;
5420 /* FALLTHRU */
5422 case SYMBOL_REF:
5423 /* TLS symbols are never valid. */
5424 if (tls_symbolic_operand (x, Pmode))
5425 return false;
5426 break;
5428 default:
5429 break;
5432 /* Otherwise we handle everything else in the move patterns. */
5433 return true;
5436 /* Determine if it's legal to put X into the constant pool. This
5437 is not possible for the address of thread-local symbols, which
5438 is checked above. */
5440 static bool
5441 ix86_cannot_force_const_mem (rtx x)
5443 return !legitimate_constant_p (x);
5446 /* Determine if a given RTX is a valid constant address. */
5448 bool
5449 constant_address_p (rtx x)
5451 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5454 /* Nonzero if the constant value X is a legitimate general operand
5455 when generating PIC code. It is given that flag_pic is on and
5456 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5458 bool
5459 legitimate_pic_operand_p (rtx x)
5461 rtx inner;
5463 switch (GET_CODE (x))
5465 case CONST:
5466 inner = XEXP (x, 0);
5467 if (GET_CODE (inner) == PLUS
5468 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
5469 inner = XEXP (inner, 0);
5471 /* Only some unspecs are valid as "constants". */
5472 if (GET_CODE (inner) == UNSPEC)
5473 switch (XINT (inner, 1))
5475 case UNSPEC_GOTOFF:
5476 return TARGET_64BIT;
5477 case UNSPEC_TPOFF:
5478 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5479 default:
5480 return false;
5482 /* FALLTHRU */
5484 case SYMBOL_REF:
5485 case LABEL_REF:
5486 return legitimate_pic_address_disp_p (x);
5488 default:
5489 return true;
5493 /* Determine if a given CONST RTX is a valid memory displacement
5494 in PIC mode. */
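/* For example (illustrative): in 32bit PIC code a displacement of the
   form (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)) is accepted
   for a local symbol "foo", while a bare SYMBOL_REF is rejected.  */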
5497 legitimate_pic_address_disp_p (rtx disp)
5499 bool saw_plus;
5501 /* In 64bit mode we can allow direct addresses of symbols and labels
5502 when they are not dynamic symbols. */
5503 if (TARGET_64BIT)
5505 /* TLS references should always be enclosed in UNSPEC. */
5506 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5507 return 0;
5508 if (GET_CODE (disp) == SYMBOL_REF
5509 && !SYMBOL_REF_FAR_ADDR_P (disp)
5510 && SYMBOL_REF_LOCAL_P (disp))
5511 return 1;
5512 if (GET_CODE (disp) == LABEL_REF)
5513 return 1;
5514 if (GET_CODE (disp) == CONST
5515 && GET_CODE (XEXP (disp, 0)) == PLUS)
5517 rtx op0 = XEXP (XEXP (disp, 0), 0);
5518 rtx op1 = XEXP (XEXP (disp, 0), 1);
5520 /* TLS references should always be enclosed in UNSPEC. */
5521 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5522 return 0;
5523 if (((GET_CODE (op0) == SYMBOL_REF
5524 && !SYMBOL_REF_FAR_ADDR_P (op0)
5525 && SYMBOL_REF_LOCAL_P (op0))
5526 || GET_CODE (op0) == LABEL_REF)
5527 && GET_CODE (op1) == CONST_INT
5528 && INTVAL (op1) < 16*1024*1024
5529 && INTVAL (op1) >= -16*1024*1024)
5530 return 1;
5533 if (GET_CODE (disp) != CONST)
5534 return 0;
5535 disp = XEXP (disp, 0);
5537 if (TARGET_64BIT)
5539 /* It is not safe to allow PLUS expressions here; refusing them limits the
5540 allowed distance of GOT table references. We should not need these anyway. */
5541 if (GET_CODE (disp) != UNSPEC
5542 || (XINT (disp, 1) != UNSPEC_GOTPCREL
5543 && XINT (disp, 1) != UNSPEC_GOTOFF))
5544 return 0;
5546 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5547 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5548 return 0;
5549 return 1;
5552 saw_plus = false;
5553 if (GET_CODE (disp) == PLUS)
5555 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5556 return 0;
5557 disp = XEXP (disp, 0);
5558 saw_plus = true;
5561 if (TARGET_MACHO && darwin_local_data_pic (disp))
5562 return 1;
5564 if (GET_CODE (disp) != UNSPEC)
5565 return 0;
5567 switch (XINT (disp, 1))
5569 case UNSPEC_GOT:
5570 if (saw_plus)
5571 return false;
5572 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5573 case UNSPEC_GOTOFF:
5574 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
5575 While the ABI also specifies a 32bit relocation, we don't produce it in
5576 the small PIC model at all. */
5577 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5578 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5579 && !TARGET_64BIT)
5580 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5581 return false;
5582 case UNSPEC_GOTTPOFF:
5583 case UNSPEC_GOTNTPOFF:
5584 case UNSPEC_INDNTPOFF:
5585 if (saw_plus)
5586 return false;
5587 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5588 case UNSPEC_NTPOFF:
5589 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5590 case UNSPEC_DTPOFF:
5591 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5594 return 0;
5597 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5598 memory address for an instruction. The MODE argument is the machine mode
5599 for the MEM expression that wants to use this address.
5601 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5602 convert common non-canonical forms to canonical form so that they will
5603 be recognized. */
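/* For illustration (an assumed example), a canonical address has the
   shape base + index*scale + disp, e.g.
   (plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 8)),
   which ix86_decompose_address splits into its parts below.  */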
5606 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5608 struct ix86_address parts;
5609 rtx base, index, disp;
5610 HOST_WIDE_INT scale;
5611 const char *reason = NULL;
5612 rtx reason_rtx = NULL_RTX;
5614 if (TARGET_DEBUG_ADDR)
5616 fprintf (stderr,
5617 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5618 GET_MODE_NAME (mode), strict);
5619 debug_rtx (addr);
5622 if (ix86_decompose_address (addr, &parts) <= 0)
5624 reason = "decomposition failed";
5625 goto report_error;
5628 base = parts.base;
5629 index = parts.index;
5630 disp = parts.disp;
5631 scale = parts.scale;
5633 /* Validate base register.
5635 Don't allow SUBREGs that span more than a word here. They can lead to spill
5636 failures when the base is one word out of a two word structure, which is
5637 represented internally as a DImode int. */
5639 if (base)
5641 rtx reg;
5642 reason_rtx = base;
5644 if (REG_P (base))
5645 reg = base;
5646 else if (GET_CODE (base) == SUBREG
5647 && REG_P (SUBREG_REG (base))
5648 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5649 <= UNITS_PER_WORD)
5650 reg = SUBREG_REG (base);
5651 else
5653 reason = "base is not a register";
5654 goto report_error;
5657 if (GET_MODE (base) != Pmode)
5659 reason = "base is not in Pmode";
5660 goto report_error;
5663 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5664 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5666 reason = "base is not valid";
5667 goto report_error;
5671 /* Validate index register.
5673 Don't allow SUBREGs that span more than a word here -- same as above. */
5675 if (index)
5677 rtx reg;
5678 reason_rtx = index;
5680 if (REG_P (index))
5681 reg = index;
5682 else if (GET_CODE (index) == SUBREG
5683 && REG_P (SUBREG_REG (index))
5684 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5685 <= UNITS_PER_WORD)
5686 reg = SUBREG_REG (index);
5687 else
5689 reason = "index is not a register";
5690 goto report_error;
5693 if (GET_MODE (index) != Pmode)
5695 reason = "index is not in Pmode";
5696 goto report_error;
5699 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5700 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5702 reason = "index is not valid";
5703 goto report_error;
5707 /* Validate scale factor. */
5708 if (scale != 1)
5710 reason_rtx = GEN_INT (scale);
5711 if (!index)
5713 reason = "scale without index";
5714 goto report_error;
5717 if (scale != 2 && scale != 4 && scale != 8)
5719 reason = "scale is not a valid multiplier";
5720 goto report_error;
5724 /* Validate displacement. */
5725 if (disp)
5727 reason_rtx = disp;
5729 if (GET_CODE (disp) == CONST
5730 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5731 switch (XINT (XEXP (disp, 0), 1))
5733 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
5734 used. While the ABI also specifies 32bit relocations, we don't produce
5735 them at all and use IP-relative addressing instead. */
5736 case UNSPEC_GOT:
5737 case UNSPEC_GOTOFF:
5738 gcc_assert (flag_pic);
5739 if (!TARGET_64BIT)
5740 goto is_legitimate_pic;
5741 reason = "64bit address unspec";
5742 goto report_error;
5744 case UNSPEC_GOTPCREL:
5745 gcc_assert (flag_pic);
5746 goto is_legitimate_pic;
5748 case UNSPEC_GOTTPOFF:
5749 case UNSPEC_GOTNTPOFF:
5750 case UNSPEC_INDNTPOFF:
5751 case UNSPEC_NTPOFF:
5752 case UNSPEC_DTPOFF:
5753 break;
5755 default:
5756 reason = "invalid address unspec";
5757 goto report_error;
5760 else if (flag_pic && (SYMBOLIC_CONST (disp)
5761 #if TARGET_MACHO
5762 && !machopic_operand_p (disp)
5763 #endif
5766 is_legitimate_pic:
5767 if (TARGET_64BIT && (index || base))
5769 /* foo@dtpoff(%rX) is ok. */
5770 if (GET_CODE (disp) != CONST
5771 || GET_CODE (XEXP (disp, 0)) != PLUS
5772 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5773 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5774 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5775 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5777 reason = "non-constant pic memory reference";
5778 goto report_error;
5781 else if (! legitimate_pic_address_disp_p (disp))
5783 reason = "displacement is an invalid pic construct";
5784 goto report_error;
5787 /* This code used to verify that a symbolic pic displacement
5788 includes the pic_offset_table_rtx register.
5790 While this is a good idea, unfortunately these constructs may
5791 be created by the "adds using lea" optimization for incorrect
5792 code like:
5794 int a;
5795 int foo(int i)
5797 { return *(&a+i); }
5800 This code is nonsensical, but results in addressing the
5801 GOT table with a pic_offset_table_rtx base. We can't
5802 just refuse it easily, since it gets matched by the
5803 "addsi3" pattern, which later gets split to lea when the
5804 output register differs from the input. While this
5805 could be handled by a separate addsi pattern for this case
5806 that never results in lea, disabling this test seems to be
5807 the easier and correct fix for the crash. */
5809 else if (GET_CODE (disp) != LABEL_REF
5810 && GET_CODE (disp) != CONST_INT
5811 && (GET_CODE (disp) != CONST
5812 || !legitimate_constant_p (disp))
5813 && (GET_CODE (disp) != SYMBOL_REF
5814 || !legitimate_constant_p (disp)))
5816 reason = "displacement is not constant";
5817 goto report_error;
5819 else if (TARGET_64BIT
5820 && !x86_64_immediate_operand (disp, VOIDmode))
5822 reason = "displacement is out of range";
5823 goto report_error;
5827 /* Everything looks valid. */
5828 if (TARGET_DEBUG_ADDR)
5829 fprintf (stderr, "Success.\n");
5830 return TRUE;
5832 report_error:
5833 if (TARGET_DEBUG_ADDR)
5835 fprintf (stderr, "Error: %s\n", reason);
5836 debug_rtx (reason_rtx);
5838 return FALSE;
5841 /* Return a unique alias set for the GOT. */
5843 static HOST_WIDE_INT
5844 ix86_GOT_alias_set (void)
5846 static HOST_WIDE_INT set = -1;
5847 if (set == -1)
5848 set = new_alias_set ();
5849 return set;
5852 /* Return a legitimate reference for ORIG (an address) using the
5853 register REG. If REG is 0, a new pseudo is generated.
5855 There are two types of references that must be handled:
5857 1. Global data references must load the address from the GOT, via
5858 the PIC reg. An insn is emitted to do this load, and the reg is
5859 returned.
5861 2. Static data references, constant pool addresses, and code labels
5862 compute the address as an offset from the GOT, whose base is in
5863 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5864 differentiate them from global data objects. The returned
5865 address is the PIC reg + an unspec constant.
5867 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5868 reg also appears in the address. */
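/* For illustration (a sketch of the typical 32bit results): a global
   symbol "foo" becomes the load
   (mem (plus pic_offset_table_rtx
              (const (unspec [(symbol_ref "foo")] UNSPEC_GOT)))),
   while a local symbol becomes the address
   (plus pic_offset_table_rtx
         (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))).  */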
5870 static rtx
5871 legitimize_pic_address (rtx orig, rtx reg)
5873 rtx addr = orig;
5874 rtx new = orig;
5875 rtx base;
5877 #if TARGET_MACHO
5878 if (reg == 0)
5879 reg = gen_reg_rtx (Pmode);
5880 /* Use the generic Mach-O PIC machinery. */
5881 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5882 #endif
5884 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5885 new = addr;
5886 else if (TARGET_64BIT
5887 && ix86_cmodel != CM_SMALL_PIC
5888 && local_symbolic_operand (addr, Pmode))
5890 rtx tmpreg;
5891 /* This symbol may be referenced via a displacement from the PIC
5892 base address (@GOTOFF). */
5894 if (reload_in_progress)
5895 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5896 if (GET_CODE (addr) == CONST)
5897 addr = XEXP (addr, 0);
5898 if (GET_CODE (addr) == PLUS)
5900 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5901 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5903 else
5904 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5905 new = gen_rtx_CONST (Pmode, new);
5906 if (!reg)
5907 tmpreg = gen_reg_rtx (Pmode);
5908 else
5909 tmpreg = reg;
5910 emit_move_insn (tmpreg, new);
5912 if (reg != 0)
5914 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
5915 tmpreg, 1, OPTAB_DIRECT);
5916 new = reg;
5918 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
5920 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5922 /* This symbol may be referenced via a displacement from the PIC
5923 base address (@GOTOFF). */
5925 if (reload_in_progress)
5926 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5927 if (GET_CODE (addr) == CONST)
5928 addr = XEXP (addr, 0);
5929 if (GET_CODE (addr) == PLUS)
5931 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5932 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5934 else
5935 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5936 new = gen_rtx_CONST (Pmode, new);
5937 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5939 if (reg != 0)
5941 emit_move_insn (reg, new);
5942 new = reg;
5945 else if (GET_CODE (addr) == SYMBOL_REF)
5947 if (TARGET_64BIT)
5949 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5950 new = gen_rtx_CONST (Pmode, new);
5951 new = gen_const_mem (Pmode, new);
5952 set_mem_alias_set (new, ix86_GOT_alias_set ());
5954 if (reg == 0)
5955 reg = gen_reg_rtx (Pmode);
5956 /* Use gen_movsi directly; otherwise the address is loaded
5957 into a register for CSE. We don't want to CSE these addresses;
5958 instead we CSE addresses from the GOT table, so skip this. */
5959 emit_insn (gen_movsi (reg, new));
5960 new = reg;
5962 else
5964 /* This symbol must be referenced via a load from the
5965 Global Offset Table (@GOT). */
5967 if (reload_in_progress)
5968 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5969 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5970 new = gen_rtx_CONST (Pmode, new);
5971 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5972 new = gen_const_mem (Pmode, new);
5973 set_mem_alias_set (new, ix86_GOT_alias_set ());
5975 if (reg == 0)
5976 reg = gen_reg_rtx (Pmode);
5977 emit_move_insn (reg, new);
5978 new = reg;
5981 else
5983 if (GET_CODE (addr) == CONST)
5985 addr = XEXP (addr, 0);
5987 /* We must match stuff we generate before. Assume the only
5988 unspecs that can get here are ours. Not that we could do
5989 anything with them anyway.... */
5990 if (GET_CODE (addr) == UNSPEC
5991 || (GET_CODE (addr) == PLUS
5992 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5993 return orig;
5994 gcc_assert (GET_CODE (addr) == PLUS);
5996 if (GET_CODE (addr) == PLUS)
5998 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6000 /* Check first to see if this is a constant offset from a @GOTOFF
6001 symbol reference. */
6002 if (local_symbolic_operand (op0, Pmode)
6003 && GET_CODE (op1) == CONST_INT)
6005 if (!TARGET_64BIT)
6007 if (reload_in_progress)
6008 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6009 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6010 UNSPEC_GOTOFF);
6011 new = gen_rtx_PLUS (Pmode, new, op1);
6012 new = gen_rtx_CONST (Pmode, new);
6013 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6015 if (reg != 0)
6017 emit_move_insn (reg, new);
6018 new = reg;
6021 else
6023 if (INTVAL (op1) < -16*1024*1024
6024 || INTVAL (op1) >= 16*1024*1024)
6025 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6028 else
6030 base = legitimize_pic_address (XEXP (addr, 0), reg);
6031 new = legitimize_pic_address (XEXP (addr, 1),
6032 base == reg ? NULL_RTX : reg);
6034 if (GET_CODE (new) == CONST_INT)
6035 new = plus_constant (base, INTVAL (new));
6036 else
6038 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6040 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6041 new = XEXP (new, 1);
6043 new = gen_rtx_PLUS (Pmode, base, new);
6048 return new;
6051 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6053 static rtx
6054 get_thread_pointer (int to_reg)
6056 rtx tp, reg, insn;
6058 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6059 if (!to_reg)
6060 return tp;
6062 reg = gen_reg_rtx (Pmode);
6063 insn = gen_rtx_SET (VOIDmode, reg, tp);
6064 insn = emit_insn (insn);
6066 return reg;
6069 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6070 false if we expect this to be used for a memory address and true if
6071 we expect to load the address into a register. */
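/* For illustration (an assumed sketch): with GNU TLS, the local-exec
   model below produces an address of the form
   (plus <thread pointer> (const (unspec [(symbol_ref "x")] UNSPEC_NTPOFF))),
   i.e. a fixed offset from the thread pointer.  */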
6073 static rtx
6074 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6076 rtx dest, base, off, pic;
6077 int type;
6079 switch (model)
6081 case TLS_MODEL_GLOBAL_DYNAMIC:
6082 dest = gen_reg_rtx (Pmode);
6083 if (TARGET_64BIT)
6085 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6087 start_sequence ();
6088 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
6089 insns = get_insns ();
6090 end_sequence ();
6092 emit_libcall_block (insns, dest, rax, x);
6094 else
6095 emit_insn (gen_tls_global_dynamic_32 (dest, x));
6096 break;
6098 case TLS_MODEL_LOCAL_DYNAMIC:
6099 base = gen_reg_rtx (Pmode);
6100 if (TARGET_64BIT)
6102 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
6104 start_sequence ();
6105 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
6106 insns = get_insns ();
6107 end_sequence ();
6109 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
6110 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
6111 emit_libcall_block (insns, base, rax, note);
6113 else
6114 emit_insn (gen_tls_local_dynamic_base_32 (base));
6116 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
6117 off = gen_rtx_CONST (Pmode, off);
6119 return gen_rtx_PLUS (Pmode, base, off);
6121 case TLS_MODEL_INITIAL_EXEC:
6122 if (TARGET_64BIT)
6124 pic = NULL;
6125 type = UNSPEC_GOTNTPOFF;
6127 else if (flag_pic)
6129 if (reload_in_progress)
6130 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6131 pic = pic_offset_table_rtx;
6132 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
6134 else if (!TARGET_GNU_TLS)
6136 pic = gen_reg_rtx (Pmode);
6137 emit_insn (gen_set_got (pic));
6138 type = UNSPEC_GOTTPOFF;
6140 else
6142 pic = NULL;
6143 type = UNSPEC_INDNTPOFF;
6146 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
6147 off = gen_rtx_CONST (Pmode, off);
6148 if (pic)
6149 off = gen_rtx_PLUS (Pmode, pic, off);
6150 off = gen_const_mem (Pmode, off);
6151 set_mem_alias_set (off, ix86_GOT_alias_set ());
6153 if (TARGET_64BIT || TARGET_GNU_TLS)
6155 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6156 off = force_reg (Pmode, off);
6157 return gen_rtx_PLUS (Pmode, base, off);
6159 else
6161 base = get_thread_pointer (true);
6162 dest = gen_reg_rtx (Pmode);
6163 emit_insn (gen_subsi3 (dest, base, off));
6165 break;
6167 case TLS_MODEL_LOCAL_EXEC:
6168 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
6169 (TARGET_64BIT || TARGET_GNU_TLS)
6170 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
6171 off = gen_rtx_CONST (Pmode, off);
6173 if (TARGET_64BIT || TARGET_GNU_TLS)
6175 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6176 return gen_rtx_PLUS (Pmode, base, off);
6178 else
6180 base = get_thread_pointer (true);
6181 dest = gen_reg_rtx (Pmode);
6182 emit_insn (gen_subsi3 (dest, base, off));
6184 break;
6186 default:
6187 gcc_unreachable ();
6190 return dest;
6193 /* Try machine-dependent ways of modifying an illegitimate address
6194 to be legitimate. If we find one, return the new, valid address.
6195 This macro is used in only one place: `memory_address' in explow.c.
6197 OLDX is the address as it was before break_out_memory_refs was called.
6198 In some cases it is useful to look at this to decide what needs to be done.
6200 MODE and WIN are passed so that this macro can use
6201 GO_IF_LEGITIMATE_ADDRESS.
6203 It is always safe for this macro to do nothing. It exists to recognize
6204 opportunities to optimize the output.
6206 For the 80386, we handle X+REG by loading X into a register R and
6207 using R+REG. R will go in a general reg and indexing will be used.
6208 However, if REG is a broken-out memory address or multiplication,
6209 nothing needs to be done because REG can certainly go in a general reg.
6211 When -fpic is used, special handling is needed for symbolic references.
6212 See comments by legitimize_pic_address in i386.c for details. */
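/* For illustration (a sketch, not an exhaustive list): given
   (plus (symbol_ref "x") (reg)) in non-PIC code, the symbolic part is
   forced into a fresh register R below and (plus R (reg)) is returned;
   PIC code goes through legitimize_pic_address instead.  */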
6215 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
6217 int changed = 0;
6218 unsigned log;
6220 if (TARGET_DEBUG_ADDR)
6222 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
6223 GET_MODE_NAME (mode));
6224 debug_rtx (x);
6227 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
6228 if (log)
6229 return legitimize_tls_address (x, log, false);
6230 if (GET_CODE (x) == CONST
6231 && GET_CODE (XEXP (x, 0)) == PLUS
6232 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6233 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
6235 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
6236 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
6239 if (flag_pic && SYMBOLIC_CONST (x))
6240 return legitimize_pic_address (x, 0);
6242 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
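/* For example (illustrative), (ashift (reg) (const_int 3)) is rewritten
   as (mult (reg) (const_int 8)), which later decomposes into an index
   with scale 8.  */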
6243 if (GET_CODE (x) == ASHIFT
6244 && GET_CODE (XEXP (x, 1)) == CONST_INT
6245 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
6247 changed = 1;
6248 log = INTVAL (XEXP (x, 1));
6249 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
6250 GEN_INT (1 << log));
6253 if (GET_CODE (x) == PLUS)
6255 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
6257 if (GET_CODE (XEXP (x, 0)) == ASHIFT
6258 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6259 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
6261 changed = 1;
6262 log = INTVAL (XEXP (XEXP (x, 0), 1));
6263 XEXP (x, 0) = gen_rtx_MULT (Pmode,
6264 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
6265 GEN_INT (1 << log));
6268 if (GET_CODE (XEXP (x, 1)) == ASHIFT
6269 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
6270 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
6272 changed = 1;
6273 log = INTVAL (XEXP (XEXP (x, 1), 1));
6274 XEXP (x, 1) = gen_rtx_MULT (Pmode,
6275 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
6276 GEN_INT (1 << log));
6279 /* Put multiply first if it isn't already. */
6280 if (GET_CODE (XEXP (x, 1)) == MULT)
6282 rtx tmp = XEXP (x, 0);
6283 XEXP (x, 0) = XEXP (x, 1);
6284 XEXP (x, 1) = tmp;
6285 changed = 1;
6288 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
6289 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
6290 created by virtual register instantiation, register elimination, and
6291 similar optimizations. */
6292 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
6294 changed = 1;
6295 x = gen_rtx_PLUS (Pmode,
6296 gen_rtx_PLUS (Pmode, XEXP (x, 0),
6297 XEXP (XEXP (x, 1), 0)),
6298 XEXP (XEXP (x, 1), 1));
6301 /* Canonicalize
6302 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
6303 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
6304 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
6305 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6306 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
6307 && CONSTANT_P (XEXP (x, 1)))
6309 rtx constant;
6310 rtx other = NULL_RTX;
6312 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6314 constant = XEXP (x, 1);
6315 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6317 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6319 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6320 other = XEXP (x, 1);
6322 else
6323 constant = 0;
6325 if (constant)
6327 changed = 1;
6328 x = gen_rtx_PLUS (Pmode,
6329 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6330 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6331 plus_constant (other, INTVAL (constant)));
6335 if (changed && legitimate_address_p (mode, x, FALSE))
6336 return x;
6338 if (GET_CODE (XEXP (x, 0)) == MULT)
6340 changed = 1;
6341 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6344 if (GET_CODE (XEXP (x, 1)) == MULT)
6346 changed = 1;
6347 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6350 if (changed
6351 && GET_CODE (XEXP (x, 1)) == REG
6352 && GET_CODE (XEXP (x, 0)) == REG)
6353 return x;
6355 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6357 changed = 1;
6358 x = legitimize_pic_address (x, 0);
6361 if (changed && legitimate_address_p (mode, x, FALSE))
6362 return x;
6364 if (GET_CODE (XEXP (x, 0)) == REG)
6366 rtx temp = gen_reg_rtx (Pmode);
6367 rtx val = force_operand (XEXP (x, 1), temp);
6368 if (val != temp)
6369 emit_move_insn (temp, val);
6371 XEXP (x, 1) = temp;
6372 return x;
6375 else if (GET_CODE (XEXP (x, 1)) == REG)
6377 rtx temp = gen_reg_rtx (Pmode);
6378 rtx val = force_operand (XEXP (x, 0), temp);
6379 if (val != temp)
6380 emit_move_insn (temp, val);
6382 XEXP (x, 0) = temp;
6383 return x;
6387 return x;
6390 /* Print an integer constant expression in assembler syntax. Addition
6391 and subtraction are the only arithmetic that may appear in these
6392 expressions. FILE is the stdio stream to write to, X is the rtx, and
6393 CODE is the operand print code from the output string. */
6395 static void
6396 output_pic_addr_const (FILE *file, rtx x, int code)
6398 char buf[256];
6400 switch (GET_CODE (x))
6402 case PC:
6403 gcc_assert (flag_pic);
6404 putc ('.', file);
6405 break;
6407 case SYMBOL_REF:
6408 assemble_name (file, XSTR (x, 0));
6409 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6410 fputs ("@PLT", file);
6411 break;
6413 case LABEL_REF:
6414 x = XEXP (x, 0);
6415 /* FALLTHRU */
6416 case CODE_LABEL:
6417 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6418 assemble_name (asm_out_file, buf);
6419 break;
6421 case CONST_INT:
6422 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6423 break;
6425 case CONST:
6426 /* This used to output parentheses around the expression,
6427 but that does not work on the 386 (either ATT or BSD assembler). */
6428 output_pic_addr_const (file, XEXP (x, 0), code);
6429 break;
6431 case CONST_DOUBLE:
6432 if (GET_MODE (x) == VOIDmode)
6434 /* We can use %d if the number is <32 bits and positive. */
6435 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6436 fprintf (file, "0x%lx%08lx",
6437 (unsigned long) CONST_DOUBLE_HIGH (x),
6438 (unsigned long) CONST_DOUBLE_LOW (x));
6439 else
6440 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6442 else
6443 /* We can't handle floating point constants;
6444 PRINT_OPERAND must handle them. */
6445 output_operand_lossage ("floating constant misused");
6446 break;
6448 case PLUS:
6449 /* Some assemblers need integer constants to appear first. */
6450 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6452 output_pic_addr_const (file, XEXP (x, 0), code);
6453 putc ('+', file);
6454 output_pic_addr_const (file, XEXP (x, 1), code);
6456 else
6458 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6459 output_pic_addr_const (file, XEXP (x, 1), code);
6460 putc ('+', file);
6461 output_pic_addr_const (file, XEXP (x, 0), code);
6463 break;
6465 case MINUS:
6466 if (!TARGET_MACHO)
6467 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6468 output_pic_addr_const (file, XEXP (x, 0), code);
6469 putc ('-', file);
6470 output_pic_addr_const (file, XEXP (x, 1), code);
6471 if (!TARGET_MACHO)
6472 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6473 break;
6475 case UNSPEC:
6476 gcc_assert (XVECLEN (x, 0) == 1);
6477 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6478 switch (XINT (x, 1))
6480 case UNSPEC_GOT:
6481 fputs ("@GOT", file);
6482 break;
6483 case UNSPEC_GOTOFF:
6484 fputs ("@GOTOFF", file);
6485 break;
6486 case UNSPEC_GOTPCREL:
6487 fputs ("@GOTPCREL(%rip)", file);
6488 break;
6489 case UNSPEC_GOTTPOFF:
6490 /* FIXME: This might be @TPOFF in Sun ld too. */
6491 fputs ("@GOTTPOFF", file);
6492 break;
6493 case UNSPEC_TPOFF:
6494 fputs ("@TPOFF", file);
6495 break;
6496 case UNSPEC_NTPOFF:
6497 if (TARGET_64BIT)
6498 fputs ("@TPOFF", file);
6499 else
6500 fputs ("@NTPOFF", file);
6501 break;
6502 case UNSPEC_DTPOFF:
6503 fputs ("@DTPOFF", file);
6504 break;
6505 case UNSPEC_GOTNTPOFF:
6506 if (TARGET_64BIT)
6507 fputs ("@GOTTPOFF(%rip)", file);
6508 else
6509 fputs ("@GOTNTPOFF", file);
6510 break;
6511 case UNSPEC_INDNTPOFF:
6512 fputs ("@INDNTPOFF", file);
6513 break;
6514 default:
6515 output_operand_lossage ("invalid UNSPEC as operand");
6516 break;
6518 break;
6520 default:
6521 output_operand_lossage ("invalid expression as operand");
6525 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6526 We need to emit DTP-relative relocations. */
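/* For illustration (assuming ASM_LONG expands to ".long"), a 4-byte
   request for symbol "foo" emits ".long foo@DTPOFF" and an 8-byte
   request emits ".long foo@DTPOFF, 0".  */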
6528 static void
6529 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6531 fputs (ASM_LONG, file);
6532 output_addr_const (file, x);
6533 fputs ("@DTPOFF", file);
6534 switch (size)
6536 case 4:
6537 break;
6538 case 8:
6539 fputs (", 0", file);
6540 break;
6541 default:
6542 gcc_unreachable ();
6546 /* In the name of slightly smaller debug output, and to cater to
6547 general assembler lossage, recognize PIC+GOTOFF and turn it back
6548 into a direct symbol reference. */
6550 static rtx
6551 ix86_delegitimize_address (rtx orig_x)
6553 rtx x = orig_x, y;
6555 if (GET_CODE (x) == MEM)
6556 x = XEXP (x, 0);
6558 if (TARGET_64BIT)
6560 if (GET_CODE (x) != CONST
6561 || GET_CODE (XEXP (x, 0)) != UNSPEC
6562 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6563 || GET_CODE (orig_x) != MEM)
6564 return orig_x;
6565 return XVECEXP (XEXP (x, 0), 0, 0);
6568 if (GET_CODE (x) != PLUS
6569 || GET_CODE (XEXP (x, 1)) != CONST)
6570 return orig_x;
6572 if (GET_CODE (XEXP (x, 0)) == REG
6573 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6574 /* %ebx + GOT/GOTOFF */
6575 y = NULL;
6576 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6578 /* %ebx + %reg * scale + GOT/GOTOFF */
6579 y = XEXP (x, 0);
6580 if (GET_CODE (XEXP (y, 0)) == REG
6581 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6582 y = XEXP (y, 1);
6583 else if (GET_CODE (XEXP (y, 1)) == REG
6584 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6585 y = XEXP (y, 0);
6586 else
6587 return orig_x;
6588 if (GET_CODE (y) != REG
6589 && GET_CODE (y) != MULT
6590 && GET_CODE (y) != ASHIFT)
6591 return orig_x;
6593 else
6594 return orig_x;
6596 x = XEXP (XEXP (x, 1), 0);
6597 if (GET_CODE (x) == UNSPEC
6598 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6599 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6601 if (y)
6602 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6603 return XVECEXP (x, 0, 0);
6606 if (GET_CODE (x) == PLUS
6607 && GET_CODE (XEXP (x, 0)) == UNSPEC
6608 && GET_CODE (XEXP (x, 1)) == CONST_INT
6609 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6610 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6611 && GET_CODE (orig_x) != MEM)))
6613 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6614 if (y)
6615 return gen_rtx_PLUS (Pmode, y, x);
6616 return x;
6619 return orig_x;
6622 static void
6623 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6624 int fp, FILE *file)
6626 const char *suffix;
6628 if (mode == CCFPmode || mode == CCFPUmode)
6630 enum rtx_code second_code, bypass_code;
6631 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6632 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
6633 code = ix86_fp_compare_code_to_integer (code);
6634 mode = CCmode;
6636 if (reverse)
6637 code = reverse_condition (code);
6639 switch (code)
6641 case EQ:
6642 suffix = "e";
6643 break;
6644 case NE:
6645 suffix = "ne";
6646 break;
6647 case GT:
6648 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
6649 suffix = "g";
6650 break;
6651 case GTU:
6652 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6653 Those same assemblers have the same but opposite lossage on cmov. */
6654 gcc_assert (mode == CCmode);
6655 suffix = fp ? "nbe" : "a";
6656 break;
6657 case LT:
6658 switch (mode)
6660 case CCNOmode:
6661 case CCGOCmode:
6662 suffix = "s";
6663 break;
6665 case CCmode:
6666 case CCGCmode:
6667 suffix = "l";
6668 break;
6670 default:
6671 gcc_unreachable ();
6673 break;
6674 case LTU:
6675 gcc_assert (mode == CCmode);
6676 suffix = "b";
6677 break;
6678 case GE:
6679 switch (mode)
6681 case CCNOmode:
6682 case CCGOCmode:
6683 suffix = "ns";
6684 break;
6686 case CCmode:
6687 case CCGCmode:
6688 suffix = "ge";
6689 break;
6691 default:
6692 gcc_unreachable ();
6694 break;
6695 case GEU:
6696 /* ??? As above. */
6697 gcc_assert (mode == CCmode);
6698 suffix = fp ? "nb" : "ae";
6699 break;
6700 case LE:
6701 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
6702 suffix = "le";
6703 break;
6704 case LEU:
6705 gcc_assert (mode == CCmode);
6706 suffix = "be";
6707 break;
6708 case UNORDERED:
6709 suffix = fp ? "u" : "p";
6710 break;
6711 case ORDERED:
6712 suffix = fp ? "nu" : "np";
6713 break;
6714 default:
6715 gcc_unreachable ();
6717 fputs (suffix, file);
6720 /* Print the name of register X to FILE based on its machine mode and number.
6721 If CODE is 'w', pretend the mode is HImode.
6722 If CODE is 'b', pretend the mode is QImode.
6723 If CODE is 'k', pretend the mode is SImode.
6724 If CODE is 'q', pretend the mode is DImode.
6725 If CODE is 'h', pretend the reg is the 'high' byte register.
6726 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack op. */
6728 void
6729 print_reg (rtx x, int code, FILE *file)
6731 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
6732 && REGNO (x) != FRAME_POINTER_REGNUM
6733 && REGNO (x) != FLAGS_REG
6734 && REGNO (x) != FPSR_REG);
6736 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6737 putc ('%', file);
6739 if (code == 'w' || MMX_REG_P (x))
6740 code = 2;
6741 else if (code == 'b')
6742 code = 1;
6743 else if (code == 'k')
6744 code = 4;
6745 else if (code == 'q')
6746 code = 8;
6747 else if (code == 'y')
6748 code = 3;
6749 else if (code == 'h')
6750 code = 0;
6751 else
6752 code = GET_MODE_SIZE (GET_MODE (x));
6754 /* Irritatingly, the AMD extended registers use a different naming
6755 convention from the normal registers. */
6756 if (REX_INT_REG_P (x))
6758 gcc_assert (TARGET_64BIT);
6759 switch (code)
6761 case 0:
6762 error ("extended registers have no high halves");
6763 break;
6764 case 1:
6765 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6766 break;
6767 case 2:
6768 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6769 break;
6770 case 4:
6771 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6772 break;
6773 case 8:
6774 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6775 break;
6776 default:
6777 error ("unsupported operand size for extended register");
6778 break;
6780 return;
6782 switch (code)
6784 case 3:
6785 if (STACK_TOP_P (x))
6787 fputs ("st(0)", file);
6788 break;
6790 /* FALLTHRU */
6791 case 8:
6792 case 4:
6793 case 12:
6794 if (! ANY_FP_REG_P (x))
6795 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6796 /* FALLTHRU */
6797 case 16:
6798 case 2:
6799 normal:
6800 fputs (hi_reg_name[REGNO (x)], file);
6801 break;
6802 case 1:
6803 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6804 goto normal;
6805 fputs (qi_reg_name[REGNO (x)], file);
6806 break;
6807 case 0:
6808 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6809 goto normal;
6810 fputs (qi_high_reg_name[REGNO (x)], file);
6811 break;
6812 default:
6813 gcc_unreachable ();
6817 /* Locate some local-dynamic symbol still in use by this function
6818 so that we can print its name in some tls_local_dynamic_base
6819 pattern. */
6821 static const char *
6822 get_some_local_dynamic_name (void)
6824 rtx insn;
6826 if (cfun->machine->some_ld_name)
6827 return cfun->machine->some_ld_name;
6829 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6830 if (INSN_P (insn)
6831 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6832 return cfun->machine->some_ld_name;
6834 gcc_unreachable ();
6837 static int
6838 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6840 rtx x = *px;
6842 if (GET_CODE (x) == SYMBOL_REF
6843 && local_dynamic_symbolic_operand (x, Pmode))
6845 cfun->machine->some_ld_name = XSTR (x, 0);
6846 return 1;
6849 return 0;
6852 /* Meaning of CODE:
6853 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6854 C -- print opcode suffix for set/cmov insn.
6855 c -- like C, but print reversed condition
6856 F,f -- likewise, but for floating-point.
6857 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6858 otherwise nothing
6859 R -- print the prefix for register names.
6860 z -- print the opcode suffix for the size of the current operand.
6861 * -- print a star (in certain assembler syntax)
6862 A -- print an absolute memory reference.
6863 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6864 s -- print a shift double count, followed by the assembler's argument
6865 delimiter.
6866 b -- print the QImode name of the register for the indicated operand.
6867 %b0 would print %al if operands[0] is reg 0.
6868 w -- likewise, print the HImode name of the register.
6869 k -- likewise, print the SImode name of the register.
6870 q -- likewise, print the DImode name of the register.
6871 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6872 y -- print "st(0)" instead of "st" as a register.
6873 D -- print condition for SSE cmp instruction.
6874 P -- if PIC, print an @PLT suffix.
6875 X -- don't print any sort of PIC '@' suffix for a symbol.
6876 & -- print some in-use local-dynamic symbol name.
6877 H -- print a memory address offset by 8; used for sse high-parts
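/* For example (illustrative): if operands[0] is register %rax, "%k0"
   prints "%eax", "%w0" prints "%ax", "%b0" prints "%al" and "%h0"
   prints "%ah".  */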
6880 void
6881 print_operand (FILE *file, rtx x, int code)
6883 if (code)
6885 switch (code)
6887 case '*':
6888 if (ASSEMBLER_DIALECT == ASM_ATT)
6889 putc ('*', file);
6890 return;
6892 case '&':
6893 assemble_name (file, get_some_local_dynamic_name ());
6894 return;
6896 case 'A':
6897 switch (ASSEMBLER_DIALECT)
6899 case ASM_ATT:
6900 putc ('*', file);
6901 break;
6903 case ASM_INTEL:
6904 /* Intel syntax. For absolute addresses, registers should not
6905 be surrounded by brackets. */
6906 if (GET_CODE (x) != REG)
6908 putc ('[', file);
6909 PRINT_OPERAND (file, x, 0);
6910 putc (']', file);
6911 return;
6913 break;
6915 default:
6916 gcc_unreachable ();
6919 PRINT_OPERAND (file, x, 0);
6920 return;
6923 case 'L':
6924 if (ASSEMBLER_DIALECT == ASM_ATT)
6925 putc ('l', file);
6926 return;
6928 case 'W':
6929 if (ASSEMBLER_DIALECT == ASM_ATT)
6930 putc ('w', file);
6931 return;
6933 case 'B':
6934 if (ASSEMBLER_DIALECT == ASM_ATT)
6935 putc ('b', file);
6936 return;
6938 case 'Q':
6939 if (ASSEMBLER_DIALECT == ASM_ATT)
6940 putc ('l', file);
6941 return;
6943 case 'S':
6944 if (ASSEMBLER_DIALECT == ASM_ATT)
6945 putc ('s', file);
6946 return;
6948 case 'T':
6949 if (ASSEMBLER_DIALECT == ASM_ATT)
6950 putc ('t', file);
6951 return;
6953 case 'z':
6954 /* 387 opcodes don't get size suffixes if the operands are
6955 registers. */
6956 if (STACK_REG_P (x))
6957 return;
6959 /* Likewise if using Intel opcodes. */
6960 if (ASSEMBLER_DIALECT == ASM_INTEL)
6961 return;
6963 /* Derive the opcode suffix from the size of the operand. */
6964 switch (GET_MODE_SIZE (GET_MODE (x)))
6966 case 2:
6967 #ifdef HAVE_GAS_FILDS_FISTS
6968 putc ('s', file);
6969 #endif
6970 return;
6972 case 4:
6973 if (GET_MODE (x) == SFmode)
6975 putc ('s', file);
6976 return;
6978 else
6979 putc ('l', file);
6980 return;
6982 case 12:
6983 case 16:
6984 putc ('t', file);
6985 return;
6987 case 8:
6988 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6990 #ifdef GAS_MNEMONICS
6991 putc ('q', file);
6992 #else
6993 putc ('l', file);
6994 putc ('l', file);
6995 #endif
6997 else
6998 putc ('l', file);
6999 return;
7001 default:
7002 gcc_unreachable ();
7005 case 'b':
7006 case 'w':
7007 case 'k':
7008 case 'q':
7009 case 'h':
7010 case 'y':
7011 case 'X':
7012 case 'P':
7013 break;
7015 case 's':
7016 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7018 PRINT_OPERAND (file, x, 0);
7019 putc (',', file);
7021 return;
7023 case 'D':
7024 /* A little bit of brain damage here. The SSE compare instructions
7025 use completely different names for the comparisons than the
7026 fp conditional moves do. */
7027 switch (GET_CODE (x))
7029 case EQ:
7030 case UNEQ:
7031 fputs ("eq", file);
7032 break;
7033 case LT:
7034 case UNLT:
7035 fputs ("lt", file);
7036 break;
7037 case LE:
7038 case UNLE:
7039 fputs ("le", file);
7040 break;
7041 case UNORDERED:
7042 fputs ("unord", file);
7043 break;
7044 case NE:
7045 case LTGT:
7046 fputs ("neq", file);
7047 break;
7048 case UNGE:
7049 case GE:
7050 fputs ("nlt", file);
7051 break;
7052 case UNGT:
7053 case GT:
7054 fputs ("nle", file);
7055 break;
7056 case ORDERED:
7057 fputs ("ord", file);
7058 break;
7059 default:
7060 gcc_unreachable ();
7062 return;
7063 case 'O':
7064 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7065 if (ASSEMBLER_DIALECT == ASM_ATT)
7067 switch (GET_MODE (x))
7069 case HImode: putc ('w', file); break;
7070 case SImode:
7071 case SFmode: putc ('l', file); break;
7072 case DImode:
7073 case DFmode: putc ('q', file); break;
7074 default: gcc_unreachable ();
7076 putc ('.', file);
7078 #endif
7079 return;
7080 case 'C':
7081 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
7082 return;
7083 case 'F':
7084 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7085 if (ASSEMBLER_DIALECT == ASM_ATT)
7086 putc ('.', file);
7087 #endif
7088 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
7089 return;
7091 /* Like above, but reverse condition */
7092 case 'c':
7093 /* Check to see if argument to %c is really a constant
7094 and not a condition code which needs to be reversed. */
7095 if (!COMPARISON_P (x))
7097 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
7098 return;
7100 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
7101 return;
7102 case 'f':
7103 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7104 if (ASSEMBLER_DIALECT == ASM_ATT)
7105 putc ('.', file);
7106 #endif
7107 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
7108 return;
7110 case 'H':
7111 /* It doesn't actually matter what mode we use here, as we're
7112 only going to use this for printing. */
7113 x = adjust_address_nv (x, DImode, 8);
7114 break;
7116 case '+':
7118 rtx x;
7120 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
7121 return;
7123 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
7124 if (x)
7126 int pred_val = INTVAL (XEXP (x, 0));
7128 if (pred_val < REG_BR_PROB_BASE * 45 / 100
7129 || pred_val > REG_BR_PROB_BASE * 55 / 100)
7131 int taken = pred_val > REG_BR_PROB_BASE / 2;
7132 int cputaken = final_forward_branch_p (current_output_insn) == 0;
7134 /* Emit hints only when the default branch prediction
7135 heuristics would fail. */
7136 if (taken != cputaken)
7138 /* We use 3e (DS) prefix for taken branches and
7139 2e (CS) prefix for not taken branches. */
7140 if (taken)
7141 fputs ("ds ; ", file);
7142 else
7143 fputs ("cs ; ", file);
7147 return;
7149 default:
7150 output_operand_lossage ("invalid operand code '%c'", code);
7154 if (GET_CODE (x) == REG)
7155 print_reg (x, code, file);
7157 else if (GET_CODE (x) == MEM)
7159 /* No `byte ptr' prefix for call instructions. */
7160 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
7162 const char * size;
7163 switch (GET_MODE_SIZE (GET_MODE (x)))
7165 case 1: size = "BYTE"; break;
7166 case 2: size = "WORD"; break;
7167 case 4: size = "DWORD"; break;
7168 case 8: size = "QWORD"; break;
7169 case 12: size = "XWORD"; break;
7170 case 16: size = "XMMWORD"; break;
7171 default:
7172 gcc_unreachable ();
7175 /* Check for explicit size override (codes 'b', 'w' and 'k') */
7176 if (code == 'b')
7177 size = "BYTE";
7178 else if (code == 'w')
7179 size = "WORD";
7180 else if (code == 'k')
7181 size = "DWORD";
7183 fputs (size, file);
7184 fputs (" PTR ", file);
7187 x = XEXP (x, 0);
7188 /* Avoid (%rip) for call operands. */
7189 if (CONSTANT_ADDRESS_P (x) && code == 'P'
7190 && GET_CODE (x) != CONST_INT)
7191 output_addr_const (file, x);
7192 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
7193 output_operand_lossage ("invalid constraints for operand");
7194 else
7195 output_address (x);
7198 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
7200 REAL_VALUE_TYPE r;
7201 long l;
7203 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7204 REAL_VALUE_TO_TARGET_SINGLE (r, l);
7206 if (ASSEMBLER_DIALECT == ASM_ATT)
7207 putc ('$', file);
7208 fprintf (file, "0x%08lx", l);
7211 /* These float cases don't actually occur as immediate operands. */
7212 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
7214 char dstr[30];
7216 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7217 fprintf (file, "%s", dstr);
7220 else if (GET_CODE (x) == CONST_DOUBLE
7221 && GET_MODE (x) == XFmode)
7223 char dstr[30];
7225 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7226 fprintf (file, "%s", dstr);
7229 else
7231 /* We have patterns that allow zero sets of memory, for instance.
7232 In 64-bit mode, we should probably support all 8-byte vectors,
7233 since we can in fact encode that into an immediate. */
7234 if (GET_CODE (x) == CONST_VECTOR)
7236 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
7237 x = const0_rtx;
7240 if (code != 'P')
7242 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
7244 if (ASSEMBLER_DIALECT == ASM_ATT)
7245 putc ('$', file);
7247 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
7248 || GET_CODE (x) == LABEL_REF)
7250 if (ASSEMBLER_DIALECT == ASM_ATT)
7251 putc ('$', file);
7252 else
7253 fputs ("OFFSET FLAT:", file);
7256 if (GET_CODE (x) == CONST_INT)
7257 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7258 else if (flag_pic)
7259 output_pic_addr_const (file, x, code);
7260 else
7261 output_addr_const (file, x);
7265 /* Print a memory operand whose address is ADDR. */
7267 void
7268 print_operand_address (FILE *file, rtx addr)
7270 struct ix86_address parts;
7271 rtx base, index, disp;
7272 int scale;
7273 int ok = ix86_decompose_address (addr, &parts);
7275 gcc_assert (ok);
7277 base = parts.base;
7278 index = parts.index;
7279 disp = parts.disp;
7280 scale = parts.scale;
7282 switch (parts.seg)
7284 case SEG_DEFAULT:
7285 break;
7286 case SEG_FS:
7287 case SEG_GS:
7288 if (USER_LABEL_PREFIX[0] == 0)
7289 putc ('%', file);
7290 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
7291 break;
7292 default:
7293 gcc_unreachable ();
7296 if (!base && !index)
7298 /* A displacement-only address requires special attention. */
7300 if (GET_CODE (disp) == CONST_INT)
7302 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
7304 if (USER_LABEL_PREFIX[0] == 0)
7305 putc ('%', file);
7306 fputs ("ds:", file);
7308 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
7310 else if (flag_pic)
7311 output_pic_addr_const (file, disp, 0);
7312 else
7313 output_addr_const (file, disp);
7315 /* Use the one byte shorter RIP-relative addressing for 64bit mode. */
7316 if (TARGET_64BIT
7317 && ((GET_CODE (disp) == SYMBOL_REF
7318 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
7319 || GET_CODE (disp) == LABEL_REF
7320 || (GET_CODE (disp) == CONST
7321 && GET_CODE (XEXP (disp, 0)) == PLUS
7322 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
7323 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
7324 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
7325 fputs ("(%rip)", file);
7327 else
7329 if (ASSEMBLER_DIALECT == ASM_ATT)
7331 if (disp)
7333 if (flag_pic)
7334 output_pic_addr_const (file, disp, 0);
7335 else if (GET_CODE (disp) == LABEL_REF)
7336 output_asm_label (disp);
7337 else
7338 output_addr_const (file, disp);
7341 putc ('(', file);
7342 if (base)
7343 print_reg (base, 0, file);
7344 if (index)
7346 putc (',', file);
7347 print_reg (index, 0, file);
7348 if (scale != 1)
7349 fprintf (file, ",%d", scale);
7351 putc (')', file);
7353 else
7355 rtx offset = NULL_RTX;
7357 if (disp)
7359 /* Pull out the offset of a symbol; print any symbol itself. */
7360 if (GET_CODE (disp) == CONST
7361 && GET_CODE (XEXP (disp, 0)) == PLUS
7362 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7364 offset = XEXP (XEXP (disp, 0), 1);
7365 disp = gen_rtx_CONST (VOIDmode,
7366 XEXP (XEXP (disp, 0), 0));
7369 if (flag_pic)
7370 output_pic_addr_const (file, disp, 0);
7371 else if (GET_CODE (disp) == LABEL_REF)
7372 output_asm_label (disp);
7373 else if (GET_CODE (disp) == CONST_INT)
7374 offset = disp;
7375 else
7376 output_addr_const (file, disp);
7379 putc ('[', file);
7380 if (base)
7382 print_reg (base, 0, file);
7383 if (offset)
7385 if (INTVAL (offset) >= 0)
7386 putc ('+', file);
7387 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7390 else if (offset)
7391 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7392 else
7393 putc ('0', file);
7395 if (index)
7397 putc ('+', file);
7398 print_reg (index, 0, file);
7399 if (scale != 1)
7400 fprintf (file, "*%d", scale);
7402 putc (']', file);
7407 bool
7408 output_addr_const_extra (FILE *file, rtx x)
7410 rtx op;
7412 if (GET_CODE (x) != UNSPEC)
7413 return false;
7415 op = XVECEXP (x, 0, 0);
7416 switch (XINT (x, 1))
7418 case UNSPEC_GOTTPOFF:
7419 output_addr_const (file, op);
7420 /* FIXME: This might be @TPOFF in Sun ld. */
7421 fputs ("@GOTTPOFF", file);
7422 break;
7423 case UNSPEC_TPOFF:
7424 output_addr_const (file, op);
7425 fputs ("@TPOFF", file);
7426 break;
7427 case UNSPEC_NTPOFF:
7428 output_addr_const (file, op);
7429 if (TARGET_64BIT)
7430 fputs ("@TPOFF", file);
7431 else
7432 fputs ("@NTPOFF", file);
7433 break;
7434 case UNSPEC_DTPOFF:
7435 output_addr_const (file, op);
7436 fputs ("@DTPOFF", file);
7437 break;
7438 case UNSPEC_GOTNTPOFF:
7439 output_addr_const (file, op);
7440 if (TARGET_64BIT)
7441 fputs ("@GOTTPOFF(%rip)", file);
7442 else
7443 fputs ("@GOTNTPOFF", file);
7444 break;
7445 case UNSPEC_INDNTPOFF:
7446 output_addr_const (file, op);
7447 fputs ("@INDNTPOFF", file);
7448 break;
7450 default:
7451 return false;
7454 return true;
7457 /* Split one or more DImode RTL references into pairs of SImode
7458 references. The RTL can be REG, offsettable MEM, integer constant, or
7459 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7460 split and "num" is its length. lo_half and hi_half are output arrays
7461 that parallel "operands". */
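/* For illustration (a sketch of what the loop below does): a DImode
   register operand is split with simplify_gen_subreg at byte offsets 0
   and 4 into its SImode lo and hi halves, while an offsettable MEM is
   split with adjust_address at the same offsets.  */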
7463 void
7464 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7466 while (num--)
7468 rtx op = operands[num];
7470 /* simplify_subreg refuses to split volatile memory references,
7471 but we still have to handle them. */
7472 if (GET_CODE (op) == MEM)
7474 lo_half[num] = adjust_address (op, SImode, 0);
7475 hi_half[num] = adjust_address (op, SImode, 4);
7477 else
7479 lo_half[num] = simplify_gen_subreg (SImode, op,
7480 GET_MODE (op) == VOIDmode
7481 ? DImode : GET_MODE (op), 0);
7482 hi_half[num] = simplify_gen_subreg (SImode, op,
7483 GET_MODE (op) == VOIDmode
7484 ? DImode : GET_MODE (op), 4);
7488 /* Split one or more TImode RTL references into pairs of DImode
7489 references. The RTL can be REG, offsettable MEM, integer constant, or
7490 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
7491 split and "num" is its length. lo_half and hi_half are output arrays
7492 that parallel "operands". */
7494 void
7495 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7497 while (num--)
7499 rtx op = operands[num];
7501 /* simplify_subreg refuses to split volatile memory references, but we
7502 still have to handle them. */
7503 if (GET_CODE (op) == MEM)
7505 lo_half[num] = adjust_address (op, DImode, 0);
7506 hi_half[num] = adjust_address (op, DImode, 8);
7508 else
7510 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7511 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
7516 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7517 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7518 is the expression of the binary operation. The output may either be
7519 emitted here, or returned to the caller, like all output_* functions.
7521 There is no guarantee that the operands are the same mode, as they
7522 might be within FLOAT or FLOAT_EXTEND expressions. */
7524 #ifndef SYSV386_COMPAT
7525 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7526 wants to fix the assemblers because that causes incompatibility
7527 with gcc. No-one wants to fix gcc because that causes
7528 incompatibility with assemblers... You can use the option of
7529 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7530 #define SYSV386_COMPAT 1
7531 #endif
7533 const char *
7534 output_387_binary_op (rtx insn, rtx *operands)
7536 static char buf[30];
7537 const char *p;
7538 const char *ssep;
7539 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
7541 #ifdef ENABLE_CHECKING
7542 /* Even if we do not want to check the inputs, this documents the input
7543 constraints, which helps in understanding the following code. */
7544 if (STACK_REG_P (operands[0])
7545 && ((REG_P (operands[1])
7546 && REGNO (operands[0]) == REGNO (operands[1])
7547 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7548 || (REG_P (operands[2])
7549 && REGNO (operands[0]) == REGNO (operands[2])
7550 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7551 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7552 ; /* ok */
7553 else
7554 gcc_assert (is_sse);
7555 #endif
7557 switch (GET_CODE (operands[3]))
7559 case PLUS:
7560 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7561 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7562 p = "fiadd";
7563 else
7564 p = "fadd";
7565 ssep = "add";
7566 break;
7568 case MINUS:
7569 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7570 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7571 p = "fisub";
7572 else
7573 p = "fsub";
7574 ssep = "sub";
7575 break;
7577 case MULT:
7578 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7579 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7580 p = "fimul";
7581 else
7582 p = "fmul";
7583 ssep = "mul";
7584 break;
7586 case DIV:
7587 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7588 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7589 p = "fidiv";
7590 else
7591 p = "fdiv";
7592 ssep = "div";
7593 break;
7595 default:
7596 gcc_unreachable ();
7599 if (is_sse)
7601 strcpy (buf, ssep);
7602 if (GET_MODE (operands[0]) == SFmode)
7603 strcat (buf, "ss\t{%2, %0|%0, %2}");
7604 else
7605 strcat (buf, "sd\t{%2, %0|%0, %2}");
7606 return buf;
7608 strcpy (buf, p);
7610 switch (GET_CODE (operands[3]))
7612 case MULT:
7613 case PLUS:
7614 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7616 rtx temp = operands[2];
7617 operands[2] = operands[1];
7618 operands[1] = temp;
7621 /* We know operands[0] == operands[1]. */
7623 if (GET_CODE (operands[2]) == MEM)
7625 p = "%z2\t%2";
7626 break;
7629 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7631 if (STACK_TOP_P (operands[0]))
7632 /* How is it that we are storing to a dead operand[2]?
7633 Well, presumably operands[1] is dead too. We can't
7634 store the result to st(0) as st(0) gets popped on this
7635 instruction. Instead store to operands[2] (which I
7636 think has to be st(1)). st(1) will be popped later.
7637 gcc <= 2.8.1 didn't have this check and generated
7638 assembly code that the Unixware assembler rejected. */
7639 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7640 else
7641 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7642 break;
7645 if (STACK_TOP_P (operands[0]))
7646 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7647 else
7648 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7649 break;
7651 case MINUS:
7652 case DIV:
7653 if (GET_CODE (operands[1]) == MEM)
7655 p = "r%z1\t%1";
7656 break;
7659 if (GET_CODE (operands[2]) == MEM)
7661 p = "%z2\t%2";
7662 break;
7665 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7667 #if SYSV386_COMPAT
7668 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7669 derived assemblers, confusingly reverse the direction of
7670 the operation for fsub{r} and fdiv{r} when the
7671 destination register is not st(0). The Intel assembler
7672 doesn't have this brain damage. Read !SYSV386_COMPAT to
7673 figure out what the hardware really does. */
7674 if (STACK_TOP_P (operands[0]))
7675 p = "{p\t%0, %2|rp\t%2, %0}";
7676 else
7677 p = "{rp\t%2, %0|p\t%0, %2}";
7678 #else
7679 if (STACK_TOP_P (operands[0]))
7680 /* As above for fmul/fadd, we can't store to st(0). */
7681 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7682 else
7683 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7684 #endif
7685 break;
7688 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7690 #if SYSV386_COMPAT
7691 if (STACK_TOP_P (operands[0]))
7692 p = "{rp\t%0, %1|p\t%1, %0}";
7693 else
7694 p = "{p\t%1, %0|rp\t%0, %1}";
7695 #else
7696 if (STACK_TOP_P (operands[0]))
7697 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7698 else
7699 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7700 #endif
7701 break;
7704 if (STACK_TOP_P (operands[0]))
7706 if (STACK_TOP_P (operands[1]))
7707 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7708 else
7709 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7710 break;
7712 else if (STACK_TOP_P (operands[1]))
7714 #if SYSV386_COMPAT
7715 p = "{\t%1, %0|r\t%0, %1}";
7716 #else
7717 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7718 #endif
7720 else
7722 #if SYSV386_COMPAT
7723 p = "{r\t%2, %0|\t%0, %2}";
7724 #else
7725 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7726 #endif
7728 break;
7730 default:
7731 gcc_unreachable ();
7734 strcat (buf, p);
7735 return buf;
7738 /* Return needed mode for entity in optimize_mode_switching pass. */
7741 ix86_mode_needed (int entity, rtx insn)
7743 enum attr_i387_cw mode;
7745 /* The mode UNINITIALIZED is used to store the control word after a
7746    function call or an ASM pattern.  The mode ANY specifies that the function
7747    has no requirements on the control word and makes no changes to the
7748    bits we are interested in.  */
7750 if (CALL_P (insn)
7751 || (NONJUMP_INSN_P (insn)
7752 && (asm_noperands (PATTERN (insn)) >= 0
7753 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
7754 return I387_CW_UNINITIALIZED;
7756 if (recog_memoized (insn) < 0)
7757 return I387_CW_ANY;
7759 mode = get_attr_i387_cw (insn);
7761 switch (entity)
7763 case I387_TRUNC:
7764 if (mode == I387_CW_TRUNC)
7765 return mode;
7766 break;
7768 case I387_FLOOR:
7769 if (mode == I387_CW_FLOOR)
7770 return mode;
7771 break;
7773 case I387_CEIL:
7774 if (mode == I387_CW_CEIL)
7775 return mode;
7776 break;
7778 case I387_MASK_PM:
7779 if (mode == I387_CW_MASK_PM)
7780 return mode;
7781 break;
7783 default:
7784 gcc_unreachable ();
7787 return I387_CW_ANY;
7790 /* Output code to initialize the control word copies used by the trunc?f?i
7791    and rounding patterns.  MODE selects the required variant; the current
7792    control word is saved and a modified copy is stored in a stack slot.  */
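/* For reference (assuming the standard x87 control-word layout): the
   rounding-control field occupies bits 10-11, which is why the code below
   masks with 0x0c00 -- 0x0000 rounds to nearest, 0x0400 rounds down
   (floor), 0x0800 rounds up (ceil) and 0x0c00 truncates; bit 5 (0x0020)
   masks the precision exception, as used for nearbyint().  */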
7794 void
7795 emit_i387_cw_initialization (int mode)
7797 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
7798 rtx new_mode;
7800 int slot;
7802 rtx reg = gen_reg_rtx (HImode);
7804 emit_insn (gen_x86_fnstcw_1 (stored_mode));
7805 emit_move_insn (reg, stored_mode);
7807 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
7809 switch (mode)
7811 case I387_CW_TRUNC:
7812 /* round toward zero (truncate) */
7813 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7814 slot = SLOT_CW_TRUNC;
7815 break;
7817 case I387_CW_FLOOR:
7818 /* round down toward -oo */
7819 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7820 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7821 slot = SLOT_CW_FLOOR;
7822 break;
7824 case I387_CW_CEIL:
7825 /* round up toward +oo */
7826 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7827 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7828 slot = SLOT_CW_CEIL;
7829 break;
7831 case I387_CW_MASK_PM:
7832 /* mask precision exception for nearbyint() */
7833 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7834 slot = SLOT_CW_MASK_PM;
7835 break;
7837 default:
7838 gcc_unreachable ();
7841 else
7843 switch (mode)
7845 case I387_CW_TRUNC:
7846 /* round toward zero (truncate) */
7847 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7848 slot = SLOT_CW_TRUNC;
7849 break;
7851 case I387_CW_FLOOR:
7852 /* round down toward -oo */
7853 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7854 slot = SLOT_CW_FLOOR;
7855 break;
7857 case I387_CW_CEIL:
7858 /* round up toward +oo */
7859 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7860 slot = SLOT_CW_CEIL;
7861 break;
7863 case I387_CW_MASK_PM:
7864 /* mask precision exception for nearbyint() */
7865 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7866 slot = SLOT_CW_MASK_PM;
7867 break;
7869 default:
7870 gcc_unreachable ();
7874 gcc_assert (slot < MAX_386_STACK_LOCALS);
7876 new_mode = assign_386_stack_local (HImode, slot);
7877 emit_move_insn (new_mode, reg);
7880 /* Output code for INSN to convert a float to a signed int. OPERANDS
7881 are the insn operands. The output may be [HSD]Imode and the input
7882 operand may be [SDX]Fmode. */
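/* Sketch of the emitted sequence (illustrative): when a specific rounding
   mode is required in the non-fisttp case, the special control word in
   operand 3 is installed with fldcw, the value is stored with fist/fistp,
   and the original control word in operand 2 is then restored with a
   second fldcw.  */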
7884 const char *
7885 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
7887 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7888 int dimode_p = GET_MODE (operands[0]) == DImode;
7889 int round_mode = get_attr_i387_cw (insn);
7891 /* Jump through a hoop or two for DImode, since the hardware has no
7892 non-popping instruction. We used to do this a different way, but
7893 that was somewhat fragile and broke with post-reload splitters. */
7894 if ((dimode_p || fisttp) && !stack_top_dies)
7895 output_asm_insn ("fld\t%y1", operands);
7897 gcc_assert (STACK_TOP_P (operands[1]));
7898 gcc_assert (GET_CODE (operands[0]) == MEM);
7900 if (fisttp)
7901 output_asm_insn ("fisttp%z0\t%0", operands);
7902 else
7904 if (round_mode != I387_CW_ANY)
7905 output_asm_insn ("fldcw\t%3", operands);
7906 if (stack_top_dies || dimode_p)
7907 output_asm_insn ("fistp%z0\t%0", operands);
7908 else
7909 output_asm_insn ("fist%z0\t%0", operands);
7910 if (round_mode != I387_CW_ANY)
7911 output_asm_insn ("fldcw\t%2", operands);
7914 return "";
7917 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7918 should be used. UNORDERED_P is true when fucom should be used. */
7920 const char *
7921 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7923 int stack_top_dies;
7924 rtx cmp_op0, cmp_op1;
7925 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
7927 if (eflags_p)
7929 cmp_op0 = operands[0];
7930 cmp_op1 = operands[1];
7932 else
7934 cmp_op0 = operands[1];
7935 cmp_op1 = operands[2];
7938 if (is_sse)
7940 if (GET_MODE (operands[0]) == SFmode)
7941 if (unordered_p)
7942 return "ucomiss\t{%1, %0|%0, %1}";
7943 else
7944 return "comiss\t{%1, %0|%0, %1}";
7945 else
7946 if (unordered_p)
7947 return "ucomisd\t{%1, %0|%0, %1}";
7948 else
7949 return "comisd\t{%1, %0|%0, %1}";
7952 gcc_assert (STACK_TOP_P (cmp_op0));
7954 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7956 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7958 if (stack_top_dies)
7960 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7961 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7963 else
7964 return "ftst\n\tfnstsw\t%0";
7967 if (STACK_REG_P (cmp_op1)
7968 && stack_top_dies
7969 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7970 && REGNO (cmp_op1) != FIRST_STACK_REG)
7972 /* If both the top of the 387 stack and the other operand (also a
7973    stack register) die, then this must be a
7974    `fcompp' float compare.  */
7976 if (eflags_p)
7978 /* There is no double popping fcomi variant. Fortunately,
7979 eflags is immune from the fstp's cc clobbering. */
7980 if (unordered_p)
7981 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7982 else
7983 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7984 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7986 else
7988 if (unordered_p)
7989 return "fucompp\n\tfnstsw\t%0";
7990 else
7991 return "fcompp\n\tfnstsw\t%0";
7994 else
7996 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
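/* Worked example (illustrative): eflags_p = 1, an FP cmp_op1 (so the
   MODE_INT bit is 0), unordered_p = 1 and a dying stack top give
   mask = 8 + 0 + 2 + 1 = 11, selecting the "fucomip" template below.  */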
7998 static const char * const alt[16] =
8000 "fcom%z2\t%y2\n\tfnstsw\t%0",
8001 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8002 "fucom%z2\t%y2\n\tfnstsw\t%0",
8003 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8005 "ficom%z2\t%y2\n\tfnstsw\t%0",
8006 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8007 NULL,
8008 NULL,
8010 "fcomi\t{%y1, %0|%0, %y1}",
8011 "fcomip\t{%y1, %0|%0, %y1}",
8012 "fucomi\t{%y1, %0|%0, %y1}",
8013 "fucomip\t{%y1, %0|%0, %y1}",
8015 NULL,
8016 NULL,
8017 NULL,
8018 NULL
8021 int mask;
8022 const char *ret;
8024 mask = eflags_p << 3;
8025 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
8026 mask |= unordered_p << 1;
8027 mask |= stack_top_dies;
8029 gcc_assert (mask < 16);
8030 ret = alt[mask];
8031 gcc_assert (ret);
8033 return ret;
8037 void
8038 ix86_output_addr_vec_elt (FILE *file, int value)
8040 const char *directive = ASM_LONG;
8042 #ifdef ASM_QUAD
8043 if (TARGET_64BIT)
8044 directive = ASM_QUAD;
8045 #else
8046 gcc_assert (!TARGET_64BIT);
8047 #endif
8049 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
8052 void
8053 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
8055 if (TARGET_64BIT)
8056 fprintf (file, "%s%s%d-%s%d\n",
8057 ASM_LONG, LPREFIX, value, LPREFIX, rel);
8058 else if (HAVE_AS_GOTOFF_IN_DATA)
8059 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
8060 #if TARGET_MACHO
8061 else if (TARGET_MACHO)
8063 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
8064 machopic_output_function_base_name (file);
8065 fprintf(file, "\n");
8067 #endif
8068 else
8069 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
8070 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
8073 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
8074 for the target. */
8076 void
8077 ix86_expand_clear (rtx dest)
8079 rtx tmp;
8081 /* We play register width games, which are only valid after reload. */
8082 gcc_assert (reload_completed);
8084 /* Avoid HImode and its attendant prefix byte. */
8085 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
8086 dest = gen_rtx_REG (SImode, REGNO (dest));
8088 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
8090 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
8091 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
8093 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
8094 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
8097 emit_insn (tmp);
8100 /* X is an unchanging MEM. If it is a constant pool reference, return
8101 the constant pool rtx, else NULL. */
8104 maybe_get_pool_constant (rtx x)
8106 x = ix86_delegitimize_address (XEXP (x, 0));
8108 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
8109 return get_pool_constant (x);
8111 return NULL_RTX;
8114 void
8115 ix86_expand_move (enum machine_mode mode, rtx operands[])
8117 int strict = (reload_in_progress || reload_completed);
8118 rtx op0, op1;
8119 enum tls_model model;
8121 op0 = operands[0];
8122 op1 = operands[1];
8124 if (GET_CODE (op1) == SYMBOL_REF)
8126 model = SYMBOL_REF_TLS_MODEL (op1);
8127 if (model)
8129 op1 = legitimize_tls_address (op1, model, true);
8130 op1 = force_operand (op1, op0);
8131 if (op1 == op0)
8132 return;
8135 else if (GET_CODE (op1) == CONST
8136 && GET_CODE (XEXP (op1, 0)) == PLUS
8137 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
8139 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
8140 if (model)
8142 rtx addend = XEXP (XEXP (op1, 0), 1);
8143 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
8144 op1 = force_operand (op1, NULL);
8145 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
8146 op0, 1, OPTAB_DIRECT);
8147 if (op1 == op0)
8148 return;
8152 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
8154 #if TARGET_MACHO
8155 if (MACHOPIC_PURE)
8157 rtx temp = ((reload_in_progress
8158 || ((op0 && GET_CODE (op0) == REG)
8159 && mode == Pmode))
8160 ? op0 : gen_reg_rtx (Pmode));
8161 op1 = machopic_indirect_data_reference (op1, temp);
8162 op1 = machopic_legitimize_pic_address (op1, mode,
8163 temp == op1 ? 0 : temp);
8165 else if (MACHOPIC_INDIRECT)
8166 op1 = machopic_indirect_data_reference (op1, 0);
8167 if (op0 == op1)
8168 return;
8169 #else
8170 if (GET_CODE (op0) == MEM)
8171 op1 = force_reg (Pmode, op1);
8172 else
8173 op1 = legitimize_address (op1, op1, Pmode);
8174 #endif /* TARGET_MACHO */
8176 else
8178 if (GET_CODE (op0) == MEM
8179 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
8180 || !push_operand (op0, mode))
8181 && GET_CODE (op1) == MEM)
8182 op1 = force_reg (mode, op1);
8184 if (push_operand (op0, mode)
8185 && ! general_no_elim_operand (op1, mode))
8186 op1 = copy_to_mode_reg (mode, op1);
8188 /* Force large constants in 64-bit compilation into a register
8189    so that they get CSEd.  */
8190 if (TARGET_64BIT && mode == DImode
8191 && immediate_operand (op1, mode)
8192 && !x86_64_zext_immediate_operand (op1, VOIDmode)
8193 && !register_operand (op0, mode)
8194 && optimize && !reload_completed && !reload_in_progress)
8195 op1 = copy_to_mode_reg (mode, op1);
8197 if (FLOAT_MODE_P (mode))
8199 /* If we are loading a floating point constant to a register,
8200    force the value to memory now, since we'll get better code
8201    out of the back end.  */
8203 if (strict)
8205 else if (GET_CODE (op1) == CONST_DOUBLE)
8207 op1 = validize_mem (force_const_mem (mode, op1));
8208 if (!register_operand (op0, mode))
8210 rtx temp = gen_reg_rtx (mode);
8211 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
8212 emit_move_insn (op0, temp);
8213 return;
8219 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8222 void
8223 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
8225 rtx op0 = operands[0], op1 = operands[1];
8227 /* Force constants other than zero into memory.  We do not know how
8228    the instructions used to build constants modify the upper 64 bits
8229    of the register; once we have that information, we may be able
8230    to handle some of them more efficiently.  */
8231 if ((reload_in_progress | reload_completed) == 0
8232 && register_operand (op0, mode)
8233 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
8234 op1 = validize_mem (force_const_mem (mode, op1));
8236 /* If neither operand is already a register, force operand 1 into one.  */
8237 if (!no_new_pseudos
8238 && !register_operand (op0, mode)
8239 && !register_operand (op1, mode))
8241 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
8242 return;
8245 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8248 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
8249 straight to ix86_expand_vector_move. */
8251 void
8252 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
8254 rtx op0, op1, m;
8256 op0 = operands[0];
8257 op1 = operands[1];
8259 if (MEM_P (op1))
8261 /* If we're optimizing for size, movups is the smallest. */
8262 if (optimize_size)
8264 op0 = gen_lowpart (V4SFmode, op0);
8265 op1 = gen_lowpart (V4SFmode, op1);
8266 emit_insn (gen_sse_movups (op0, op1));
8267 return;
8270 /* ??? If we have typed data, then it would appear that using
8271 movdqu is the only way to get unaligned data loaded with
8272 integer type. */
8273 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8275 op0 = gen_lowpart (V16QImode, op0);
8276 op1 = gen_lowpart (V16QImode, op1);
8277 emit_insn (gen_sse2_movdqu (op0, op1));
8278 return;
8281 if (TARGET_SSE2 && mode == V2DFmode)
8283 rtx zero;
8285 /* When SSE registers are split into halves, we can avoid
8286 writing to the top half twice. */
8287 if (TARGET_SSE_SPLIT_REGS)
8289 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8290 zero = op0;
8292 else
8294 /* ??? Not sure about the best option for the Intel chips.
8295 The following would seem to satisfy; the register is
8296 entirely cleared, breaking the dependency chain. We
8297 then store to the upper half, with a dependency depth
8298 of one. A rumor has it that Intel recommends two movsd
8299 followed by an unpacklpd, but this is unconfirmed. And
8300 given that the dependency depth of the unpacklpd would
8301 still be one, I'm not sure why this would be better. */
8302 zero = CONST0_RTX (V2DFmode);
8305 m = adjust_address (op1, DFmode, 0);
8306 emit_insn (gen_sse2_loadlpd (op0, zero, m));
8307 m = adjust_address (op1, DFmode, 8);
8308 emit_insn (gen_sse2_loadhpd (op0, op0, m));
8310 else
8312 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
8313 emit_move_insn (op0, CONST0_RTX (mode));
8314 else
8315 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8317 if (mode != V4SFmode)
8318 op0 = gen_lowpart (V4SFmode, op0);
8319 m = adjust_address (op1, V2SFmode, 0);
8320 emit_insn (gen_sse_loadlps (op0, op0, m));
8321 m = adjust_address (op1, V2SFmode, 8);
8322 emit_insn (gen_sse_loadhps (op0, op0, m));
8325 else if (MEM_P (op0))
8327 /* If we're optimizing for size, movups is the smallest. */
8328 if (optimize_size)
8330 op0 = gen_lowpart (V4SFmode, op0);
8331 op1 = gen_lowpart (V4SFmode, op1);
8332 emit_insn (gen_sse_movups (op0, op1));
8333 return;
8336 /* ??? Similar to the above, only less clear because of
8337    "typeless stores".  */
8338 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
8339 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8341 op0 = gen_lowpart (V16QImode, op0);
8342 op1 = gen_lowpart (V16QImode, op1);
8343 emit_insn (gen_sse2_movdqu (op0, op1));
8344 return;
8347 if (TARGET_SSE2 && mode == V2DFmode)
8349 m = adjust_address (op0, DFmode, 0);
8350 emit_insn (gen_sse2_storelpd (m, op1));
8351 m = adjust_address (op0, DFmode, 8);
8352 emit_insn (gen_sse2_storehpd (m, op1));
8354 else
8356 if (mode != V4SFmode)
8357 op1 = gen_lowpart (V4SFmode, op1);
8358 m = adjust_address (op0, V2SFmode, 0);
8359 emit_insn (gen_sse_storelps (m, op1));
8360 m = adjust_address (op0, V2SFmode, 8);
8361 emit_insn (gen_sse_storehps (m, op1));
8364 else
8365 gcc_unreachable ();
8368 /* Expand a push in MODE. This is some mode for which we do not support
8369 proper push instructions, at least from the registers that we expect
8370 the value to live in. */
8372 void
8373 ix86_expand_push (enum machine_mode mode, rtx x)
8375 rtx tmp;
8377 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
8378 GEN_INT (-GET_MODE_SIZE (mode)),
8379 stack_pointer_rtx, 1, OPTAB_DIRECT);
8380 if (tmp != stack_pointer_rtx)
8381 emit_move_insn (stack_pointer_rtx, tmp);
8383 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
8384 emit_move_insn (tmp, x);
8387 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
8388 destination to use for the operation. If different from the true
8389 destination in operands[0], a copy operation will be required. */
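/* An illustrative case: for "mem1 = mem2 + mem3", one memory source is
   forced into a register, and if the destination is a MEM that matches
   neither source, a fresh pseudo is returned as the destination so the
   caller can copy it back to operands[0] afterwards.  */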
8392 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
8393 rtx operands[])
8395 int matching_memory;
8396 rtx src1, src2, dst;
8398 dst = operands[0];
8399 src1 = operands[1];
8400 src2 = operands[2];
8402 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8403 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8404 && (rtx_equal_p (dst, src2)
8405 || immediate_operand (src1, mode)))
8407 rtx temp = src1;
8408 src1 = src2;
8409 src2 = temp;
8412 /* If the destination is memory, and we do not have matching source
8413 operands, do things in registers. */
8414 matching_memory = 0;
8415 if (GET_CODE (dst) == MEM)
8417 if (rtx_equal_p (dst, src1))
8418 matching_memory = 1;
8419 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8420 && rtx_equal_p (dst, src2))
8421 matching_memory = 2;
8422 else
8423 dst = gen_reg_rtx (mode);
8426 /* The two source operands cannot both be in memory.  */
8427 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8429 if (matching_memory != 2)
8430 src2 = force_reg (mode, src2);
8431 else
8432 src1 = force_reg (mode, src1);
8435 /* If the operation is not commutative, source 1 cannot be a constant
8436    or non-matching memory.  */
8437 if ((CONSTANT_P (src1)
8438 || (!matching_memory && GET_CODE (src1) == MEM))
8439 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8440 src1 = force_reg (mode, src1);
8442 src1 = operands[1] = src1;
8443 src2 = operands[2] = src2;
8444 return dst;
8447 /* Similarly, but assume that the destination has already been
8448 set up properly. */
8450 void
8451 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
8452 enum machine_mode mode, rtx operands[])
8454 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
8455 gcc_assert (dst == operands[0]);
8458 /* Attempt to expand a binary operator.  Make the expansion closer to the
8459    actual machine than just general_operand, which would allow 3 separate
8460    memory references (one output, two input) in a single insn.  */
8462 void
8463 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8464 rtx operands[])
8466 rtx src1, src2, dst, op, clob;
8468 dst = ix86_fixup_binary_operands (code, mode, operands);
8469 src1 = operands[1];
8470 src2 = operands[2];
8472 /* Emit the instruction. */
8474 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8475 if (reload_in_progress)
8477 /* Reload doesn't know about the flags register, and doesn't know that
8478 it doesn't want to clobber it. We can only do this with PLUS. */
8479 gcc_assert (code == PLUS);
8480 emit_insn (op);
8482 else
8484 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8485 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8488 /* Fix up the destination if needed. */
8489 if (dst != operands[0])
8490 emit_move_insn (operands[0], dst);
8493 /* Return TRUE or FALSE depending on whether the binary operator meets the
8494 appropriate constraints. */
8497 ix86_binary_operator_ok (enum rtx_code code,
8498 enum machine_mode mode ATTRIBUTE_UNUSED,
8499 rtx operands[3])
8501 /* The two source operands cannot both be in memory.  */
8502 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8503 return 0;
8504 /* If the operation is not commutative, source 1 cannot be a constant.  */
8505 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8506 return 0;
8507 /* If the destination is memory, we must have a matching source operand. */
8508 if (GET_CODE (operands[0]) == MEM
8509 && ! (rtx_equal_p (operands[0], operands[1])
8510 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8511 && rtx_equal_p (operands[0], operands[2]))))
8512 return 0;
8513 /* If the operation is not commutative and source 1 is memory, we must
8514    have a matching destination.  */
8515 if (GET_CODE (operands[1]) == MEM
8516 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
8517 && ! rtx_equal_p (operands[0], operands[1]))
8518 return 0;
8519 return 1;
8522 /* Attempt to expand a unary operator.  Make the expansion closer to the
8523    actual machine than just general_operand, which would allow 2 separate
8524    memory references (one output, one input) in a single insn.  */
8526 void
8527 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8528 rtx operands[])
8530 int matching_memory;
8531 rtx src, dst, op, clob;
8533 dst = operands[0];
8534 src = operands[1];
8536 /* If the destination is memory, and we do not have matching source
8537 operands, do things in registers. */
8538 matching_memory = 0;
8539 if (MEM_P (dst))
8541 if (rtx_equal_p (dst, src))
8542 matching_memory = 1;
8543 else
8544 dst = gen_reg_rtx (mode);
8547 /* When the source operand is memory, the destination must match.  */
8548 if (MEM_P (src) && !matching_memory)
8549 src = force_reg (mode, src);
8551 /* Emit the instruction. */
8553 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8554 if (reload_in_progress || code == NOT)
8556 /* Reload doesn't know about the flags register, and doesn't know that
8557 it doesn't want to clobber it. */
8558 gcc_assert (code == NOT);
8559 emit_insn (op);
8561 else
8563 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8564 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8567 /* Fix up the destination if needed. */
8568 if (dst != operands[0])
8569 emit_move_insn (operands[0], dst);
8572 /* Return TRUE or FALSE depending on whether the unary operator meets the
8573 appropriate constraints. */
8576 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8577 enum machine_mode mode ATTRIBUTE_UNUSED,
8578 rtx operands[2] ATTRIBUTE_UNUSED)
8580 /* If one of the operands is memory, source and destination must match.  */
8581 if ((GET_CODE (operands[0]) == MEM
8582 || GET_CODE (operands[1]) == MEM)
8583 && ! rtx_equal_p (operands[0], operands[1]))
8584 return FALSE;
8585 return TRUE;
8588 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
8589 Create a mask for the sign bit in MODE for an SSE register. If VECT is
8590 true, then replicate the mask for all elements of the vector register.
8591 If INVERT is true, then create a mask excluding the sign bit. */
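/* Concretely (for reference): the DFmode sign-bit mask is
   0x8000000000000000 and its inverse is 0x7fffffffffffffff; for SFmode
   the values are 0x80000000 and 0x7fffffff, replicated across all vector
   elements when VECT is true.  */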
8594 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
8596 enum machine_mode vec_mode;
8597 HOST_WIDE_INT hi, lo;
8598 int shift = 63;
8599 rtvec v;
8600 rtx mask;
8602 /* Find the sign bit, sign extended to 2*HWI. */
8603 if (mode == SFmode)
8604 lo = 0x80000000, hi = lo < 0;
8605 else if (HOST_BITS_PER_WIDE_INT >= 64)
8606 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8607 else
8608 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8610 if (invert)
8611 lo = ~lo, hi = ~hi;
8613 /* Force this value into the low part of a fp vector constant. */
8614 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8615 mask = gen_lowpart (mode, mask);
8617 if (mode == SFmode)
8619 if (vect)
8620 v = gen_rtvec (4, mask, mask, mask, mask);
8621 else
8622 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8623 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8624 vec_mode = V4SFmode;
8626 else
8628 if (vect)
8629 v = gen_rtvec (2, mask, mask);
8630 else
8631 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8632 vec_mode = V2DFmode;
8635 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
8638 /* Generate code for floating point ABS or NEG. */
8640 void
8641 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
8642 rtx operands[])
8644 rtx mask, set, use, clob, dst, src;
8645 bool matching_memory;
8646 bool use_sse = false;
8647 bool vector_mode = VECTOR_MODE_P (mode);
8648 enum machine_mode elt_mode = mode;
8650 if (vector_mode)
8652 elt_mode = GET_MODE_INNER (mode);
8653 use_sse = true;
8655 else if (TARGET_SSE_MATH)
8656 use_sse = SSE_FLOAT_MODE_P (mode);
8658 /* NEG and ABS performed with SSE use bitwise mask operations.
8659 Create the appropriate mask now. */
8660 if (use_sse)
8661 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
8662 else
8664 /* When not using SSE, we don't use the mask, but prefer to keep the
8665 same general form of the insn pattern to reduce duplication when
8666 it comes time to split. */
8667 mask = const0_rtx;
8670 dst = operands[0];
8671 src = operands[1];
8673 /* If the destination is memory, and we don't have matching source
8674 operands, do things in registers. */
8675 matching_memory = false;
8676 if (MEM_P (dst))
8678 if (rtx_equal_p (dst, src))
8679 matching_memory = true;
8680 else
8681 dst = gen_reg_rtx (mode);
8683 if (MEM_P (src) && !matching_memory)
8684 src = force_reg (mode, src);
8686 if (vector_mode)
8688 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8689 set = gen_rtx_SET (VOIDmode, dst, set);
8690 emit_insn (set);
8692 else
8694 set = gen_rtx_fmt_e (code, mode, src);
8695 set = gen_rtx_SET (VOIDmode, dst, set);
8696 use = gen_rtx_USE (VOIDmode, mask);
8697 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8698 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8701 if (dst != operands[0])
8702 emit_move_insn (operands[0], dst);
8705 /* Expand a copysign operation. Special case operand 0 being a constant. */
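/* The underlying identity (for reference) is
   copysign (a, b) = (a & ~sign_mask) | (b & sign_mask),
   which the const/var splitters below implement with AND, AND-NOT and OR
   operations on the SSE representation of the operands.  */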
8707 void
8708 ix86_expand_copysign (rtx operands[])
8710 enum machine_mode mode, vmode;
8711 rtx dest, op0, op1, mask, nmask;
8713 dest = operands[0];
8714 op0 = operands[1];
8715 op1 = operands[2];
8717 mode = GET_MODE (dest);
8718 vmode = mode == SFmode ? V4SFmode : V2DFmode;
8720 if (GET_CODE (op0) == CONST_DOUBLE)
8722 rtvec v;
8724 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
8725 op0 = simplify_unary_operation (ABS, mode, op0, mode);
8727 if (op0 == CONST0_RTX (mode))
8728 op0 = CONST0_RTX (vmode);
8729 else
8731 if (mode == SFmode)
8732 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
8733 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8734 else
8735 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
8736 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
8739 mask = ix86_build_signbit_mask (mode, 0, 0);
8741 if (mode == SFmode)
8742 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
8743 else
8744 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
8746 else
8748 nmask = ix86_build_signbit_mask (mode, 0, 1);
8749 mask = ix86_build_signbit_mask (mode, 0, 0);
8751 if (mode == SFmode)
8752 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
8753 else
8754 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
8758 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
8759 be a constant, and so has already been expanded into a vector constant. */
8761 void
8762 ix86_split_copysign_const (rtx operands[])
8764 enum machine_mode mode, vmode;
8765 rtx dest, op0, op1, mask, x;
8767 dest = operands[0];
8768 op0 = operands[1];
8769 op1 = operands[2];
8770 mask = operands[3];
8772 mode = GET_MODE (dest);
8773 vmode = GET_MODE (mask);
8775 dest = simplify_gen_subreg (vmode, dest, mode, 0);
8776 x = gen_rtx_AND (vmode, dest, mask);
8777 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8779 if (op0 != CONST0_RTX (vmode))
8781 x = gen_rtx_IOR (vmode, dest, op0);
8782 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8786 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
8787 so we have to do two masks. */
8789 void
8790 ix86_split_copysign_var (rtx operands[])
8792 enum machine_mode mode, vmode;
8793 rtx dest, scratch, op0, op1, mask, nmask, x;
8795 dest = operands[0];
8796 scratch = operands[1];
8797 op0 = operands[2];
8798 op1 = operands[3];
8799 nmask = operands[4];
8800 mask = operands[5];
8802 mode = GET_MODE (dest);
8803 vmode = GET_MODE (mask);
8805 if (rtx_equal_p (op0, op1))
8807 /* Shouldn't happen often (it's useless, obviously), but when it does
8808 we'd generate incorrect code if we continue below. */
8809 emit_move_insn (dest, op0);
8810 return;
8813 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
8815 gcc_assert (REGNO (op1) == REGNO (scratch));
8817 x = gen_rtx_AND (vmode, scratch, mask);
8818 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8820 dest = mask;
8821 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8822 x = gen_rtx_NOT (vmode, dest);
8823 x = gen_rtx_AND (vmode, x, op0);
8824 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8826 else
8828 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
8830 x = gen_rtx_AND (vmode, scratch, mask);
8832 else /* alternative 2,4 */
8834 gcc_assert (REGNO (mask) == REGNO (scratch));
8835 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
8836 x = gen_rtx_AND (vmode, scratch, op1);
8838 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8840 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
8842 dest = simplify_gen_subreg (vmode, op0, mode, 0);
8843 x = gen_rtx_AND (vmode, dest, nmask);
8845 else /* alternative 3,4 */
8847 gcc_assert (REGNO (nmask) == REGNO (dest));
8848 dest = nmask;
8849 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8850 x = gen_rtx_AND (vmode, dest, op0);
8852 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8855 x = gen_rtx_IOR (vmode, dest, scratch);
8856 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8859 /* Return TRUE or FALSE depending on whether the first SET in INSN
8860    has source and destination with matching CC modes and whether that
8861    CC mode is at least as constrained as REQ_MODE.  */
8864 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
8866 rtx set;
8867 enum machine_mode set_mode;
8869 set = PATTERN (insn);
8870 if (GET_CODE (set) == PARALLEL)
8871 set = XVECEXP (set, 0, 0);
8872 gcc_assert (GET_CODE (set) == SET);
8873 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
8875 set_mode = GET_MODE (SET_DEST (set));
8876 switch (set_mode)
8878 case CCNOmode:
8879 if (req_mode != CCNOmode
8880 && (req_mode != CCmode
8881 || XEXP (SET_SRC (set), 1) != const0_rtx))
8882 return 0;
8883 break;
8884 case CCmode:
8885 if (req_mode == CCGCmode)
8886 return 0;
8887 /* FALLTHRU */
8888 case CCGCmode:
8889 if (req_mode == CCGOCmode || req_mode == CCNOmode)
8890 return 0;
8891 /* FALLTHRU */
8892 case CCGOCmode:
8893 if (req_mode == CCZmode)
8894 return 0;
8895 /* FALLTHRU */
8896 case CCZmode:
8897 break;
8899 default:
8900 gcc_unreachable ();
8903 return (GET_MODE (SET_SRC (set)) == set_mode);
8906 /* Generate insn patterns to do an integer compare of OPERANDS. */
8908 static rtx
8909 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
8911 enum machine_mode cmpmode;
8912 rtx tmp, flags;
8914 cmpmode = SELECT_CC_MODE (code, op0, op1);
8915 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
8917 /* This is very simple, but making the interface the same as in the
8918 FP case makes the rest of the code easier. */
8919 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
8920 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
8922 /* Return the test that should be put into the flags user, i.e.
8923 the bcc, scc, or cmov instruction. */
8924 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
8927 /* Figure out whether to use ordered or unordered fp comparisons.
8928 Return the appropriate mode to use. */
8930 enum machine_mode
8931 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
8933 /* ??? In order to make all comparisons reversible, we do all comparisons
8934    non-trapping when compiling for IEEE.  Once gcc is able to distinguish
8935    the trapping and nontrapping forms of all comparisons, we can make
8936    inequality comparisons trapping again, since that results in better code
8937    when using FCOM based compares.  */
8938 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
8941 enum machine_mode
8942 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
8944 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8945 return ix86_fp_compare_mode (code);
8946 switch (code)
8948 /* Only zero flag is needed. */
8949 case EQ: /* ZF=0 */
8950 case NE: /* ZF!=0 */
8951 return CCZmode;
8952 /* Codes needing carry flag. */
8953 case GEU: /* CF=0 */
8954 case GTU: /* CF=0 & ZF=0 */
8955 case LTU: /* CF=1 */
8956 case LEU: /* CF=1 | ZF=1 */
8957 return CCmode;
8958 /* Codes possibly doable only with sign flag when
8959 comparing against zero. */
8960 case GE: /* SF=OF or SF=0 */
8961 case LT: /* SF<>OF or SF=1 */
8962 if (op1 == const0_rtx)
8963 return CCGOCmode;
8964 else
8965 /* For other cases Carry flag is not required. */
8966 return CCGCmode;
8967 /* Codes doable only with the sign flag when comparing
8968    against zero, but there is no jump instruction for that,
8969    so we need to use relational tests against the overflow
8970    flag, which therefore needs to be zero.  */
8971 case GT: /* ZF=0 & SF=OF */
8972 case LE: /* ZF=1 | SF<>OF */
8973 if (op1 == const0_rtx)
8974 return CCNOmode;
8975 else
8976 return CCGCmode;
8977 /* The strcmp pattern does (use flags), and combine may ask us for a
8978    proper mode.  */
8979 case USE:
8980 return CCmode;
8981 default:
8982 gcc_unreachable ();
8986 /* Return the fixed registers used for condition codes. */
8988 static bool
8989 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
8991 *p1 = FLAGS_REG;
8992 *p2 = FPSR_REG;
8993 return true;
8996 /* If two condition code modes are compatible, return a condition code
8997 mode which is compatible with both. Otherwise, return
8998 VOIDmode. */
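/* For instance (illustrative): CCGCmode and CCGOCmode combine to
   CCGCmode, while two distinct modes from the integer group such as
   CCZmode and CCNOmode fall back to plain CCmode; the FP modes are only
   compatible with themselves.  */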
9000 static enum machine_mode
9001 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9003 if (m1 == m2)
9004 return m1;
9006 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
9007 return VOIDmode;
9009 if ((m1 == CCGCmode && m2 == CCGOCmode)
9010 || (m1 == CCGOCmode && m2 == CCGCmode))
9011 return CCGCmode;
9013 switch (m1)
9015 default:
9016 gcc_unreachable ();
9018 case CCmode:
9019 case CCGCmode:
9020 case CCGOCmode:
9021 case CCNOmode:
9022 case CCZmode:
9023 switch (m2)
9025 default:
9026 return VOIDmode;
9028 case CCmode:
9029 case CCGCmode:
9030 case CCGOCmode:
9031 case CCNOmode:
9032 case CCZmode:
9033 return CCmode;
9036 case CCFPmode:
9037 case CCFPUmode:
9038 /* These are only compatible with themselves, which we already
9039 checked above. */
9040 return VOIDmode;
9044 /* Return true if we should use an FCOMI instruction for this fp comparison. */
9047 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
9049 enum rtx_code swapped_code = swap_condition (code);
9050 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
9051 || (ix86_fp_comparison_cost (swapped_code)
9052 == ix86_fp_comparison_fcomi_cost (swapped_code)));
9055 /* Swap, force into registers, or otherwise massage the two operands
9056 to a fp comparison. The operands are updated in place; the new
9057 comparison code is returned. */
9059 static enum rtx_code
9060 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
9062 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
9063 rtx op0 = *pop0, op1 = *pop1;
9064 enum machine_mode op_mode = GET_MODE (op0);
9065 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
9067 /* All of the unordered compare instructions only work on registers.
9068 The same is true of the fcomi compare instructions. The XFmode
9069 compare instructions require registers except when comparing
9070 against zero or when converting operand 1 from fixed point to
9071 floating point. */
9073 if (!is_sse
9074 && (fpcmp_mode == CCFPUmode
9075 || (op_mode == XFmode
9076 && ! (standard_80387_constant_p (op0) == 1
9077 || standard_80387_constant_p (op1) == 1)
9078 && GET_CODE (op1) != FLOAT)
9079 || ix86_use_fcomi_compare (code)))
9081 op0 = force_reg (op_mode, op0);
9082 op1 = force_reg (op_mode, op1);
9084 else
9086 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
9087 things around if they appear profitable, otherwise force op0
9088 into a register. */
9090 if (standard_80387_constant_p (op0) == 0
9091 || (GET_CODE (op0) == MEM
9092 && ! (standard_80387_constant_p (op1) == 0
9093 || GET_CODE (op1) == MEM)))
9095 rtx tmp;
9096 tmp = op0, op0 = op1, op1 = tmp;
9097 code = swap_condition (code);
9100 if (GET_CODE (op0) != REG)
9101 op0 = force_reg (op_mode, op0);
9103 if (CONSTANT_P (op1))
9105 int tmp = standard_80387_constant_p (op1);
9106 if (tmp == 0)
9107 op1 = validize_mem (force_const_mem (op_mode, op1));
9108 else if (tmp == 1)
9110 if (TARGET_CMOVE)
9111 op1 = force_reg (op_mode, op1);
9113 else
9114 op1 = force_reg (op_mode, op1);
9118 /* Try to rearrange the comparison to make it cheaper. */
9119 if (ix86_fp_comparison_cost (code)
9120 > ix86_fp_comparison_cost (swap_condition (code))
9121 && (GET_CODE (op1) == REG || !no_new_pseudos))
9123 rtx tmp;
9124 tmp = op0, op0 = op1, op1 = tmp;
9125 code = swap_condition (code);
9126 if (GET_CODE (op0) != REG)
9127 op0 = force_reg (op_mode, op0);
9130 *pop0 = op0;
9131 *pop1 = op1;
9132 return code;
9135 /* Convert the comparison codes we use to represent an FP comparison into
9136    an integer code that will result in a proper branch.  Return UNKNOWN if
9137    no such code is available.  */
9139 enum rtx_code
9140 ix86_fp_compare_code_to_integer (enum rtx_code code)
9142 switch (code)
9144 case GT:
9145 return GTU;
9146 case GE:
9147 return GEU;
9148 case ORDERED:
9149 case UNORDERED:
9150 return code;
9151 break;
9152 case UNEQ:
9153 return EQ;
9154 break;
9155 case UNLT:
9156 return LTU;
9157 break;
9158 case UNLE:
9159 return LEU;
9160 break;
9161 case LTGT:
9162 return NE;
9163 break;
9164 default:
9165 return UNKNOWN;
9169 /* Split comparison code CODE into comparisons we can do using branch
9170    instructions.  BYPASS_CODE is the comparison code for a branch that
9171    branches around FIRST_CODE and SECOND_CODE.  If one of the branches
9172    is not required, its value is set to UNKNOWN.
9173    We never require more than two branches.  */
9175 void
9176 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
9177 enum rtx_code *first_code,
9178 enum rtx_code *second_code)
9180 *first_code = code;
9181 *bypass_code = UNKNOWN;
9182 *second_code = UNKNOWN;
9184 /* The fcomi comparison sets flags as follows:
9186 cmp ZF PF CF
9187 > 0 0 0
9188 < 0 0 1
9189 = 1 0 0
9190 un 1 1 1 */
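/* Example (illustrative): a plain LT test fails on unordered operands,
   so when TARGET_IEEE_FP it is split below into first_code = UNLT with
   bypass_code = UNORDERED branching around it; NE instead keeps a
   second_code = UNORDERED branch that also takes the jump.  */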
9192 switch (code)
9194 case GT: /* GTU - CF=0 & ZF=0 */
9195 case GE: /* GEU - CF=0 */
9196 case ORDERED: /* PF=0 */
9197 case UNORDERED: /* PF=1 */
9198 case UNEQ: /* EQ - ZF=1 */
9199 case UNLT: /* LTU - CF=1 */
9200 case UNLE: /* LEU - CF=1 | ZF=1 */
9201 case LTGT: /* EQ - ZF=0 */
9202 break;
9203 case LT: /* LTU - CF=1 - fails on unordered */
9204 *first_code = UNLT;
9205 *bypass_code = UNORDERED;
9206 break;
9207 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
9208 *first_code = UNLE;
9209 *bypass_code = UNORDERED;
9210 break;
9211 case EQ: /* EQ - ZF=1 - fails on unordered */
9212 *first_code = UNEQ;
9213 *bypass_code = UNORDERED;
9214 break;
9215 case NE: /* NE - ZF=0 - fails on unordered */
9216 *first_code = LTGT;
9217 *second_code = UNORDERED;
9218 break;
9219 case UNGE: /* GEU - CF=0 - fails on unordered */
9220 *first_code = GE;
9221 *second_code = UNORDERED;
9222 break;
9223 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
9224 *first_code = GT;
9225 *second_code = UNORDERED;
9226 break;
9227 default:
9228 gcc_unreachable ();
9230 if (!TARGET_IEEE_FP)
9232 *second_code = UNKNOWN;
9233 *bypass_code = UNKNOWN;
9237 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
9238    All of the following functions use the number of instructions as the cost metric.
9239    In the future this should be tweaked to compute bytes for optimize_size and to
9240    take into account the performance of the various instructions on various CPUs.  */
9241 static int
9242 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
9244 if (!TARGET_IEEE_FP)
9245 return 4;
9246 /* The cost of code output by ix86_expand_fp_compare. */
9247 switch (code)
9249 case UNLE:
9250 case UNLT:
9251 case LTGT:
9252 case GT:
9253 case GE:
9254 case UNORDERED:
9255 case ORDERED:
9256 case UNEQ:
9257 return 4;
9258 break;
9259 case LT:
9260 case NE:
9261 case EQ:
9262 case UNGE:
9263 return 5;
9264 break;
9265 case LE:
9266 case UNGT:
9267 return 6;
9268 break;
9269 default:
9270 gcc_unreachable ();
9274 /* Return cost of comparison done using fcomi operation.
9275 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9276 static int
9277 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
9279 enum rtx_code bypass_code, first_code, second_code;
9280 /* Return an arbitrarily high cost when the instruction is not supported;
9281    this prevents gcc from using it.  */
9282 if (!TARGET_CMOVE)
9283 return 1024;
9284 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9285 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
9288 /* Return cost of comparison done using sahf operation.
9289 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9290 static int
9291 ix86_fp_comparison_sahf_cost (enum rtx_code code)
9293 enum rtx_code bypass_code, first_code, second_code;
9294 /* Return an arbitrarily high cost when the instruction is not preferred;
9295    this prevents gcc from using it.  */
9296 if (!TARGET_USE_SAHF && !optimize_size)
9297 return 1024;
9298 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9299 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
9302 /* Compute the minimum cost of the comparison over all available methods.
9303    See ix86_fp_comparison_arithmetics_cost for the metric.  */
9304 static int
9305 ix86_fp_comparison_cost (enum rtx_code code)
9307 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9308 int min;
9310 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9311 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9313 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9314 if (min > sahf_cost)
9315 min = sahf_cost;
9316 if (min > fcomi_cost)
9317 min = fcomi_cost;
9318 return min;
9321 /* Generate insn patterns to do a floating point compare of OPERANDS. */
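/* Two strategies are used below (described for reference): either fcomi
   or fnstsw+sahf sets EFLAGS directly, or the FPU status word is copied
   into a scratch register and the C0/C2/C3 condition bits are tested in
   AH -- 0x01, 0x04 and 0x40 respectively, with 0x45 covering all three,
   which is where the magic constants in the bit-twiddling come from.  */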
9323 static rtx
9324 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9325 rtx *second_test, rtx *bypass_test)
9327 enum machine_mode fpcmp_mode, intcmp_mode;
9328 rtx tmp, tmp2;
9329 int cost = ix86_fp_comparison_cost (code);
9330 enum rtx_code bypass_code, first_code, second_code;
9332 fpcmp_mode = ix86_fp_compare_mode (code);
9333 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9335 if (second_test)
9336 *second_test = NULL_RTX;
9337 if (bypass_test)
9338 *bypass_test = NULL_RTX;
9340 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9342 /* Do fcomi/sahf based test when profitable. */
9343 if ((bypass_code == UNKNOWN || bypass_test)
9344 && (second_code == UNKNOWN || second_test)
9345 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9347 if (TARGET_CMOVE)
9349 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9350 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9351 tmp);
9352 emit_insn (tmp);
9354 else
9356 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9357 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9358 if (!scratch)
9359 scratch = gen_reg_rtx (HImode);
9360 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9361 emit_insn (gen_x86_sahf_1 (scratch));
9364 /* The FP codes work out to act like unsigned. */
9365 intcmp_mode = fpcmp_mode;
9366 code = first_code;
9367 if (bypass_code != UNKNOWN)
9368 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9369 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9370 const0_rtx);
9371 if (second_code != UNKNOWN)
9372 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9373 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9374 const0_rtx);
9376 else
9378 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9379 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9380 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9381 if (!scratch)
9382 scratch = gen_reg_rtx (HImode);
9383 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9385 /* In the unordered case, we have to check C2 for NaNs, which
9386    doesn't happen to work out to anything nice combination-wise.
9387    So do some bit twiddling on the value we've got in AH to come
9388    up with an appropriate set of condition codes.  */
9390 intcmp_mode = CCNOmode;
9391 switch (code)
9393 case GT:
9394 case UNGT:
9395 if (code == GT || !TARGET_IEEE_FP)
9397 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9398 code = EQ;
9400 else
9402 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9403 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9404 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9405 intcmp_mode = CCmode;
9406 code = GEU;
9408 break;
9409 case LT:
9410 case UNLT:
9411 if (code == LT && TARGET_IEEE_FP)
9413 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9414 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9415 intcmp_mode = CCmode;
9416 code = EQ;
9418 else
9420 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9421 code = NE;
9423 break;
9424 case GE:
9425 case UNGE:
9426 if (code == GE || !TARGET_IEEE_FP)
9428 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9429 code = EQ;
9431 else
9433 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9434 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9435 GEN_INT (0x01)));
9436 code = NE;
9438 break;
9439 case LE:
9440 case UNLE:
9441 if (code == LE && TARGET_IEEE_FP)
9443 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9444 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9445 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9446 intcmp_mode = CCmode;
9447 code = LTU;
9449 else
9451 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9452 code = NE;
9454 break;
9455 case EQ:
9456 case UNEQ:
9457 if (code == EQ && TARGET_IEEE_FP)
9459 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9460 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9461 intcmp_mode = CCmode;
9462 code = EQ;
9464 else
9466 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9467 code = NE;
9468 break;
9470 break;
9471 case NE:
9472 case LTGT:
9473 if (code == NE && TARGET_IEEE_FP)
9475 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9476 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9477 GEN_INT (0x40)));
9478 code = NE;
9480 else
9482 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9483 code = EQ;
9485 break;
9487 case UNORDERED:
9488 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9489 code = NE;
9490 break;
9491 case ORDERED:
9492 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9493 code = EQ;
9494 break;
9496 default:
9497 gcc_unreachable ();
9501 /* Return the test that should be put into the flags user, i.e.
9502 the bcc, scc, or cmov instruction. */
9503 return gen_rtx_fmt_ee (code, VOIDmode,
9504 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9505 const0_rtx);
9509 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9511 rtx op0, op1, ret;
9512 op0 = ix86_compare_op0;
9513 op1 = ix86_compare_op1;
9515 if (second_test)
9516 *second_test = NULL_RTX;
9517 if (bypass_test)
9518 *bypass_test = NULL_RTX;
9520 if (ix86_compare_emitted)
9522 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9523 ix86_compare_emitted = NULL_RTX;
9525 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9526 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9527 second_test, bypass_test);
9528 else
9529 ret = ix86_expand_int_compare (code, op0, op1);
9531 return ret;
9534 /* Return true if CODE will result in a nontrivial jump sequence.  */
9535 bool
9536 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9538 enum rtx_code bypass_code, first_code, second_code;
9539 if (!TARGET_CMOVE)
9540 return true;
9541 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9542 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9545 void
9546 ix86_expand_branch (enum rtx_code code, rtx label)
9548 rtx tmp;
9550 switch (GET_MODE (ix86_compare_op0))
9552 case QImode:
9553 case HImode:
9554 case SImode:
9555 simple:
9556 tmp = ix86_expand_compare (code, NULL, NULL);
9557 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9558 gen_rtx_LABEL_REF (VOIDmode, label),
9559 pc_rtx);
9560 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9561 return;
9563 case SFmode:
9564 case DFmode:
9565 case XFmode:
9567 rtvec vec;
9568 int use_fcomi;
9569 enum rtx_code bypass_code, first_code, second_code;
9571 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9572 &ix86_compare_op1);
9574 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9576 /* Check whether we will use the natural sequence with one jump.  If
9577    so, we can expand the jump early.  Otherwise, delay expansion by
9578    creating a compound insn so as not to confuse the optimizers.  */
9579 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9580 && TARGET_CMOVE)
9582 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9583 gen_rtx_LABEL_REF (VOIDmode, label),
9584 pc_rtx, NULL_RTX, NULL_RTX);
9586 else
9588 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9589 ix86_compare_op0, ix86_compare_op1);
9590 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9591 gen_rtx_LABEL_REF (VOIDmode, label),
9592 pc_rtx);
9593 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9595 use_fcomi = ix86_use_fcomi_compare (code);
9596 vec = rtvec_alloc (3 + !use_fcomi);
9597 RTVEC_ELT (vec, 0) = tmp;
9598 RTVEC_ELT (vec, 1)
9599 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9600 RTVEC_ELT (vec, 2)
9601 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9602 if (! use_fcomi)
9603 RTVEC_ELT (vec, 3)
9604 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9606 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9608 return;
9611 case DImode:
9612 if (TARGET_64BIT)
9613 goto simple;
9614 case TImode:
9615 /* Expand a double-word (DImode or TImode) branch into multiple compare+branch sequences.  */
9617 rtx lo[2], hi[2], label2;
9618 enum rtx_code code1, code2, code3;
9619 enum machine_mode submode;
9621 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9623 tmp = ix86_compare_op0;
9624 ix86_compare_op0 = ix86_compare_op1;
9625 ix86_compare_op1 = tmp;
9626 code = swap_condition (code);
9628 if (GET_MODE (ix86_compare_op0) == DImode)
9630 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9631 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9632 submode = SImode;
9634 else
9636 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
9637 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
9638 submode = DImode;
9641 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9642 avoid two branches. This costs one extra insn, so disable when
9643 optimizing for size. */
9645 if ((code == EQ || code == NE)
9646 && (!optimize_size
9647 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9649 rtx xor0, xor1;
9651 xor1 = hi[0];
9652 if (hi[1] != const0_rtx)
9653 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
9654 NULL_RTX, 0, OPTAB_WIDEN);
9656 xor0 = lo[0];
9657 if (lo[1] != const0_rtx)
9658 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
9659 NULL_RTX, 0, OPTAB_WIDEN);
9661 tmp = expand_binop (submode, ior_optab, xor1, xor0,
9662 NULL_RTX, 0, OPTAB_WIDEN);
9664 ix86_compare_op0 = tmp;
9665 ix86_compare_op1 = const0_rtx;
9666 ix86_expand_branch (code, label);
9667 return;
9670 /* Otherwise, if we are doing a less-than or greater-than-or-equal
9671    comparison, op1 is a constant, and the low word is zero, then we can
9672    just examine the high word, as in the example below.  */
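/* For example (illustrative): on a 32-bit target, the unsigned test
   "x < 0x300000000" has a zero low word, so it reduces to the single
   comparison hi(x) < 3 and only one branch is needed.  */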
9674 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9675 switch (code)
9677 case LT: case LTU: case GE: case GEU:
9678 ix86_compare_op0 = hi[0];
9679 ix86_compare_op1 = hi[1];
9680 ix86_expand_branch (code, label);
9681 return;
9682 default:
9683 break;
9686 /* Otherwise, we need two or three jumps. */
9688 label2 = gen_label_rtx ();
9690 code1 = code;
9691 code2 = swap_condition (code);
9692 code3 = unsigned_condition (code);
9694 switch (code)
9696 case LT: case GT: case LTU: case GTU:
9697 break;
9699 case LE: code1 = LT; code2 = GT; break;
9700 case GE: code1 = GT; code2 = LT; break;
9701 case LEU: code1 = LTU; code2 = GTU; break;
9702 case GEU: code1 = GTU; code2 = LTU; break;
9704 case EQ: code1 = UNKNOWN; code2 = NE; break;
9705 case NE: code2 = UNKNOWN; break;
9707 default:
9708 gcc_unreachable ();
9712 * a < b =>
9713 * if (hi(a) < hi(b)) goto true;
9714 * if (hi(a) > hi(b)) goto false;
9715 * if (lo(a) < lo(b)) goto true;
9716 * false:
9719 ix86_compare_op0 = hi[0];
9720 ix86_compare_op1 = hi[1];
9722 if (code1 != UNKNOWN)
9723 ix86_expand_branch (code1, label);
9724 if (code2 != UNKNOWN)
9725 ix86_expand_branch (code2, label2);
9727 ix86_compare_op0 = lo[0];
9728 ix86_compare_op1 = lo[1];
9729 ix86_expand_branch (code3, label);
9731 if (code2 != UNKNOWN)
9732 emit_label (label2);
9733 return;
9736 default:
9737 gcc_unreachable ();
9741 /* Split branch based on floating point condition. */
9742 void
9743 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9744 rtx target1, rtx target2, rtx tmp, rtx pushed)
9746 rtx second, bypass;
9747 rtx label = NULL_RTX;
9748 rtx condition;
9749 int bypass_probability = -1, second_probability = -1, probability = -1;
9750 rtx i;
9752 if (target2 != pc_rtx)
9754 rtx tmp = target2;
9755 code = reverse_condition_maybe_unordered (code);
9756 target2 = target1;
9757 target1 = tmp;
9760 condition = ix86_expand_fp_compare (code, op1, op2,
9761 tmp, &second, &bypass);
9763 /* Remove pushed operand from stack. */
9764 if (pushed)
9765 ix86_free_from_memory (GET_MODE (pushed));
9767 if (split_branch_probability >= 0)
9769 /* Distribute the probabilities across the jumps.
9770 Assume that BYPASS and SECOND always test
9771 for UNORDERED. */
9772 probability = split_branch_probability;
9774 /* A value of 1 is low enough that the probability need not
9775 be updated. Later we may run some experiments and see
9776 if unordered values are more frequent in practice. */
9777 if (bypass)
9778 bypass_probability = 1;
9779 if (second)
9780 second_probability = 1;
9782 if (bypass != NULL_RTX)
9784 label = gen_label_rtx ();
9785 i = emit_jump_insn (gen_rtx_SET
9786 (VOIDmode, pc_rtx,
9787 gen_rtx_IF_THEN_ELSE (VOIDmode,
9788 bypass,
9789 gen_rtx_LABEL_REF (VOIDmode,
9790 label),
9791 pc_rtx)));
9792 if (bypass_probability >= 0)
9793 REG_NOTES (i)
9794 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9795 GEN_INT (bypass_probability),
9796 REG_NOTES (i));
9798 i = emit_jump_insn (gen_rtx_SET
9799 (VOIDmode, pc_rtx,
9800 gen_rtx_IF_THEN_ELSE (VOIDmode,
9801 condition, target1, target2)));
9802 if (probability >= 0)
9803 REG_NOTES (i)
9804 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9805 GEN_INT (probability),
9806 REG_NOTES (i));
9807 if (second != NULL_RTX)
9809 i = emit_jump_insn (gen_rtx_SET
9810 (VOIDmode, pc_rtx,
9811 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9812 target2)));
9813 if (second_probability >= 0)
9814 REG_NOTES (i)
9815 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9816 GEN_INT (second_probability),
9817 REG_NOTES (i));
9819 if (label != NULL_RTX)
9820 emit_label (label);
9824 ix86_expand_setcc (enum rtx_code code, rtx dest)
9826 rtx ret, tmp, tmpreg, equiv;
9827 rtx second_test, bypass_test;
9829 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
9830 return 0; /* FAIL */
9832 gcc_assert (GET_MODE (dest) == QImode);
9834 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9835 PUT_MODE (ret, QImode);
9837 tmp = dest;
9838 tmpreg = dest;
9840 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9841 if (bypass_test || second_test)
9843 rtx test = second_test;
9844 int bypass = 0;
9845 rtx tmp2 = gen_reg_rtx (QImode);
9846 if (bypass_test)
9848 gcc_assert (!second_test);
9849 test = bypass_test;
9850 bypass = 1;
9851 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9853 PUT_MODE (test, QImode);
9854 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9856 if (bypass)
9857 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9858 else
9859 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9862 /* Attach a REG_EQUAL note describing the comparison result. */
9863 if (ix86_compare_op0 && ix86_compare_op1)
9865 equiv = simplify_gen_relational (code, QImode,
9866 GET_MODE (ix86_compare_op0),
9867 ix86_compare_op0, ix86_compare_op1);
9868 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9871 return 1; /* DONE */
9874 /* Expand comparison setting or clearing carry flag. Return true when
9875 successful and set *POP to the comparison operation. */
9876 static bool
9877 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9879 enum machine_mode mode =
9880 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9882 /* Do not handle DImode compares that go through the special path. Also we can't
9883 deal with FP compares yet, though it would be possible to add them. */
9884 if (mode == (TARGET_64BIT ? TImode : DImode))
9885 return false;
9886 if (FLOAT_MODE_P (mode))
9888 rtx second_test = NULL, bypass_test = NULL;
9889 rtx compare_op, compare_seq;
9891 /* Shortcut: the following common codes never translate into carry flag compares. */
9892 if (code == EQ || code == NE || code == UNEQ || code == LTGT
9893 || code == ORDERED || code == UNORDERED)
9894 return false;
9896 /* These comparisons require the zero flag; swap operands so they won't. */
9897 if ((code == GT || code == UNLE || code == LE || code == UNGT)
9898 && !TARGET_IEEE_FP)
9900 rtx tmp = op0;
9901 op0 = op1;
9902 op1 = tmp;
9903 code = swap_condition (code);
9906 /* Try to expand the comparison and verify that we end up with a carry flag
9907 based comparison. This fails to be true only when we decide to expand the
9908 comparison using arithmetic, which is not a common scenario. */
9909 start_sequence ();
9910 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9911 &second_test, &bypass_test);
9912 compare_seq = get_insns ();
9913 end_sequence ();
9915 if (second_test || bypass_test)
9916 return false;
9917 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9918 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9919 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
9920 else
9921 code = GET_CODE (compare_op);
9922 if (code != LTU && code != GEU)
9923 return false;
9924 emit_insn (compare_seq);
9925 *pop = compare_op;
9926 return true;
9928 if (!INTEGRAL_MODE_P (mode))
9929 return false;
9930 switch (code)
9932 case LTU:
9933 case GEU:
9934 break;
9936 /* Convert a==0 into (unsigned)a<1. */
9937 case EQ:
9938 case NE:
9939 if (op1 != const0_rtx)
9940 return false;
9941 op1 = const1_rtx;
9942 code = (code == EQ ? LTU : GEU);
9943 break;
9945 /* Convert a>b into b<a or a>=b+1. */
9946 case GTU:
9947 case LEU:
9948 if (GET_CODE (op1) == CONST_INT)
9950 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
9951 /* Bail out on overflow. We still can swap operands but that
9952 would force loading of the constant into register. */
9953 if (op1 == const0_rtx
9954 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
9955 return false;
9956 code = (code == GTU ? GEU : LTU);
9958 else
9960 rtx tmp = op1;
9961 op1 = op0;
9962 op0 = tmp;
9963 code = (code == GTU ? LTU : GEU);
9965 break;
9967 /* Convert a>=0 into (unsigned)a<0x80000000. */
9968 case LT:
9969 case GE:
9970 if (mode == DImode || op1 != const0_rtx)
9971 return false;
9972 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9973 code = (code == LT ? GEU : LTU);
9974 break;
9975 case LE:
9976 case GT:
9977 if (mode == DImode || op1 != constm1_rtx)
9978 return false;
9979 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9980 code = (code == LE ? GEU : LTU);
9981 break;
9983 default:
9984 return false;
9986 /* Swapping operands may cause constant to appear as first operand. */
9987 if (!nonimmediate_operand (op0, VOIDmode))
9989 if (no_new_pseudos)
9990 return false;
9991 op0 = force_reg (mode, op0);
9993 ix86_compare_op0 = op0;
9994 ix86_compare_op1 = op1;
9995 *pop = ix86_expand_compare (code, NULL, NULL);
9996 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
9997 return true;
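/* Example of the conversions above: an unsigned "x <= 4" becomes
   "x < 5" (LEU -> LTU with op1 incremented), and "x == 0" becomes
   "(unsigned) x < 1"; both are then realized directly by the carry
   flag of a single cmp instruction.  */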
10001 ix86_expand_int_movcc (rtx operands[])
10003 enum rtx_code code = GET_CODE (operands[1]), compare_code;
10004 rtx compare_seq, compare_op;
10005 rtx second_test, bypass_test;
10006 enum machine_mode mode = GET_MODE (operands[0]);
10007 bool sign_bit_compare_p = false;
10009 start_sequence ();
10010 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10011 compare_seq = get_insns ();
10012 end_sequence ();
10014 compare_code = GET_CODE (compare_op);
10016 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
10017 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
10018 sign_bit_compare_p = true;
10020 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
10021 HImode insns, we'd be swallowed in word prefix ops. */
10023 if ((mode != HImode || TARGET_FAST_PREFIX)
10024 && (mode != (TARGET_64BIT ? TImode : DImode))
10025 && GET_CODE (operands[2]) == CONST_INT
10026 && GET_CODE (operands[3]) == CONST_INT)
10028 rtx out = operands[0];
10029 HOST_WIDE_INT ct = INTVAL (operands[2]);
10030 HOST_WIDE_INT cf = INTVAL (operands[3]);
10031 HOST_WIDE_INT diff;
10033 diff = ct - cf;
10034 /* Sign bit compares are better done using shifts than by using
10035 sbb. */
10036 if (sign_bit_compare_p
10037 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10038 ix86_compare_op1, &compare_op))
10040 /* Detect overlap between destination and compare sources. */
10041 rtx tmp = out;
10043 if (!sign_bit_compare_p)
10045 bool fpcmp = false;
10047 compare_code = GET_CODE (compare_op);
10049 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10050 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10052 fpcmp = true;
10053 compare_code = ix86_fp_compare_code_to_integer (compare_code);
10056 /* To simplify the rest of the code, restrict to the GEU case. */
10057 if (compare_code == LTU)
10059 HOST_WIDE_INT tmp = ct;
10060 ct = cf;
10061 cf = tmp;
10062 compare_code = reverse_condition (compare_code);
10063 code = reverse_condition (code);
10065 else
10067 if (fpcmp)
10068 PUT_CODE (compare_op,
10069 reverse_condition_maybe_unordered
10070 (GET_CODE (compare_op)));
10071 else
10072 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10074 diff = ct - cf;
10076 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
10077 || reg_overlap_mentioned_p (out, ix86_compare_op1))
10078 tmp = gen_reg_rtx (mode);
10080 if (mode == DImode)
10081 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
10082 else
10083 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
10085 else
10087 if (code == GT || code == GE)
10088 code = reverse_condition (code);
10089 else
10091 HOST_WIDE_INT tmp = ct;
10092 ct = cf;
10093 cf = tmp;
10094 diff = ct - cf;
10096 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
10097 ix86_compare_op1, VOIDmode, 0, -1);
10100 if (diff == 1)
10103 * cmpl op0,op1
10104 * sbbl dest,dest
10105 * [addl dest, ct]
10107 * Size 5 - 8.
10109 if (ct)
10110 tmp = expand_simple_binop (mode, PLUS,
10111 tmp, GEN_INT (ct),
10112 copy_rtx (tmp), 1, OPTAB_DIRECT);
10114 else if (cf == -1)
10117 * cmpl op0,op1
10118 * sbbl dest,dest
10119 * orl $ct, dest
10121 * Size 8.
10123 tmp = expand_simple_binop (mode, IOR,
10124 tmp, GEN_INT (ct),
10125 copy_rtx (tmp), 1, OPTAB_DIRECT);
10127 else if (diff == -1 && ct)
10130 * cmpl op0,op1
10131 * sbbl dest,dest
10132 * notl dest
10133 * [addl dest, cf]
10135 * Size 8 - 11.
10137 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10138 if (cf)
10139 tmp = expand_simple_binop (mode, PLUS,
10140 copy_rtx (tmp), GEN_INT (cf),
10141 copy_rtx (tmp), 1, OPTAB_DIRECT);
10143 else
10146 * cmpl op0,op1
10147 * sbbl dest,dest
10148 * [notl dest]
10149 * andl cf - ct, dest
10150 * [addl dest, ct]
10152 * Size 8 - 11.
10155 if (cf == 0)
10157 cf = ct;
10158 ct = 0;
10159 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10162 tmp = expand_simple_binop (mode, AND,
10163 copy_rtx (tmp),
10164 gen_int_mode (cf - ct, mode),
10165 copy_rtx (tmp), 1, OPTAB_DIRECT);
10166 if (ct)
10167 tmp = expand_simple_binop (mode, PLUS,
10168 copy_rtx (tmp), GEN_INT (ct),
10169 copy_rtx (tmp), 1, OPTAB_DIRECT);
10172 if (!rtx_equal_p (tmp, out))
10173 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
10175 return 1; /* DONE */
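/* All of the sbb-based sequences above compute a branchless select: the
   compare leaves an all-ones or all-zeros mask in the destination, and
   "(mask & (cf - ct)) + ct" then yields cf when the mask is all ones and
   ct when it is zero, matching the general andl/addl form shown above.  */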
10178 if (diff < 0)
10180 HOST_WIDE_INT tmp;
10181 tmp = ct, ct = cf, cf = tmp;
10182 diff = -diff;
10183 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10185 /* We may be reversing unordered compare to normal compare, that
10186 is not valid in general (we may convert non-trapping condition
10187 to trapping one), however on i386 we currently emit all
10188 comparisons unordered. */
10189 compare_code = reverse_condition_maybe_unordered (compare_code);
10190 code = reverse_condition_maybe_unordered (code);
10192 else
10194 compare_code = reverse_condition (compare_code);
10195 code = reverse_condition (code);
10199 compare_code = UNKNOWN;
10200 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
10201 && GET_CODE (ix86_compare_op1) == CONST_INT)
10203 if (ix86_compare_op1 == const0_rtx
10204 && (code == LT || code == GE))
10205 compare_code = code;
10206 else if (ix86_compare_op1 == constm1_rtx)
10208 if (code == LE)
10209 compare_code = LT;
10210 else if (code == GT)
10211 compare_code = GE;
10215 /* Optimize dest = (op0 < 0) ? -1 : cf. */
10216 if (compare_code != UNKNOWN
10217 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
10218 && (cf == -1 || ct == -1))
10220 /* If lea code below could be used, only optimize
10221 if it results in a 2 insn sequence. */
10223 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
10224 || diff == 3 || diff == 5 || diff == 9)
10225 || (compare_code == LT && ct == -1)
10226 || (compare_code == GE && cf == -1))
10229 * notl op1 (if necessary)
10230 * sarl $31, op1
10231 * orl cf, op1
10233 if (ct != -1)
10235 cf = ct;
10236 ct = -1;
10237 code = reverse_condition (code);
10240 out = emit_store_flag (out, code, ix86_compare_op0,
10241 ix86_compare_op1, VOIDmode, 0, -1);
10243 out = expand_simple_binop (mode, IOR,
10244 out, GEN_INT (cf),
10245 out, 1, OPTAB_DIRECT);
10246 if (out != operands[0])
10247 emit_move_insn (operands[0], out);
10249 return 1; /* DONE */
10254 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
10255 || diff == 3 || diff == 5 || diff == 9)
10256 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
10257 && (mode != DImode
10258 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
10261 * xorl dest,dest
10262 * cmpl op1,op2
10263 * setcc dest
10264 * lea cf(dest*(ct-cf)),dest
10266 * Size 14.
10268 * This also catches the degenerate setcc-only case.
10271 rtx tmp;
10272 int nops;
10274 out = emit_store_flag (out, code, ix86_compare_op0,
10275 ix86_compare_op1, VOIDmode, 0, 1);
10277 nops = 0;
10278 /* On x86_64 the lea instruction operates on Pmode, so we need
10279 to get the arithmetic done in the proper mode to match. */
10280 if (diff == 1)
10281 tmp = copy_rtx (out);
10282 else
10284 rtx out1;
10285 out1 = copy_rtx (out);
10286 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
10287 nops++;
10288 if (diff & 1)
10290 tmp = gen_rtx_PLUS (mode, tmp, out1);
10291 nops++;
10294 if (cf != 0)
10296 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
10297 nops++;
10299 if (!rtx_equal_p (tmp, out))
10301 if (nops == 1)
10302 out = force_operand (tmp, copy_rtx (out));
10303 else
10304 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
10306 if (!rtx_equal_p (out, operands[0]))
10307 emit_move_insn (operands[0], copy_rtx (out));
10309 return 1; /* DONE */
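/* Illustrative sketch of the lea-based form: for ct = 5, cf = 2 (diff = 3)
   and an SImode destination this emits roughly

       xorl   %eax, %eax
       cmpl   op1, op2
       setcc  %al                      ; dest = 0 or 1
       leal   2(%eax,%eax,2), %eax     ; dest = dest * 3 + 2 = 2 or 5

   so the select needs no branch and no sbb.  */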
10313 * General case: Jumpful:
10314 * xorl dest,dest cmpl op1, op2
10315 * cmpl op1, op2 movl ct, dest
10316 * setcc dest jcc 1f
10317 * decl dest movl cf, dest
10318 * andl (cf-ct),dest 1:
10319 * addl ct,dest
10321 * Size 20. Size 14.
10323 * This is reasonably steep, but branch mispredict costs are
10324 * high on modern cpus, so consider failing only if optimizing
10325 * for space.
10328 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10329 && BRANCH_COST >= 2)
10331 if (cf == 0)
10333 cf = ct;
10334 ct = 0;
10335 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10336 /* We may be reversing unordered compare to normal compare,
10337 that is not valid in general (we may convert non-trapping
10338 condition to trapping one), however on i386 we currently
10339 emit all comparisons unordered. */
10340 code = reverse_condition_maybe_unordered (code);
10341 else
10343 code = reverse_condition (code);
10344 if (compare_code != UNKNOWN)
10345 compare_code = reverse_condition (compare_code);
10349 if (compare_code != UNKNOWN)
10351 /* notl op1 (if needed)
10352 sarl $31, op1
10353 andl (cf-ct), op1
10354 addl ct, op1
10356 For x < 0 (resp. x <= -1) there will be no notl,
10357 so if possible swap the constants to get rid of the
10358 complement.
10359 True/false will be -1/0 while code below (store flag
10360 followed by decrement) is 0/-1, so the constants need
10361 to be exchanged once more. */
10363 if (compare_code == GE || !cf)
10365 code = reverse_condition (code);
10366 compare_code = LT;
10368 else
10370 HOST_WIDE_INT tmp = cf;
10371 cf = ct;
10372 ct = tmp;
10375 out = emit_store_flag (out, code, ix86_compare_op0,
10376 ix86_compare_op1, VOIDmode, 0, -1);
10378 else
10380 out = emit_store_flag (out, code, ix86_compare_op0,
10381 ix86_compare_op1, VOIDmode, 0, 1);
10383 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10384 copy_rtx (out), 1, OPTAB_DIRECT);
10387 out = expand_simple_binop (mode, AND, copy_rtx (out),
10388 gen_int_mode (cf - ct, mode),
10389 copy_rtx (out), 1, OPTAB_DIRECT);
10390 if (ct)
10391 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10392 copy_rtx (out), 1, OPTAB_DIRECT);
10393 if (!rtx_equal_p (out, operands[0]))
10394 emit_move_insn (operands[0], copy_rtx (out));
10396 return 1; /* DONE */
10400 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10402 /* Try a few things more with specific constants and a variable. */
10404 optab op;
10405 rtx var, orig_out, out, tmp;
10407 if (BRANCH_COST <= 2)
10408 return 0; /* FAIL */
10410 /* If one of the two operands is an interesting constant, load a
10411 constant with the above and mask it in with a logical operation. */
10413 if (GET_CODE (operands[2]) == CONST_INT)
10415 var = operands[3];
10416 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10417 operands[3] = constm1_rtx, op = and_optab;
10418 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10419 operands[3] = const0_rtx, op = ior_optab;
10420 else
10421 return 0; /* FAIL */
10423 else if (GET_CODE (operands[3]) == CONST_INT)
10425 var = operands[2];
10426 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10427 operands[2] = constm1_rtx, op = and_optab;
10428 else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
10429 operands[2] = const0_rtx, op = ior_optab;
10430 else
10431 return 0; /* FAIL */
10433 else
10434 return 0; /* FAIL */
10436 orig_out = operands[0];
10437 tmp = gen_reg_rtx (mode);
10438 operands[0] = tmp;
10440 /* Recurse to get the constant loaded. */
10441 if (ix86_expand_int_movcc (operands) == 0)
10442 return 0; /* FAIL */
10444 /* Mask in the interesting variable. */
10445 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10446 OPTAB_WIDEN);
10447 if (!rtx_equal_p (out, orig_out))
10448 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10450 return 1; /* DONE */
10454 * For comparison with above,
10456 * movl cf,dest
10457 * movl ct,tmp
10458 * cmpl op1,op2
10459 * cmovcc tmp,dest
10461 * Size 15.
10464 if (! nonimmediate_operand (operands[2], mode))
10465 operands[2] = force_reg (mode, operands[2]);
10466 if (! nonimmediate_operand (operands[3], mode))
10467 operands[3] = force_reg (mode, operands[3]);
10469 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10471 rtx tmp = gen_reg_rtx (mode);
10472 emit_move_insn (tmp, operands[3]);
10473 operands[3] = tmp;
10475 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10477 rtx tmp = gen_reg_rtx (mode);
10478 emit_move_insn (tmp, operands[2]);
10479 operands[2] = tmp;
10482 if (! register_operand (operands[2], VOIDmode)
10483 && (mode == QImode
10484 || ! register_operand (operands[3], VOIDmode)))
10485 operands[2] = force_reg (mode, operands[2]);
10487 if (mode == QImode
10488 && ! register_operand (operands[3], VOIDmode))
10489 operands[3] = force_reg (mode, operands[3]);
10491 emit_insn (compare_seq);
10492 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10493 gen_rtx_IF_THEN_ELSE (mode,
10494 compare_op, operands[2],
10495 operands[3])));
10496 if (bypass_test)
10497 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10498 gen_rtx_IF_THEN_ELSE (mode,
10499 bypass_test,
10500 copy_rtx (operands[3]),
10501 copy_rtx (operands[0]))));
10502 if (second_test)
10503 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10504 gen_rtx_IF_THEN_ELSE (mode,
10505 second_test,
10506 copy_rtx (operands[2]),
10507 copy_rtx (operands[0]))));
10509 return 1; /* DONE */
10512 /* Swap, force into registers, or otherwise massage the two operands
10513 to an sse comparison with a mask result. Thus we differ a bit from
10514 ix86_prepare_fp_compare_args which expects to produce a flags result.
10516 The DEST operand exists to help determine whether to commute commutative
10517 operators. The POP0/POP1 operands are updated in place. The new
10518 comparison code is returned, or UNKNOWN if not implementable. */
10520 static enum rtx_code
10521 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10522 rtx *pop0, rtx *pop1)
10524 rtx tmp;
10526 switch (code)
10528 case LTGT:
10529 case UNEQ:
10530 /* We have no LTGT as an operator. We could implement it with
10531 NE & ORDERED, but this requires an extra temporary. It's
10532 not clear that it's worth it. */
10533 return UNKNOWN;
10535 case LT:
10536 case LE:
10537 case UNGT:
10538 case UNGE:
10539 /* These are supported directly. */
10540 break;
10542 case EQ:
10543 case NE:
10544 case UNORDERED:
10545 case ORDERED:
10546 /* For commutative operators, try to canonicalize the destination
10547 operand to be first in the comparison - this helps reload to
10548 avoid extra moves. */
10549 if (!dest || !rtx_equal_p (dest, *pop1))
10550 break;
10551 /* FALLTHRU */
10553 case GE:
10554 case GT:
10555 case UNLE:
10556 case UNLT:
10557 /* These are not supported directly. Swap the comparison operands
10558 to transform into something that is supported. */
10559 tmp = *pop0;
10560 *pop0 = *pop1;
10561 *pop1 = tmp;
10562 code = swap_condition (code);
10563 break;
10565 default:
10566 gcc_unreachable ();
10569 return code;
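/* Example: a GE comparison "a >= b" is not available as an SSE compare,
   so the operands are swapped and the code becomes LE, i.e. "b <= a",
   which maps onto the cmple form directly.  */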
10572 /* Detect conditional moves that exactly match min/max operational
10573 semantics. Note that this is IEEE safe, as long as we don't
10574 interchange the operands.
10576 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10577 and TRUE if the operation is successful and instructions are emitted. */
10579 static bool
10580 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10581 rtx cmp_op1, rtx if_true, rtx if_false)
10583 enum machine_mode mode;
10584 bool is_min;
10585 rtx tmp;
10587 if (code == LT)
10589 else if (code == UNGE)
10591 tmp = if_true;
10592 if_true = if_false;
10593 if_false = tmp;
10595 else
10596 return false;
10598 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10599 is_min = true;
10600 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10601 is_min = false;
10602 else
10603 return false;
10605 mode = GET_MODE (dest);
10607 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10608 but MODE may be a vector mode and thus not appropriate. */
10609 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10611 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10612 rtvec v;
10614 if_true = force_reg (mode, if_true);
10615 v = gen_rtvec (2, if_true, if_false);
10616 tmp = gen_rtx_UNSPEC (mode, v, u);
10618 else
10620 code = is_min ? SMIN : SMAX;
10621 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10624 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
10625 return true;
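/* Example: "x < y ? x : y" is recognized here as a minimum.  With
   -ffinite-math-only and -funsafe-math-optimizations it becomes a plain
   SMIN (a single min{ss,ps}-style operation); otherwise the operands are
   wrapped in the IEEE min/max UNSPEC so their order, and thus the NaN and
   signed-zero behaviour, is preserved.  */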
10628 /* Expand an sse vector comparison. Return the register with the result. */
10630 static rtx
10631 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10632 rtx op_true, rtx op_false)
10634 enum machine_mode mode = GET_MODE (dest);
10635 rtx x;
10637 cmp_op0 = force_reg (mode, cmp_op0);
10638 if (!nonimmediate_operand (cmp_op1, mode))
10639 cmp_op1 = force_reg (mode, cmp_op1);
10641 if (optimize
10642 || reg_overlap_mentioned_p (dest, op_true)
10643 || reg_overlap_mentioned_p (dest, op_false))
10644 dest = gen_reg_rtx (mode);
10646 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10647 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10649 return dest;
10652 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10653 operations. This is used for both scalar and vector conditional moves. */
10655 static void
10656 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10658 enum machine_mode mode = GET_MODE (dest);
10659 rtx t2, t3, x;
10661 if (op_false == CONST0_RTX (mode))
10663 op_true = force_reg (mode, op_true);
10664 x = gen_rtx_AND (mode, cmp, op_true);
10665 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10667 else if (op_true == CONST0_RTX (mode))
10669 op_false = force_reg (mode, op_false);
10670 x = gen_rtx_NOT (mode, cmp);
10671 x = gen_rtx_AND (mode, x, op_false);
10672 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10674 else
10676 op_true = force_reg (mode, op_true);
10677 op_false = force_reg (mode, op_false);
10679 t2 = gen_reg_rtx (mode);
10680 if (optimize)
10681 t3 = gen_reg_rtx (mode);
10682 else
10683 t3 = dest;
10685 x = gen_rtx_AND (mode, op_true, cmp);
10686 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10688 x = gen_rtx_NOT (mode, cmp);
10689 x = gen_rtx_AND (mode, x, op_false);
10690 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
10692 x = gen_rtx_IOR (mode, t3, t2);
10693 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
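/* The general case above is the usual SSE mask-select idiom:

       t2   = cmp & op_true;
       t3   = ~cmp & op_false;
       dest = t3 | t2;

   i.e. dest = cmp ? op_true : op_false, with CMP being an all-ones /
   all-zeros mask per element.  */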
10697 /* Expand a floating-point conditional move. Return true if successful. */
10700 ix86_expand_fp_movcc (rtx operands[])
10702 enum machine_mode mode = GET_MODE (operands[0]);
10703 enum rtx_code code = GET_CODE (operands[1]);
10704 rtx tmp, compare_op, second_test, bypass_test;
10706 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
10708 enum machine_mode cmode;
10710 /* Since we have no cmove for sse registers, don't force bad register
10711 allocation just to gain access to it. Deny movcc when the
10712 comparison mode doesn't match the move mode. */
10713 cmode = GET_MODE (ix86_compare_op0);
10714 if (cmode == VOIDmode)
10715 cmode = GET_MODE (ix86_compare_op1);
10716 if (cmode != mode)
10717 return 0;
10719 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10720 &ix86_compare_op0,
10721 &ix86_compare_op1);
10722 if (code == UNKNOWN)
10723 return 0;
10725 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
10726 ix86_compare_op1, operands[2],
10727 operands[3]))
10728 return 1;
10730 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
10731 ix86_compare_op1, operands[2], operands[3]);
10732 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
10733 return 1;
10736 /* The floating point conditional move instructions don't directly
10737 support conditions resulting from a signed integer comparison. */
10739 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10744 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10746 gcc_assert (!second_test && !bypass_test);
10747 tmp = gen_reg_rtx (QImode);
10748 ix86_expand_setcc (code, tmp);
10749 code = NE;
10750 ix86_compare_op0 = tmp;
10751 ix86_compare_op1 = const0_rtx;
10752 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10754 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10756 tmp = gen_reg_rtx (mode);
10757 emit_move_insn (tmp, operands[3]);
10758 operands[3] = tmp;
10760 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10762 tmp = gen_reg_rtx (mode);
10763 emit_move_insn (tmp, operands[2]);
10764 operands[2] = tmp;
10767 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10768 gen_rtx_IF_THEN_ELSE (mode, compare_op,
10769 operands[2], operands[3])));
10770 if (bypass_test)
10771 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10772 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
10773 operands[3], operands[0])));
10774 if (second_test)
10775 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10776 gen_rtx_IF_THEN_ELSE (mode, second_test,
10777 operands[2], operands[0])));
10779 return 1;
10782 /* Expand a floating-point vector conditional move; a vcond operation
10783 rather than a movcc operation. */
10785 bool
10786 ix86_expand_fp_vcond (rtx operands[])
10788 enum rtx_code code = GET_CODE (operands[3]);
10789 rtx cmp;
10791 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10792 &operands[4], &operands[5]);
10793 if (code == UNKNOWN)
10794 return false;
10796 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
10797 operands[5], operands[1], operands[2]))
10798 return true;
10800 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10801 operands[1], operands[2]);
10802 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10803 return true;
10806 /* Expand a signed integral vector conditional move. */
10808 bool
10809 ix86_expand_int_vcond (rtx operands[])
10811 enum machine_mode mode = GET_MODE (operands[0]);
10812 enum rtx_code code = GET_CODE (operands[3]);
10813 bool negate = false;
10814 rtx x, cop0, cop1;
10816 cop0 = operands[4];
10817 cop1 = operands[5];
10819 /* Canonicalize the comparison to EQ, GT, GTU. */
10820 switch (code)
10822 case EQ:
10823 case GT:
10824 case GTU:
10825 break;
10827 case NE:
10828 case LE:
10829 case LEU:
10830 code = reverse_condition (code);
10831 negate = true;
10832 break;
10834 case GE:
10835 case GEU:
10836 code = reverse_condition (code);
10837 negate = true;
10838 /* FALLTHRU */
10840 case LT:
10841 case LTU:
10842 code = swap_condition (code);
10843 x = cop0, cop0 = cop1, cop1 = x;
10844 break;
10846 default:
10847 gcc_unreachable ();
10850 /* Unsigned parallel compare is not supported by the hardware. Play some
10851 tricks to turn this into a signed comparison against 0. */
10852 if (code == GTU)
10854 switch (mode)
10856 case V4SImode:
10858 rtx t1, t2, mask;
10860 /* Perform a parallel modulo subtraction. */
10861 t1 = gen_reg_rtx (mode);
10862 emit_insn (gen_subv4si3 (t1, cop0, cop1));
10864 /* Extract the original sign bit of op0. */
10865 mask = GEN_INT (-0x80000000);
10866 mask = gen_rtx_CONST_VECTOR (mode,
10867 gen_rtvec (4, mask, mask, mask, mask));
10868 mask = force_reg (mode, mask);
10869 t2 = gen_reg_rtx (mode);
10870 emit_insn (gen_andv4si3 (t2, cop0, mask));
10872 /* XOR it back into the result of the subtraction. This results
10873 in the sign bit set iff we saw unsigned underflow. */
10874 x = gen_reg_rtx (mode);
10875 emit_insn (gen_xorv4si3 (x, t1, t2));
10877 code = GT;
10879 break;
10881 case V16QImode:
10882 case V8HImode:
10883 /* Perform a parallel unsigned saturating subtraction. */
10884 x = gen_reg_rtx (mode);
10885 emit_insn (gen_rtx_SET (VOIDmode, x,
10886 gen_rtx_US_MINUS (mode, cop0, cop1)));
10888 code = EQ;
10889 negate = !negate;
10890 break;
10892 default:
10893 gcc_unreachable ();
10896 cop0 = x;
10897 cop1 = CONST0_RTX (mode);
10900 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
10901 operands[1+negate], operands[2-negate]);
10903 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
10904 operands[2-negate]);
10905 return true;
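/* Example of the unsigned trick for V16QImode/V8HImode: "a > b" (unsigned)
   holds exactly when the saturating subtraction "a -us b" is nonzero, so
   the code compares that result against zero with EQ and flips NEGATE to
   select the operands the other way round.  */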
10908 /* Expand conditional increment or decrement using adc/sbb instructions.
10909 The default case using setcc followed by the conditional move can be
10910 done by generic code. */
10912 ix86_expand_int_addcc (rtx operands[])
10914 enum rtx_code code = GET_CODE (operands[1]);
10915 rtx compare_op;
10916 rtx val = const0_rtx;
10917 bool fpcmp = false;
10918 enum machine_mode mode = GET_MODE (operands[0]);
10920 if (operands[3] != const1_rtx
10921 && operands[3] != constm1_rtx)
10922 return 0;
10923 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10924 ix86_compare_op1, &compare_op))
10925 return 0;
10926 code = GET_CODE (compare_op);
10928 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10929 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10931 fpcmp = true;
10932 code = ix86_fp_compare_code_to_integer (code);
10935 if (code != LTU)
10937 val = constm1_rtx;
10938 if (fpcmp)
10939 PUT_CODE (compare_op,
10940 reverse_condition_maybe_unordered
10941 (GET_CODE (compare_op)));
10942 else
10943 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10945 PUT_MODE (compare_op, mode);
10947 /* Construct either adc or sbb insn. */
10948 if ((code == LTU) == (operands[3] == constm1_rtx))
10950 switch (GET_MODE (operands[0]))
10952 case QImode:
10953 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
10954 break;
10955 case HImode:
10956 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
10957 break;
10958 case SImode:
10959 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
10960 break;
10961 case DImode:
10962 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10963 break;
10964 default:
10965 gcc_unreachable ();
10968 else
10970 switch (GET_MODE (operands[0]))
10972 case QImode:
10973 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
10974 break;
10975 case HImode:
10976 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
10977 break;
10978 case SImode:
10979 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
10980 break;
10981 case DImode:
10982 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10983 break;
10984 default:
10985 gcc_unreachable ();
10988 return 1; /* DONE */
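/* Example: a conditional increment such as "x += (a < b)" with unsigned
   operands is emitted as a compare that leaves the result in the carry
   flag followed by an adc with a zero immediate, roughly

       cmpl  b, a
       adcl  $0, x

   and a conditional decrement uses sbb in the same way.  */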
10992 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
10993 works for floating point parameters and non-offsettable memories.
10994 For pushes, it returns just stack offsets; the values will be saved
10995 in the right order. At most three parts are generated. */
10997 static int
10998 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
11000 int size;
11002 if (!TARGET_64BIT)
11003 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
11004 else
11005 size = (GET_MODE_SIZE (mode) + 4) / 8;
11007 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
11008 gcc_assert (size >= 2 && size <= 3);
11010 /* Optimize constant pool reference to immediates. This is used by fp
11011 moves, which force all constants to memory to allow combining. */
11012 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
11014 rtx tmp = maybe_get_pool_constant (operand);
11015 if (tmp)
11016 operand = tmp;
11019 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
11021 /* The only non-offsettable memories we handle are pushes. */
11022 int ok = push_operand (operand, VOIDmode);
11024 gcc_assert (ok);
11026 operand = copy_rtx (operand);
11027 PUT_MODE (operand, Pmode);
11028 parts[0] = parts[1] = parts[2] = operand;
11029 return size;
11032 if (GET_CODE (operand) == CONST_VECTOR)
11034 enum machine_mode imode = int_mode_for_mode (mode);
11035 /* Caution: if we looked through a constant pool memory above,
11036 the operand may actually have a different mode now. That's
11037 ok, since we want to pun this all the way back to an integer. */
11038 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
11039 gcc_assert (operand != NULL);
11040 mode = imode;
11043 if (!TARGET_64BIT)
11045 if (mode == DImode)
11046 split_di (&operand, 1, &parts[0], &parts[1]);
11047 else
11049 if (REG_P (operand))
11051 gcc_assert (reload_completed);
11052 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
11053 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
11054 if (size == 3)
11055 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
11057 else if (offsettable_memref_p (operand))
11059 operand = adjust_address (operand, SImode, 0);
11060 parts[0] = operand;
11061 parts[1] = adjust_address (operand, SImode, 4);
11062 if (size == 3)
11063 parts[2] = adjust_address (operand, SImode, 8);
11065 else if (GET_CODE (operand) == CONST_DOUBLE)
11067 REAL_VALUE_TYPE r;
11068 long l[4];
11070 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11071 switch (mode)
11073 case XFmode:
11074 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
11075 parts[2] = gen_int_mode (l[2], SImode);
11076 break;
11077 case DFmode:
11078 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
11079 break;
11080 default:
11081 gcc_unreachable ();
11083 parts[1] = gen_int_mode (l[1], SImode);
11084 parts[0] = gen_int_mode (l[0], SImode);
11086 else
11087 gcc_unreachable ();
11090 else
11092 if (mode == TImode)
11093 split_ti (&operand, 1, &parts[0], &parts[1]);
11094 if (mode == XFmode || mode == TFmode)
11096 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
11097 if (REG_P (operand))
11099 gcc_assert (reload_completed);
11100 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
11101 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
11103 else if (offsettable_memref_p (operand))
11105 operand = adjust_address (operand, DImode, 0);
11106 parts[0] = operand;
11107 parts[1] = adjust_address (operand, upper_mode, 8);
11109 else if (GET_CODE (operand) == CONST_DOUBLE)
11111 REAL_VALUE_TYPE r;
11112 long l[4];
11114 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11115 real_to_target (l, &r, mode);
11117 /* Do not use shift by 32 to avoid warning on 32bit systems. */
11118 if (HOST_BITS_PER_WIDE_INT >= 64)
11119 parts[0]
11120 = gen_int_mode
11121 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
11122 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
11123 DImode);
11124 else
11125 parts[0] = immed_double_const (l[0], l[1], DImode);
11127 if (upper_mode == SImode)
11128 parts[1] = gen_int_mode (l[2], SImode);
11129 else if (HOST_BITS_PER_WIDE_INT >= 64)
11130 parts[1]
11131 = gen_int_mode
11132 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
11133 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
11134 DImode);
11135 else
11136 parts[1] = immed_double_const (l[2], l[3], DImode);
11138 else
11139 gcc_unreachable ();
11143 return size;
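/* Example: on a 32-bit target a DFmode constant such as 1.5 is split into
   two SImode immediates taken from its IEEE-754 image, here a low part of
   0x00000000 and a high part of 0x3ff80000; register and offsettable
   memory operands are split into consecutive SImode words instead.  */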
11146 /* Emit insns to perform a move or push of DI, DF, and XF values.
11147 Return false when normal moves are needed; true when all required
11148 insns have been emitted. Operands 2-4 contain the input values
11149 in the correct order; operands 5-7 contain the output values. */
11151 void
11152 ix86_split_long_move (rtx operands[])
11154 rtx part[2][3];
11155 int nparts;
11156 int push = 0;
11157 int collisions = 0;
11158 enum machine_mode mode = GET_MODE (operands[0]);
11160 /* The DFmode expanders may ask us to move a double.
11161 For a 64-bit target this is a single move. By hiding the fact
11162 here we simplify i386.md splitters. */
11163 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
11165 /* Optimize constant pool reference to immediates. This is used by
11166 fp moves, which force all constants to memory to allow combining. */
11168 if (GET_CODE (operands[1]) == MEM
11169 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11170 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
11171 operands[1] = get_pool_constant (XEXP (operands[1], 0));
11172 if (push_operand (operands[0], VOIDmode))
11174 operands[0] = copy_rtx (operands[0]);
11175 PUT_MODE (operands[0], Pmode);
11177 else
11178 operands[0] = gen_lowpart (DImode, operands[0]);
11179 operands[1] = gen_lowpart (DImode, operands[1]);
11180 emit_move_insn (operands[0], operands[1]);
11181 return;
11184 /* The only non-offsettable memory we handle is push. */
11185 if (push_operand (operands[0], VOIDmode))
11186 push = 1;
11187 else
11188 gcc_assert (GET_CODE (operands[0]) != MEM
11189 || offsettable_memref_p (operands[0]));
11191 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
11192 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
11194 /* When emitting a push, take care of source operands on the stack. */
11195 if (push && GET_CODE (operands[1]) == MEM
11196 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
11198 if (nparts == 3)
11199 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
11200 XEXP (part[1][2], 0));
11201 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
11202 XEXP (part[1][1], 0));
11205 /* We need to do the copy in the right order in case an address register
11206 of the source overlaps the destination. */
11207 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
11209 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
11210 collisions++;
11211 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11212 collisions++;
11213 if (nparts == 3
11214 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
11215 collisions++;
11217 /* Collision in the middle part can be handled by reordering. */
11218 if (collisions == 1 && nparts == 3
11219 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11221 rtx tmp;
11222 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
11223 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
11226 /* If there are more collisions, we can't handle them by reordering.
11227 Do an lea to the last part and use only one colliding move. */
11228 else if (collisions > 1)
11230 rtx base;
11232 collisions = 1;
11234 base = part[0][nparts - 1];
11236 /* Handle the case when the last part isn't valid for lea.
11237 Happens in 64-bit mode storing the 12-byte XFmode. */
11238 if (GET_MODE (base) != Pmode)
11239 base = gen_rtx_REG (Pmode, REGNO (base));
11241 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
11242 part[1][0] = replace_equiv_address (part[1][0], base);
11243 part[1][1] = replace_equiv_address (part[1][1],
11244 plus_constant (base, UNITS_PER_WORD));
11245 if (nparts == 3)
11246 part[1][2] = replace_equiv_address (part[1][2],
11247 plus_constant (base, 8));
11251 if (push)
11253 if (!TARGET_64BIT)
11255 if (nparts == 3)
11257 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
11258 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
11259 emit_move_insn (part[0][2], part[1][2]);
11262 else
11264 /* In 64-bit mode we don't have a 32-bit push available. If this is a
11265 register, that is OK - we will just use the larger counterpart. We also
11266 retype memory - this comes from an attempt to avoid the REX prefix on
11267 moves of the second half of a TFmode value. */
11268 if (GET_MODE (part[1][1]) == SImode)
11270 switch (GET_CODE (part[1][1]))
11272 case MEM:
11273 part[1][1] = adjust_address (part[1][1], DImode, 0);
11274 break;
11276 case REG:
11277 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
11278 break;
11280 default:
11281 gcc_unreachable ();
11284 if (GET_MODE (part[1][0]) == SImode)
11285 part[1][0] = part[1][1];
11288 emit_move_insn (part[0][1], part[1][1]);
11289 emit_move_insn (part[0][0], part[1][0]);
11290 return;
11293 /* Choose the correct order so as not to overwrite the source before it is copied. */
11294 if ((REG_P (part[0][0])
11295 && REG_P (part[1][1])
11296 && (REGNO (part[0][0]) == REGNO (part[1][1])
11297 || (nparts == 3
11298 && REGNO (part[0][0]) == REGNO (part[1][2]))))
11299 || (collisions > 0
11300 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
11302 if (nparts == 3)
11304 operands[2] = part[0][2];
11305 operands[3] = part[0][1];
11306 operands[4] = part[0][0];
11307 operands[5] = part[1][2];
11308 operands[6] = part[1][1];
11309 operands[7] = part[1][0];
11311 else
11313 operands[2] = part[0][1];
11314 operands[3] = part[0][0];
11315 operands[5] = part[1][1];
11316 operands[6] = part[1][0];
11319 else
11321 if (nparts == 3)
11323 operands[2] = part[0][0];
11324 operands[3] = part[0][1];
11325 operands[4] = part[0][2];
11326 operands[5] = part[1][0];
11327 operands[6] = part[1][1];
11328 operands[7] = part[1][2];
11330 else
11332 operands[2] = part[0][0];
11333 operands[3] = part[0][1];
11334 operands[5] = part[1][0];
11335 operands[6] = part[1][1];
11339 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11340 if (optimize_size)
11342 if (GET_CODE (operands[5]) == CONST_INT
11343 && operands[5] != const0_rtx
11344 && REG_P (operands[2]))
11346 if (GET_CODE (operands[6]) == CONST_INT
11347 && INTVAL (operands[6]) == INTVAL (operands[5]))
11348 operands[6] = operands[2];
11350 if (nparts == 3
11351 && GET_CODE (operands[7]) == CONST_INT
11352 && INTVAL (operands[7]) == INTVAL (operands[5]))
11353 operands[7] = operands[2];
11356 if (nparts == 3
11357 && GET_CODE (operands[6]) == CONST_INT
11358 && operands[6] != const0_rtx
11359 && REG_P (operands[3])
11360 && GET_CODE (operands[7]) == CONST_INT
11361 && INTVAL (operands[7]) == INTVAL (operands[6]))
11362 operands[7] = operands[3];
11365 emit_move_insn (operands[2], operands[5]);
11366 emit_move_insn (operands[3], operands[6]);
11367 if (nparts == 3)
11368 emit_move_insn (operands[4], operands[7]);
11370 return;
11373 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
11374 left shift by a constant, either using a single shift or
11375 a sequence of add instructions. */
11377 static void
11378 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
11380 if (count == 1)
11382 emit_insn ((mode == DImode
11383 ? gen_addsi3
11384 : gen_adddi3) (operand, operand, operand));
11386 else if (!optimize_size
11387 && count * ix86_cost->add <= ix86_cost->shift_const)
11389 int i;
11390 for (i=0; i<count; i++)
11392 emit_insn ((mode == DImode
11393 ? gen_addsi3
11394 : gen_adddi3) (operand, operand, operand));
11397 else
11398 emit_insn ((mode == DImode
11399 ? gen_ashlsi3
11400 : gen_ashldi3) (operand, operand, GEN_INT (count)));
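/* Example: when not optimizing for size and adds are cheap enough, a left
   shift of a word by 2 is emitted as two self-adds,

       addl  %eax, %eax
       addl  %eax, %eax

   instead of a single "shll $2"; the cost comparison above decides which
   form wins on the current tuning.  */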
11403 void
11404 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
11406 rtx low[2], high[2];
11407 int count;
11408 const int single_width = mode == DImode ? 32 : 64;
11410 if (GET_CODE (operands[2]) == CONST_INT)
11412 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11413 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11415 if (count >= single_width)
11417 emit_move_insn (high[0], low[1]);
11418 emit_move_insn (low[0], const0_rtx);
11420 if (count > single_width)
11421 ix86_expand_ashl_const (high[0], count - single_width, mode);
11423 else
11425 if (!rtx_equal_p (operands[0], operands[1]))
11426 emit_move_insn (operands[0], operands[1]);
11427 emit_insn ((mode == DImode
11428 ? gen_x86_shld_1
11429 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
11430 ix86_expand_ashl_const (low[0], count, mode);
11432 return;
11435 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11437 if (operands[1] == const1_rtx)
11439 /* Assuming we've chosen QImode-capable registers, then 1 << N
11440 can be done with two 32/64-bit shifts, no branches, no cmoves. */
11441 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11443 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11445 ix86_expand_clear (low[0]);
11446 ix86_expand_clear (high[0]);
11447 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
11449 d = gen_lowpart (QImode, low[0]);
11450 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11451 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11452 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11454 d = gen_lowpart (QImode, high[0]);
11455 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11456 s = gen_rtx_NE (QImode, flags, const0_rtx);
11457 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11460 /* Otherwise, we can get the same results by manually performing
11461 a bit extract operation on bit 5/6, and then performing the two
11462 shifts. The two methods of getting 0/1 into low/high are exactly
11463 the same size. Avoiding the shift in the bit extract case helps
11464 pentium4 a bit; no one else seems to care much either way. */
11465 else
11467 rtx x;
11469 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11470 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
11471 else
11472 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
11473 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11475 emit_insn ((mode == DImode
11476 ? gen_lshrsi3
11477 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
11478 emit_insn ((mode == DImode
11479 ? gen_andsi3
11480 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
11481 emit_move_insn (low[0], high[0]);
11482 emit_insn ((mode == DImode
11483 ? gen_xorsi3
11484 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
11487 emit_insn ((mode == DImode
11488 ? gen_ashlsi3
11489 : gen_ashldi3) (low[0], low[0], operands[2]));
11490 emit_insn ((mode == DImode
11491 ? gen_ashlsi3
11492 : gen_ashldi3) (high[0], high[0], operands[2]));
11493 return;
11496 if (operands[1] == constm1_rtx)
11498 /* For -1 << N, we can avoid the shld instruction, because we
11499 know that we're shifting 0...31/63 ones into a -1. */
11500 emit_move_insn (low[0], constm1_rtx);
11501 if (optimize_size)
11502 emit_move_insn (high[0], low[0]);
11503 else
11504 emit_move_insn (high[0], constm1_rtx);
11506 else
11508 if (!rtx_equal_p (operands[0], operands[1]))
11509 emit_move_insn (operands[0], operands[1]);
11511 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11512 emit_insn ((mode == DImode
11513 ? gen_x86_shld_1
11514 : gen_x86_64_shld) (high[0], low[0], operands[2]));
11517 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
11519 if (TARGET_CMOVE && scratch)
11521 ix86_expand_clear (scratch);
11522 emit_insn ((mode == DImode
11523 ? gen_x86_shift_adj_1
11524 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
11526 else
11527 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
11530 void
11531 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
11533 rtx low[2], high[2];
11534 int count;
11535 const int single_width = mode == DImode ? 32 : 64;
11537 if (GET_CODE (operands[2]) == CONST_INT)
11539 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11540 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11542 if (count == single_width * 2 - 1)
11544 emit_move_insn (high[0], high[1]);
11545 emit_insn ((mode == DImode
11546 ? gen_ashrsi3
11547 : gen_ashrdi3) (high[0], high[0],
11548 GEN_INT (single_width - 1)));
11549 emit_move_insn (low[0], high[0]);
11552 else if (count >= single_width)
11554 emit_move_insn (low[0], high[1]);
11555 emit_move_insn (high[0], low[0]);
11556 emit_insn ((mode == DImode
11557 ? gen_ashrsi3
11558 : gen_ashrdi3) (high[0], high[0],
11559 GEN_INT (single_width - 1)));
11560 if (count > single_width)
11561 emit_insn ((mode == DImode
11562 ? gen_ashrsi3
11563 : gen_ashrdi3) (low[0], low[0],
11564 GEN_INT (count - single_width)));
11566 else
11568 if (!rtx_equal_p (operands[0], operands[1]))
11569 emit_move_insn (operands[0], operands[1]);
11570 emit_insn ((mode == DImode
11571 ? gen_x86_shrd_1
11572 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11573 emit_insn ((mode == DImode
11574 ? gen_ashrsi3
11575 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
11578 else
11580 if (!rtx_equal_p (operands[0], operands[1]))
11581 emit_move_insn (operands[0], operands[1]);
11583 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11585 emit_insn ((mode == DImode
11586 ? gen_x86_shrd_1
11587 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11588 emit_insn ((mode == DImode
11589 ? gen_ashrsi3
11590 : gen_ashrdi3) (high[0], high[0], operands[2]));
11592 if (TARGET_CMOVE && scratch)
11594 emit_move_insn (scratch, high[0]);
11595 emit_insn ((mode == DImode
11596 ? gen_ashrsi3
11597 : gen_ashrdi3) (scratch, scratch,
11598 GEN_INT (single_width - 1)));
11599 emit_insn ((mode == DImode
11600 ? gen_x86_shift_adj_1
11601 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11602 scratch));
11604 else
11605 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
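/* Example: for a constant DImode arithmetic shift "x >> 40" on a 32-bit
   target the code above emits roughly

       movl  hi(x), lo(dest)
       movl  lo(dest), hi(dest)
       sarl  $31, hi(dest)          ; replicate the sign bit
       sarl  $8, lo(dest)           ; the remaining 40 - 32 bits

   i.e. the low word comes from the high word and the high word becomes
   the sign mask.  */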
11609 void
11610 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
11612 rtx low[2], high[2];
11613 int count;
11614 const int single_width = mode == DImode ? 32 : 64;
11616 if (GET_CODE (operands[2]) == CONST_INT)
11618 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11619 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11621 if (count >= single_width)
11623 emit_move_insn (low[0], high[1]);
11624 ix86_expand_clear (high[0]);
11626 if (count > single_width)
11627 emit_insn ((mode == DImode
11628 ? gen_lshrsi3
11629 : gen_lshrdi3) (low[0], low[0],
11630 GEN_INT (count - single_width)));
11632 else
11634 if (!rtx_equal_p (operands[0], operands[1]))
11635 emit_move_insn (operands[0], operands[1]);
11636 emit_insn ((mode == DImode
11637 ? gen_x86_shrd_1
11638 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11639 emit_insn ((mode == DImode
11640 ? gen_lshrsi3
11641 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
11644 else
11646 if (!rtx_equal_p (operands[0], operands[1]))
11647 emit_move_insn (operands[0], operands[1]);
11649 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11651 emit_insn ((mode == DImode
11652 ? gen_x86_shrd_1
11653 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11654 emit_insn ((mode == DImode
11655 ? gen_lshrsi3
11656 : gen_lshrdi3) (high[0], high[0], operands[2]));
11658 /* Heh. By reversing the arguments, we can reuse this pattern. */
11659 if (TARGET_CMOVE && scratch)
11661 ix86_expand_clear (scratch);
11662 emit_insn ((mode == DImode
11663 ? gen_x86_shift_adj_1
11664 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11665 scratch));
11667 else
11668 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
11672 /* Helper function for the string operations below. Test whether VARIABLE
11673 is aligned to VALUE bytes; if so, jump to the returned label. */
11674 static rtx
11675 ix86_expand_aligntest (rtx variable, int value)
11677 rtx label = gen_label_rtx ();
11678 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11679 if (GET_MODE (variable) == DImode)
11680 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11681 else
11682 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11683 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
11684 1, label);
11685 return label;
11688 /* Decrement COUNTREG by VALUE. */
11689 static void
11690 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
11692 if (GET_MODE (countreg) == DImode)
11693 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
11694 else
11695 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
11698 /* Zero extend possibly SImode EXP to Pmode register. */
11700 ix86_zero_extend_to_Pmode (rtx exp)
11702 rtx r;
11703 if (GET_MODE (exp) == VOIDmode)
11704 return force_reg (Pmode, exp);
11705 if (GET_MODE (exp) == Pmode)
11706 return copy_to_mode_reg (Pmode, exp);
11707 r = gen_reg_rtx (Pmode);
11708 emit_insn (gen_zero_extendsidi2 (r, exp));
11709 return r;
11712 /* Expand string move (memcpy) operation. Use i386 string operations when
11713 profitable. expand_clrmem contains similar code. */
11715 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
11717 rtx srcreg, destreg, countreg, srcexp, destexp;
11718 enum machine_mode counter_mode;
11719 HOST_WIDE_INT align = 0;
11720 unsigned HOST_WIDE_INT count = 0;
11722 if (GET_CODE (align_exp) == CONST_INT)
11723 align = INTVAL (align_exp);
11725 /* Can't use any of this if the user has appropriated esi or edi. */
11726 if (global_regs[4] || global_regs[5])
11727 return 0;
11729 /* This simple hack avoids all inlining code and simplifies code below. */
11730 if (!TARGET_ALIGN_STRINGOPS)
11731 align = 64;
11733 if (GET_CODE (count_exp) == CONST_INT)
11735 count = INTVAL (count_exp);
11736 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11737 return 0;
11740 /* Figure out the proper mode for the counter. For 32 bits it is always SImode,
11741 for 64 bits use SImode when possible, otherwise DImode.
11742 Set count to number of bytes copied when known at compile time. */
11743 if (!TARGET_64BIT
11744 || GET_MODE (count_exp) == SImode
11745 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11746 counter_mode = SImode;
11747 else
11748 counter_mode = DImode;
11750 gcc_assert (counter_mode == SImode || counter_mode == DImode);
11752 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11753 if (destreg != XEXP (dst, 0))
11754 dst = replace_equiv_address_nv (dst, destreg);
11755 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11756 if (srcreg != XEXP (src, 0))
11757 src = replace_equiv_address_nv (src, srcreg);
11759 /* When optimizing for size, emit a simple rep; movsb instruction for
11760 counts not divisible by 4, except when the (movsl;)*(movsw;)?(movsb;)?
11761 sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
11762 The size of the (movsl;)*(movsw;)?(movsb;)? sequence is
11763 count / 4 + (count & 3), the other sequence is either 4 or 7 bytes,
11764 but we don't know whether upper 24 (resp. 56) bits of %ecx will be
11765 known to be zero or not. The rep; movsb sequence causes higher
11766 register pressure though, so take that into account. */
11768 if ((!optimize || optimize_size)
11769 && (count == 0
11770 || ((count & 0x03)
11771 && (!optimize_size
11772 || count > 5 * 4
11773 || (count & 3) + count / 4 > 6))))
11775 emit_insn (gen_cld ());
11776 countreg = ix86_zero_extend_to_Pmode (count_exp);
11777 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11778 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
11779 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
11780 destexp, srcexp));
11783 /* For constant aligned (or small unaligned) copies use rep movsl
11784 followed by code copying the rest. For PentiumPro ensure 8 byte
11785 alignment to allow rep movsl acceleration. */
11787 else if (count != 0
11788 && (align >= 8
11789 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11790 || optimize_size || count < (unsigned int) 64))
11792 unsigned HOST_WIDE_INT offset = 0;
11793 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11794 rtx srcmem, dstmem;
11796 emit_insn (gen_cld ());
11797 if (count & ~(size - 1))
11799 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
11801 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
11803 while (offset < (count & ~(size - 1)))
11805 srcmem = adjust_automodify_address_nv (src, movs_mode,
11806 srcreg, offset);
11807 dstmem = adjust_automodify_address_nv (dst, movs_mode,
11808 destreg, offset);
11809 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11810 offset += size;
11813 else
11815 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
11816 & (TARGET_64BIT ? -1 : 0x3fffffff));
11817 countreg = copy_to_mode_reg (counter_mode, countreg);
11818 countreg = ix86_zero_extend_to_Pmode (countreg);
11820 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11821 GEN_INT (size == 4 ? 2 : 3));
11822 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11823 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11825 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11826 countreg, destexp, srcexp));
11827 offset = count & ~(size - 1);
11830 if (size == 8 && (count & 0x04))
11832 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
11833 offset);
11834 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
11835 offset);
11836 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11837 offset += 4;
11839 if (count & 0x02)
11841 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
11842 offset);
11843 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
11844 offset);
11845 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11846 offset += 2;
11848 if (count & 0x01)
11850 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
11851 offset);
11852 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
11853 offset);
11854 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11857 /* The generic code based on the glibc implementation:
11858 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
11859 allowing accelerated copying there)
11860 - copy the data using rep movsl
11861 - copy the rest. */
11862 else
11864 rtx countreg2;
11865 rtx label = NULL;
11866 rtx srcmem, dstmem;
11867 int desired_alignment = (TARGET_PENTIUMPRO
11868 && (count == 0 || count >= (unsigned int) 260)
11869 ? 8 : UNITS_PER_WORD);
11870 /* Get rid of MEM_OFFSETs, they won't be accurate. */
11871 dst = change_address (dst, BLKmode, destreg);
11872 src = change_address (src, BLKmode, srcreg);
11874 /* In case we don't know anything about the alignment, default to
11875 library version, since it is usually equally fast and results in
11876 shorter code.
11878 Also emit call when we know that the count is large and call overhead
11879 will not be important. */
11880 if (!TARGET_INLINE_ALL_STRINGOPS
11881 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11882 return 0;
11884 if (TARGET_SINGLE_STRINGOP)
11885 emit_insn (gen_cld ());
11887 countreg2 = gen_reg_rtx (Pmode);
11888 countreg = copy_to_mode_reg (counter_mode, count_exp);
11890 /* We don't use loops to align destination and to copy parts smaller
11891 than 4 bytes, because gcc is able to optimize such code better (in
11892 the case the destination or the count really is aligned, gcc is often
11893 able to predict the branches) and also it is friendlier to the
11894 hardware branch prediction.
11896 Using loops is beneficial for the generic case, because we can
11897 handle small counts using the loops. Many CPUs (such as Athlon)
11898 have large REP prefix setup costs.
11900 This is quite costly. Maybe we can revisit this decision later or
11901 add some customizability to this code. */
11903 if (count == 0 && align < desired_alignment)
11905 label = gen_label_rtx ();
11906 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11907 LEU, 0, counter_mode, 1, label);
11909 if (align <= 1)
11911 rtx label = ix86_expand_aligntest (destreg, 1);
11912 srcmem = change_address (src, QImode, srcreg);
11913 dstmem = change_address (dst, QImode, destreg);
11914 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11915 ix86_adjust_counter (countreg, 1);
11916 emit_label (label);
11917 LABEL_NUSES (label) = 1;
11919 if (align <= 2)
11921 rtx label = ix86_expand_aligntest (destreg, 2);
11922 srcmem = change_address (src, HImode, srcreg);
11923 dstmem = change_address (dst, HImode, destreg);
11924 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11925 ix86_adjust_counter (countreg, 2);
11926 emit_label (label);
11927 LABEL_NUSES (label) = 1;
11929 if (align <= 4 && desired_alignment > 4)
11931 rtx label = ix86_expand_aligntest (destreg, 4);
11932 srcmem = change_address (src, SImode, srcreg);
11933 dstmem = change_address (dst, SImode, destreg);
11934 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11935 ix86_adjust_counter (countreg, 4);
11936 emit_label (label);
11937 LABEL_NUSES (label) = 1;
11940 if (label && desired_alignment > 4 && !TARGET_64BIT)
11942 emit_label (label);
11943 LABEL_NUSES (label) = 1;
11944 label = NULL_RTX;
11946 if (!TARGET_SINGLE_STRINGOP)
11947 emit_insn (gen_cld ());
11948 if (TARGET_64BIT)
11950 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11951 GEN_INT (3)));
11952 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11954 else
11956 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11957 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11959 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11960 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11961 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11962 countreg2, destexp, srcexp));
11964 if (label)
11966 emit_label (label);
11967 LABEL_NUSES (label) = 1;
11969 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11971 srcmem = change_address (src, SImode, srcreg);
11972 dstmem = change_address (dst, SImode, destreg);
11973 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11975 if ((align <= 4 || count == 0) && TARGET_64BIT)
11977 rtx label = ix86_expand_aligntest (countreg, 4);
11978 srcmem = change_address (src, SImode, srcreg);
11979 dstmem = change_address (dst, SImode, destreg);
11980 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11981 emit_label (label);
11982 LABEL_NUSES (label) = 1;
11984 if (align > 2 && count != 0 && (count & 2))
11986 srcmem = change_address (src, HImode, srcreg);
11987 dstmem = change_address (dst, HImode, destreg);
11988 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11990 if (align <= 2 || count == 0)
11992 rtx label = ix86_expand_aligntest (countreg, 2);
11993 srcmem = change_address (src, HImode, srcreg);
11994 dstmem = change_address (dst, HImode, destreg);
11995 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11996 emit_label (label);
11997 LABEL_NUSES (label) = 1;
11999 if (align > 1 && count != 0 && (count & 1))
12001 srcmem = change_address (src, QImode, srcreg);
12002 dstmem = change_address (dst, QImode, destreg);
12003 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12005 if (align <= 1 || count == 0)
12007 rtx label = ix86_expand_aligntest (countreg, 1);
12008 srcmem = change_address (src, QImode, srcreg);
12009 dstmem = change_address (dst, QImode, destreg);
12010 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12011 emit_label (label);
12012 LABEL_NUSES (label) = 1;
12016 return 1;
12019 /* Expand string clear operation (bzero). Use i386 string operations when
12020 profitable. expand_movmem contains similar code. */
12022 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
12024 rtx destreg, zeroreg, countreg, destexp;
12025 enum machine_mode counter_mode;
12026 HOST_WIDE_INT align = 0;
12027 unsigned HOST_WIDE_INT count = 0;
12029 if (GET_CODE (align_exp) == CONST_INT)
12030 align = INTVAL (align_exp);
12032 /* Can't use any of this if the user has appropriated esi. */
12033 if (global_regs[4])
12034 return 0;
12036 /* This simple hack avoids all inlining code and simplifies code below. */
12037 if (!TARGET_ALIGN_STRINGOPS)
12038 align = 32;
12040 if (GET_CODE (count_exp) == CONST_INT)
12042 count = INTVAL (count_exp);
12043 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
12044 return 0;
12046 /* Figure out proper mode for counter. For 32bits it is always SImode,
12047 for 64bits use SImode when possible, otherwise DImode.
12048 Set count to number of bytes cleared when known at compile time. */
12049 if (!TARGET_64BIT
12050 || GET_MODE (count_exp) == SImode
12051 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
12052 counter_mode = SImode;
12053 else
12054 counter_mode = DImode;
12056 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
12057 if (destreg != XEXP (dst, 0))
12058 dst = replace_equiv_address_nv (dst, destreg);
12061 /* When optimizing for size emit a simple rep; stosb instruction for
12062 counts not divisible by 4. The movl $N, %ecx; rep; stosb
12063 sequence is 7 bytes long, so if optimizing for size and count is
12064 small enough that some stosl, stosw and stosb instructions without
12065 rep are shorter, fall back into the next if. */
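/* For instance, with -Os and count == 37 the inline form would be
   9 x stosl + stosb, i.e. 10 bytes, so the 7-byte rep; stosb form
   wins here; with count == 13 the 4-byte stosl x 3 + stosb form of
   the next branch is kept.  */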
12067 if ((!optimize || optimize_size)
12068 && (count == 0
12069 || ((count & 0x03)
12070 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
12072 emit_insn (gen_cld ());
12074 countreg = ix86_zero_extend_to_Pmode (count_exp);
12075 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
12076 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
12077 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
12079 else if (count != 0
12080 && (align >= 8
12081 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12082 || optimize_size || count < (unsigned int) 64))
12084 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12085 unsigned HOST_WIDE_INT offset = 0;
12087 emit_insn (gen_cld ());
12089 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
12090 if (count & ~(size - 1))
12092 unsigned HOST_WIDE_INT repcount;
12093 unsigned int max_nonrep;
12095 repcount = count >> (size == 4 ? 2 : 3);
12096 if (!TARGET_64BIT)
12097 repcount &= 0x3fffffff;
12099 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
12100 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
12101 bytes. In both cases the latter seems to be faster for small
12102 values of N. */
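/* E.g. a 24-byte clear with size == 8 gives repcount == 3, which is
   within max_nonrep, so three stosq insns are emitted below instead
   of the rep form.  */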
12103 max_nonrep = size == 4 ? 7 : 4;
12104 if (!optimize_size)
12105 switch (ix86_tune)
12107 case PROCESSOR_PENTIUM4:
12108 case PROCESSOR_NOCONA:
12109 max_nonrep = 3;
12110 break;
12111 default:
12112 break;
12115 if (repcount <= max_nonrep)
12116 while (repcount-- > 0)
12118 rtx mem = adjust_automodify_address_nv (dst,
12119 GET_MODE (zeroreg),
12120 destreg, offset);
12121 emit_insn (gen_strset (destreg, mem, zeroreg));
12122 offset += size;
12124 else
12126 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
12127 countreg = ix86_zero_extend_to_Pmode (countreg);
12128 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12129 GEN_INT (size == 4 ? 2 : 3));
12130 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12131 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
12132 destexp));
12133 offset = count & ~(size - 1);
12136 if (size == 8 && (count & 0x04))
12138 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
12139 offset);
12140 emit_insn (gen_strset (destreg, mem,
12141 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12142 offset += 4;
12144 if (count & 0x02)
12146 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
12147 offset);
12148 emit_insn (gen_strset (destreg, mem,
12149 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12150 offset += 2;
12152 if (count & 0x01)
12154 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
12155 offset);
12156 emit_insn (gen_strset (destreg, mem,
12157 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12160 else
12162 rtx countreg2;
12163 rtx label = NULL;
12164 /* Compute desired alignment of the string operation. */
12165 int desired_alignment = (TARGET_PENTIUMPRO
12166 && (count == 0 || count >= (unsigned int) 260)
12167 ? 8 : UNITS_PER_WORD);
12169 /* In case we don't know anything about the alignment, default to
12170 library version, since it is usually equally fast and results in
12171 shorter code.
12173 Also emit call when we know that the count is large and call overhead
12174 will not be important. */
12175 if (!TARGET_INLINE_ALL_STRINGOPS
12176 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12177 return 0;
12179 if (TARGET_SINGLE_STRINGOP)
12180 emit_insn (gen_cld ());
12182 countreg2 = gen_reg_rtx (Pmode);
12183 countreg = copy_to_mode_reg (counter_mode, count_exp);
12184 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
12185 /* Get rid of MEM_OFFSET, it won't be accurate. */
12186 dst = change_address (dst, BLKmode, destreg);
12188 if (count == 0 && align < desired_alignment)
12190 label = gen_label_rtx ();
12191 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12192 LEU, 0, counter_mode, 1, label);
12194 if (align <= 1)
12196 rtx label = ix86_expand_aligntest (destreg, 1);
12197 emit_insn (gen_strset (destreg, dst,
12198 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12199 ix86_adjust_counter (countreg, 1);
12200 emit_label (label);
12201 LABEL_NUSES (label) = 1;
12203 if (align <= 2)
12205 rtx label = ix86_expand_aligntest (destreg, 2);
12206 emit_insn (gen_strset (destreg, dst,
12207 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12208 ix86_adjust_counter (countreg, 2);
12209 emit_label (label);
12210 LABEL_NUSES (label) = 1;
12212 if (align <= 4 && desired_alignment > 4)
12214 rtx label = ix86_expand_aligntest (destreg, 4);
12215 emit_insn (gen_strset (destreg, dst,
12216 (TARGET_64BIT
12217 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
12218 : zeroreg)));
12219 ix86_adjust_counter (countreg, 4);
12220 emit_label (label);
12221 LABEL_NUSES (label) = 1;
12224 if (label && desired_alignment > 4 && !TARGET_64BIT)
12226 emit_label (label);
12227 LABEL_NUSES (label) = 1;
12228 label = NULL_RTX;
12231 if (!TARGET_SINGLE_STRINGOP)
12232 emit_insn (gen_cld ());
12233 if (TARGET_64BIT)
12235 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12236 GEN_INT (3)));
12237 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12239 else
12241 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12242 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12244 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12245 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
12247 if (label)
12249 emit_label (label);
12250 LABEL_NUSES (label) = 1;
12253 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12254 emit_insn (gen_strset (destreg, dst,
12255 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12256 if (TARGET_64BIT && (align <= 4 || count == 0))
12258 rtx label = ix86_expand_aligntest (countreg, 4);
12259 emit_insn (gen_strset (destreg, dst,
12260 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12261 emit_label (label);
12262 LABEL_NUSES (label) = 1;
12264 if (align > 2 && count != 0 && (count & 2))
12265 emit_insn (gen_strset (destreg, dst,
12266 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12267 if (align <= 2 || count == 0)
12269 rtx label = ix86_expand_aligntest (countreg, 2);
12270 emit_insn (gen_strset (destreg, dst,
12271 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12272 emit_label (label);
12273 LABEL_NUSES (label) = 1;
12275 if (align > 1 && count != 0 && (count & 1))
12276 emit_insn (gen_strset (destreg, dst,
12277 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12278 if (align <= 1 || count == 0)
12280 rtx label = ix86_expand_aligntest (countreg, 1);
12281 emit_insn (gen_strset (destreg, dst,
12282 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12283 emit_label (label);
12284 LABEL_NUSES (label) = 1;
12287 return 1;
12290 /* Expand strlen. */
12292 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
12294 rtx addr, scratch1, scratch2, scratch3, scratch4;
12296 /* The generic case of the strlen expander is long. Avoid expanding it
12297 unless TARGET_INLINE_ALL_STRINGOPS. */
12299 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12300 && !TARGET_INLINE_ALL_STRINGOPS
12301 && !optimize_size
12302 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
12303 return 0;
12305 addr = force_reg (Pmode, XEXP (src, 0));
12306 scratch1 = gen_reg_rtx (Pmode);
12308 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12309 && !optimize_size)
12311 /* Well it seems that some optimizer does not combine a call like
12312 foo(strlen(bar), strlen(bar));
12313 when the move and the subtraction are done here. It does calculate
12314 the length just once when these instructions are done inside of
12315 output_strlen_unroll(). But since &bar[strlen(bar)] is
12316 often used and this uses one fewer register for the lifetime of
12317 output_strlen_unroll(), this is better. */
12319 emit_move_insn (out, addr);
12321 ix86_expand_strlensi_unroll_1 (out, src, align);
12323 /* strlensi_unroll_1 returns the address of the zero at the end of
12324 the string, like memchr(), so compute the length by subtracting
12325 the start address. */
12326 if (TARGET_64BIT)
12327 emit_insn (gen_subdi3 (out, out, addr));
12328 else
12329 emit_insn (gen_subsi3 (out, out, addr));
12331 else
12333 rtx unspec;
12334 scratch2 = gen_reg_rtx (Pmode);
12335 scratch3 = gen_reg_rtx (Pmode);
12336 scratch4 = force_reg (Pmode, constm1_rtx);
12338 emit_move_insn (scratch3, addr);
12339 eoschar = force_reg (QImode, eoschar);
12341 emit_insn (gen_cld ());
12342 src = replace_equiv_address_nv (src, scratch3);
12344 /* If .md starts supporting :P, this can be done in .md. */
12345 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
12346 scratch4), UNSPEC_SCAS);
12347 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
12348 if (TARGET_64BIT)
12350 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
12351 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
12353 else
12355 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
12356 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
12359 return 1;
12362 /* Expand the appropriate insns for doing strlen if not just doing
12363 repnz; scasb
12365 out = result, initialized with the start address
12366 align_rtx = alignment of the address.
12367 scratch = scratch register, initialized with the start address when
12368 not aligned, otherwise undefined
12370 This is just the body. It needs the initializations mentioned above and
12371 some address computing at the end. These things are done in i386.md. */
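/* Overview of the code emitted below: up to three leading bytes are
   checked one at a time until OUT is 4-byte aligned, then the string
   is scanned a word at a time using the
   (word - 0x01010101) & ~word & 0x80808080 zero-byte test, and
   finally OUT is adjusted to point at the terminating zero byte.  */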
12373 static void
12374 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
12376 int align;
12377 rtx tmp;
12378 rtx align_2_label = NULL_RTX;
12379 rtx align_3_label = NULL_RTX;
12380 rtx align_4_label = gen_label_rtx ();
12381 rtx end_0_label = gen_label_rtx ();
12382 rtx mem;
12383 rtx tmpreg = gen_reg_rtx (SImode);
12384 rtx scratch = gen_reg_rtx (SImode);
12385 rtx cmp;
12387 align = 0;
12388 if (GET_CODE (align_rtx) == CONST_INT)
12389 align = INTVAL (align_rtx);
12391 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12393 /* Is there a known alignment and is it less than 4? */
12394 if (align < 4)
12396 rtx scratch1 = gen_reg_rtx (Pmode);
12397 emit_move_insn (scratch1, out);
12398 /* Is there a known alignment and is it not 2? */
12399 if (align != 2)
12401 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
12402 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
12404 /* Leave just the 3 lower bits. */
12405 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12406 NULL_RTX, 0, OPTAB_WIDEN);
12408 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12409 Pmode, 1, align_4_label);
12410 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12411 Pmode, 1, align_2_label);
12412 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12413 Pmode, 1, align_3_label);
12415 else
12417 /* Since the alignment is 2, we have to check 2 or 0 bytes;
12418 check whether it is aligned to a 4-byte boundary. */
12420 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12421 NULL_RTX, 0, OPTAB_WIDEN);
12423 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12424 Pmode, 1, align_4_label);
12427 mem = change_address (src, QImode, out);
12429 /* Now compare the bytes. */
12431 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
12432 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12433 QImode, 1, end_0_label);
12435 /* Increment the address. */
12436 if (TARGET_64BIT)
12437 emit_insn (gen_adddi3 (out, out, const1_rtx));
12438 else
12439 emit_insn (gen_addsi3 (out, out, const1_rtx));
12441 /* Not needed with an alignment of 2. */
12442 if (align != 2)
12444 emit_label (align_2_label);
12446 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12447 end_0_label);
12449 if (TARGET_64BIT)
12450 emit_insn (gen_adddi3 (out, out, const1_rtx));
12451 else
12452 emit_insn (gen_addsi3 (out, out, const1_rtx));
12454 emit_label (align_3_label);
12457 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12458 end_0_label);
12460 if (TARGET_64BIT)
12461 emit_insn (gen_adddi3 (out, out, const1_rtx));
12462 else
12463 emit_insn (gen_addsi3 (out, out, const1_rtx));
12466 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
12467 align this loop; doing so only enlarges the program and does not
12468 speed it up. */
12469 emit_label (align_4_label);
12471 mem = change_address (src, SImode, out);
12472 emit_move_insn (scratch, mem);
12473 if (TARGET_64BIT)
12474 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12475 else
12476 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
12478 /* This formula yields a nonzero result iff one of the bytes is zero.
12479 This saves three branches inside the loop and many cycles. */
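/* The insns below compute
     tmpreg = (scratch - 0x01010101) & ~scratch & 0x80808080;
   this is nonzero iff SCRATCH contains a zero byte, and its lowest
   set bit marks the first (lowest-addressed) zero byte.  */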
12481 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12482 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12483 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12484 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12485 gen_int_mode (0x80808080, SImode)));
12486 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12487 align_4_label);
12489 if (TARGET_CMOVE)
12491 rtx reg = gen_reg_rtx (SImode);
12492 rtx reg2 = gen_reg_rtx (Pmode);
12493 emit_move_insn (reg, tmpreg);
12494 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12496 /* If zero is not in the first two bytes, move two bytes forward. */
12497 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12498 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12499 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12500 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12501 gen_rtx_IF_THEN_ELSE (SImode, tmp,
12502 reg,
12503 tmpreg)));
12504 /* Emit lea manually to avoid clobbering of flags. */
12505 emit_insn (gen_rtx_SET (SImode, reg2,
12506 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12508 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12509 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12510 emit_insn (gen_rtx_SET (VOIDmode, out,
12511 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12512 reg2,
12513 out)));
12516 else
12518 rtx end_2_label = gen_label_rtx ();
12519 /* Is zero in the first two bytes? */
12521 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12522 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12523 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12524 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12525 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12526 pc_rtx);
12527 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12528 JUMP_LABEL (tmp) = end_2_label;
12530 /* Not in the first two. Move two bytes forward. */
12531 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12532 if (TARGET_64BIT)
12533 emit_insn (gen_adddi3 (out, out, const2_rtx));
12534 else
12535 emit_insn (gen_addsi3 (out, out, const2_rtx));
12537 emit_label (end_2_label);
12541 /* Avoid branch in fixing the byte. */
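/* Here OUT is 4 (or 6) bytes past the word holding the zero and the
   low byte of TMPREG has bit 7 set iff the lower byte of the
   remaining pair is the zero.  Doubling TMPREG moves that bit into
   the carry flag, so the subtract-with-borrow of 3 below leaves OUT
   pointing exactly at the terminating zero byte.  */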
12542 tmpreg = gen_lowpart (QImode, tmpreg);
12543 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
12544 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
12545 if (TARGET_64BIT)
12546 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12547 else
12548 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
12550 emit_label (end_0_label);
12553 void
12554 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12555 rtx callarg2 ATTRIBUTE_UNUSED,
12556 rtx pop, int sibcall)
12558 rtx use = NULL, call;
12560 if (pop == const0_rtx)
12561 pop = NULL;
12562 gcc_assert (!TARGET_64BIT || !pop);
12564 #if TARGET_MACHO
12565 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12566 fnaddr = machopic_indirect_call_target (fnaddr);
12567 #else
12568 /* Static functions and indirect calls don't need the pic register. */
12569 if (! TARGET_64BIT && flag_pic
12570 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12571 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12572 use_reg (&use, pic_offset_table_rtx);
12574 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12576 rtx al = gen_rtx_REG (QImode, 0);
12577 emit_move_insn (al, callarg2);
12578 use_reg (&use, al);
12580 #endif /* TARGET_MACHO */
12582 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12584 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12585 fnaddr = gen_rtx_MEM (QImode, fnaddr);
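/* For indirect 64-bit sibcalls load the target into R11; it is
   call-clobbered and not used for argument passing, so it is still
   free at the point of the jump.  */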
12587 if (sibcall && TARGET_64BIT
12588 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12590 rtx addr;
12591 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12592 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12593 emit_move_insn (fnaddr, addr);
12594 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12597 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12598 if (retval)
12599 call = gen_rtx_SET (VOIDmode, retval, call);
12600 if (pop)
12602 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12603 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12604 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
12607 call = emit_call_insn (call);
12608 if (use)
12609 CALL_INSN_FUNCTION_USAGE (call) = use;
12613 /* Clear stack slot assignments remembered from previous functions.
12614 This is called from INIT_EXPANDERS once before RTL is emitted for each
12615 function. */
12617 static struct machine_function *
12618 ix86_init_machine_status (void)
12620 struct machine_function *f;
12622 f = ggc_alloc_cleared (sizeof (struct machine_function));
12623 f->use_fast_prologue_epilogue_nregs = -1;
12625 return f;
12628 /* Return a MEM corresponding to a stack slot with mode MODE.
12629 Allocate a new slot if necessary.
12631 The RTL for a function can have several slots available: N is
12632 which slot to use. */
12635 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12637 struct stack_local_entry *s;
12639 gcc_assert (n < MAX_386_STACK_LOCALS);
12641 for (s = ix86_stack_locals; s; s = s->next)
12642 if (s->mode == mode && s->n == n)
12643 return s->rtl;
12645 s = (struct stack_local_entry *)
12646 ggc_alloc (sizeof (struct stack_local_entry));
12647 s->n = n;
12648 s->mode = mode;
12649 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12651 s->next = ix86_stack_locals;
12652 ix86_stack_locals = s;
12653 return s->rtl;
12656 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12658 static GTY(()) rtx ix86_tls_symbol;
12660 ix86_tls_get_addr (void)
12663 if (!ix86_tls_symbol)
12665 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12666 (TARGET_GNU_TLS && !TARGET_64BIT)
12667 ? "___tls_get_addr"
12668 : "__tls_get_addr");
12671 return ix86_tls_symbol;
12674 /* Calculate the length of the memory address in the instruction
12675 encoding. Does not include the one-byte modrm, opcode, or prefix. */
12678 memory_address_length (rtx addr)
12680 struct ix86_address parts;
12681 rtx base, index, disp;
12682 int len;
12683 int ok;
12685 if (GET_CODE (addr) == PRE_DEC
12686 || GET_CODE (addr) == POST_INC
12687 || GET_CODE (addr) == PRE_MODIFY
12688 || GET_CODE (addr) == POST_MODIFY)
12689 return 0;
12691 ok = ix86_decompose_address (addr, &parts);
12692 gcc_assert (ok);
12694 if (parts.base && GET_CODE (parts.base) == SUBREG)
12695 parts.base = SUBREG_REG (parts.base);
12696 if (parts.index && GET_CODE (parts.index) == SUBREG)
12697 parts.index = SUBREG_REG (parts.index);
12699 base = parts.base;
12700 index = parts.index;
12701 disp = parts.disp;
12702 len = 0;
12704 /* Rule of thumb:
12705 - esp as the base always wants an index,
12706 - ebp as the base always wants a displacement. */
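/* Examples: (%eax) needs no extra bytes; (%esp) needs a SIB byte;
   8(%ebp) needs a disp8; foo(%eax) needs a disp32; and any address
   with an index register, e.g. (%eax,%ebx,4), needs a SIB byte on
   top of its displacement.  */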
12708 /* Register Indirect. */
12709 if (base && !index && !disp)
12711 /* esp (for its index) and ebp (for its displacement) need
12712 the two-byte modrm form. */
12713 if (addr == stack_pointer_rtx
12714 || addr == arg_pointer_rtx
12715 || addr == frame_pointer_rtx
12716 || addr == hard_frame_pointer_rtx)
12717 len = 1;
12720 /* Direct Addressing. */
12721 else if (disp && !base && !index)
12722 len = 4;
12724 else
12726 /* Find the length of the displacement constant. */
12727 if (disp)
12729 if (GET_CODE (disp) == CONST_INT
12730 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
12731 && base)
12732 len = 1;
12733 else
12734 len = 4;
12736 /* ebp always wants a displacement. */
12737 else if (base == hard_frame_pointer_rtx)
12738 len = 1;
12740 /* An index requires the two-byte modrm form.... */
12741 if (index
12742 /* ...like esp, which always wants an index. */
12743 || base == stack_pointer_rtx
12744 || base == arg_pointer_rtx
12745 || base == frame_pointer_rtx)
12746 len += 1;
12749 return len;
12752 /* Compute default value for "length_immediate" attribute. When SHORTFORM
12753 is set, expect that the insn has an 8-bit immediate alternative. */
12755 ix86_attr_length_immediate_default (rtx insn, int shortform)
12757 int len = 0;
12758 int i;
12759 extract_insn_cached (insn);
12760 for (i = recog_data.n_operands - 1; i >= 0; --i)
12761 if (CONSTANT_P (recog_data.operand[i]))
12763 gcc_assert (!len);
12764 if (shortform
12765 && GET_CODE (recog_data.operand[i]) == CONST_INT
12766 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
12767 len = 1;
12768 else
12770 switch (get_attr_mode (insn))
12772 case MODE_QI:
12773 len+=1;
12774 break;
12775 case MODE_HI:
12776 len+=2;
12777 break;
12778 case MODE_SI:
12779 len+=4;
12780 break;
12781 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
12782 case MODE_DI:
12783 len+=4;
12784 break;
12785 default:
12786 fatal_insn ("unknown insn mode", insn);
12790 return len;
12792 /* Compute default value for "length_address" attribute. */
12794 ix86_attr_length_address_default (rtx insn)
12796 int i;
12798 if (get_attr_type (insn) == TYPE_LEA)
12800 rtx set = PATTERN (insn);
12802 if (GET_CODE (set) == PARALLEL)
12803 set = XVECEXP (set, 0, 0);
12805 gcc_assert (GET_CODE (set) == SET);
12807 return memory_address_length (SET_SRC (set));
12810 extract_insn_cached (insn);
12811 for (i = recog_data.n_operands - 1; i >= 0; --i)
12812 if (GET_CODE (recog_data.operand[i]) == MEM)
12814 return memory_address_length (XEXP (recog_data.operand[i], 0));
12815 break;
12817 return 0;
12820 /* Return the maximum number of instructions a cpu can issue. */
12822 static int
12823 ix86_issue_rate (void)
12825 switch (ix86_tune)
12827 case PROCESSOR_PENTIUM:
12828 case PROCESSOR_K6:
12829 return 2;
12831 case PROCESSOR_PENTIUMPRO:
12832 case PROCESSOR_PENTIUM4:
12833 case PROCESSOR_ATHLON:
12834 case PROCESSOR_K8:
12835 case PROCESSOR_NOCONA:
12836 return 3;
12838 default:
12839 return 1;
12843 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
12844 by DEP_INSN and reads nothing else set by DEP_INSN. */
12846 static int
12847 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12849 rtx set, set2;
12851 /* Simplify the test for uninteresting insns. */
12852 if (insn_type != TYPE_SETCC
12853 && insn_type != TYPE_ICMOV
12854 && insn_type != TYPE_FCMOV
12855 && insn_type != TYPE_IBR)
12856 return 0;
12858 if ((set = single_set (dep_insn)) != 0)
12860 set = SET_DEST (set);
12861 set2 = NULL_RTX;
12863 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
12864 && XVECLEN (PATTERN (dep_insn), 0) == 2
12865 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
12866 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
12868 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
12869 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
12871 else
12872 return 0;
12874 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
12875 return 0;
12877 /* This test is true if the dependent insn reads the flags but
12878 not any other potentially set register. */
12879 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
12880 return 0;
12882 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
12883 return 0;
12885 return 1;
12888 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
12889 address with operands set by DEP_INSN. */
12891 static int
12892 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12894 rtx addr;
12896 if (insn_type == TYPE_LEA
12897 && TARGET_PENTIUM)
12899 addr = PATTERN (insn);
12901 if (GET_CODE (addr) == PARALLEL)
12902 addr = XVECEXP (addr, 0, 0);
12904 gcc_assert (GET_CODE (addr) == SET);
12906 addr = SET_SRC (addr);
12908 else
12910 int i;
12911 extract_insn_cached (insn);
12912 for (i = recog_data.n_operands - 1; i >= 0; --i)
12913 if (GET_CODE (recog_data.operand[i]) == MEM)
12915 addr = XEXP (recog_data.operand[i], 0);
12916 goto found;
12918 return 0;
12919 found:;
12922 return modified_in_p (addr, dep_insn);
12925 static int
12926 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
12928 enum attr_type insn_type, dep_insn_type;
12929 enum attr_memory memory;
12930 rtx set, set2;
12931 int dep_insn_code_number;
12933 /* Anti and output dependencies have zero cost on all CPUs. */
12934 if (REG_NOTE_KIND (link) != 0)
12935 return 0;
12937 dep_insn_code_number = recog_memoized (dep_insn);
12939 /* If we can't recognize the insns, we can't really do anything. */
12940 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
12941 return cost;
12943 insn_type = get_attr_type (insn);
12944 dep_insn_type = get_attr_type (dep_insn);
12946 switch (ix86_tune)
12948 case PROCESSOR_PENTIUM:
12949 /* Address Generation Interlock adds a cycle of latency. */
12950 if (ix86_agi_dependant (insn, dep_insn, insn_type))
12951 cost += 1;
12953 /* ??? Compares pair with jump/setcc. */
12954 if (ix86_flags_dependant (insn, dep_insn, insn_type))
12955 cost = 0;
12957 /* Floating point stores require the value to be ready one cycle earlier. */
12958 if (insn_type == TYPE_FMOV
12959 && get_attr_memory (insn) == MEMORY_STORE
12960 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12961 cost += 1;
12962 break;
12964 case PROCESSOR_PENTIUMPRO:
12965 memory = get_attr_memory (insn);
12967 /* INT->FP conversion is expensive. */
12968 if (get_attr_fp_int_src (dep_insn))
12969 cost += 5;
12971 /* There is one cycle extra latency between an FP op and a store. */
12972 if (insn_type == TYPE_FMOV
12973 && (set = single_set (dep_insn)) != NULL_RTX
12974 && (set2 = single_set (insn)) != NULL_RTX
12975 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
12976 && GET_CODE (SET_DEST (set2)) == MEM)
12977 cost += 1;
12979 /* Show ability of reorder buffer to hide latency of load by executing
12980 in parallel with the previous instruction in case the
12981 previous instruction is not needed to compute the address. */
12982 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12983 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12985 /* Claim moves to take one cycle, as the core can issue one load
12986 at a time and the next load can start a cycle later. */
12987 if (dep_insn_type == TYPE_IMOV
12988 || dep_insn_type == TYPE_FMOV)
12989 cost = 1;
12990 else if (cost > 1)
12991 cost--;
12993 break;
12995 case PROCESSOR_K6:
12996 memory = get_attr_memory (insn);
12998 /* The esp dependency is resolved before the instruction is really
12999 finished. */
13000 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
13001 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
13002 return 1;
13004 /* INT->FP conversion is expensive. */
13005 if (get_attr_fp_int_src (dep_insn))
13006 cost += 5;
13008 /* Show ability of reorder buffer to hide latency of load by executing
13009 in parallel with the previous instruction in case the
13010 previous instruction is not needed to compute the address. */
13011 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13012 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13014 /* Claim moves to take one cycle, as the core can issue one load
13015 at a time and the next load can start a cycle later. */
13016 if (dep_insn_type == TYPE_IMOV
13017 || dep_insn_type == TYPE_FMOV)
13018 cost = 1;
13019 else if (cost > 2)
13020 cost -= 2;
13021 else
13022 cost = 1;
13024 break;
13026 case PROCESSOR_ATHLON:
13027 case PROCESSOR_K8:
13028 memory = get_attr_memory (insn);
13030 /* Show ability of reorder buffer to hide latency of load by executing
13031 in parallel with the previous instruction in case the
13032 previous instruction is not needed to compute the address. */
13033 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13034 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13036 enum attr_unit unit = get_attr_unit (insn);
13037 int loadcost = 3;
13039 /* Because of the difference between the length of integer and
13040 floating unit pipeline preparation stages, the memory operands
13041 for floating point are cheaper.
13043 ??? For Athlon the difference is most probably 2. */
13044 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
13045 loadcost = 3;
13046 else
13047 loadcost = TARGET_ATHLON ? 2 : 0;
13049 if (cost >= loadcost)
13050 cost -= loadcost;
13051 else
13052 cost = 0;
13055 default:
13056 break;
13059 return cost;
13062 /* How many alternative schedules to try. This should be as wide as the
13063 scheduling freedom in the DFA, but no wider. Making this value too
13064 large results in extra work for the scheduler. */
13066 static int
13067 ia32_multipass_dfa_lookahead (void)
13069 if (ix86_tune == PROCESSOR_PENTIUM)
13070 return 2;
13072 if (ix86_tune == PROCESSOR_PENTIUMPRO
13073 || ix86_tune == PROCESSOR_K6)
13074 return 1;
13076 else
13077 return 0;
13081 /* Compute the alignment given to a constant that is being placed in memory.
13082 EXP is the constant and ALIGN is the alignment that the object would
13083 ordinarily have.
13084 The value of this function is used instead of that alignment to align
13085 the object. */
13088 ix86_constant_alignment (tree exp, int align)
13090 if (TREE_CODE (exp) == REAL_CST)
13092 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
13093 return 64;
13094 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
13095 return 128;
13097 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
13098 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
13099 return BITS_PER_WORD;
13101 return align;
13104 /* Compute the alignment for a static variable.
13105 TYPE is the data type, and ALIGN is the alignment that
13106 the object would ordinarily have. The value of this function is used
13107 instead of that alignment to align the object. */
13110 ix86_data_alignment (tree type, int align)
13112 if (AGGREGATE_TYPE_P (type)
13113 && TYPE_SIZE (type)
13114 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13115 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
13116 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
13117 return 256;
13119 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
13120 to a 16-byte boundary. */
13121 if (TARGET_64BIT)
13123 if (AGGREGATE_TYPE_P (type)
13124 && TYPE_SIZE (type)
13125 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13126 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
13127 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13128 return 128;
13131 if (TREE_CODE (type) == ARRAY_TYPE)
13133 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13134 return 64;
13135 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13136 return 128;
13138 else if (TREE_CODE (type) == COMPLEX_TYPE)
13141 if (TYPE_MODE (type) == DCmode && align < 64)
13142 return 64;
13143 if (TYPE_MODE (type) == XCmode && align < 128)
13144 return 128;
13146 else if ((TREE_CODE (type) == RECORD_TYPE
13147 || TREE_CODE (type) == UNION_TYPE
13148 || TREE_CODE (type) == QUAL_UNION_TYPE)
13149 && TYPE_FIELDS (type))
13151 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13152 return 64;
13153 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13154 return 128;
13156 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13157 || TREE_CODE (type) == INTEGER_TYPE)
13159 if (TYPE_MODE (type) == DFmode && align < 64)
13160 return 64;
13161 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13162 return 128;
13165 return align;
13168 /* Compute the alignment for a local variable.
13169 TYPE is the data type, and ALIGN is the alignment that
13170 the object would ordinarily have. The value of this macro is used
13171 instead of that alignment to align the object. */
13174 ix86_local_alignment (tree type, int align)
13176 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
13177 to a 16-byte boundary. */
13178 if (TARGET_64BIT)
13180 if (AGGREGATE_TYPE_P (type)
13181 && TYPE_SIZE (type)
13182 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13183 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
13184 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13185 return 128;
13187 if (TREE_CODE (type) == ARRAY_TYPE)
13189 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13190 return 64;
13191 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13192 return 128;
13194 else if (TREE_CODE (type) == COMPLEX_TYPE)
13196 if (TYPE_MODE (type) == DCmode && align < 64)
13197 return 64;
13198 if (TYPE_MODE (type) == XCmode && align < 128)
13199 return 128;
13201 else if ((TREE_CODE (type) == RECORD_TYPE
13202 || TREE_CODE (type) == UNION_TYPE
13203 || TREE_CODE (type) == QUAL_UNION_TYPE)
13204 && TYPE_FIELDS (type))
13206 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13207 return 64;
13208 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13209 return 128;
13211 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13212 || TREE_CODE (type) == INTEGER_TYPE)
13215 if (TYPE_MODE (type) == DFmode && align < 64)
13216 return 64;
13217 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13218 return 128;
13220 return align;
13223 /* Emit RTL insns to initialize the variable parts of a trampoline.
13224 FNADDR is an RTX for the address of the function's pure code.
13225 CXT is an RTX for the static chain value for the function. */
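/* The 32-bit trampoline emitted below is
     b9 <cxt:4>         movl   $cxt, %ecx
     e9 <disp:4>        jmp    fnaddr
   and the 64-bit one is
     41 bb <fnaddr:4>   movl   $fnaddr, %r11d   (or 49 bb <fnaddr:8>)
     49 ba <cxt:8>      movabs $cxt, %r10
     49 ff e3           jmp    *%r11  */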
13226 void
13227 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
13229 if (!TARGET_64BIT)
13231 /* Compute offset from the end of the jmp to the target function. */
13232 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
13233 plus_constant (tramp, 10),
13234 NULL_RTX, 1, OPTAB_DIRECT);
13235 emit_move_insn (gen_rtx_MEM (QImode, tramp),
13236 gen_int_mode (0xb9, QImode));
13237 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
13238 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
13239 gen_int_mode (0xe9, QImode));
13240 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
13242 else
13244 int offset = 0;
13245 /* Try to load address using shorter movl instead of movabs.
13246 We may want to support movq for kernel mode, but the kernel does not use
13247 trampolines at the moment. */
13248 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
13250 fnaddr = copy_to_mode_reg (DImode, fnaddr);
13251 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13252 gen_int_mode (0xbb41, HImode));
13253 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
13254 gen_lowpart (SImode, fnaddr));
13255 offset += 6;
13257 else
13259 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13260 gen_int_mode (0xbb49, HImode));
13261 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13262 fnaddr);
13263 offset += 10;
13265 /* Load static chain using movabs to r10. */
13266 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13267 gen_int_mode (0xba49, HImode));
13268 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13269 cxt);
13270 offset += 10;
13271 /* Jump to r11. */
13272 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13273 gen_int_mode (0xff49, HImode));
13274 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
13275 gen_int_mode (0xe3, QImode));
13276 offset += 3;
13277 gcc_assert (offset <= TRAMPOLINE_SIZE);
13280 #ifdef ENABLE_EXECUTE_STACK
13281 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
13282 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
13283 #endif
13286 /* Codes for all the SSE/MMX builtins. */
13287 enum ix86_builtins
13289 IX86_BUILTIN_ADDPS,
13290 IX86_BUILTIN_ADDSS,
13291 IX86_BUILTIN_DIVPS,
13292 IX86_BUILTIN_DIVSS,
13293 IX86_BUILTIN_MULPS,
13294 IX86_BUILTIN_MULSS,
13295 IX86_BUILTIN_SUBPS,
13296 IX86_BUILTIN_SUBSS,
13298 IX86_BUILTIN_CMPEQPS,
13299 IX86_BUILTIN_CMPLTPS,
13300 IX86_BUILTIN_CMPLEPS,
13301 IX86_BUILTIN_CMPGTPS,
13302 IX86_BUILTIN_CMPGEPS,
13303 IX86_BUILTIN_CMPNEQPS,
13304 IX86_BUILTIN_CMPNLTPS,
13305 IX86_BUILTIN_CMPNLEPS,
13306 IX86_BUILTIN_CMPNGTPS,
13307 IX86_BUILTIN_CMPNGEPS,
13308 IX86_BUILTIN_CMPORDPS,
13309 IX86_BUILTIN_CMPUNORDPS,
13310 IX86_BUILTIN_CMPNEPS,
13311 IX86_BUILTIN_CMPEQSS,
13312 IX86_BUILTIN_CMPLTSS,
13313 IX86_BUILTIN_CMPLESS,
13314 IX86_BUILTIN_CMPNEQSS,
13315 IX86_BUILTIN_CMPNLTSS,
13316 IX86_BUILTIN_CMPNLESS,
13317 IX86_BUILTIN_CMPNGTSS,
13318 IX86_BUILTIN_CMPNGESS,
13319 IX86_BUILTIN_CMPORDSS,
13320 IX86_BUILTIN_CMPUNORDSS,
13321 IX86_BUILTIN_CMPNESS,
13323 IX86_BUILTIN_COMIEQSS,
13324 IX86_BUILTIN_COMILTSS,
13325 IX86_BUILTIN_COMILESS,
13326 IX86_BUILTIN_COMIGTSS,
13327 IX86_BUILTIN_COMIGESS,
13328 IX86_BUILTIN_COMINEQSS,
13329 IX86_BUILTIN_UCOMIEQSS,
13330 IX86_BUILTIN_UCOMILTSS,
13331 IX86_BUILTIN_UCOMILESS,
13332 IX86_BUILTIN_UCOMIGTSS,
13333 IX86_BUILTIN_UCOMIGESS,
13334 IX86_BUILTIN_UCOMINEQSS,
13336 IX86_BUILTIN_CVTPI2PS,
13337 IX86_BUILTIN_CVTPS2PI,
13338 IX86_BUILTIN_CVTSI2SS,
13339 IX86_BUILTIN_CVTSI642SS,
13340 IX86_BUILTIN_CVTSS2SI,
13341 IX86_BUILTIN_CVTSS2SI64,
13342 IX86_BUILTIN_CVTTPS2PI,
13343 IX86_BUILTIN_CVTTSS2SI,
13344 IX86_BUILTIN_CVTTSS2SI64,
13346 IX86_BUILTIN_MAXPS,
13347 IX86_BUILTIN_MAXSS,
13348 IX86_BUILTIN_MINPS,
13349 IX86_BUILTIN_MINSS,
13351 IX86_BUILTIN_LOADUPS,
13352 IX86_BUILTIN_STOREUPS,
13353 IX86_BUILTIN_MOVSS,
13355 IX86_BUILTIN_MOVHLPS,
13356 IX86_BUILTIN_MOVLHPS,
13357 IX86_BUILTIN_LOADHPS,
13358 IX86_BUILTIN_LOADLPS,
13359 IX86_BUILTIN_STOREHPS,
13360 IX86_BUILTIN_STORELPS,
13362 IX86_BUILTIN_MASKMOVQ,
13363 IX86_BUILTIN_MOVMSKPS,
13364 IX86_BUILTIN_PMOVMSKB,
13366 IX86_BUILTIN_MOVNTPS,
13367 IX86_BUILTIN_MOVNTQ,
13369 IX86_BUILTIN_LOADDQU,
13370 IX86_BUILTIN_STOREDQU,
13372 IX86_BUILTIN_PACKSSWB,
13373 IX86_BUILTIN_PACKSSDW,
13374 IX86_BUILTIN_PACKUSWB,
13376 IX86_BUILTIN_PADDB,
13377 IX86_BUILTIN_PADDW,
13378 IX86_BUILTIN_PADDD,
13379 IX86_BUILTIN_PADDQ,
13380 IX86_BUILTIN_PADDSB,
13381 IX86_BUILTIN_PADDSW,
13382 IX86_BUILTIN_PADDUSB,
13383 IX86_BUILTIN_PADDUSW,
13384 IX86_BUILTIN_PSUBB,
13385 IX86_BUILTIN_PSUBW,
13386 IX86_BUILTIN_PSUBD,
13387 IX86_BUILTIN_PSUBQ,
13388 IX86_BUILTIN_PSUBSB,
13389 IX86_BUILTIN_PSUBSW,
13390 IX86_BUILTIN_PSUBUSB,
13391 IX86_BUILTIN_PSUBUSW,
13393 IX86_BUILTIN_PAND,
13394 IX86_BUILTIN_PANDN,
13395 IX86_BUILTIN_POR,
13396 IX86_BUILTIN_PXOR,
13398 IX86_BUILTIN_PAVGB,
13399 IX86_BUILTIN_PAVGW,
13401 IX86_BUILTIN_PCMPEQB,
13402 IX86_BUILTIN_PCMPEQW,
13403 IX86_BUILTIN_PCMPEQD,
13404 IX86_BUILTIN_PCMPGTB,
13405 IX86_BUILTIN_PCMPGTW,
13406 IX86_BUILTIN_PCMPGTD,
13408 IX86_BUILTIN_PMADDWD,
13410 IX86_BUILTIN_PMAXSW,
13411 IX86_BUILTIN_PMAXUB,
13412 IX86_BUILTIN_PMINSW,
13413 IX86_BUILTIN_PMINUB,
13415 IX86_BUILTIN_PMULHUW,
13416 IX86_BUILTIN_PMULHW,
13417 IX86_BUILTIN_PMULLW,
13419 IX86_BUILTIN_PSADBW,
13420 IX86_BUILTIN_PSHUFW,
13422 IX86_BUILTIN_PSLLW,
13423 IX86_BUILTIN_PSLLD,
13424 IX86_BUILTIN_PSLLQ,
13425 IX86_BUILTIN_PSRAW,
13426 IX86_BUILTIN_PSRAD,
13427 IX86_BUILTIN_PSRLW,
13428 IX86_BUILTIN_PSRLD,
13429 IX86_BUILTIN_PSRLQ,
13430 IX86_BUILTIN_PSLLWI,
13431 IX86_BUILTIN_PSLLDI,
13432 IX86_BUILTIN_PSLLQI,
13433 IX86_BUILTIN_PSRAWI,
13434 IX86_BUILTIN_PSRADI,
13435 IX86_BUILTIN_PSRLWI,
13436 IX86_BUILTIN_PSRLDI,
13437 IX86_BUILTIN_PSRLQI,
13439 IX86_BUILTIN_PUNPCKHBW,
13440 IX86_BUILTIN_PUNPCKHWD,
13441 IX86_BUILTIN_PUNPCKHDQ,
13442 IX86_BUILTIN_PUNPCKLBW,
13443 IX86_BUILTIN_PUNPCKLWD,
13444 IX86_BUILTIN_PUNPCKLDQ,
13446 IX86_BUILTIN_SHUFPS,
13448 IX86_BUILTIN_RCPPS,
13449 IX86_BUILTIN_RCPSS,
13450 IX86_BUILTIN_RSQRTPS,
13451 IX86_BUILTIN_RSQRTSS,
13452 IX86_BUILTIN_SQRTPS,
13453 IX86_BUILTIN_SQRTSS,
13455 IX86_BUILTIN_UNPCKHPS,
13456 IX86_BUILTIN_UNPCKLPS,
13458 IX86_BUILTIN_ANDPS,
13459 IX86_BUILTIN_ANDNPS,
13460 IX86_BUILTIN_ORPS,
13461 IX86_BUILTIN_XORPS,
13463 IX86_BUILTIN_EMMS,
13464 IX86_BUILTIN_LDMXCSR,
13465 IX86_BUILTIN_STMXCSR,
13466 IX86_BUILTIN_SFENCE,
13468 /* 3DNow! Original */
13469 IX86_BUILTIN_FEMMS,
13470 IX86_BUILTIN_PAVGUSB,
13471 IX86_BUILTIN_PF2ID,
13472 IX86_BUILTIN_PFACC,
13473 IX86_BUILTIN_PFADD,
13474 IX86_BUILTIN_PFCMPEQ,
13475 IX86_BUILTIN_PFCMPGE,
13476 IX86_BUILTIN_PFCMPGT,
13477 IX86_BUILTIN_PFMAX,
13478 IX86_BUILTIN_PFMIN,
13479 IX86_BUILTIN_PFMUL,
13480 IX86_BUILTIN_PFRCP,
13481 IX86_BUILTIN_PFRCPIT1,
13482 IX86_BUILTIN_PFRCPIT2,
13483 IX86_BUILTIN_PFRSQIT1,
13484 IX86_BUILTIN_PFRSQRT,
13485 IX86_BUILTIN_PFSUB,
13486 IX86_BUILTIN_PFSUBR,
13487 IX86_BUILTIN_PI2FD,
13488 IX86_BUILTIN_PMULHRW,
13490 /* 3DNow! Athlon Extensions */
13491 IX86_BUILTIN_PF2IW,
13492 IX86_BUILTIN_PFNACC,
13493 IX86_BUILTIN_PFPNACC,
13494 IX86_BUILTIN_PI2FW,
13495 IX86_BUILTIN_PSWAPDSI,
13496 IX86_BUILTIN_PSWAPDSF,
13498 /* SSE2 */
13499 IX86_BUILTIN_ADDPD,
13500 IX86_BUILTIN_ADDSD,
13501 IX86_BUILTIN_DIVPD,
13502 IX86_BUILTIN_DIVSD,
13503 IX86_BUILTIN_MULPD,
13504 IX86_BUILTIN_MULSD,
13505 IX86_BUILTIN_SUBPD,
13506 IX86_BUILTIN_SUBSD,
13508 IX86_BUILTIN_CMPEQPD,
13509 IX86_BUILTIN_CMPLTPD,
13510 IX86_BUILTIN_CMPLEPD,
13511 IX86_BUILTIN_CMPGTPD,
13512 IX86_BUILTIN_CMPGEPD,
13513 IX86_BUILTIN_CMPNEQPD,
13514 IX86_BUILTIN_CMPNLTPD,
13515 IX86_BUILTIN_CMPNLEPD,
13516 IX86_BUILTIN_CMPNGTPD,
13517 IX86_BUILTIN_CMPNGEPD,
13518 IX86_BUILTIN_CMPORDPD,
13519 IX86_BUILTIN_CMPUNORDPD,
13520 IX86_BUILTIN_CMPNEPD,
13521 IX86_BUILTIN_CMPEQSD,
13522 IX86_BUILTIN_CMPLTSD,
13523 IX86_BUILTIN_CMPLESD,
13524 IX86_BUILTIN_CMPNEQSD,
13525 IX86_BUILTIN_CMPNLTSD,
13526 IX86_BUILTIN_CMPNLESD,
13527 IX86_BUILTIN_CMPORDSD,
13528 IX86_BUILTIN_CMPUNORDSD,
13529 IX86_BUILTIN_CMPNESD,
13531 IX86_BUILTIN_COMIEQSD,
13532 IX86_BUILTIN_COMILTSD,
13533 IX86_BUILTIN_COMILESD,
13534 IX86_BUILTIN_COMIGTSD,
13535 IX86_BUILTIN_COMIGESD,
13536 IX86_BUILTIN_COMINEQSD,
13537 IX86_BUILTIN_UCOMIEQSD,
13538 IX86_BUILTIN_UCOMILTSD,
13539 IX86_BUILTIN_UCOMILESD,
13540 IX86_BUILTIN_UCOMIGTSD,
13541 IX86_BUILTIN_UCOMIGESD,
13542 IX86_BUILTIN_UCOMINEQSD,
13544 IX86_BUILTIN_MAXPD,
13545 IX86_BUILTIN_MAXSD,
13546 IX86_BUILTIN_MINPD,
13547 IX86_BUILTIN_MINSD,
13549 IX86_BUILTIN_ANDPD,
13550 IX86_BUILTIN_ANDNPD,
13551 IX86_BUILTIN_ORPD,
13552 IX86_BUILTIN_XORPD,
13554 IX86_BUILTIN_SQRTPD,
13555 IX86_BUILTIN_SQRTSD,
13557 IX86_BUILTIN_UNPCKHPD,
13558 IX86_BUILTIN_UNPCKLPD,
13560 IX86_BUILTIN_SHUFPD,
13562 IX86_BUILTIN_LOADUPD,
13563 IX86_BUILTIN_STOREUPD,
13564 IX86_BUILTIN_MOVSD,
13566 IX86_BUILTIN_LOADHPD,
13567 IX86_BUILTIN_LOADLPD,
13569 IX86_BUILTIN_CVTDQ2PD,
13570 IX86_BUILTIN_CVTDQ2PS,
13572 IX86_BUILTIN_CVTPD2DQ,
13573 IX86_BUILTIN_CVTPD2PI,
13574 IX86_BUILTIN_CVTPD2PS,
13575 IX86_BUILTIN_CVTTPD2DQ,
13576 IX86_BUILTIN_CVTTPD2PI,
13578 IX86_BUILTIN_CVTPI2PD,
13579 IX86_BUILTIN_CVTSI2SD,
13580 IX86_BUILTIN_CVTSI642SD,
13582 IX86_BUILTIN_CVTSD2SI,
13583 IX86_BUILTIN_CVTSD2SI64,
13584 IX86_BUILTIN_CVTSD2SS,
13585 IX86_BUILTIN_CVTSS2SD,
13586 IX86_BUILTIN_CVTTSD2SI,
13587 IX86_BUILTIN_CVTTSD2SI64,
13589 IX86_BUILTIN_CVTPS2DQ,
13590 IX86_BUILTIN_CVTPS2PD,
13591 IX86_BUILTIN_CVTTPS2DQ,
13593 IX86_BUILTIN_MOVNTI,
13594 IX86_BUILTIN_MOVNTPD,
13595 IX86_BUILTIN_MOVNTDQ,
13597 /* SSE2 MMX */
13598 IX86_BUILTIN_MASKMOVDQU,
13599 IX86_BUILTIN_MOVMSKPD,
13600 IX86_BUILTIN_PMOVMSKB128,
13602 IX86_BUILTIN_PACKSSWB128,
13603 IX86_BUILTIN_PACKSSDW128,
13604 IX86_BUILTIN_PACKUSWB128,
13606 IX86_BUILTIN_PADDB128,
13607 IX86_BUILTIN_PADDW128,
13608 IX86_BUILTIN_PADDD128,
13609 IX86_BUILTIN_PADDQ128,
13610 IX86_BUILTIN_PADDSB128,
13611 IX86_BUILTIN_PADDSW128,
13612 IX86_BUILTIN_PADDUSB128,
13613 IX86_BUILTIN_PADDUSW128,
13614 IX86_BUILTIN_PSUBB128,
13615 IX86_BUILTIN_PSUBW128,
13616 IX86_BUILTIN_PSUBD128,
13617 IX86_BUILTIN_PSUBQ128,
13618 IX86_BUILTIN_PSUBSB128,
13619 IX86_BUILTIN_PSUBSW128,
13620 IX86_BUILTIN_PSUBUSB128,
13621 IX86_BUILTIN_PSUBUSW128,
13623 IX86_BUILTIN_PAND128,
13624 IX86_BUILTIN_PANDN128,
13625 IX86_BUILTIN_POR128,
13626 IX86_BUILTIN_PXOR128,
13628 IX86_BUILTIN_PAVGB128,
13629 IX86_BUILTIN_PAVGW128,
13631 IX86_BUILTIN_PCMPEQB128,
13632 IX86_BUILTIN_PCMPEQW128,
13633 IX86_BUILTIN_PCMPEQD128,
13634 IX86_BUILTIN_PCMPGTB128,
13635 IX86_BUILTIN_PCMPGTW128,
13636 IX86_BUILTIN_PCMPGTD128,
13638 IX86_BUILTIN_PMADDWD128,
13640 IX86_BUILTIN_PMAXSW128,
13641 IX86_BUILTIN_PMAXUB128,
13642 IX86_BUILTIN_PMINSW128,
13643 IX86_BUILTIN_PMINUB128,
13645 IX86_BUILTIN_PMULUDQ,
13646 IX86_BUILTIN_PMULUDQ128,
13647 IX86_BUILTIN_PMULHUW128,
13648 IX86_BUILTIN_PMULHW128,
13649 IX86_BUILTIN_PMULLW128,
13651 IX86_BUILTIN_PSADBW128,
13652 IX86_BUILTIN_PSHUFHW,
13653 IX86_BUILTIN_PSHUFLW,
13654 IX86_BUILTIN_PSHUFD,
13656 IX86_BUILTIN_PSLLW128,
13657 IX86_BUILTIN_PSLLD128,
13658 IX86_BUILTIN_PSLLQ128,
13659 IX86_BUILTIN_PSRAW128,
13660 IX86_BUILTIN_PSRAD128,
13661 IX86_BUILTIN_PSRLW128,
13662 IX86_BUILTIN_PSRLD128,
13663 IX86_BUILTIN_PSRLQ128,
13664 IX86_BUILTIN_PSLLDQI128,
13665 IX86_BUILTIN_PSLLWI128,
13666 IX86_BUILTIN_PSLLDI128,
13667 IX86_BUILTIN_PSLLQI128,
13668 IX86_BUILTIN_PSRAWI128,
13669 IX86_BUILTIN_PSRADI128,
13670 IX86_BUILTIN_PSRLDQI128,
13671 IX86_BUILTIN_PSRLWI128,
13672 IX86_BUILTIN_PSRLDI128,
13673 IX86_BUILTIN_PSRLQI128,
13675 IX86_BUILTIN_PUNPCKHBW128,
13676 IX86_BUILTIN_PUNPCKHWD128,
13677 IX86_BUILTIN_PUNPCKHDQ128,
13678 IX86_BUILTIN_PUNPCKHQDQ128,
13679 IX86_BUILTIN_PUNPCKLBW128,
13680 IX86_BUILTIN_PUNPCKLWD128,
13681 IX86_BUILTIN_PUNPCKLDQ128,
13682 IX86_BUILTIN_PUNPCKLQDQ128,
13684 IX86_BUILTIN_CLFLUSH,
13685 IX86_BUILTIN_MFENCE,
13686 IX86_BUILTIN_LFENCE,
13688 /* Prescott New Instructions. */
13689 IX86_BUILTIN_ADDSUBPS,
13690 IX86_BUILTIN_HADDPS,
13691 IX86_BUILTIN_HSUBPS,
13692 IX86_BUILTIN_MOVSHDUP,
13693 IX86_BUILTIN_MOVSLDUP,
13694 IX86_BUILTIN_ADDSUBPD,
13695 IX86_BUILTIN_HADDPD,
13696 IX86_BUILTIN_HSUBPD,
13697 IX86_BUILTIN_LDDQU,
13699 IX86_BUILTIN_MONITOR,
13700 IX86_BUILTIN_MWAIT,
13702 IX86_BUILTIN_VEC_INIT_V2SI,
13703 IX86_BUILTIN_VEC_INIT_V4HI,
13704 IX86_BUILTIN_VEC_INIT_V8QI,
13705 IX86_BUILTIN_VEC_EXT_V2DF,
13706 IX86_BUILTIN_VEC_EXT_V2DI,
13707 IX86_BUILTIN_VEC_EXT_V4SF,
13708 IX86_BUILTIN_VEC_EXT_V4SI,
13709 IX86_BUILTIN_VEC_EXT_V8HI,
13710 IX86_BUILTIN_VEC_EXT_V2SI,
13711 IX86_BUILTIN_VEC_EXT_V4HI,
13712 IX86_BUILTIN_VEC_SET_V8HI,
13713 IX86_BUILTIN_VEC_SET_V4HI,
13715 IX86_BUILTIN_MAX
13718 #define def_builtin(MASK, NAME, TYPE, CODE) \
13719 do { \
13720 if ((MASK) & target_flags \
13721 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
13722 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
13723 NULL, NULL_TREE); \
13724 } while (0)
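/* A typical use looks like
     def_builtin (MASK_SSE, "__builtin_ia32_loadups",
                  v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
   (the FUNCTION_TYPE node name above is only illustrative).  The mask
   check means a builtin is registered only when its ISA is enabled,
   and MASK_64BIT-only builtins are skipped on 32-bit.  */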
13726 /* Bits for builtin_description.flag. */
13728 /* Set when we don't support the comparison natively, and should
13729 swap_comparison in order to support it. */
13730 #define BUILTIN_DESC_SWAP_OPERANDS 1
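/* For example, __builtin_ia32_cmpgtps in bdesc_2arg below uses the
   LT comparison with BUILTIN_DESC_SWAP_OPERANDS, since SSE has no
   native greater-than compare.  */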
13732 struct builtin_description
13734 const unsigned int mask;
13735 const enum insn_code icode;
13736 const char *const name;
13737 const enum ix86_builtins code;
13738 const enum rtx_code comparison;
13739 const unsigned int flag;
13742 static const struct builtin_description bdesc_comi[] =
13744 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
13745 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
13746 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
13747 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
13748 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
13749 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
13750 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
13751 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
13752 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
13753 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
13754 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
13755 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
13756 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
13757 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
13758 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
13759 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
13760 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
13761 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
13762 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
13763 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
13764 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
13765 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
13766 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
13767 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
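/* Illustrative use from the user's side (not part of this file): the comi
   builtins above compare the lowest elements of two SSE vectors (single or
   double precision) and return an int.  The vector typedef below exists
   only for the example; in practice these are reached through the
   _mm_comieq_ss family of intrinsics in xmmintrin.h.

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));

     int
     lowest_equal (__v4sf a, __v4sf b)
     {
       return __builtin_ia32_comieq (a, b);
     }
 */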
13770 static const struct builtin_description bdesc_2arg[] =
13772 /* SSE */
13773 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
13774 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
13775 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
13776 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
13777 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
13778 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
13779 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
13780 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
13782 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
13783 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
13784 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
13785 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
13786 BUILTIN_DESC_SWAP_OPERANDS },
13787 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
13788 BUILTIN_DESC_SWAP_OPERANDS },
13789 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
13790 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
13791 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
13792 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
13793 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
13794 BUILTIN_DESC_SWAP_OPERANDS },
13795 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
13796 BUILTIN_DESC_SWAP_OPERANDS },
13797 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
13798 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
13799 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
13800 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
13801 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
13802 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
13803 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
13804 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
13805 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
13806 BUILTIN_DESC_SWAP_OPERANDS },
13807 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
13808 BUILTIN_DESC_SWAP_OPERANDS },
13809 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
13811 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
13812 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
13813 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
13814 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
13816 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
13817 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
13818 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
13819 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
13821 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
13822 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
13823 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
13824 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
13825 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
13827 /* MMX */
13828 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
13829 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
13830 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
13831 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
13832 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
13833 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
13834 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
13835 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
13837 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
13838 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
13839 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
13840 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
13841 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
13842 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
13843 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
13844 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
13846 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
13847 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
13848 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
13850 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
13851 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
13852 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
13853 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
13855 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
13856 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
13858 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
13859 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
13860 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
13861 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
13862 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
13863 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
13865 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
13866 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
13867 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
13868 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
13870 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
13871 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
13872 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
13873 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
13874 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
13875 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
13877 /* Special. */
13878 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
13879 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
13880 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
13882 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
13883 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
13884 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
13886 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
13887 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
13888 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
13889 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
13890 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
13891 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
13893 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
13894 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
13895 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
13896 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
13897 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
13898 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
13900 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
13901 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
13902 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
13903 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
13905 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
13906 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
13908 /* SSE2 */
13909 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
13910 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
13911 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
13912 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
13913 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
13914 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
13915 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
13916 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
13918 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
13919 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
13920 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
13921 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
13922 BUILTIN_DESC_SWAP_OPERANDS },
13923 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
13924 BUILTIN_DESC_SWAP_OPERANDS },
13925 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
13926 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
13927 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
13928 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
13929 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
13930 BUILTIN_DESC_SWAP_OPERANDS },
13931 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
13932 BUILTIN_DESC_SWAP_OPERANDS },
13933 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
13934 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
13935 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
13936 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
13937 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
13938 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
13939 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
13940 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
13941 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
13943 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
13944 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
13945 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
13946 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
13948 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
13949 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
13950 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
13951 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
13953 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
13954 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
13955 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
13957 /* SSE2 MMX */
13958 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
13959 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
13960 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
13961 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
13962 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
13963 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
13964 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
13965 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
13967 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
13968 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
13969 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
13970 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
13971 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
13972 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
13973 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
13974 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
13976 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
13977 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
13979 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
13980 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
13981 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
13982 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
13984 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
13985 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
13987 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
13988 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
13989 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
13990 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
13991 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
13992 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
13994 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
13995 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
13996 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
13997 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
13999 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
14000 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
14001 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
14002 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
14003 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
14004 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
14005 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
14006 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
14008 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
14009 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
14010 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
14012 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
14013 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
14015 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
14016 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
14018 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
14019 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
14020 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
14022 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
14023 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
14024 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
14026 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
14027 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
14029 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
14031 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
14032 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
14033 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
14034 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
14036 /* SSE3 MMX */
14037 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
14038 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
14039 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
14040 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
14041 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
14042 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
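/* Each named row above becomes a builtin via the generic loop in
   ix86_init_mmx_sse_builtins below, which picks the function type from the
   insn's operand mode and then calls def_builtin.  An illustrative
   user-level use of one such two-operand builtin (the typedef exists only
   for the example; _mm_add_ps in xmmintrin.h is the usual entry point):

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));

     __v4sf
     add4 (__v4sf a, __v4sf b)
     {
       return __builtin_ia32_addps (a, b);
     }
 */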
14045 static const struct builtin_description bdesc_1arg[] =
14047 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
14048 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
14050 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
14051 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
14052 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
14054 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
14055 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
14056 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
14057 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
14058 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
14059 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
14061 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
14062 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
14064 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
14066 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
14067 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
14069 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
14070 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
14071 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
14072 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
14073 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
14075 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
14077 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
14078 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
14079 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
14080 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
14082 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
14083 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
14084 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
14086 /* SSE3 */
14087 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
14088 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
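/* The rows in bdesc_1arg carry no builtin name; the user-visible names for
   these codes (for instance "__builtin_ia32_sqrtps" for IX86_BUILTIN_SQRTPS)
   are registered by the explicit def_builtin calls further down.  A sketch
   of a user-level call, with the typedef supplied only for the example:

     typedef float __v4sf __attribute__ ((__vector_size__ (16)));

     __v4sf
     sqrt4 (__v4sf a)
     {
       return __builtin_ia32_sqrtps (a);
     }
 */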
14091 static void
14092 ix86_init_builtins (void)
14094 if (TARGET_MMX)
14095 ix86_init_mmx_sse_builtins ();
14098 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
14099 is zero. Otherwise, if TARGET_SSE is not set, only the MMX builtins
14100 are defined. */
14101 static void
14102 ix86_init_mmx_sse_builtins (void)
14104 const struct builtin_description * d;
14105 size_t i;
14107 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
14108 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
14109 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
14110 tree V2DI_type_node
14111 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
14112 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
14113 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
14114 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
14115 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
14116 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
14117 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
14119 tree pchar_type_node = build_pointer_type (char_type_node);
14120 tree pcchar_type_node = build_pointer_type (
14121 build_type_variant (char_type_node, 1, 0));
14122 tree pfloat_type_node = build_pointer_type (float_type_node);
14123 tree pcfloat_type_node = build_pointer_type (
14124 build_type_variant (float_type_node, 1, 0));
14125 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
14126 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
14127 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
14129 /* Comparisons. */
14130 tree int_ftype_v4sf_v4sf
14131 = build_function_type_list (integer_type_node,
14132 V4SF_type_node, V4SF_type_node, NULL_TREE);
14133 tree v4si_ftype_v4sf_v4sf
14134 = build_function_type_list (V4SI_type_node,
14135 V4SF_type_node, V4SF_type_node, NULL_TREE);
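/* Naming convention for these type trees: the token before "_ftype_" is the
   return type and the tokens after it are the argument types.  So
   v4sf_ftype_v4sf_int, for instance, describes a builtin whose C-level
   shape is roughly

     __v4sf __builtin_ia32_cvtsi2ss (__v4sf, int);

   (an illustrative prototype; that type is used for IX86_BUILTIN_CVTSI2SS
   further down).  */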
14136 /* MMX/SSE/integer conversions. */
14137 tree int_ftype_v4sf
14138 = build_function_type_list (integer_type_node,
14139 V4SF_type_node, NULL_TREE);
14140 tree int64_ftype_v4sf
14141 = build_function_type_list (long_long_integer_type_node,
14142 V4SF_type_node, NULL_TREE);
14143 tree int_ftype_v8qi
14144 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
14145 tree v4sf_ftype_v4sf_int
14146 = build_function_type_list (V4SF_type_node,
14147 V4SF_type_node, integer_type_node, NULL_TREE);
14148 tree v4sf_ftype_v4sf_int64
14149 = build_function_type_list (V4SF_type_node,
14150 V4SF_type_node, long_long_integer_type_node,
14151 NULL_TREE);
14152 tree v4sf_ftype_v4sf_v2si
14153 = build_function_type_list (V4SF_type_node,
14154 V4SF_type_node, V2SI_type_node, NULL_TREE);
14156 /* Miscellaneous. */
14157 tree v8qi_ftype_v4hi_v4hi
14158 = build_function_type_list (V8QI_type_node,
14159 V4HI_type_node, V4HI_type_node, NULL_TREE);
14160 tree v4hi_ftype_v2si_v2si
14161 = build_function_type_list (V4HI_type_node,
14162 V2SI_type_node, V2SI_type_node, NULL_TREE);
14163 tree v4sf_ftype_v4sf_v4sf_int
14164 = build_function_type_list (V4SF_type_node,
14165 V4SF_type_node, V4SF_type_node,
14166 integer_type_node, NULL_TREE);
14167 tree v2si_ftype_v4hi_v4hi
14168 = build_function_type_list (V2SI_type_node,
14169 V4HI_type_node, V4HI_type_node, NULL_TREE);
14170 tree v4hi_ftype_v4hi_int
14171 = build_function_type_list (V4HI_type_node,
14172 V4HI_type_node, integer_type_node, NULL_TREE);
14173 tree v4hi_ftype_v4hi_di
14174 = build_function_type_list (V4HI_type_node,
14175 V4HI_type_node, long_long_unsigned_type_node,
14176 NULL_TREE);
14177 tree v2si_ftype_v2si_di
14178 = build_function_type_list (V2SI_type_node,
14179 V2SI_type_node, long_long_unsigned_type_node,
14180 NULL_TREE);
14181 tree void_ftype_void
14182 = build_function_type (void_type_node, void_list_node);
14183 tree void_ftype_unsigned
14184 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
14185 tree void_ftype_unsigned_unsigned
14186 = build_function_type_list (void_type_node, unsigned_type_node,
14187 unsigned_type_node, NULL_TREE);
14188 tree void_ftype_pcvoid_unsigned_unsigned
14189 = build_function_type_list (void_type_node, const_ptr_type_node,
14190 unsigned_type_node, unsigned_type_node,
14191 NULL_TREE);
14192 tree unsigned_ftype_void
14193 = build_function_type (unsigned_type_node, void_list_node);
14194 tree v2si_ftype_v4sf
14195 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
14196 /* Loads/stores. */
14197 tree void_ftype_v8qi_v8qi_pchar
14198 = build_function_type_list (void_type_node,
14199 V8QI_type_node, V8QI_type_node,
14200 pchar_type_node, NULL_TREE);
14201 tree v4sf_ftype_pcfloat
14202 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
14203 /* @@@ the type is bogus */
14204 tree v4sf_ftype_v4sf_pv2si
14205 = build_function_type_list (V4SF_type_node,
14206 V4SF_type_node, pv2si_type_node, NULL_TREE);
14207 tree void_ftype_pv2si_v4sf
14208 = build_function_type_list (void_type_node,
14209 pv2si_type_node, V4SF_type_node, NULL_TREE);
14210 tree void_ftype_pfloat_v4sf
14211 = build_function_type_list (void_type_node,
14212 pfloat_type_node, V4SF_type_node, NULL_TREE);
14213 tree void_ftype_pdi_di
14214 = build_function_type_list (void_type_node,
14215 pdi_type_node, long_long_unsigned_type_node,
14216 NULL_TREE);
14217 tree void_ftype_pv2di_v2di
14218 = build_function_type_list (void_type_node,
14219 pv2di_type_node, V2DI_type_node, NULL_TREE);
14220 /* Normal vector unops. */
14221 tree v4sf_ftype_v4sf
14222 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14224 /* Normal vector binops. */
14225 tree v4sf_ftype_v4sf_v4sf
14226 = build_function_type_list (V4SF_type_node,
14227 V4SF_type_node, V4SF_type_node, NULL_TREE);
14228 tree v8qi_ftype_v8qi_v8qi
14229 = build_function_type_list (V8QI_type_node,
14230 V8QI_type_node, V8QI_type_node, NULL_TREE);
14231 tree v4hi_ftype_v4hi_v4hi
14232 = build_function_type_list (V4HI_type_node,
14233 V4HI_type_node, V4HI_type_node, NULL_TREE);
14234 tree v2si_ftype_v2si_v2si
14235 = build_function_type_list (V2SI_type_node,
14236 V2SI_type_node, V2SI_type_node, NULL_TREE);
14237 tree di_ftype_di_di
14238 = build_function_type_list (long_long_unsigned_type_node,
14239 long_long_unsigned_type_node,
14240 long_long_unsigned_type_node, NULL_TREE);
14242 tree v2si_ftype_v2sf
14243 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
14244 tree v2sf_ftype_v2si
14245 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
14246 tree v2si_ftype_v2si
14247 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
14248 tree v2sf_ftype_v2sf
14249 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
14250 tree v2sf_ftype_v2sf_v2sf
14251 = build_function_type_list (V2SF_type_node,
14252 V2SF_type_node, V2SF_type_node, NULL_TREE);
14253 tree v2si_ftype_v2sf_v2sf
14254 = build_function_type_list (V2SI_type_node,
14255 V2SF_type_node, V2SF_type_node, NULL_TREE);
14256 tree pint_type_node = build_pointer_type (integer_type_node);
14257 tree pdouble_type_node = build_pointer_type (double_type_node);
14258 tree pcdouble_type_node = build_pointer_type (
14259 build_type_variant (double_type_node, 1, 0));
14260 tree int_ftype_v2df_v2df
14261 = build_function_type_list (integer_type_node,
14262 V2DF_type_node, V2DF_type_node, NULL_TREE);
14264 tree ti_ftype_ti_ti
14265 = build_function_type_list (intTI_type_node,
14266 intTI_type_node, intTI_type_node, NULL_TREE);
14267 tree void_ftype_pcvoid
14268 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
14269 tree v4sf_ftype_v4si
14270 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
14271 tree v4si_ftype_v4sf
14272 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
14273 tree v2df_ftype_v4si
14274 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
14275 tree v4si_ftype_v2df
14276 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
14277 tree v2si_ftype_v2df
14278 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
14279 tree v4sf_ftype_v2df
14280 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
14281 tree v2df_ftype_v2si
14282 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
14283 tree v2df_ftype_v4sf
14284 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
14285 tree int_ftype_v2df
14286 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
14287 tree int64_ftype_v2df
14288 = build_function_type_list (long_long_integer_type_node,
14289 V2DF_type_node, NULL_TREE);
14290 tree v2df_ftype_v2df_int
14291 = build_function_type_list (V2DF_type_node,
14292 V2DF_type_node, integer_type_node, NULL_TREE);
14293 tree v2df_ftype_v2df_int64
14294 = build_function_type_list (V2DF_type_node,
14295 V2DF_type_node, long_long_integer_type_node,
14296 NULL_TREE);
14297 tree v4sf_ftype_v4sf_v2df
14298 = build_function_type_list (V4SF_type_node,
14299 V4SF_type_node, V2DF_type_node, NULL_TREE);
14300 tree v2df_ftype_v2df_v4sf
14301 = build_function_type_list (V2DF_type_node,
14302 V2DF_type_node, V4SF_type_node, NULL_TREE);
14303 tree v2df_ftype_v2df_v2df_int
14304 = build_function_type_list (V2DF_type_node,
14305 V2DF_type_node, V2DF_type_node,
14306 integer_type_node,
14307 NULL_TREE);
14308 tree v2df_ftype_v2df_pcdouble
14309 = build_function_type_list (V2DF_type_node,
14310 V2DF_type_node, pcdouble_type_node, NULL_TREE);
14311 tree void_ftype_pdouble_v2df
14312 = build_function_type_list (void_type_node,
14313 pdouble_type_node, V2DF_type_node, NULL_TREE);
14314 tree void_ftype_pint_int
14315 = build_function_type_list (void_type_node,
14316 pint_type_node, integer_type_node, NULL_TREE);
14317 tree void_ftype_v16qi_v16qi_pchar
14318 = build_function_type_list (void_type_node,
14319 V16QI_type_node, V16QI_type_node,
14320 pchar_type_node, NULL_TREE);
14321 tree v2df_ftype_pcdouble
14322 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
14323 tree v2df_ftype_v2df_v2df
14324 = build_function_type_list (V2DF_type_node,
14325 V2DF_type_node, V2DF_type_node, NULL_TREE);
14326 tree v16qi_ftype_v16qi_v16qi
14327 = build_function_type_list (V16QI_type_node,
14328 V16QI_type_node, V16QI_type_node, NULL_TREE);
14329 tree v8hi_ftype_v8hi_v8hi
14330 = build_function_type_list (V8HI_type_node,
14331 V8HI_type_node, V8HI_type_node, NULL_TREE);
14332 tree v4si_ftype_v4si_v4si
14333 = build_function_type_list (V4SI_type_node,
14334 V4SI_type_node, V4SI_type_node, NULL_TREE);
14335 tree v2di_ftype_v2di_v2di
14336 = build_function_type_list (V2DI_type_node,
14337 V2DI_type_node, V2DI_type_node, NULL_TREE);
14338 tree v2di_ftype_v2df_v2df
14339 = build_function_type_list (V2DI_type_node,
14340 V2DF_type_node, V2DF_type_node, NULL_TREE);
14341 tree v2df_ftype_v2df
14342 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14343 tree v2di_ftype_v2di_int
14344 = build_function_type_list (V2DI_type_node,
14345 V2DI_type_node, integer_type_node, NULL_TREE);
14346 tree v4si_ftype_v4si_int
14347 = build_function_type_list (V4SI_type_node,
14348 V4SI_type_node, integer_type_node, NULL_TREE);
14349 tree v8hi_ftype_v8hi_int
14350 = build_function_type_list (V8HI_type_node,
14351 V8HI_type_node, integer_type_node, NULL_TREE);
14352 tree v8hi_ftype_v8hi_v2di
14353 = build_function_type_list (V8HI_type_node,
14354 V8HI_type_node, V2DI_type_node, NULL_TREE);
14355 tree v4si_ftype_v4si_v2di
14356 = build_function_type_list (V4SI_type_node,
14357 V4SI_type_node, V2DI_type_node, NULL_TREE);
14358 tree v4si_ftype_v8hi_v8hi
14359 = build_function_type_list (V4SI_type_node,
14360 V8HI_type_node, V8HI_type_node, NULL_TREE);
14361 tree di_ftype_v8qi_v8qi
14362 = build_function_type_list (long_long_unsigned_type_node,
14363 V8QI_type_node, V8QI_type_node, NULL_TREE);
14364 tree di_ftype_v2si_v2si
14365 = build_function_type_list (long_long_unsigned_type_node,
14366 V2SI_type_node, V2SI_type_node, NULL_TREE);
14367 tree v2di_ftype_v16qi_v16qi
14368 = build_function_type_list (V2DI_type_node,
14369 V16QI_type_node, V16QI_type_node, NULL_TREE);
14370 tree v2di_ftype_v4si_v4si
14371 = build_function_type_list (V2DI_type_node,
14372 V4SI_type_node, V4SI_type_node, NULL_TREE);
14373 tree int_ftype_v16qi
14374 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
14375 tree v16qi_ftype_pcchar
14376 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
14377 tree void_ftype_pchar_v16qi
14378 = build_function_type_list (void_type_node,
14379 pchar_type_node, V16QI_type_node, NULL_TREE);
14381 tree float80_type;
14382 tree float128_type;
14383 tree ftype;
14385 /* The __float80 type. */
14386 if (TYPE_MODE (long_double_type_node) == XFmode)
14387 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
14388 "__float80");
14389 else
14391 /* The __float80 type. */
14392 float80_type = make_node (REAL_TYPE);
14393 TYPE_PRECISION (float80_type) = 80;
14394 layout_type (float80_type);
14395 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
14398 float128_type = make_node (REAL_TYPE);
14399 TYPE_PRECISION (float128_type) = 128;
14400 layout_type (float128_type);
14401 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
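/* Illustrative note: on configurations where long double already has
   XFmode, the code above simply registers "__float80" as another name for
   long double, so user code such as

     __float80 x = 1.0L;

   behaves exactly like long double there.  On other configurations a
   separate 80-bit REAL_TYPE is laid out and registered instead, and a
   128-bit "__float128" REAL_TYPE is always registered.  */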
14403 /* Add all builtins that are more or less simple operations on two
14404 operands. */
14405 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14407 /* Use one of the operands; the target can have a different mode for
14408 mask-generating compares. */
14409 enum machine_mode mode;
14410 tree type;
14412 if (d->name == 0)
14413 continue;
14414 mode = insn_data[d->icode].operand[1].mode;
14416 switch (mode)
14418 case V16QImode:
14419 type = v16qi_ftype_v16qi_v16qi;
14420 break;
14421 case V8HImode:
14422 type = v8hi_ftype_v8hi_v8hi;
14423 break;
14424 case V4SImode:
14425 type = v4si_ftype_v4si_v4si;
14426 break;
14427 case V2DImode:
14428 type = v2di_ftype_v2di_v2di;
14429 break;
14430 case V2DFmode:
14431 type = v2df_ftype_v2df_v2df;
14432 break;
14433 case TImode:
14434 type = ti_ftype_ti_ti;
14435 break;
14436 case V4SFmode:
14437 type = v4sf_ftype_v4sf_v4sf;
14438 break;
14439 case V8QImode:
14440 type = v8qi_ftype_v8qi_v8qi;
14441 break;
14442 case V4HImode:
14443 type = v4hi_ftype_v4hi_v4hi;
14444 break;
14445 case V2SImode:
14446 type = v2si_ftype_v2si_v2si;
14447 break;
14448 case DImode:
14449 type = di_ftype_di_di;
14450 break;
14452 default:
14453 gcc_unreachable ();
14456 /* Override for comparisons. */
14457 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14458 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
14459 type = v4si_ftype_v4sf_v4sf;
14461 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
14462 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14463 type = v2di_ftype_v2df_v2df;
14465 def_builtin (d->mask, d->name, type, d->code);
14468 /* Add the remaining MMX insns with somewhat more complicated types. */
14469 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
14470 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
14471 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
14472 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
14474 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
14475 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
14476 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
14478 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
14479 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
14481 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
14482 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
14484 /* comi/ucomi insns. */
14485 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14486 if (d->mask == MASK_SSE2)
14487 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
14488 else
14489 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
14491 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
14492 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
14493 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
14495 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
14496 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
14497 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
14498 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
14499 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
14500 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
14501 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
14502 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
14503 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
14504 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
14505 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
14507 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
14509 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
14510 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
14512 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
14513 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
14514 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
14515 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
14517 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
14518 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
14519 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
14520 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
14522 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
14524 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
14526 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
14527 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
14528 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
14529 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
14530 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
14531 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
14533 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
14535 /* Original 3DNow! */
14536 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
14537 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
14538 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
14539 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
14540 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
14541 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
14542 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
14543 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
14544 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
14545 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
14546 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
14547 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
14548 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
14549 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
14550 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
14551 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
14552 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
14553 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
14554 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
14555 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
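/* Illustrative user-level use of one of the 3DNow! builtins above (the
   typedef exists only for the example; mm3dnow.h provides the usual
   wrappers):

     typedef float __v2sf __attribute__ ((__vector_size__ (8)));

     __v2sf
     add2 (__v2sf a, __v2sf b)
     {
       return __builtin_ia32_pfadd (a, b);
     }
 */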
14557 /* 3DNow! extension as used in the Athlon CPU. */
14558 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
14559 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
14560 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
14561 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
14562 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
14563 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
14565 /* SSE2 */
14566 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
14568 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
14569 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
14571 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
14572 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
14574 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
14575 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
14576 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
14577 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
14578 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
14580 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
14581 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
14582 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
14583 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
14585 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
14586 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
14588 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
14590 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
14591 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
14593 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
14594 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
14595 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
14596 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
14597 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
14599 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
14601 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
14602 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
14603 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
14604 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
14606 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
14607 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
14608 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
14610 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
14611 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
14612 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
14613 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
14615 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
14616 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
14617 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
14619 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
14620 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
14622 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
14623 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
14625 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
14626 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
14627 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
14629 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
14630 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
14631 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
14633 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
14634 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
14636 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
14637 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
14638 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
14639 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
14641 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
14642 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
14643 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
14644 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
14646 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
14647 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
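/* Illustrative user-level use of one of the immediate-count shift builtins
   above; the count is the second (integer) argument.  The typedef exists
   only for the example; _mm_slli_epi16 in emmintrin.h is the usual entry
   point for this one.

     typedef short __v8hi __attribute__ ((__vector_size__ (16)));

     __v8hi
     shift_each_left_3 (__v8hi v)
     {
       return __builtin_ia32_psllwi128 (v, 3);
     }
 */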
14649 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
14651 /* Prescott New Instructions. */
14652 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
14653 void_ftype_pcvoid_unsigned_unsigned,
14654 IX86_BUILTIN_MONITOR);
14655 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
14656 void_ftype_unsigned_unsigned,
14657 IX86_BUILTIN_MWAIT);
14658 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
14659 v4sf_ftype_v4sf,
14660 IX86_BUILTIN_MOVSHDUP);
14661 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
14662 v4sf_ftype_v4sf,
14663 IX86_BUILTIN_MOVSLDUP);
14664 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
14665 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
14667 /* Access to the vec_init patterns. */
14668 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
14669 integer_type_node, NULL_TREE);
14670 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
14671 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
14673 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
14674 short_integer_type_node,
14675 short_integer_type_node,
14676 short_integer_type_node, NULL_TREE);
14677 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
14678 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
14680 ftype = build_function_type_list (V8QI_type_node, char_type_node,
14681 char_type_node, char_type_node,
14682 char_type_node, char_type_node,
14683 char_type_node, char_type_node,
14684 char_type_node, NULL_TREE);
14685 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
14686 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
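/* Illustrative user-level use of one of the vec_init builtins registered
   above (the typedef exists only for the example; mmintrin.h's _mm_set*
   helpers are the usual entry points):

     typedef int __v2si __attribute__ ((__vector_size__ (8)));

     __v2si
     make_pair (int a, int b)
     {
       return __builtin_ia32_vec_init_v2si (a, b);
     }
 */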
14688 /* Access to the vec_extract patterns. */
14689 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14690 integer_type_node, NULL_TREE);
14691 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
14692 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
14694 ftype = build_function_type_list (long_long_integer_type_node,
14695 V2DI_type_node, integer_type_node,
14696 NULL_TREE);
14697 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
14698 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
14700 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14701 integer_type_node, NULL_TREE);
14702 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
14703 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
14705 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14706 integer_type_node, NULL_TREE);
14707 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
14708 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
14710 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14711 integer_type_node, NULL_TREE);
14712 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
14713 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
14715 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
14716 integer_type_node, NULL_TREE);
14717 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
14718 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
14720 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
14721 integer_type_node, NULL_TREE);
14722 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
14723 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
14725 /* Access to the vec_set patterns. */
14726 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14727 intHI_type_node,
14728 integer_type_node, NULL_TREE);
14729 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
14730 ftype, IX86_BUILTIN_VEC_SET_V8HI);
14732 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
14733 intHI_type_node,
14734 integer_type_node, NULL_TREE);
14735 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
14736 ftype, IX86_BUILTIN_VEC_SET_V4HI);
14739 /* Errors in the source file can cause expand_expr to return const0_rtx
14740 where we expect a vector. To avoid crashing, use one of the vector
14741 clear instructions. */
14742 static rtx
14743 safe_vector_operand (rtx x, enum machine_mode mode)
14745 if (x == const0_rtx)
14746 x = CONST0_RTX (mode);
14747 return x;
14750 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
14752 static rtx
14753 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
14755 rtx pat, xops[3];
14756 tree arg0 = TREE_VALUE (arglist);
14757 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14758 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14759 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14760 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14761 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14762 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
14764 if (VECTOR_MODE_P (mode0))
14765 op0 = safe_vector_operand (op0, mode0);
14766 if (VECTOR_MODE_P (mode1))
14767 op1 = safe_vector_operand (op1, mode1);
14769 if (optimize || !target
14770 || GET_MODE (target) != tmode
14771 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14772 target = gen_reg_rtx (tmode);
14774 if (GET_MODE (op1) == SImode && mode1 == TImode)
14776 rtx x = gen_reg_rtx (V4SImode);
14777 emit_insn (gen_sse2_loadd (x, op1));
14778 op1 = gen_lowpart (TImode, x);
14781 /* The insn must want input operands in the same modes as the
14782 result. */
14783 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
14784 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
14786 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
14787 op0 = copy_to_mode_reg (mode0, op0);
14788 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
14789 op1 = copy_to_mode_reg (mode1, op1);
14791 /* ??? Using ix86_fixup_binary_operands is problematic when
14792 we've got mismatched modes. Fake it. */
14794 xops[0] = target;
14795 xops[1] = op0;
14796 xops[2] = op1;
14798 if (tmode == mode0 && tmode == mode1)
14800 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
14801 op0 = xops[1];
14802 op1 = xops[2];
14804 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
14806 op0 = force_reg (mode0, op0);
14807 op1 = force_reg (mode1, op1);
14808 target = gen_reg_rtx (tmode);
14811 pat = GEN_FCN (icode) (target, op0, op1);
14812 if (! pat)
14813 return 0;
14814 emit_insn (pat);
14815 return target;
14818 /* Subroutine of ix86_expand_builtin to take care of stores. */
14820 static rtx
14821 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
14823 rtx pat;
14824 tree arg0 = TREE_VALUE (arglist);
14825 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14826 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14827 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14828 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
14829 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
14831 if (VECTOR_MODE_P (mode1))
14832 op1 = safe_vector_operand (op1, mode1);
14834 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14835 op1 = copy_to_mode_reg (mode1, op1);
14837 pat = GEN_FCN (icode) (op0, op1);
14838 if (pat)
14839 emit_insn (pat);
14840 return 0;
14843 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
14845 static rtx
14846 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
14847 rtx target, int do_load)
14849 rtx pat;
14850 tree arg0 = TREE_VALUE (arglist);
14851 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14852 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14853 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14855 if (optimize || !target
14856 || GET_MODE (target) != tmode
14857 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14858 target = gen_reg_rtx (tmode);
14859 if (do_load)
14860 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14861 else
14863 if (VECTOR_MODE_P (mode0))
14864 op0 = safe_vector_operand (op0, mode0);
14866 if ((optimize && !register_operand (op0, mode0))
14867 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14868 op0 = copy_to_mode_reg (mode0, op0);
14871 pat = GEN_FCN (icode) (target, op0);
14872 if (! pat)
14873 return 0;
14874 emit_insn (pat);
14875 return target;
14878 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
14879 sqrtss, rsqrtss, rcpss. */
14881 static rtx
14882 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
14884 rtx pat;
14885 tree arg0 = TREE_VALUE (arglist);
14886 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14887 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14888 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14890 if (optimize || !target
14891 || GET_MODE (target) != tmode
14892 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14893 target = gen_reg_rtx (tmode);
14895 if (VECTOR_MODE_P (mode0))
14896 op0 = safe_vector_operand (op0, mode0);
14898 if ((optimize && !register_operand (op0, mode0))
14899 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14900 op0 = copy_to_mode_reg (mode0, op0);
14902 op1 = op0;
14903 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
14904 op1 = copy_to_mode_reg (mode0, op1);
14906 pat = GEN_FCN (icode) (target, op0, op1);
14907 if (! pat)
14908 return 0;
14909 emit_insn (pat);
14910 return target;
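/* Illustrative sketch, not part of this file: xmmintrin.h is expected to
   expose the scalar builtins expanded here roughly as follows; the exact
   intrinsic name, attributes and casts are assumptions.

     static __inline __m128
     _mm_sqrt_ss (__m128 __A)
     {
       return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
     }  */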
14913 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
14915 static rtx
14916 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
14917 rtx target)
14919 rtx pat;
14920 tree arg0 = TREE_VALUE (arglist);
14921 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14922 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14923 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14924 rtx op2;
14925 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
14926 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
14927 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
14928 enum rtx_code comparison = d->comparison;
14930 if (VECTOR_MODE_P (mode0))
14931 op0 = safe_vector_operand (op0, mode0);
14932 if (VECTOR_MODE_P (mode1))
14933 op1 = safe_vector_operand (op1, mode1);
14935 /* Swap operands if we have a comparison that isn't available in
14936 hardware. */
14937 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14939 rtx tmp = gen_reg_rtx (mode1);
14940 emit_move_insn (tmp, op1);
14941 op1 = op0;
14942 op0 = tmp;
14945 if (optimize || !target
14946 || GET_MODE (target) != tmode
14947 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
14948 target = gen_reg_rtx (tmode);
14950 if ((optimize && !register_operand (op0, mode0))
14951 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
14952 op0 = copy_to_mode_reg (mode0, op0);
14953 if ((optimize && !register_operand (op1, mode1))
14954 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
14955 op1 = copy_to_mode_reg (mode1, op1);
14957 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
14958 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
14959 if (! pat)
14960 return 0;
14961 emit_insn (pat);
14962 return target;
14965 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
14967 static rtx
14968 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
14969 rtx target)
14971 rtx pat;
14972 tree arg0 = TREE_VALUE (arglist);
14973 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14974 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14975 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14976 rtx op2;
14977 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
14978 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
14979 enum rtx_code comparison = d->comparison;
14981 if (VECTOR_MODE_P (mode0))
14982 op0 = safe_vector_operand (op0, mode0);
14983 if (VECTOR_MODE_P (mode1))
14984 op1 = safe_vector_operand (op1, mode1);
14986 /* Swap operands if we have a comparison that isn't available in
14987 hardware. */
14988 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14990 rtx tmp = op1;
14991 op1 = op0;
14992 op0 = tmp;
14995 target = gen_reg_rtx (SImode);
14996 emit_move_insn (target, const0_rtx);
14997 target = gen_rtx_SUBREG (QImode, target, 0);
14999 if ((optimize && !register_operand (op0, mode0))
15000 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15001 op0 = copy_to_mode_reg (mode0, op0);
15002 if ((optimize && !register_operand (op1, mode1))
15003 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15004 op1 = copy_to_mode_reg (mode1, op1);
15006 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15007 pat = GEN_FCN (d->icode) (op0, op1);
15008 if (! pat)
15009 return 0;
15010 emit_insn (pat);
15011 emit_insn (gen_rtx_SET (VOIDmode,
15012 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
15013 gen_rtx_fmt_ee (comparison, QImode,
15014 SET_DEST (pat),
15015 const0_rtx)));
15017 return SUBREG_REG (target);
15020 /* Return the integer constant in ARG. Constrain it to be in the range
15021 of the subparts of VEC_TYPE; issue an error if not. */
15023 static int
15024 get_element_number (tree vec_type, tree arg)
15026 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15028 if (!host_integerp (arg, 1)
15029 || (elt = tree_low_cst (arg, 1), elt > max))
15031 error ("selector must be an integer constant in the range 0..%wi", max);
15032 return 0;
15035 return elt;
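/* Illustrative example, an assumption rather than code from this tree:
   with a V4SF argument TYPE_VECTOR_SUBPARTS is 4, so

     __builtin_ia32_vec_ext_v4sf (v, 4);

   is rejected above with "selector must be an integer constant in the
   range 0..3", while selectors 0 through 3 are accepted.  */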
15038 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15039 ix86_expand_vector_init. We DO have language-level syntax for this, in
15040 the form of (type){ init-list }. Except that since we can't place emms
15041 instructions from inside the compiler, we can't allow the use of MMX
15042 registers unless the user explicitly asks for it. So we do *not* define
15043 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
15044 we have builtins invoked by mmintrin.h that give us license to emit
15045 these sorts of instructions. */
15047 static rtx
15048 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
15050 enum machine_mode tmode = TYPE_MODE (type);
15051 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
15052 int i, n_elt = GET_MODE_NUNITS (tmode);
15053 rtvec v = rtvec_alloc (n_elt);
15055 gcc_assert (VECTOR_MODE_P (tmode));
15057 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
15059 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15060 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15063 gcc_assert (arglist == NULL);
15065 if (!target || !register_operand (target, tmode))
15066 target = gen_reg_rtx (tmode);
15068 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
15069 return target;
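/* Illustrative sketch, not part of this file: mmintrin.h is expected to
   route the MMX "set" intrinsics through these vec_init builtins roughly
   as follows; the exact intrinsic signature is an assumption.

     static __inline __m64
     _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
     {
       return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
     }  */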
15072 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15073 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
15074 had a language-level syntax for referencing vector elements. */
15076 static rtx
15077 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
15079 enum machine_mode tmode, mode0;
15080 tree arg0, arg1;
15081 int elt;
15082 rtx op0;
15084 arg0 = TREE_VALUE (arglist);
15085 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15087 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15088 elt = get_element_number (TREE_TYPE (arg0), arg1);
15090 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15091 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15092 gcc_assert (VECTOR_MODE_P (mode0));
15094 op0 = force_reg (mode0, op0);
15096 if (optimize || !target || !register_operand (target, tmode))
15097 target = gen_reg_rtx (tmode);
15099 ix86_expand_vector_extract (true, target, op0, elt);
15101 return target;
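/* Illustrative sketch, not part of this file: the element-extract
   intrinsics are expected to be thin wrappers over these builtins;
   the name and casts below are assumptions.

     static __inline int
     _mm_extract_pi16 (__m64 const __A, int const __N)
     {
       return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
     }  */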
15104 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15105 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
15106 a language-level syntax for referencing vector elements. */
15108 static rtx
15109 ix86_expand_vec_set_builtin (tree arglist)
15111 enum machine_mode tmode, mode1;
15112 tree arg0, arg1, arg2;
15113 int elt;
15114 rtx op0, op1;
15116 arg0 = TREE_VALUE (arglist);
15117 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15118 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15120 tmode = TYPE_MODE (TREE_TYPE (arg0));
15121 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15122 gcc_assert (VECTOR_MODE_P (tmode));
15124 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
15125 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
15126 elt = get_element_number (TREE_TYPE (arg0), arg2);
15128 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15129 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15131 op0 = force_reg (tmode, op0);
15132 op1 = force_reg (mode1, op1);
15134 ix86_expand_vector_set (true, op0, op1, elt);
15136 return op0;
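/* Illustrative sketch, not part of this file: the element-insert
   intrinsics are expected to wrap these builtins in the same way;
   the name and casts below are assumptions.

     static __inline __m64
     _mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
     {
       return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
     }  */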
15139 /* Expand an expression EXP that calls a built-in function,
15140 with result going to TARGET if that's convenient
15141 (and in mode MODE if that's convenient).
15142 SUBTARGET may be used as the target for computing one of EXP's operands.
15143 IGNORE is nonzero if the value is to be ignored. */
15145 static rtx
15146 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15147 enum machine_mode mode ATTRIBUTE_UNUSED,
15148 int ignore ATTRIBUTE_UNUSED)
15150 const struct builtin_description *d;
15151 size_t i;
15152 enum insn_code icode;
15153 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
15154 tree arglist = TREE_OPERAND (exp, 1);
15155 tree arg0, arg1, arg2;
15156 rtx op0, op1, op2, pat;
15157 enum machine_mode tmode, mode0, mode1, mode2;
15158 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15160 switch (fcode)
15162 case IX86_BUILTIN_EMMS:
15163 emit_insn (gen_mmx_emms ());
15164 return 0;
15166 case IX86_BUILTIN_SFENCE:
15167 emit_insn (gen_sse_sfence ());
15168 return 0;
15170 case IX86_BUILTIN_MASKMOVQ:
15171 case IX86_BUILTIN_MASKMOVDQU:
15172 icode = (fcode == IX86_BUILTIN_MASKMOVQ
15173 ? CODE_FOR_mmx_maskmovq
15174 : CODE_FOR_sse2_maskmovdqu);
15175 /* Note the arg order is different from the operand order. */
15176 arg1 = TREE_VALUE (arglist);
15177 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
15178 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15179 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15180 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15181 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15182 mode0 = insn_data[icode].operand[0].mode;
15183 mode1 = insn_data[icode].operand[1].mode;
15184 mode2 = insn_data[icode].operand[2].mode;
15186 op0 = force_reg (Pmode, op0);
15187 op0 = gen_rtx_MEM (mode1, op0);
15189 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15190 op0 = copy_to_mode_reg (mode0, op0);
15191 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15192 op1 = copy_to_mode_reg (mode1, op1);
15193 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
15194 op2 = copy_to_mode_reg (mode2, op2);
15195 pat = GEN_FCN (icode) (op0, op1, op2);
15196 if (! pat)
15197 return 0;
15198 emit_insn (pat);
15199 return 0;
15201 case IX86_BUILTIN_SQRTSS:
15202 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
15203 case IX86_BUILTIN_RSQRTSS:
15204 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
15205 case IX86_BUILTIN_RCPSS:
15206 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
15208 case IX86_BUILTIN_LOADUPS:
15209 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
15211 case IX86_BUILTIN_STOREUPS:
15212 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
15214 case IX86_BUILTIN_LOADHPS:
15215 case IX86_BUILTIN_LOADLPS:
15216 case IX86_BUILTIN_LOADHPD:
15217 case IX86_BUILTIN_LOADLPD:
15218 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
15219 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
15220 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
15221 : CODE_FOR_sse2_loadlpd);
15222 arg0 = TREE_VALUE (arglist);
15223 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15224 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15225 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15226 tmode = insn_data[icode].operand[0].mode;
15227 mode0 = insn_data[icode].operand[1].mode;
15228 mode1 = insn_data[icode].operand[2].mode;
15230 op0 = force_reg (mode0, op0);
15231 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
15232 if (optimize || target == 0
15233 || GET_MODE (target) != tmode
15234 || !register_operand (target, tmode))
15235 target = gen_reg_rtx (tmode);
15236 pat = GEN_FCN (icode) (target, op0, op1);
15237 if (! pat)
15238 return 0;
15239 emit_insn (pat);
15240 return target;
15242 case IX86_BUILTIN_STOREHPS:
15243 case IX86_BUILTIN_STORELPS:
15244 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
15245 : CODE_FOR_sse_storelps);
15246 arg0 = TREE_VALUE (arglist);
15247 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15248 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15249 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15250 mode0 = insn_data[icode].operand[0].mode;
15251 mode1 = insn_data[icode].operand[1].mode;
15253 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15254 op1 = force_reg (mode1, op1);
15256 pat = GEN_FCN (icode) (op0, op1);
15257 if (! pat)
15258 return 0;
15259 emit_insn (pat);
15260 return const0_rtx;
15262 case IX86_BUILTIN_MOVNTPS:
15263 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
15264 case IX86_BUILTIN_MOVNTQ:
15265 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
15267 case IX86_BUILTIN_LDMXCSR:
15268 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15269 target = assign_386_stack_local (SImode, SLOT_TEMP);
15270 emit_move_insn (target, op0);
15271 emit_insn (gen_sse_ldmxcsr (target));
15272 return 0;
15274 case IX86_BUILTIN_STMXCSR:
15275 target = assign_386_stack_local (SImode, SLOT_TEMP);
15276 emit_insn (gen_sse_stmxcsr (target));
15277 return copy_to_mode_reg (SImode, target);
15279 case IX86_BUILTIN_SHUFPS:
15280 case IX86_BUILTIN_SHUFPD:
15281 icode = (fcode == IX86_BUILTIN_SHUFPS
15282 ? CODE_FOR_sse_shufps
15283 : CODE_FOR_sse2_shufpd);
15284 arg0 = TREE_VALUE (arglist);
15285 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15286 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15287 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15288 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15289 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15290 tmode = insn_data[icode].operand[0].mode;
15291 mode0 = insn_data[icode].operand[1].mode;
15292 mode1 = insn_data[icode].operand[2].mode;
15293 mode2 = insn_data[icode].operand[3].mode;
15295 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15296 op0 = copy_to_mode_reg (mode0, op0);
15297 if ((optimize && !register_operand (op1, mode1))
15298 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
15299 op1 = copy_to_mode_reg (mode1, op1);
15300 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15302 /* @@@ better error message */
15303 error ("mask must be an immediate");
15304 return gen_reg_rtx (tmode);
15306 if (optimize || target == 0
15307 || GET_MODE (target) != tmode
15308 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15309 target = gen_reg_rtx (tmode);
15310 pat = GEN_FCN (icode) (target, op0, op1, op2);
15311 if (! pat)
15312 return 0;
15313 emit_insn (pat);
15314 return target;
15316 case IX86_BUILTIN_PSHUFW:
15317 case IX86_BUILTIN_PSHUFD:
15318 case IX86_BUILTIN_PSHUFHW:
15319 case IX86_BUILTIN_PSHUFLW:
15320 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
15321 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
15322 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
15323 : CODE_FOR_mmx_pshufw);
15324 arg0 = TREE_VALUE (arglist);
15325 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15326 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15327 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15328 tmode = insn_data[icode].operand[0].mode;
15329 mode1 = insn_data[icode].operand[1].mode;
15330 mode2 = insn_data[icode].operand[2].mode;
15332 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15333 op0 = copy_to_mode_reg (mode1, op0);
15334 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15336 /* @@@ better error message */
15337 error ("mask must be an immediate");
15338 return const0_rtx;
15340 if (target == 0
15341 || GET_MODE (target) != tmode
15342 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15343 target = gen_reg_rtx (tmode);
15344 pat = GEN_FCN (icode) (target, op0, op1);
15345 if (! pat)
15346 return 0;
15347 emit_insn (pat);
15348 return target;
15350 case IX86_BUILTIN_PSLLDQI128:
15351 case IX86_BUILTIN_PSRLDQI128:
15352 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
15353 : CODE_FOR_sse2_lshrti3);
15354 arg0 = TREE_VALUE (arglist);
15355 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15356 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15357 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15358 tmode = insn_data[icode].operand[0].mode;
15359 mode1 = insn_data[icode].operand[1].mode;
15360 mode2 = insn_data[icode].operand[2].mode;
15362 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15364 op0 = copy_to_reg (op0);
15365 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
15367 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15369 error ("shift must be an immediate");
15370 return const0_rtx;
15372 target = gen_reg_rtx (V2DImode);
15373 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
15374 if (! pat)
15375 return 0;
15376 emit_insn (pat);
15377 return target;
15379 case IX86_BUILTIN_FEMMS:
15380 emit_insn (gen_mmx_femms ());
15381 return NULL_RTX;
15383 case IX86_BUILTIN_PAVGUSB:
15384 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15386 case IX86_BUILTIN_PF2ID:
15387 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15389 case IX86_BUILTIN_PFACC:
15390 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15392 case IX86_BUILTIN_PFADD:
15393 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15395 case IX86_BUILTIN_PFCMPEQ:
15396 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15398 case IX86_BUILTIN_PFCMPGE:
15399 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15401 case IX86_BUILTIN_PFCMPGT:
15402 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15404 case IX86_BUILTIN_PFMAX:
15405 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15407 case IX86_BUILTIN_PFMIN:
15408 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15410 case IX86_BUILTIN_PFMUL:
15411 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15413 case IX86_BUILTIN_PFRCP:
15414 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15416 case IX86_BUILTIN_PFRCPIT1:
15417 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15419 case IX86_BUILTIN_PFRCPIT2:
15420 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15422 case IX86_BUILTIN_PFRSQIT1:
15423 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15425 case IX86_BUILTIN_PFRSQRT:
15426 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15428 case IX86_BUILTIN_PFSUB:
15429 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15431 case IX86_BUILTIN_PFSUBR:
15432 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15434 case IX86_BUILTIN_PI2FD:
15435 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15437 case IX86_BUILTIN_PMULHRW:
15438 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15440 case IX86_BUILTIN_PF2IW:
15441 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15443 case IX86_BUILTIN_PFNACC:
15444 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15446 case IX86_BUILTIN_PFPNACC:
15447 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15449 case IX86_BUILTIN_PI2FW:
15450 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15452 case IX86_BUILTIN_PSWAPDSI:
15453 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15455 case IX86_BUILTIN_PSWAPDSF:
15456 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15458 case IX86_BUILTIN_SQRTSD:
15459 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15460 case IX86_BUILTIN_LOADUPD:
15461 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15462 case IX86_BUILTIN_STOREUPD:
15463 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15465 case IX86_BUILTIN_MFENCE:
15466 emit_insn (gen_sse2_mfence ());
15467 return 0;
15468 case IX86_BUILTIN_LFENCE:
15469 emit_insn (gen_sse2_lfence ());
15470 return 0;
15472 case IX86_BUILTIN_CLFLUSH:
15473 arg0 = TREE_VALUE (arglist);
15474 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15475 icode = CODE_FOR_sse2_clflush;
15476 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15477 op0 = copy_to_mode_reg (Pmode, op0);
15479 emit_insn (gen_sse2_clflush (op0));
15480 return 0;
15482 case IX86_BUILTIN_MOVNTPD:
15483 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15484 case IX86_BUILTIN_MOVNTDQ:
15485 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15486 case IX86_BUILTIN_MOVNTI:
15487 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15489 case IX86_BUILTIN_LOADDQU:
15490 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15491 case IX86_BUILTIN_STOREDQU:
15492 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15494 case IX86_BUILTIN_MONITOR:
15495 arg0 = TREE_VALUE (arglist);
15496 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15497 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15498 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15499 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15500 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15501 if (!REG_P (op0))
15502 op0 = copy_to_mode_reg (SImode, op0);
15503 if (!REG_P (op1))
15504 op1 = copy_to_mode_reg (SImode, op1);
15505 if (!REG_P (op2))
15506 op2 = copy_to_mode_reg (SImode, op2);
15507 emit_insn (gen_sse3_monitor (op0, op1, op2));
15508 return 0;
15510 case IX86_BUILTIN_MWAIT:
15511 arg0 = TREE_VALUE (arglist);
15512 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15513 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15514 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15515 if (!REG_P (op0))
15516 op0 = copy_to_mode_reg (SImode, op0);
15517 if (!REG_P (op1))
15518 op1 = copy_to_mode_reg (SImode, op1);
15519 emit_insn (gen_sse3_mwait (op0, op1));
15520 return 0;
15522 case IX86_BUILTIN_LDDQU:
15523 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15524 target, 1);
15526 case IX86_BUILTIN_VEC_INIT_V2SI:
15527 case IX86_BUILTIN_VEC_INIT_V4HI:
15528 case IX86_BUILTIN_VEC_INIT_V8QI:
15529 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15531 case IX86_BUILTIN_VEC_EXT_V2DF:
15532 case IX86_BUILTIN_VEC_EXT_V2DI:
15533 case IX86_BUILTIN_VEC_EXT_V4SF:
15534 case IX86_BUILTIN_VEC_EXT_V4SI:
15535 case IX86_BUILTIN_VEC_EXT_V8HI:
15536 case IX86_BUILTIN_VEC_EXT_V2SI:
15537 case IX86_BUILTIN_VEC_EXT_V4HI:
15538 return ix86_expand_vec_ext_builtin (arglist, target);
15540 case IX86_BUILTIN_VEC_SET_V8HI:
15541 case IX86_BUILTIN_VEC_SET_V4HI:
15542 return ix86_expand_vec_set_builtin (arglist);
15544 default:
15545 break;
15548 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15549 if (d->code == fcode)
15551 /* Compares are treated specially. */
15552 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15553 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15554 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15555 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15556 return ix86_expand_sse_compare (d, arglist, target);
15558 return ix86_expand_binop_builtin (d->icode, arglist, target);
15561 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15562 if (d->code == fcode)
15563 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15565 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15566 if (d->code == fcode)
15567 return ix86_expand_sse_comi (d, arglist, target);
15569 gcc_unreachable ();
15572 /* Store OPERAND to memory after reload is completed. This means
15573 that we can't easily use assign_stack_local. */
15575 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15577 rtx result;
15579 gcc_assert (reload_completed);
15580 if (TARGET_RED_ZONE)
15582 result = gen_rtx_MEM (mode,
15583 gen_rtx_PLUS (Pmode,
15584 stack_pointer_rtx,
15585 GEN_INT (-RED_ZONE_SIZE)));
15586 emit_move_insn (result, operand);
15588 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15590 switch (mode)
15592 case HImode:
15593 case SImode:
15594 operand = gen_lowpart (DImode, operand);
15595 /* FALLTHRU */
15596 case DImode:
15597 emit_insn (
15598 gen_rtx_SET (VOIDmode,
15599 gen_rtx_MEM (DImode,
15600 gen_rtx_PRE_DEC (DImode,
15601 stack_pointer_rtx)),
15602 operand));
15603 break;
15604 default:
15605 gcc_unreachable ();
15607 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15609 else
15611 switch (mode)
15613 case DImode:
15615 rtx operands[2];
15616 split_di (&operand, 1, operands, operands + 1);
15617 emit_insn (
15618 gen_rtx_SET (VOIDmode,
15619 gen_rtx_MEM (SImode,
15620 gen_rtx_PRE_DEC (Pmode,
15621 stack_pointer_rtx)),
15622 operands[1]));
15623 emit_insn (
15624 gen_rtx_SET (VOIDmode,
15625 gen_rtx_MEM (SImode,
15626 gen_rtx_PRE_DEC (Pmode,
15627 stack_pointer_rtx)),
15628 operands[0]));
15630 break;
15631 case HImode:
15632 /* It is better to store HImodes as SImodes. */
15633 if (!TARGET_PARTIAL_REG_STALL)
15634 operand = gen_lowpart (SImode, operand);
15635 /* FALLTHRU */
15636 case SImode:
15637 emit_insn (
15638 gen_rtx_SET (VOIDmode,
15639 gen_rtx_MEM (GET_MODE (operand),
15640 gen_rtx_PRE_DEC (SImode,
15641 stack_pointer_rtx)),
15642 operand));
15643 break;
15644 default:
15645 gcc_unreachable ();
15647 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15649 return result;
15652 /* Free operand from the memory. */
15653 void
15654 ix86_free_from_memory (enum machine_mode mode)
15656 if (!TARGET_RED_ZONE)
15658 int size;
15660 if (mode == DImode || TARGET_64BIT)
15661 size = 8;
15662 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
15663 size = 2;
15664 else
15665 size = 4;
15666 /* Use LEA to deallocate stack space. In peephole2 it will be converted
15667 to a pop or add instruction if registers are available. */
15668 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15669 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
15670 GEN_INT (size))));
15674 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
15675 QImode must go into class Q_REGS.
15676 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
15677 movdf to do mem-to-mem moves through integer regs. */
15678 enum reg_class
15679 ix86_preferred_reload_class (rtx x, enum reg_class class)
15681 /* We're only allowed to return a subclass of CLASS. Many of the
15682 following checks fail for NO_REGS, so eliminate that early. */
15683 if (class == NO_REGS)
15684 return NO_REGS;
15686 /* All classes can load zeros. */
15687 if (x == CONST0_RTX (GET_MODE (x)))
15688 return class;
15690 /* Floating-point constants need more complex checks. */
15691 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
15693 /* General regs can load everything. */
15694 if (reg_class_subset_p (class, GENERAL_REGS))
15695 return class;
15697 /* Floats can load 0 and 1 plus some others. Note that we eliminated
15698 zero above. We only want to wind up preferring 80387 registers if
15699 we plan on doing computation with them. */
15700 if (TARGET_80387
15701 && (TARGET_MIX_SSE_I387
15702 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
15703 && standard_80387_constant_p (x))
15705 /* Limit class to non-sse. */
15706 if (class == FLOAT_SSE_REGS)
15707 return FLOAT_REGS;
15708 if (class == FP_TOP_SSE_REGS)
15709 return FP_TOP_REG;
15710 if (class == FP_SECOND_SSE_REGS)
15711 return FP_SECOND_REG;
15712 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
15713 return class;
15716 return NO_REGS;
15718 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
15719 return NO_REGS;
15720 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
15721 return NO_REGS;
15723 /* Generally when we see PLUS here, it's the function invariant
15724 (plus soft-fp const_int), which can only be computed into general
15725 regs. */
15726 if (GET_CODE (x) == PLUS)
15727 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
15729 /* QImode constants are easy to load, but non-constant QImode data
15730 must go into Q_REGS. */
15731 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
15733 if (reg_class_subset_p (class, Q_REGS))
15734 return class;
15735 if (reg_class_subset_p (Q_REGS, class))
15736 return Q_REGS;
15737 return NO_REGS;
15740 return class;
15743 /* If we are copying between general and FP registers, we need a memory
15744 location. The same is true for SSE and MMX registers.
15746 The macro can't work reliably when one of the CLASSES is a class containing
15747 registers from multiple units (SSE, MMX, integer). We avoid this by never
15748 combining those units in a single alternative in the machine description.
15749 Ensure that this constraint holds to avoid unexpected surprises.
15751 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
15752 enforce these sanity checks. */
15755 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
15756 enum machine_mode mode, int strict)
15758 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
15759 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
15760 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
15761 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
15762 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
15763 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
15765 gcc_assert (!strict);
15766 return true;
15769 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
15770 return true;
15772 /* ??? This is a lie. We do have moves between mmx/general and between
15773 mmx/sse2. But by saying we need secondary memory we discourage the
15774 register allocator from using the mmx registers unless needed. */
15775 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
15776 return true;
15778 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15780 /* SSE1 doesn't have any direct moves from other classes. */
15781 if (!TARGET_SSE2)
15782 return true;
15784 /* If the target says that inter-unit moves are more expensive
15785 than moving through memory, then don't generate them. */
15786 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
15787 return true;
15789 /* Between SSE and general, we have moves no larger than word size. */
15790 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
15791 return true;
15793 /* ??? For the cost of one register reformat penalty, we could use
15794 the same instructions to move SFmode and DFmode data, but the
15795 relevant move patterns don't support those alternatives. */
15796 if (mode == SFmode || mode == DFmode)
15797 return true;
15800 return false;
15803 /* Return true if the registers in CLASS cannot represent the change from
15804 modes FROM to TO. */
15806 bool
15807 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
15808 enum reg_class class)
15810 if (from == to)
15811 return false;
15813 /* x87 registers can't do subreg at all, as all values are reformatted
15814 to extended precision. */
15815 if (MAYBE_FLOAT_CLASS_P (class))
15816 return true;
15818 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
15820 /* Vector registers do not support QI or HImode loads. If we don't
15821 disallow a change to these modes, reload will assume it's ok to
15822 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
15823 the vec_dupv4hi pattern. */
15824 if (GET_MODE_SIZE (from) < 4)
15825 return true;
15827 /* Vector registers do not support subreg with nonzero offsets, which
15828 are otherwise valid for integer registers. Since we can't see
15829 whether we have a nonzero offset from here, prohibit all
15830 nonparadoxical subregs changing size. */
15831 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
15832 return true;
15835 return false;
15838 /* Return the cost of moving data from a register in class CLASS1 to
15839 one in class CLASS2.
15841 It is not required that the cost always equal 2 when FROM is the same as TO;
15842 on some machines it is expensive to move between registers if they are not
15843 general registers. */
15846 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
15847 enum reg_class class2)
15849 /* In case we require secondary memory, compute cost of the store followed
15850 by load. In order to avoid bad register allocation choices, we need
15851 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
15853 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
15855 int cost = 1;
15857 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
15858 MEMORY_MOVE_COST (mode, class1, 1));
15859 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
15860 MEMORY_MOVE_COST (mode, class2, 1));
15862 /* In case of copying from a general purpose register we may emit multiple
15863 stores followed by a single load, causing a memory size mismatch stall.
15864 Count this as an arbitrarily high cost of 20. */
15865 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
15866 cost += 20;
15868 /* In the case of FP/MMX moves, the registers actually overlap, and we
15869 have to switch modes in order to treat them differently. */
15870 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
15871 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
15872 cost += 20;
15874 return cost;
15877 /* Moves between SSE/MMX and integer unit are expensive. */
15878 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
15879 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15880 return ix86_cost->mmxsse_to_integer;
15881 if (MAYBE_FLOAT_CLASS_P (class1))
15882 return ix86_cost->fp_move;
15883 if (MAYBE_SSE_CLASS_P (class1))
15884 return ix86_cost->sse_move;
15885 if (MAYBE_MMX_CLASS_P (class1))
15886 return ix86_cost->mmx_move;
15887 return 2;
15890 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
15892 bool
15893 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
15895 /* Only the flags registers can hold CCmode values, and they can hold nothing else. */
15896 if (CC_REGNO_P (regno))
15897 return GET_MODE_CLASS (mode) == MODE_CC;
15898 if (GET_MODE_CLASS (mode) == MODE_CC
15899 || GET_MODE_CLASS (mode) == MODE_RANDOM
15900 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
15901 return 0;
15902 if (FP_REGNO_P (regno))
15903 return VALID_FP_MODE_P (mode);
15904 if (SSE_REGNO_P (regno))
15906 /* We implement the move patterns for all vector modes into and
15907 out of SSE registers, even when no operation instructions
15908 are available. */
15909 return (VALID_SSE_REG_MODE (mode)
15910 || VALID_SSE2_REG_MODE (mode)
15911 || VALID_MMX_REG_MODE (mode)
15912 || VALID_MMX_REG_MODE_3DNOW (mode));
15914 if (MMX_REGNO_P (regno))
15916 /* We implement the move patterns for 3DNOW modes even in MMX mode,
15917 so if the register is available at all, then we can move data of
15918 the given mode into or out of it. */
15919 return (VALID_MMX_REG_MODE (mode)
15920 || VALID_MMX_REG_MODE_3DNOW (mode));
15923 if (mode == QImode)
15925 /* Take care with QImode values - they can be in non-QI regs,
15926 but then they cause partial register stalls. */
15927 if (regno < 4 || TARGET_64BIT)
15928 return 1;
15929 if (!TARGET_PARTIAL_REG_STALL)
15930 return 1;
15931 return reload_in_progress || reload_completed;
15933 /* We handle both integers and floats in the general purpose registers. */
15934 else if (VALID_INT_MODE_P (mode))
15935 return 1;
15936 else if (VALID_FP_MODE_P (mode))
15937 return 1;
15938 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
15939 on to use that value in smaller contexts, this can easily force a
15940 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
15941 supporting DImode, allow it. */
15942 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
15943 return 1;
15945 return 0;
15948 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
15949 tieable integer mode. */
15951 static bool
15952 ix86_tieable_integer_mode_p (enum machine_mode mode)
15954 switch (mode)
15956 case HImode:
15957 case SImode:
15958 return true;
15960 case QImode:
15961 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
15963 case DImode:
15964 return TARGET_64BIT;
15966 default:
15967 return false;
15971 /* Return true if MODE1 is accessible in a register that can hold MODE2
15972 without copying. That is, all register classes that can hold MODE2
15973 can also hold MODE1. */
15975 bool
15976 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
15978 if (mode1 == mode2)
15979 return true;
15981 if (ix86_tieable_integer_mode_p (mode1)
15982 && ix86_tieable_integer_mode_p (mode2))
15983 return true;
15985 /* MODE2 being XFmode implies fp stack or general regs, which means we
15986 can tie any smaller floating point modes to it. Note that we do not
15987 tie this with TFmode. */
15988 if (mode2 == XFmode)
15989 return mode1 == SFmode || mode1 == DFmode;
15991 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
15992 that we can tie it with SFmode. */
15993 if (mode2 == DFmode)
15994 return mode1 == SFmode;
15996 /* If MODE2 is only appropriate for an SSE register, then tie with
15997 any other mode acceptable to SSE registers. */
15998 if (GET_MODE_SIZE (mode2) >= 8
15999 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
16000 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
16002 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
16003 with any other mode acceptable to MMX registers. */
16004 if (GET_MODE_SIZE (mode2) == 8
16005 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
16006 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
16008 return false;
16011 /* Return the cost of moving data of mode M between a
16012 register and memory. A value of 2 is the default; this cost is
16013 relative to those in `REGISTER_MOVE_COST'.
16015 If moving between registers and memory is more expensive than
16016 between two registers, you should define this macro to express the
16017 relative cost.
16019 Also model the increased cost of moving QImode registers in non-Q_REGS
16020 classes.
16023 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
16025 if (FLOAT_CLASS_P (class))
16027 int index;
16028 switch (mode)
16030 case SFmode:
16031 index = 0;
16032 break;
16033 case DFmode:
16034 index = 1;
16035 break;
16036 case XFmode:
16037 index = 2;
16038 break;
16039 default:
16040 return 100;
16042 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
16044 if (SSE_CLASS_P (class))
16046 int index;
16047 switch (GET_MODE_SIZE (mode))
16049 case 4:
16050 index = 0;
16051 break;
16052 case 8:
16053 index = 1;
16054 break;
16055 case 16:
16056 index = 2;
16057 break;
16058 default:
16059 return 100;
16061 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
16063 if (MMX_CLASS_P (class))
16065 int index;
16066 switch (GET_MODE_SIZE (mode))
16068 case 4:
16069 index = 0;
16070 break;
16071 case 8:
16072 index = 1;
16073 break;
16074 default:
16075 return 100;
16077 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
16079 switch (GET_MODE_SIZE (mode))
16081 case 1:
16082 if (in)
16083 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
16084 : ix86_cost->movzbl_load);
16085 else
16086 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
16087 : ix86_cost->int_store[0] + 4);
16088 break;
16089 case 2:
16090 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
16091 default:
16092 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
16093 if (mode == TFmode)
16094 mode = XFmode;
16095 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
16096 * (((int) GET_MODE_SIZE (mode)
16097 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
16101 /* Compute a (partial) cost for rtx X. Return true if the complete
16102 cost has been computed, and false if subexpressions should be
16103 scanned. In either case, *TOTAL contains the cost result. */
16105 static bool
16106 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
16108 enum machine_mode mode = GET_MODE (x);
16110 switch (code)
16112 case CONST_INT:
16113 case CONST:
16114 case LABEL_REF:
16115 case SYMBOL_REF:
16116 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
16117 *total = 3;
16118 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
16119 *total = 2;
16120 else if (flag_pic && SYMBOLIC_CONST (x)
16121 && (!TARGET_64BIT
16122 || (GET_CODE (x) != LABEL_REF
16123 && (GET_CODE (x) != SYMBOL_REF
16124 || !SYMBOL_REF_LOCAL_P (x)))))
16125 *total = 1;
16126 else
16127 *total = 0;
16128 return true;
16130 case CONST_DOUBLE:
16131 if (mode == VOIDmode)
16132 *total = 0;
16133 else
16134 switch (standard_80387_constant_p (x))
16136 case 1: /* 0.0 */
16137 *total = 1;
16138 break;
16139 default: /* Other constants */
16140 *total = 2;
16141 break;
16142 case 0:
16143 case -1:
16144 /* Start with (MEM (SYMBOL_REF)), since that's where
16145 it'll probably end up. Add a penalty for size. */
16146 *total = (COSTS_N_INSNS (1)
16147 + (flag_pic != 0 && !TARGET_64BIT)
16148 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
16149 break;
16151 return true;
16153 case ZERO_EXTEND:
16154 /* The zero extension is often completely free on x86_64, so make
16155 it as cheap as possible. */
16156 if (TARGET_64BIT && mode == DImode
16157 && GET_MODE (XEXP (x, 0)) == SImode)
16158 *total = 1;
16159 else if (TARGET_ZERO_EXTEND_WITH_AND)
16160 *total = COSTS_N_INSNS (ix86_cost->add);
16161 else
16162 *total = COSTS_N_INSNS (ix86_cost->movzx);
16163 return false;
16165 case SIGN_EXTEND:
16166 *total = COSTS_N_INSNS (ix86_cost->movsx);
16167 return false;
16169 case ASHIFT:
16170 if (GET_CODE (XEXP (x, 1)) == CONST_INT
16171 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
16173 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16174 if (value == 1)
16176 *total = COSTS_N_INSNS (ix86_cost->add);
16177 return false;
16179 if ((value == 2 || value == 3)
16180 && ix86_cost->lea <= ix86_cost->shift_const)
16182 *total = COSTS_N_INSNS (ix86_cost->lea);
16183 return false;
16186 /* FALLTHRU */
16188 case ROTATE:
16189 case ASHIFTRT:
16190 case LSHIFTRT:
16191 case ROTATERT:
16192 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
16194 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16196 if (INTVAL (XEXP (x, 1)) > 32)
16197 *total = COSTS_N_INSNS (ix86_cost->shift_const + 2);
16198 else
16199 *total = COSTS_N_INSNS (ix86_cost->shift_const * 2);
16201 else
16203 if (GET_CODE (XEXP (x, 1)) == AND)
16204 *total = COSTS_N_INSNS (ix86_cost->shift_var * 2);
16205 else
16206 *total = COSTS_N_INSNS (ix86_cost->shift_var * 6 + 2);
16209 else
16211 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16212 *total = COSTS_N_INSNS (ix86_cost->shift_const);
16213 else
16214 *total = COSTS_N_INSNS (ix86_cost->shift_var);
16216 return false;
16218 case MULT:
16219 if (FLOAT_MODE_P (mode))
16221 *total = COSTS_N_INSNS (ix86_cost->fmul);
16222 return false;
16224 else
16226 rtx op0 = XEXP (x, 0);
16227 rtx op1 = XEXP (x, 1);
16228 int nbits;
16229 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16231 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16232 for (nbits = 0; value != 0; value &= value - 1)
16233 nbits++;
16235 else
16236 /* This is arbitrary. */
16237 nbits = 7;
16239 /* Compute costs correctly for widening multiplication. */
16240 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op1) == ZERO_EXTEND)
16241 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
16242 == GET_MODE_SIZE (mode))
16244 int is_mulwiden = 0;
16245 enum machine_mode inner_mode = GET_MODE (op0);
16247 if (GET_CODE (op0) == GET_CODE (op1))
16248 is_mulwiden = 1, op1 = XEXP (op1, 0);
16249 else if (GET_CODE (op1) == CONST_INT)
16251 if (GET_CODE (op0) == SIGN_EXTEND)
16252 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
16253 == INTVAL (op1);
16254 else
16255 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
16258 if (is_mulwiden)
16259 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
16262 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
16263 + nbits * ix86_cost->mult_bit)
16264 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
16266 return true;
16269 case DIV:
16270 case UDIV:
16271 case MOD:
16272 case UMOD:
16273 if (FLOAT_MODE_P (mode))
16274 *total = COSTS_N_INSNS (ix86_cost->fdiv);
16275 else
16276 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
16277 return false;
16279 case PLUS:
16280 if (FLOAT_MODE_P (mode))
16281 *total = COSTS_N_INSNS (ix86_cost->fadd);
16282 else if (GET_MODE_CLASS (mode) == MODE_INT
16283 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
16285 if (GET_CODE (XEXP (x, 0)) == PLUS
16286 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
16287 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
16288 && CONSTANT_P (XEXP (x, 1)))
16290 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
16291 if (val == 2 || val == 4 || val == 8)
16293 *total = COSTS_N_INSNS (ix86_cost->lea);
16294 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16295 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
16296 outer_code);
16297 *total += rtx_cost (XEXP (x, 1), outer_code);
16298 return true;
16301 else if (GET_CODE (XEXP (x, 0)) == MULT
16302 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
16304 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
16305 if (val == 2 || val == 4 || val == 8)
16307 *total = COSTS_N_INSNS (ix86_cost->lea);
16308 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16309 *total += rtx_cost (XEXP (x, 1), outer_code);
16310 return true;
16313 else if (GET_CODE (XEXP (x, 0)) == PLUS)
16315 *total = COSTS_N_INSNS (ix86_cost->lea);
16316 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16317 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16318 *total += rtx_cost (XEXP (x, 1), outer_code);
16319 return true;
16322 /* FALLTHRU */
16324 case MINUS:
16325 if (FLOAT_MODE_P (mode))
16327 *total = COSTS_N_INSNS (ix86_cost->fadd);
16328 return false;
16330 /* FALLTHRU */
16332 case AND:
16333 case IOR:
16334 case XOR:
16335 if (!TARGET_64BIT && mode == DImode)
16337 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
16338 + (rtx_cost (XEXP (x, 0), outer_code)
16339 << (GET_MODE (XEXP (x, 0)) != DImode))
16340 + (rtx_cost (XEXP (x, 1), outer_code)
16341 << (GET_MODE (XEXP (x, 1)) != DImode)));
16342 return true;
16344 /* FALLTHRU */
16346 case NEG:
16347 if (FLOAT_MODE_P (mode))
16349 *total = COSTS_N_INSNS (ix86_cost->fchs);
16350 return false;
16352 /* FALLTHRU */
16354 case NOT:
16355 if (!TARGET_64BIT && mode == DImode)
16356 *total = COSTS_N_INSNS (ix86_cost->add * 2);
16357 else
16358 *total = COSTS_N_INSNS (ix86_cost->add);
16359 return false;
16361 case COMPARE:
16362 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
16363 && XEXP (XEXP (x, 0), 1) == const1_rtx
16364 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
16365 && XEXP (x, 1) == const0_rtx)
16367 /* This kind of construct is implemented using test[bwl].
16368 Treat it as if we had an AND. */
16369 *total = (COSTS_N_INSNS (ix86_cost->add)
16370 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
16371 + rtx_cost (const1_rtx, outer_code));
16372 return true;
16374 return false;
16376 case FLOAT_EXTEND:
16377 if (!TARGET_SSE_MATH
16378 || mode == XFmode
16379 || (mode == DFmode && !TARGET_SSE2))
16380 *total = 0;
16381 return false;
16383 case ABS:
16384 if (FLOAT_MODE_P (mode))
16385 *total = COSTS_N_INSNS (ix86_cost->fabs);
16386 return false;
16388 case SQRT:
16389 if (FLOAT_MODE_P (mode))
16390 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
16391 return false;
16393 case UNSPEC:
16394 if (XINT (x, 1) == UNSPEC_TP)
16395 *total = 0;
16396 return false;
16398 default:
16399 return false;
16403 #if TARGET_MACHO
16405 static int current_machopic_label_num;
16407 /* Given a symbol name and its associated stub, write out the
16408 definition of the stub. */
16410 void
16411 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16413 unsigned int length;
16414 char *binder_name, *symbol_name, lazy_ptr_name[32];
16415 int label = ++current_machopic_label_num;
16417 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16418 symb = (*targetm.strip_name_encoding) (symb);
16420 length = strlen (stub);
16421 binder_name = alloca (length + 32);
16422 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16424 length = strlen (symb);
16425 symbol_name = alloca (length + 32);
16426 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16428 sprintf (lazy_ptr_name, "L%d$lz", label);
16430 if (MACHOPIC_PURE)
16431 machopic_picsymbol_stub_section ();
16432 else
16433 machopic_symbol_stub_section ();
16435 fprintf (file, "%s:\n", stub);
16436 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16438 if (MACHOPIC_PURE)
16440 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16441 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16442 fprintf (file, "\tjmp %%edx\n");
16444 else
16445 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16447 fprintf (file, "%s:\n", binder_name);
16449 if (MACHOPIC_PURE)
16451 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16452 fprintf (file, "\tpushl %%eax\n");
16454 else
16455 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
16457 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16459 machopic_lazy_symbol_ptr_section ();
16460 fprintf (file, "%s:\n", lazy_ptr_name);
16461 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16462 fprintf (file, "\t.long %s\n", binder_name);
16464 #endif /* TARGET_MACHO */
16466 /* Order the registers for the register allocator. */
16468 void
16469 x86_order_regs_for_local_alloc (void)
16471 int pos = 0;
16472 int i;
16474 /* First allocate the local general purpose registers. */
16475 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16476 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16477 reg_alloc_order [pos++] = i;
16479 /* Global general purpose registers. */
16480 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16481 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16482 reg_alloc_order [pos++] = i;
16484 /* x87 registers come first in case we are doing FP math
16485 using them. */
16486 if (!TARGET_SSE_MATH)
16487 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16488 reg_alloc_order [pos++] = i;
16490 /* SSE registers. */
16491 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16492 reg_alloc_order [pos++] = i;
16493 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16494 reg_alloc_order [pos++] = i;
16496 /* x87 registers. */
16497 if (TARGET_SSE_MATH)
16498 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16499 reg_alloc_order [pos++] = i;
16501 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16502 reg_alloc_order [pos++] = i;
16504 /* Initialize the rest of the array, as we do not allocate some registers
16505 at all. */
16506 while (pos < FIRST_PSEUDO_REGISTER)
16507 reg_alloc_order [pos++] = 0;
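/* The resulting order is: call-clobbered general registers, call-saved
   general registers, then the x87 stack registers (before the SSE
   registers when the x87 unit does the FP math, after them otherwise),
   the SSE and REX SSE registers, and finally the MMX registers; unused
   slots are filled with 0.  */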
16510 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16511 struct attribute_spec.handler. */
16512 static tree
16513 ix86_handle_struct_attribute (tree *node, tree name,
16514 tree args ATTRIBUTE_UNUSED,
16515 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16517 tree *type = NULL;
16518 if (DECL_P (*node))
16520 if (TREE_CODE (*node) == TYPE_DECL)
16521 type = &TREE_TYPE (*node);
16523 else
16524 type = node;
16526 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16527 || TREE_CODE (*type) == UNION_TYPE)))
16529 warning (OPT_Wattributes, "%qs attribute ignored",
16530 IDENTIFIER_POINTER (name));
16531 *no_add_attrs = true;
16534 else if ((is_attribute_p ("ms_struct", name)
16535 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16536 || ((is_attribute_p ("gcc_struct", name)
16537 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16539 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16540 IDENTIFIER_POINTER (name));
16541 *no_add_attrs = true;
16544 return NULL_TREE;
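/* Return true if bitfields in RECORD_TYPE should be laid out following
   the MS layout rules: either TARGET_MS_BITFIELD_LAYOUT is in effect and
   the type is not marked gcc_struct, or the type is explicitly marked
   ms_struct.  */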
16547 static bool
16548 ix86_ms_bitfield_layout_p (tree record_type)
16550 return (TARGET_MS_BITFIELD_LAYOUT &&
16551 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
16552 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
16555 /* Returns an expression indicating where the this parameter is
16556 located on entry to the FUNCTION. */
16558 static rtx
16559 x86_this_parameter (tree function)
16561 tree type = TREE_TYPE (function);
16563 if (TARGET_64BIT)
16565 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16566 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16569 if (ix86_function_regparm (type, function) > 0)
16571 tree parm;
16573 parm = TYPE_ARG_TYPES (type);
16574 /* Figure out whether or not the function has a variable number of
16575 arguments. */
16576 for (; parm; parm = TREE_CHAIN (parm))
16577 if (TREE_VALUE (parm) == void_type_node)
16578 break;
16579 /* If not, the this parameter is in the first argument. */
16580 if (parm)
16582 int regno = 0;
16583 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16584 regno = 2;
16585 return gen_rtx_REG (SImode, regno);
16589 if (aggregate_value_p (TREE_TYPE (type), type))
16590 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16591 else
16592 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
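/* Illustration of the 32-bit stack case above: on entry the this pointer
   is at 4(%esp), or at 8(%esp) when the function returns an aggregate in
   memory, since the hidden return-slot pointer then occupies the first
   stack slot.  */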
16595 /* Determine whether x86_output_mi_thunk can succeed. */
16597 static bool
16598 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16599 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16600 HOST_WIDE_INT vcall_offset, tree function)
16602 /* 64-bit can handle anything. */
16603 if (TARGET_64BIT)
16604 return true;
16606 /* For 32-bit, everything's fine if we have one free register. */
16607 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16608 return true;
16610 /* Need a free register for vcall_offset. */
16611 if (vcall_offset)
16612 return false;
16614 /* Need a free register for GOT references. */
16615 if (flag_pic && !(*targetm.binds_local_p) (function))
16616 return false;
16618 /* Otherwise ok. */
16619 return true;
16622 /* Output the assembler code for a thunk function. THUNK_DECL is the
16623 declaration for the thunk function itself, FUNCTION is the decl for
16624 the target function. DELTA is an immediate constant offset to be
16625 added to THIS. If VCALL_OFFSET is nonzero, the word at
16626 *(*this + vcall_offset) should be added to THIS. */
16628 static void
16629 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16630 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16631 HOST_WIDE_INT vcall_offset, tree function)
16633 rtx xops[3];
16634 rtx this = x86_this_parameter (function);
16635 rtx this_reg, tmp;
16637 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16638 pull it in now and let DELTA benefit. */
16639 if (REG_P (this))
16640 this_reg = this;
16641 else if (vcall_offset)
16643 /* Put the this parameter into %eax. */
16644 xops[0] = this;
16645 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16646 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16648 else
16649 this_reg = NULL_RTX;
16651 /* Adjust the this parameter by a fixed constant. */
16652 if (delta)
16654 xops[0] = GEN_INT (delta);
16655 xops[1] = this_reg ? this_reg : this;
16656 if (TARGET_64BIT)
16658 if (!x86_64_general_operand (xops[0], DImode))
16660 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16661 xops[1] = tmp;
16662 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16663 xops[0] = tmp;
16664 xops[1] = this;
16666 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16668 else
16669 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16672 /* Adjust the this parameter by a value stored in the vtable. */
16673 if (vcall_offset)
16675 if (TARGET_64BIT)
16676 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16677 else
16679 int tmp_regno = 2 /* ECX */;
16680 if (lookup_attribute ("fastcall",
16681 TYPE_ATTRIBUTES (TREE_TYPE (function))))
16682 tmp_regno = 0 /* EAX */;
16683 tmp = gen_rtx_REG (SImode, tmp_regno);
16686 xops[0] = gen_rtx_MEM (Pmode, this_reg);
16687 xops[1] = tmp;
16688 if (TARGET_64BIT)
16689 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16690 else
16691 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16693 /* Adjust the this parameter. */
16694 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
16695 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
16697 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
16698 xops[0] = GEN_INT (vcall_offset);
16699 xops[1] = tmp2;
16700 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16701 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
16703 xops[1] = this_reg;
16704 if (TARGET_64BIT)
16705 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16706 else
16707 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16710 /* If necessary, drop THIS back to its stack slot. */
16711 if (this_reg && this_reg != this)
16713 xops[0] = this_reg;
16714 xops[1] = this;
16715 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16718 xops[0] = XEXP (DECL_RTL (function), 0);
16719 if (TARGET_64BIT)
16721 if (!flag_pic || (*targetm.binds_local_p) (function))
16722 output_asm_insn ("jmp\t%P0", xops);
16723 else
16725 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
16726 tmp = gen_rtx_CONST (Pmode, tmp);
16727 tmp = gen_rtx_MEM (QImode, tmp);
16728 xops[0] = tmp;
16729 output_asm_insn ("jmp\t%A0", xops);
16732 else
16734 if (!flag_pic || (*targetm.binds_local_p) (function))
16735 output_asm_insn ("jmp\t%P0", xops);
16736 else
16737 #if TARGET_MACHO
16738 if (TARGET_MACHO)
16740 rtx sym_ref = XEXP (DECL_RTL (function), 0);
16741 tmp = (gen_rtx_SYMBOL_REF
16742 (Pmode,
16743 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
16744 tmp = gen_rtx_MEM (QImode, tmp);
16745 xops[0] = tmp;
16746 output_asm_insn ("jmp\t%0", xops);
16748 else
16749 #endif /* TARGET_MACHO */
16751 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
16752 output_set_got (tmp);
16754 xops[1] = tmp;
16755 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
16756 output_asm_insn ("jmp\t{*}%1", xops);
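/* As a rough illustration, in the simplest 32-bit case (this on the
   stack, no vcall offset, non-PIC target call) the thunk emitted above
   reduces to something like

	addl	$DELTA, 4(%esp)
	jmp	FUNCTION

   the exact registers and addressing depend on the calling-convention
   attributes of FUNCTION.  */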
16761 static void
16762 x86_file_start (void)
16764 default_file_start ();
16765 if (X86_FILE_START_VERSION_DIRECTIVE)
16766 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
16767 if (X86_FILE_START_FLTUSED)
16768 fputs ("\t.global\t__fltused\n", asm_out_file);
16769 if (ix86_asm_dialect == ASM_INTEL)
16770 fputs ("\t.intel_syntax\n", asm_out_file);
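/* Return the alignment to use for FIELD, whose natural alignment is
   COMPUTED.  On 32-bit targets without TARGET_ALIGN_DOUBLE, double,
   complex double and integer fields are capped at 32-bit alignment.  */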
16773 int
16774 x86_field_alignment (tree field, int computed)
16776 enum machine_mode mode;
16777 tree type = TREE_TYPE (field);
16779 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
16780 return computed;
16781 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
16782 ? get_inner_array_type (type) : type);
16783 if (mode == DFmode || mode == DCmode
16784 || GET_MODE_CLASS (mode) == MODE_INT
16785 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
16786 return MIN (32, computed);
16787 return computed;
16790 /* Output assembler code to FILE to increment profiler label # LABELNO
16791 for profiling a function entry. */
16792 void
16793 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
16795 if (TARGET_64BIT)
16796 if (flag_pic)
16798 #ifndef NO_PROFILE_COUNTERS
16799 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
16800 #endif
16801 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
16803 else
16805 #ifndef NO_PROFILE_COUNTERS
16806 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
16807 #endif
16808 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16810 else if (flag_pic)
16812 #ifndef NO_PROFILE_COUNTERS
16813 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
16814 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
16815 #endif
16816 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
16818 else
16820 #ifndef NO_PROFILE_COUNTERS
16821 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
16822 PROFILE_COUNT_REGISTER);
16823 #endif
16824 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
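/* For illustration, the 32-bit non-PIC sequence emitted above is roughly

	movl	$<counter label>, %<PROFILE_COUNT_REGISTER>
	call	<MCOUNT_NAME>

   with the counter load omitted when NO_PROFILE_COUNTERS is defined; the
   PIC and 64-bit variants differ only in how the counter address and the
   mcount entry point are materialized.  */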
16828 /* We don't have exact information about the insn sizes, but we may assume
16829 quite safely that we are informed about all 1-byte insns and memory
16830 address sizes. This is enough to eliminate unnecessary padding in
16831 99% of cases. */
16833 static int
16834 min_insn_size (rtx insn)
16836 int l = 0;
16838 if (!INSN_P (insn) || !active_insn_p (insn))
16839 return 0;
16841 /* Discard alignments we've emitted and jump instructions. */
16842 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
16843 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
16844 return 0;
16845 if (GET_CODE (insn) == JUMP_INSN
16846 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
16847 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
16848 return 0;
16850 /* Important case - calls are always 5 bytes.
16851 It is common to have many calls in a row. */
16852 if (GET_CODE (insn) == CALL_INSN
16853 && symbolic_reference_mentioned_p (PATTERN (insn))
16854 && !SIBLING_CALL_P (insn))
16855 return 5;
16856 if (get_attr_length (insn) <= 1)
16857 return 1;
16859 /* For normal instructions we may rely on the sizes of addresses
16860 and the presence of a symbol to require 4 bytes of encoding.
16861 This is not the case for jumps, where references are PC relative. */
16862 if (GET_CODE (insn) != JUMP_INSN)
16864 l = get_attr_length_address (insn);
16865 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
16866 l = 4;
16868 if (l)
16869 return 1+l;
16870 else
16871 return 2;
16874 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
16875 16-byte window. */
16877 static void
16878 ix86_avoid_jump_misspredicts (void)
16880 rtx insn, start = get_insns ();
16881 int nbytes = 0, njumps = 0;
16882 int isjump = 0;
16884 /* Look for all minimal intervals of instructions containing 4 jumps.
16885 The intervals are bounded by START and INSN.  NBYTES is the total
16886 size of the instructions in the interval, including INSN and not
16887 including START.  When NBYTES is smaller than 16 bytes, it is possible
16888 that the end of START and INSN ends up in the same 16-byte page.
16890 The smallest offset in the page at which INSN can start is the case where
16891 START ends at offset 0.  The offset of INSN is then NBYTES - sizeof (INSN).
16892 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
16893 */
16894 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
16897 nbytes += min_insn_size (insn);
16898 if (dump_file)
16899 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
16900 INSN_UID (insn), min_insn_size (insn));
16901 if ((GET_CODE (insn) == JUMP_INSN
16902 && GET_CODE (PATTERN (insn)) != ADDR_VEC
16903 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
16904 || GET_CODE (insn) == CALL_INSN)
16905 njumps++;
16906 else
16907 continue;
16909 while (njumps > 3)
16911 start = NEXT_INSN (start);
16912 if ((GET_CODE (start) == JUMP_INSN
16913 && GET_CODE (PATTERN (start)) != ADDR_VEC
16914 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
16915 || GET_CODE (start) == CALL_INSN)
16916 njumps--, isjump = 1;
16917 else
16918 isjump = 0;
16919 nbytes -= min_insn_size (start);
16921 gcc_assert (njumps >= 0);
16922 if (dump_file)
16923 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
16924 INSN_UID (start), INSN_UID (insn), nbytes);
16926 if (njumps == 3 && isjump && nbytes < 16)
16928 int padsize = 15 - nbytes + min_insn_size (insn);
16930 if (dump_file)
16931 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
16932 INSN_UID (insn), padsize);
16933 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
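/* Worked example: if the instructions in the interval, including the
   current jump (estimated at 2 bytes), total nbytes == 12, then
   padsize == 15 - 12 + 2 == 5, and the alignment emitted above keeps the
   four jumps from sharing a single 16-byte window.  */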
16938 /* The AMD Athlon works faster
16939 when RET is not the destination of a conditional jump or directly preceded
16940 by another jump instruction. We avoid the penalty by inserting a NOP just
16941 before the RET instruction in such cases. */
16942 static void
16943 ix86_pad_returns (void)
16945 edge e;
16946 edge_iterator ei;
16948 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
16950 basic_block bb = e->src;
16951 rtx ret = BB_END (bb);
16952 rtx prev;
16953 bool replace = false;
16955 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
16956 || !maybe_hot_bb_p (bb))
16957 continue;
16958 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
16959 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
16960 break;
16961 if (prev && GET_CODE (prev) == CODE_LABEL)
16963 edge e;
16964 edge_iterator ei;
16966 FOR_EACH_EDGE (e, ei, bb->preds)
16967 if (EDGE_FREQUENCY (e) && e->src->index >= 0
16968 && !(e->flags & EDGE_FALLTHRU))
16969 replace = true;
16971 if (!replace)
16973 prev = prev_active_insn (ret);
16974 if (prev
16975 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
16976 || GET_CODE (prev) == CALL_INSN))
16977 replace = true;
16978 /* Empty functions get a branch misprediction even when the jump destination
16979 is not visible to us. */
16980 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
16981 replace = true;
16983 if (replace)
16985 emit_insn_before (gen_return_internal_long (), ret);
16986 delete_insn (ret);
16991 /* Implement machine specific optimizations. We implement padding of returns
16992 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
16993 static void
16994 ix86_reorg (void)
16996 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
16997 ix86_pad_returns ();
16998 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
16999 ix86_avoid_jump_misspredicts ();
17002 /* Return nonzero when a QImode register that must be represented via the REX
17003 prefix is used. */
17004 bool
17005 x86_extended_QIreg_mentioned_p (rtx insn)
17007 int i;
17008 extract_insn_cached (insn);
17009 for (i = 0; i < recog_data.n_operands; i++)
17010 if (REG_P (recog_data.operand[i])
17011 && REGNO (recog_data.operand[i]) >= 4)
17012 return true;
17013 return false;
17016 /* Return nonzero when P points to a register encoded via the REX prefix.
17017 Called via for_each_rtx. */
17018 static int
17019 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
17021 unsigned int regno;
17022 if (!REG_P (*p))
17023 return 0;
17024 regno = REGNO (*p);
17025 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
17028 /* Return true when INSN mentions a register that must be encoded using the
17029 REX prefix. */
17030 bool
17031 x86_extended_reg_mentioned_p (rtx insn)
17033 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
17036 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
17037 optabs would emit if we didn't have TFmode patterns. */
17039 void
17040 x86_emit_floatuns (rtx operands[2])
17042 rtx neglab, donelab, i0, i1, f0, in, out;
17043 enum machine_mode mode, inmode;
17045 inmode = GET_MODE (operands[1]);
17046 gcc_assert (inmode == SImode || inmode == DImode);
17048 out = operands[0];
17049 in = force_reg (inmode, operands[1]);
17050 mode = GET_MODE (out);
17051 neglab = gen_label_rtx ();
17052 donelab = gen_label_rtx ();
17053 i1 = gen_reg_rtx (Pmode);
17054 f0 = gen_reg_rtx (mode);
17056 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
17058 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
17059 emit_jump_insn (gen_jump (donelab));
17060 emit_barrier ();
17062 emit_label (neglab);
17064 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17065 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17066 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
17067 expand_float (f0, i0, 0);
17068 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
17070 emit_label (donelab);
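/* The out-of-range (sign-bit-set) path above computes the result as
   2.0 * (FP) ((IN >> 1) | (IN & 1)); halving brings the operand into
   signed range, and OR-ing the shifted-out low bit back in preserves
   correct rounding when the value is doubled.  */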
17073 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17074 with all elements equal to VAR. Return true if successful. */
17076 static bool
17077 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
17078 rtx target, rtx val)
17080 enum machine_mode smode, wsmode, wvmode;
17081 rtx x;
17083 switch (mode)
17085 case V2SImode:
17086 case V2SFmode:
17087 if (!mmx_ok && !TARGET_SSE)
17088 return false;
17089 /* FALLTHRU */
17091 case V2DFmode:
17092 case V2DImode:
17093 case V4SFmode:
17094 case V4SImode:
17095 val = force_reg (GET_MODE_INNER (mode), val);
17096 x = gen_rtx_VEC_DUPLICATE (mode, val);
17097 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17098 return true;
17100 case V4HImode:
17101 if (!mmx_ok)
17102 return false;
17103 if (TARGET_SSE || TARGET_3DNOW_A)
17105 val = gen_lowpart (SImode, val);
17106 x = gen_rtx_TRUNCATE (HImode, val);
17107 x = gen_rtx_VEC_DUPLICATE (mode, x);
17108 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17109 return true;
17111 else
17113 smode = HImode;
17114 wsmode = SImode;
17115 wvmode = V2SImode;
17116 goto widen;
17119 case V8QImode:
17120 if (!mmx_ok)
17121 return false;
17122 smode = QImode;
17123 wsmode = HImode;
17124 wvmode = V4HImode;
17125 goto widen;
17126 case V8HImode:
17127 smode = HImode;
17128 wsmode = SImode;
17129 wvmode = V4SImode;
17130 goto widen;
17131 case V16QImode:
17132 smode = QImode;
17133 wsmode = HImode;
17134 wvmode = V8HImode;
17135 goto widen;
17136 widen:
17137 /* Replicate the value once into the next wider mode and recurse. */
17138 val = convert_modes (wsmode, smode, val, true);
17139 x = expand_simple_binop (wsmode, ASHIFT, val,
17140 GEN_INT (GET_MODE_BITSIZE (smode)),
17141 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17142 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
17144 x = gen_reg_rtx (wvmode);
17145 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
17146 gcc_unreachable ();
17147 emit_move_insn (target, gen_lowpart (mode, x));
17148 return true;
17150 default:
17151 return false;
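/* Example of the widening path above: to splat a QImode value V across
   V16QImode we form the HImode value (V << 8) | V, recurse to splat it
   across V8HImode, then across V4SImode, where the VEC_DUPLICATE case
   finally applies.  */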
17155 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17156 whose low element is VAR, and other elements are zero. Return true
17157 if successful. */
17159 static bool
17160 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
17161 rtx target, rtx var)
17163 enum machine_mode vsimode;
17164 rtx x;
17166 switch (mode)
17168 case V2SFmode:
17169 case V2SImode:
17170 if (!mmx_ok && !TARGET_SSE)
17171 return false;
17172 /* FALLTHRU */
17174 case V2DFmode:
17175 case V2DImode:
17176 var = force_reg (GET_MODE_INNER (mode), var);
17177 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
17178 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17179 return true;
17181 case V4SFmode:
17182 case V4SImode:
17183 var = force_reg (GET_MODE_INNER (mode), var);
17184 x = gen_rtx_VEC_DUPLICATE (mode, var);
17185 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
17186 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17187 return true;
17189 case V8HImode:
17190 case V16QImode:
17191 vsimode = V4SImode;
17192 goto widen;
17193 case V4HImode:
17194 case V8QImode:
17195 if (!mmx_ok)
17196 return false;
17197 vsimode = V2SImode;
17198 goto widen;
17199 widen:
17200 /* Zero extend the variable element to SImode and recurse. */
17201 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
17203 x = gen_reg_rtx (vsimode);
17204 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
17205 gcc_unreachable ();
17207 emit_move_insn (target, gen_lowpart (mode, x));
17208 return true;
17210 default:
17211 return false;
17215 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17216 consisting of the values in VALS. It is known that all elements
17217 except ONE_VAR are constants. Return true if successful. */
17219 static bool
17220 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
17221 rtx target, rtx vals, int one_var)
17223 rtx var = XVECEXP (vals, 0, one_var);
17224 enum machine_mode wmode;
17225 rtx const_vec, x;
17227 const_vec = copy_rtx (vals);
17228 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
17229 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
17231 switch (mode)
17233 case V2DFmode:
17234 case V2DImode:
17235 case V2SFmode:
17236 case V2SImode:
17237 /* For the two element vectors, it's just as easy to use
17238 the general case. */
17239 return false;
17241 case V4SFmode:
17242 case V4SImode:
17243 case V8HImode:
17244 case V4HImode:
17245 break;
17247 case V16QImode:
17248 wmode = V8HImode;
17249 goto widen;
17250 case V8QImode:
17251 wmode = V4HImode;
17252 goto widen;
17253 widen:
17254 /* There's no way to set one QImode entry easily. Combine
17255 the variable value with its adjacent constant value, and
17256 promote to an HImode set. */
17257 x = XVECEXP (vals, 0, one_var ^ 1);
17258 if (one_var & 1)
17260 var = convert_modes (HImode, QImode, var, true);
17261 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
17262 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17263 x = GEN_INT (INTVAL (x) & 0xff);
17265 else
17267 var = convert_modes (HImode, QImode, var, true);
17268 x = gen_int_mode (INTVAL (x) << 8, HImode);
17270 if (x != const0_rtx)
17271 var = expand_simple_binop (HImode, IOR, var, x, var,
17272 1, OPTAB_LIB_WIDEN);
17274 x = gen_reg_rtx (wmode);
17275 emit_move_insn (x, gen_lowpart (wmode, const_vec));
17276 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
17278 emit_move_insn (target, gen_lowpart (mode, x));
17279 return true;
17281 default:
17282 return false;
17285 emit_move_insn (target, const_vec);
17286 ix86_expand_vector_set (mmx_ok, target, var, one_var);
17287 return true;
17290 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
17291 all values variable, and none identical. */
17293 static void
17294 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
17295 rtx target, rtx vals)
17297 enum machine_mode half_mode = GET_MODE_INNER (mode);
17298 rtx op0 = NULL, op1 = NULL;
17299 bool use_vec_concat = false;
17301 switch (mode)
17303 case V2SFmode:
17304 case V2SImode:
17305 if (!mmx_ok && !TARGET_SSE)
17306 break;
17307 /* FALLTHRU */
17309 case V2DFmode:
17310 case V2DImode:
17311 /* For the two element vectors, we always implement VEC_CONCAT. */
17312 op0 = XVECEXP (vals, 0, 0);
17313 op1 = XVECEXP (vals, 0, 1);
17314 use_vec_concat = true;
17315 break;
17317 case V4SFmode:
17318 half_mode = V2SFmode;
17319 goto half;
17320 case V4SImode:
17321 half_mode = V2SImode;
17322 goto half;
17323 half:
17325 rtvec v;
17327 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
17328 Recurse to load the two halves. */
17330 op0 = gen_reg_rtx (half_mode);
17331 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
17332 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
17334 op1 = gen_reg_rtx (half_mode);
17335 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
17336 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
17338 use_vec_concat = true;
17340 break;
17342 case V8HImode:
17343 case V16QImode:
17344 case V4HImode:
17345 case V8QImode:
17346 break;
17348 default:
17349 gcc_unreachable ();
17352 if (use_vec_concat)
17354 if (!register_operand (op0, half_mode))
17355 op0 = force_reg (half_mode, op0);
17356 if (!register_operand (op1, half_mode))
17357 op1 = force_reg (half_mode, op1);
17359 emit_insn (gen_rtx_SET (VOIDmode, target,
17360 gen_rtx_VEC_CONCAT (mode, op0, op1)));
17362 else
17364 int i, j, n_elts, n_words, n_elt_per_word;
17365 enum machine_mode inner_mode;
17366 rtx words[4], shift;
17368 inner_mode = GET_MODE_INNER (mode);
17369 n_elts = GET_MODE_NUNITS (mode);
17370 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
17371 n_elt_per_word = n_elts / n_words;
17372 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
17374 for (i = 0; i < n_words; ++i)
17376 rtx word = NULL_RTX;
17378 for (j = 0; j < n_elt_per_word; ++j)
17380 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
17381 elt = convert_modes (word_mode, inner_mode, elt, true);
17383 if (j == 0)
17384 word = elt;
17385 else
17387 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
17388 word, 1, OPTAB_LIB_WIDEN);
17389 word = expand_simple_binop (word_mode, IOR, word, elt,
17390 word, 1, OPTAB_LIB_WIDEN);
17394 words[i] = word;
17397 if (n_words == 1)
17398 emit_move_insn (target, gen_lowpart (mode, words[0]));
17399 else if (n_words == 2)
17401 rtx tmp = gen_reg_rtx (mode);
17402 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
17403 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
17404 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
17405 emit_move_insn (target, tmp);
17407 else if (n_words == 4)
17409 rtx tmp = gen_reg_rtx (V4SImode);
17410 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
17411 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
17412 emit_move_insn (target, gen_lowpart (mode, tmp));
17414 else
17415 gcc_unreachable ();
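/* In the word-building path above, each word_mode chunk is assembled
   from its elements high to low; e.g. for V8HImode with a 32-bit
   word_mode, word I becomes (elt[2*I+1] << 16) | elt[2*I], and the words
   are then moved into the vector register directly, via its low/high
   parts, or through a recursive V4SImode build.  */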
17419 /* Initialize vector TARGET via VALS. Suppress the use of MMX
17420 instructions unless MMX_OK is true. */
17422 void
17423 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
17425 enum machine_mode mode = GET_MODE (target);
17426 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17427 int n_elts = GET_MODE_NUNITS (mode);
17428 int n_var = 0, one_var = -1;
17429 bool all_same = true, all_const_zero = true;
17430 int i;
17431 rtx x;
17433 for (i = 0; i < n_elts; ++i)
17435 x = XVECEXP (vals, 0, i);
17436 if (!CONSTANT_P (x))
17437 n_var++, one_var = i;
17438 else if (x != CONST0_RTX (inner_mode))
17439 all_const_zero = false;
17440 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17441 all_same = false;
17444 /* Constants are best loaded from the constant pool. */
17445 if (n_var == 0)
17447 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
17448 return;
17451 /* If all values are identical, broadcast the value. */
17452 if (all_same
17453 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
17454 XVECEXP (vals, 0, 0)))
17455 return;
17457 /* Values where only one field is non-constant are best loaded from
17458 the pool and overwritten via move later. */
17459 if (n_var == 1)
17461 if (all_const_zero && one_var == 0
17462 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
17463 XVECEXP (vals, 0, 0)))
17464 return;
17466 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
17467 return;
17470 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
17473 void
17474 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
17476 enum machine_mode mode = GET_MODE (target);
17477 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17478 bool use_vec_merge = false;
17479 rtx tmp;
17481 switch (mode)
17483 case V2SFmode:
17484 case V2SImode:
17485 if (mmx_ok)
17487 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
17488 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
17489 if (elt == 0)
17490 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
17491 else
17492 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
17493 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17494 return;
17496 break;
17498 case V2DFmode:
17499 case V2DImode:
17501 rtx op0, op1;
17503 /* For the two element vectors, we implement a VEC_CONCAT with
17504 the extraction of the other element. */
17506 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
17507 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
17509 if (elt == 0)
17510 op0 = val, op1 = tmp;
17511 else
17512 op0 = tmp, op1 = val;
17514 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
17515 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17517 return;
17519 case V4SFmode:
17520 switch (elt)
17522 case 0:
17523 use_vec_merge = true;
17524 break;
17526 case 1:
17527 /* tmp = target = A B C D */
17528 tmp = copy_to_reg (target);
17529 /* target = A A B B */
17530 emit_insn (gen_sse_unpcklps (target, target, target));
17531 /* target = X A B B */
17532 ix86_expand_vector_set (false, target, val, 0);
17533 /* target = A X C D */
17534 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17535 GEN_INT (1), GEN_INT (0),
17536 GEN_INT (2+4), GEN_INT (3+4)));
17537 return;
17539 case 2:
17540 /* tmp = target = A B C D */
17541 tmp = copy_to_reg (target);
17542 /* tmp = X B C D */
17543 ix86_expand_vector_set (false, tmp, val, 0);
17544 /* target = A B X D */
17545 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17546 GEN_INT (0), GEN_INT (1),
17547 GEN_INT (0+4), GEN_INT (3+4)));
17548 return;
17550 case 3:
17551 /* tmp = target = A B C D */
17552 tmp = copy_to_reg (target);
17553 /* tmp = X B C D */
17554 ix86_expand_vector_set (false, tmp, val, 0);
17555 /* target = A B C X */
17556 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17557 GEN_INT (0), GEN_INT (1),
17558 GEN_INT (2+4), GEN_INT (0+4)));
17559 return;
17561 default:
17562 gcc_unreachable ();
17564 break;
17566 case V4SImode:
17567 /* Element 0 handled by vec_merge below. */
17568 if (elt == 0)
17570 use_vec_merge = true;
17571 break;
17574 if (TARGET_SSE2)
17576 /* With SSE2, use integer shuffles to swap element 0 and ELT,
17577 store into element 0, then shuffle them back. */
17579 rtx order[4];
17581 order[0] = GEN_INT (elt);
17582 order[1] = const1_rtx;
17583 order[2] = const2_rtx;
17584 order[3] = GEN_INT (3);
17585 order[elt] = const0_rtx;
17587 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17588 order[1], order[2], order[3]));
17590 ix86_expand_vector_set (false, target, val, 0);
17592 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17593 order[1], order[2], order[3]));
17595 else
17597 /* For SSE1, we have to reuse the V4SF code. */
17598 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
17599 gen_lowpart (SFmode, val), elt);
17601 return;
17603 case V8HImode:
17604 use_vec_merge = TARGET_SSE2;
17605 break;
17606 case V4HImode:
17607 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17608 break;
17610 case V16QImode:
17611 case V8QImode:
17612 default:
17613 break;
17616 if (use_vec_merge)
17618 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
17619 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
17620 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17622 else
17624 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17626 emit_move_insn (mem, target);
17628 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17629 emit_move_insn (tmp, val);
17631 emit_move_insn (target, mem);
17635 void
17636 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
17638 enum machine_mode mode = GET_MODE (vec);
17639 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17640 bool use_vec_extr = false;
17641 rtx tmp;
17643 switch (mode)
17645 case V2SImode:
17646 case V2SFmode:
17647 if (!mmx_ok)
17648 break;
17649 /* FALLTHRU */
17651 case V2DFmode:
17652 case V2DImode:
17653 use_vec_extr = true;
17654 break;
17656 case V4SFmode:
17657 switch (elt)
17659 case 0:
17660 tmp = vec;
17661 break;
17663 case 1:
17664 case 3:
17665 tmp = gen_reg_rtx (mode);
17666 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
17667 GEN_INT (elt), GEN_INT (elt),
17668 GEN_INT (elt+4), GEN_INT (elt+4)));
17669 break;
17671 case 2:
17672 tmp = gen_reg_rtx (mode);
17673 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
17674 break;
17676 default:
17677 gcc_unreachable ();
17679 vec = tmp;
17680 use_vec_extr = true;
17681 elt = 0;
17682 break;
17684 case V4SImode:
17685 if (TARGET_SSE2)
17687 switch (elt)
17689 case 0:
17690 tmp = vec;
17691 break;
17693 case 1:
17694 case 3:
17695 tmp = gen_reg_rtx (mode);
17696 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
17697 GEN_INT (elt), GEN_INT (elt),
17698 GEN_INT (elt), GEN_INT (elt)));
17699 break;
17701 case 2:
17702 tmp = gen_reg_rtx (mode);
17703 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
17704 break;
17706 default:
17707 gcc_unreachable ();
17709 vec = tmp;
17710 use_vec_extr = true;
17711 elt = 0;
17713 else
17715 /* For SSE1, we have to reuse the V4SF code. */
17716 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
17717 gen_lowpart (V4SFmode, vec), elt);
17718 return;
17720 break;
17722 case V8HImode:
17723 use_vec_extr = TARGET_SSE2;
17724 break;
17725 case V4HImode:
17726 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17727 break;
17729 case V16QImode:
17730 case V8QImode:
17731 /* ??? Could extract the appropriate HImode element and shift. */
17732 default:
17733 break;
17736 if (use_vec_extr)
17738 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
17739 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
17741 /* Let the rtl optimizers know about the zero extension performed. */
17742 if (inner_mode == HImode)
17744 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
17745 target = gen_lowpart (SImode, target);
17748 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17750 else
17752 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17754 emit_move_insn (mem, vec);
17756 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17757 emit_move_insn (target, tmp);
17761 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
17762 pattern to reduce; DEST is the destination; IN is the input vector. */
17764 void
17765 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
17767 rtx tmp1, tmp2, tmp3;
17769 tmp1 = gen_reg_rtx (V4SFmode);
17770 tmp2 = gen_reg_rtx (V4SFmode);
17771 tmp3 = gen_reg_rtx (V4SFmode);
17773 emit_insn (gen_sse_movhlps (tmp1, in, in));
17774 emit_insn (fn (tmp2, tmp1, in));
17776 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
17777 GEN_INT (1), GEN_INT (1),
17778 GEN_INT (1+4), GEN_INT (1+4)));
17779 emit_insn (fn (dest, tmp2, tmp3));
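/* For example, with an addition pattern for FN the sequence computes
   dest[0] = (in[2] + in[0]) + (in[3] + in[1]): movhlps brings the high
   pair down, the first FN combines the two pairs, the shufps broadcasts
   element 1 of that partial result, and the final FN folds it into
   element 0.  */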
17782 /* Implements target hook vector_mode_supported_p. */
17783 static bool
17784 ix86_vector_mode_supported_p (enum machine_mode mode)
17786 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
17787 return true;
17788 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
17789 return true;
17790 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
17791 return true;
17792 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
17793 return true;
17794 return false;
17797 /* Worker function for TARGET_MD_ASM_CLOBBERS.
17799 We do this in the new i386 backend to maintain source compatibility
17800 with the old cc0-based compiler. */
17802 static tree
17803 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
17804 tree inputs ATTRIBUTE_UNUSED,
17805 tree clobbers)
17807 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
17808 clobbers);
17809 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
17810 clobbers);
17811 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
17812 clobbers);
17813 return clobbers;
17816 /* Return true if this goes in large data/bss. */
17818 static bool
17819 ix86_in_large_data_p (tree exp)
17821 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
17822 return false;
17824 /* Functions are never large data. */
17825 if (TREE_CODE (exp) == FUNCTION_DECL)
17826 return false;
17828 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
17830 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
17831 if (strcmp (section, ".ldata") == 0
17832 || strcmp (section, ".lbss") == 0)
17833 return true;
17834 return false;
17836 else
17838 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
17840 /* If this is an incomplete type with size 0, then we can't put it
17841 in data because it might be too big when completed. */
17842 if (!size || size > ix86_section_threshold)
17843 return true;
17846 return false;
17848 static void
17849 ix86_encode_section_info (tree decl, rtx rtl, int first)
17851 default_encode_section_info (decl, rtl, first);
17853 if (TREE_CODE (decl) == VAR_DECL
17854 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
17855 && ix86_in_large_data_p (decl))
17856 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
17859 /* Worker function for REVERSE_CONDITION. */
17861 enum rtx_code
17862 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
17864 return (mode != CCFPmode && mode != CCFPUmode
17865 ? reverse_condition (code)
17866 : reverse_condition_maybe_unordered (code));
17869 /* Output code to perform an x87 FP register move, from OPERANDS[1]
17870 to OPERANDS[0]. */
17872 const char *
17873 output_387_reg_move (rtx insn, rtx *operands)
17875 if (REG_P (operands[1])
17876 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
17878 if (REGNO (operands[0]) == FIRST_STACK_REG
17879 && TARGET_USE_FFREEP)
17880 return "ffreep\t%y0";
17881 return "fstp\t%y0";
17883 if (STACK_TOP_P (operands[0]))
17884 return "fld%z1\t%y1";
17885 return "fst\t%y0";
17888 /* Output code to perform a conditional jump to LABEL, if the C2 flag in the
17889 FP status register is set. */
17891 void
17892 ix86_emit_fp_unordered_jump (rtx label)
17894 rtx reg = gen_reg_rtx (HImode);
17895 rtx temp;
17897 emit_insn (gen_x86_fnstsw_1 (reg));
17899 if (TARGET_USE_SAHF)
17901 emit_insn (gen_x86_sahf_1 (reg));
17903 temp = gen_rtx_REG (CCmode, FLAGS_REG);
17904 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
17906 else
17908 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
17910 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17911 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
17914 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
17915 gen_rtx_LABEL_REF (VOIDmode, label),
17916 pc_rtx);
17917 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
17918 emit_jump_insn (temp);
17921 /* Output code to perform a log1p XFmode calculation. */
17923 void ix86_emit_i387_log1p (rtx op0, rtx op1)
17925 rtx label1 = gen_label_rtx ();
17926 rtx label2 = gen_label_rtx ();
17928 rtx tmp = gen_reg_rtx (XFmode);
17929 rtx tmp2 = gen_reg_rtx (XFmode);
17931 emit_insn (gen_absxf2 (tmp, op1));
17932 emit_insn (gen_cmpxf (tmp,
17933 CONST_DOUBLE_FROM_REAL_VALUE (
17934 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
17935 XFmode)));
17936 emit_jump_insn (gen_bge (label1));
17938 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
17939 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
17940 emit_jump (label2);
17942 emit_label (label1);
17943 emit_move_insn (tmp, CONST1_RTX (XFmode));
17944 emit_insn (gen_addxf3 (tmp, op1, tmp));
17945 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
17946 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
17948 emit_label (label2);
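/* The split above relies on log1p (x) = ln (2) * log2 (1 + x): for
   |x| below 1 - sqrt (2)/2 (about 0.2929) fyl2xp1 is applied directly to
   x for accuracy near zero, otherwise 1 + x is formed explicitly and
   fyl2x is used; fldln2 supplies the ln (2) factor in both cases.  */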
17951 /* Solaris named-section hook. Parameters are as for
17952 named_section_real. */
17954 static void
17955 i386_solaris_elf_named_section (const char *name, unsigned int flags,
17956 tree decl)
17958 /* With Binutils 2.15, the "@unwind" marker must be specified on
17959 every occurrence of the ".eh_frame" section, not just the first
17960 one. */
17961 if (TARGET_64BIT
17962 && strcmp (name, ".eh_frame") == 0)
17964 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
17965 flags & SECTION_WRITE ? "aw" : "a");
17966 return;
17968 default_elf_asm_named_section (name, flags, decl);
17971 /* Return the mangling of TYPE if it is an extended fundamental type. */
17973 static const char *
17974 ix86_mangle_fundamental_type (tree type)
17976 switch (TYPE_MODE (type))
17978 case TFmode:
17979 /* __float128 is "g". */
17980 return "g";
17981 case XFmode:
17982 /* "long double" or __float80 is "e". */
17983 return "e";
17984 default:
17985 return NULL;
17989 /* For 32-bit code we can save PIC register setup by using
17990 the __stack_chk_fail_local hidden function instead of calling
17991 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
17992 register, so it is better to call __stack_chk_fail directly. */
17994 static tree
17995 ix86_stack_protect_fail (void)
17997 return TARGET_64BIT
17998 ? default_external_stack_protect_fail ()
17999 : default_hidden_stack_protect_fail ();
18002 /* Select a format to encode pointers in exception handling data. CODE
18003 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
18004 true if the symbol may be affected by dynamic relocations.
18006 ??? All x86 object file formats are capable of representing this.
18007 After all, the relocation needed is the same as for the call insn.
18008 Whether or not a particular assembler allows us to enter such, I
18009 guess we'll have to see. */
18010 int
18011 asm_preferred_eh_data_format (int code, int global)
18013 if (flag_pic)
18015 int type = DW_EH_PE_sdata8;
18016 if (!TARGET_64BIT
18017 || ix86_cmodel == CM_SMALL_PIC
18018 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
18019 type = DW_EH_PE_sdata4;
18020 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
18022 if (ix86_cmodel == CM_SMALL
18023 || (ix86_cmodel == CM_MEDIUM && code))
18024 return DW_EH_PE_udata4;
18025 return DW_EH_PE_absptr;
18028 #include "gt-i386.h"