old-autovect-branch/gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
53 #ifndef CHECK_STACK_LIMIT
54 #define CHECK_STACK_LIMIT (-1)
55 #endif
57 /* Return index of given mode in mult and division cost tables. */
58 #define MODE_INDEX(mode) \
59 ((mode) == QImode ? 0 \
60 : (mode) == HImode ? 1 \
61 : (mode) == SImode ? 2 \
62 : (mode) == DImode ? 3 \
63 : 4)
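/* For example, the rtx cost code indexes the per-mode arrays below
   roughly as
     ix86_cost->mult_init[MODE_INDEX (mode)] + nbits * ix86_cost->mult_bit
   (field names as laid out by struct processor_costs), so QImode through
   DImode each get a dedicated entry and anything wider shares the final
   slot.  */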
65 /* Processor costs (relative to an add) */
66 static const
67 struct processor_costs size_cost = { /* costs for tuning for size */
68 2, /* cost of an add instruction */
69 3, /* cost of a lea instruction */
70 2, /* variable shift costs */
71 3, /* constant shift costs */
72 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
73 0, /* cost of multiply per each bit set */
74 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
75 3, /* cost of movsx */
76 3, /* cost of movzx */
77 0, /* "large" insn */
78 2, /* MOVE_RATIO */
79 2, /* cost for loading QImode using movzbl */
80 {2, 2, 2}, /* cost of loading integer registers
81 in QImode, HImode and SImode.
82 Relative to reg-reg move (2). */
83 {2, 2, 2}, /* cost of storing integer registers */
84 2, /* cost of reg,reg fld/fst */
85 {2, 2, 2}, /* cost of loading fp registers
86 in SFmode, DFmode and XFmode */
87 {2, 2, 2}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
88 3, /* cost of moving MMX register */
89 {3, 3}, /* cost of loading MMX registers
90 in SImode and DImode */
91 {3, 3}, /* cost of storing MMX registers
92 in SImode and DImode */
93 3, /* cost of moving SSE register */
94 {3, 3, 3}, /* cost of loading SSE registers
95 in SImode, DImode and TImode */
96 {3, 3, 3}, /* cost of storing SSE registers
97 in SImode, DImode and TImode */
98 3, /* MMX or SSE register to integer */
99 0, /* size of prefetch block */
100 0, /* number of parallel prefetches */
101 1, /* Branch cost */
102 2, /* cost of FADD and FSUB insns. */
103 2, /* cost of FMUL instruction. */
104 2, /* cost of FDIV instruction. */
105 2, /* cost of FABS instruction. */
106 2, /* cost of FCHS instruction. */
107 2, /* cost of FSQRT instruction. */
110 /* Processor costs (relative to an add) */
111 static const
112 struct processor_costs i386_cost = { /* 386 specific costs */
113 1, /* cost of an add instruction */
114 1, /* cost of a lea instruction */
115 3, /* variable shift costs */
116 2, /* constant shift costs */
117 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
118 1, /* cost of multiply per each bit set */
119 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
120 3, /* cost of movsx */
121 2, /* cost of movzx */
122 15, /* "large" insn */
123 3, /* MOVE_RATIO */
124 4, /* cost for loading QImode using movzbl */
125 {2, 4, 2}, /* cost of loading integer registers
126 in QImode, HImode and SImode.
127 Relative to reg-reg move (2). */
128 {2, 4, 2}, /* cost of storing integer registers */
129 2, /* cost of reg,reg fld/fst */
130 {8, 8, 8}, /* cost of loading fp registers
131 in SFmode, DFmode and XFmode */
132 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
133 2, /* cost of moving MMX register */
134 {4, 8}, /* cost of loading MMX registers
135 in SImode and DImode */
136 {4, 8}, /* cost of storing MMX registers
137 in SImode and DImode */
138 2, /* cost of moving SSE register */
139 {4, 8, 16}, /* cost of loading SSE registers
140 in SImode, DImode and TImode */
141 {4, 8, 16}, /* cost of storing SSE registers
142 in SImode, DImode and TImode */
143 3, /* MMX or SSE register to integer */
144 0, /* size of prefetch block */
145 0, /* number of parallel prefetches */
146 1, /* Branch cost */
147 23, /* cost of FADD and FSUB insns. */
148 27, /* cost of FMUL instruction. */
149 88, /* cost of FDIV instruction. */
150 22, /* cost of FABS instruction. */
151 24, /* cost of FCHS instruction. */
152 122, /* cost of FSQRT instruction. */
155 static const
156 struct processor_costs i486_cost = { /* 486 specific costs */
157 1, /* cost of an add instruction */
158 1, /* cost of a lea instruction */
159 3, /* variable shift costs */
160 2, /* constant shift costs */
161 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
162 1, /* cost of multiply per each bit set */
163 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
164 3, /* cost of movsx */
165 2, /* cost of movzx */
166 15, /* "large" insn */
167 3, /* MOVE_RATIO */
168 4, /* cost for loading QImode using movzbl */
169 {2, 4, 2}, /* cost of loading integer registers
170 in QImode, HImode and SImode.
171 Relative to reg-reg move (2). */
172 {2, 4, 2}, /* cost of storing integer registers */
173 2, /* cost of reg,reg fld/fst */
174 {8, 8, 8}, /* cost of loading fp registers
175 in SFmode, DFmode and XFmode */
176 {8, 8, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
177 2, /* cost of moving MMX register */
178 {4, 8}, /* cost of loading MMX registers
179 in SImode and DImode */
180 {4, 8}, /* cost of storing MMX registers
181 in SImode and DImode */
182 2, /* cost of moving SSE register */
183 {4, 8, 16}, /* cost of loading SSE registers
184 in SImode, DImode and TImode */
185 {4, 8, 16}, /* cost of storing SSE registers
186 in SImode, DImode and TImode */
187 3, /* MMX or SSE register to integer */
188 0, /* size of prefetch block */
189 0, /* number of parallel prefetches */
190 1, /* Branch cost */
191 8, /* cost of FADD and FSUB insns. */
192 16, /* cost of FMUL instruction. */
193 73, /* cost of FDIV instruction. */
194 3, /* cost of FABS instruction. */
195 3, /* cost of FCHS instruction. */
196 83, /* cost of FSQRT instruction. */
199 static const
200 struct processor_costs pentium_cost = {
201 1, /* cost of an add instruction */
202 1, /* cost of a lea instruction */
203 4, /* variable shift costs */
204 1, /* constant shift costs */
205 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
206 0, /* cost of multiply per each bit set */
207 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
208 3, /* cost of movsx */
209 2, /* cost of movzx */
210 8, /* "large" insn */
211 6, /* MOVE_RATIO */
212 6, /* cost for loading QImode using movzbl */
213 {2, 4, 2}, /* cost of loading integer registers
214 in QImode, HImode and SImode.
215 Relative to reg-reg move (2). */
216 {2, 4, 2}, /* cost of storing integer registers */
217 2, /* cost of reg,reg fld/fst */
218 {2, 2, 6}, /* cost of loading fp registers
219 in SFmode, DFmode and XFmode */
220 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
221 8, /* cost of moving MMX register */
222 {8, 8}, /* cost of loading MMX registers
223 in SImode and DImode */
224 {8, 8}, /* cost of storing MMX registers
225 in SImode and DImode */
226 2, /* cost of moving SSE register */
227 {4, 8, 16}, /* cost of loading SSE registers
228 in SImode, DImode and TImode */
229 {4, 8, 16}, /* cost of storing SSE registers
230 in SImode, DImode and TImode */
231 3, /* MMX or SSE register to integer */
232 0, /* size of prefetch block */
233 0, /* number of parallel prefetches */
234 2, /* Branch cost */
235 3, /* cost of FADD and FSUB insns. */
236 3, /* cost of FMUL instruction. */
237 39, /* cost of FDIV instruction. */
238 1, /* cost of FABS instruction. */
239 1, /* cost of FCHS instruction. */
240 70, /* cost of FSQRT instruction. */
243 static const
244 struct processor_costs pentiumpro_cost = {
245 1, /* cost of an add instruction */
246 1, /* cost of a lea instruction */
247 1, /* variable shift costs */
248 1, /* constant shift costs */
249 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
250 0, /* cost of multiply per each bit set */
251 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
252 1, /* cost of movsx */
253 1, /* cost of movzx */
254 8, /* "large" insn */
255 6, /* MOVE_RATIO */
256 2, /* cost for loading QImode using movzbl */
257 {4, 4, 4}, /* cost of loading integer registers
258 in QImode, HImode and SImode.
259 Relative to reg-reg move (2). */
260 {2, 2, 2}, /* cost of storing integer registers */
261 2, /* cost of reg,reg fld/fst */
262 {2, 2, 6}, /* cost of loading fp registers
263 in SFmode, DFmode and XFmode */
264 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
265 2, /* cost of moving MMX register */
266 {2, 2}, /* cost of loading MMX registers
267 in SImode and DImode */
268 {2, 2}, /* cost of storing MMX registers
269 in SImode and DImode */
270 2, /* cost of moving SSE register */
271 {2, 2, 8}, /* cost of loading SSE registers
272 in SImode, DImode and TImode */
273 {2, 2, 8}, /* cost of storing SSE registers
274 in SImode, DImode and TImode */
275 3, /* MMX or SSE register to integer */
276 32, /* size of prefetch block */
277 6, /* number of parallel prefetches */
278 2, /* Branch cost */
279 3, /* cost of FADD and FSUB insns. */
280 5, /* cost of FMUL instruction. */
281 56, /* cost of FDIV instruction. */
282 2, /* cost of FABS instruction. */
283 2, /* cost of FCHS instruction. */
284 56, /* cost of FSQRT instruction. */
287 static const
288 struct processor_costs k6_cost = {
289 1, /* cost of an add instruction */
290 2, /* cost of a lea instruction */
291 1, /* variable shift costs */
292 1, /* constant shift costs */
293 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
294 0, /* cost of multiply per each bit set */
295 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
296 2, /* cost of movsx */
297 2, /* cost of movzx */
298 8, /* "large" insn */
299 4, /* MOVE_RATIO */
300 3, /* cost for loading QImode using movzbl */
301 {4, 5, 4}, /* cost of loading integer registers
302 in QImode, HImode and SImode.
303 Relative to reg-reg move (2). */
304 {2, 3, 2}, /* cost of storing integer registers */
305 4, /* cost of reg,reg fld/fst */
306 {6, 6, 6}, /* cost of loading fp registers
307 in SFmode, DFmode and XFmode */
308 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
309 2, /* cost of moving MMX register */
310 {2, 2}, /* cost of loading MMX registers
311 in SImode and DImode */
312 {2, 2}, /* cost of storing MMX registers
313 in SImode and DImode */
314 2, /* cost of moving SSE register */
315 {2, 2, 8}, /* cost of loading SSE registers
316 in SImode, DImode and TImode */
317 {2, 2, 8}, /* cost of storing SSE registers
318 in SImode, DImode and TImode */
319 6, /* MMX or SSE register to integer */
320 32, /* size of prefetch block */
321 1, /* number of parallel prefetches */
322 1, /* Branch cost */
323 2, /* cost of FADD and FSUB insns. */
324 2, /* cost of FMUL instruction. */
325 56, /* cost of FDIV instruction. */
326 2, /* cost of FABS instruction. */
327 2, /* cost of FCHS instruction. */
328 56, /* cost of FSQRT instruction. */
331 static const
332 struct processor_costs athlon_cost = {
333 1, /* cost of an add instruction */
334 2, /* cost of a lea instruction */
335 1, /* variable shift costs */
336 1, /* constant shift costs */
337 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
338 0, /* cost of multiply per each bit set */
339 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
340 1, /* cost of movsx */
341 1, /* cost of movzx */
342 8, /* "large" insn */
343 9, /* MOVE_RATIO */
344 4, /* cost for loading QImode using movzbl */
345 {3, 4, 3}, /* cost of loading integer registers
346 in QImode, HImode and SImode.
347 Relative to reg-reg move (2). */
348 {3, 4, 3}, /* cost of storing integer registers */
349 4, /* cost of reg,reg fld/fst */
350 {4, 4, 12}, /* cost of loading fp registers
351 in SFmode, DFmode and XFmode */
352 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
353 2, /* cost of moving MMX register */
354 {4, 4}, /* cost of loading MMX registers
355 in SImode and DImode */
356 {4, 4}, /* cost of storing MMX registers
357 in SImode and DImode */
358 2, /* cost of moving SSE register */
359 {4, 4, 6}, /* cost of loading SSE registers
360 in SImode, DImode and TImode */
361 {4, 4, 5}, /* cost of storing SSE registers
362 in SImode, DImode and TImode */
363 5, /* MMX or SSE register to integer */
364 64, /* size of prefetch block */
365 6, /* number of parallel prefetches */
366 5, /* Branch cost */
367 4, /* cost of FADD and FSUB insns. */
368 4, /* cost of FMUL instruction. */
369 24, /* cost of FDIV instruction. */
370 2, /* cost of FABS instruction. */
371 2, /* cost of FCHS instruction. */
372 35, /* cost of FSQRT instruction. */
375 static const
376 struct processor_costs k8_cost = {
377 1, /* cost of an add instruction */
378 2, /* cost of a lea instruction */
379 1, /* variable shift costs */
380 1, /* constant shift costs */
381 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
382 0, /* cost of multiply per each bit set */
383 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
384 1, /* cost of movsx */
385 1, /* cost of movzx */
386 8, /* "large" insn */
387 9, /* MOVE_RATIO */
388 4, /* cost for loading QImode using movzbl */
389 {3, 4, 3}, /* cost of loading integer registers
390 in QImode, HImode and SImode.
391 Relative to reg-reg move (2). */
392 {3, 4, 3}, /* cost of storing integer registers */
393 4, /* cost of reg,reg fld/fst */
394 {4, 4, 12}, /* cost of loading fp registers
395 in SFmode, DFmode and XFmode */
396 {6, 6, 8}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
397 2, /* cost of moving MMX register */
398 {3, 3}, /* cost of loading MMX registers
399 in SImode and DImode */
400 {4, 4}, /* cost of storing MMX registers
401 in SImode and DImode */
402 2, /* cost of moving SSE register */
403 {4, 3, 6}, /* cost of loading SSE registers
404 in SImode, DImode and TImode */
405 {4, 4, 5}, /* cost of storing SSE registers
406 in SImode, DImode and TImode */
407 5, /* MMX or SSE register to integer */
408 64, /* size of prefetch block */
409 6, /* number of parallel prefetches */
410 5, /* Branch cost */
411 4, /* cost of FADD and FSUB insns. */
412 4, /* cost of FMUL instruction. */
413 19, /* cost of FDIV instruction. */
414 2, /* cost of FABS instruction. */
415 2, /* cost of FCHS instruction. */
416 35, /* cost of FSQRT instruction. */
419 static const
420 struct processor_costs pentium4_cost = {
421 1, /* cost of an add instruction */
422 3, /* cost of a lea instruction */
423 4, /* variable shift costs */
424 4, /* constant shift costs */
425 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
426 0, /* cost of multiply per each bit set */
427 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
428 1, /* cost of movsx */
429 1, /* cost of movzx */
430 16, /* "large" insn */
431 6, /* MOVE_RATIO */
432 2, /* cost for loading QImode using movzbl */
433 {4, 5, 4}, /* cost of loading integer registers
434 in QImode, HImode and SImode.
435 Relative to reg-reg move (2). */
436 {2, 3, 2}, /* cost of storing integer registers */
437 2, /* cost of reg,reg fld/fst */
438 {2, 2, 6}, /* cost of loading fp registers
439 in SFmode, DFmode and XFmode */
440 {4, 4, 6}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
441 2, /* cost of moving MMX register */
442 {2, 2}, /* cost of loading MMX registers
443 in SImode and DImode */
444 {2, 2}, /* cost of storing MMX registers
445 in SImode and DImode */
446 12, /* cost of moving SSE register */
447 {12, 12, 12}, /* cost of loading SSE registers
448 in SImode, DImode and TImode */
449 {2, 2, 8}, /* cost of storing SSE registers
450 in SImode, DImode and TImode */
451 10, /* MMX or SSE register to integer */
452 64, /* size of prefetch block */
453 6, /* number of parallel prefetches */
454 2, /* Branch cost */
455 5, /* cost of FADD and FSUB insns. */
456 7, /* cost of FMUL instruction. */
457 43, /* cost of FDIV instruction. */
458 2, /* cost of FABS instruction. */
459 2, /* cost of FCHS instruction. */
460 43, /* cost of FSQRT instruction. */
463 static const
464 struct processor_costs nocona_cost = {
465 1, /* cost of an add instruction */
466 1, /* cost of a lea instruction */
467 1, /* variable shift costs */
468 1, /* constant shift costs */
469 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
470 0, /* cost of multiply per each bit set */
471 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
472 1, /* cost of movsx */
473 1, /* cost of movzx */
474 16, /* "large" insn */
475 17, /* MOVE_RATIO */
476 4, /* cost for loading QImode using movzbl */
477 {4, 4, 4}, /* cost of loading integer registers
478 in QImode, HImode and SImode.
479 Relative to reg-reg move (2). */
480 {4, 4, 4}, /* cost of storing integer registers */
481 3, /* cost of reg,reg fld/fst */
482 {12, 12, 12}, /* cost of loading fp registers
483 in SFmode, DFmode and XFmode */
484 {4, 4, 4}, /* cost of storing fp registers in SFmode, DFmode and XFmode */
485 6, /* cost of moving MMX register */
486 {12, 12}, /* cost of loading MMX registers
487 in SImode and DImode */
488 {12, 12}, /* cost of storing MMX registers
489 in SImode and DImode */
490 6, /* cost of moving SSE register */
491 {12, 12, 12}, /* cost of loading SSE registers
492 in SImode, DImode and TImode */
493 {12, 12, 12}, /* cost of storing SSE registers
494 in SImode, DImode and TImode */
495 8, /* MMX or SSE register to integer */
496 128, /* size of prefetch block */
497 8, /* number of parallel prefetches */
498 1, /* Branch cost */
499 6, /* cost of FADD and FSUB insns. */
500 8, /* cost of FMUL instruction. */
501 40, /* cost of FDIV instruction. */
502 3, /* cost of FABS instruction. */
503 3, /* cost of FCHS instruction. */
504 44, /* cost of FSQRT instruction. */
507 const struct processor_costs *ix86_cost = &pentium_cost;
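/* This initial value is only a placeholder; override_options below
   repoints ix86_cost either at size_cost (when optimizing for size) or
   at the processor_target_table entry selected by -mtune.  */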
509 /* Processor feature/optimization bitmasks. */
510 #define m_386 (1<<PROCESSOR_I386)
511 #define m_486 (1<<PROCESSOR_I486)
512 #define m_PENT (1<<PROCESSOR_PENTIUM)
513 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
514 #define m_K6 (1<<PROCESSOR_K6)
515 #define m_ATHLON (1<<PROCESSOR_ATHLON)
516 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
517 #define m_K8 (1<<PROCESSOR_K8)
518 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
519 #define m_NOCONA (1<<PROCESSOR_NOCONA)
521 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
522 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
523 const int x86_zero_extend_with_and = m_486 | m_PENT;
524 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
525 const int x86_double_with_add = ~m_386;
526 const int x86_use_bit_test = m_386;
527 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
528 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
529 const int x86_fisttp = m_NOCONA;
530 const int x86_3dnow_a = m_ATHLON_K8;
531 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
532 /* Branch hints were put in P4 based on simulation result. But
533 after P4 was made, no performance benefit was observed with
534 branch hints. It also increases the code size. As the result,
535 icc never generates branch hints. */
536 const int x86_branch_hints = 0;
537 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
538 const int x86_partial_reg_stall = m_PPRO;
539 const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
540 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
541 const int x86_use_mov0 = m_K6;
542 const int x86_use_cltd = ~(m_PENT | m_K6);
543 const int x86_read_modify_write = ~m_PENT;
544 const int x86_read_modify = ~(m_PENT | m_PPRO);
545 const int x86_split_long_moves = m_PPRO;
546 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
547 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
548 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
549 const int x86_qimode_math = ~(0);
550 const int x86_promote_qi_regs = 0;
551 const int x86_himode_math = ~(m_PPRO);
552 const int x86_promote_hi_regs = m_PPRO;
553 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
554 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
556 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
557 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
558 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
559 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
560 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
561 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
562 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
563 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
564 const int x86_shift1 = ~m_486;
565 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
566 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
567 /* Set for machines where the type and dependencies are resolved on SSE
568 register parts instead of whole registers, so we may maintain just
569 lower part of scalar values in proper format leaving the upper part
570 undefined. */
571 const int x86_sse_split_regs = m_ATHLON_K8;
572 const int x86_sse_typeless_stores = m_ATHLON_K8;
573 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
574 const int x86_use_ffreep = m_ATHLON_K8;
575 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
577 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
578 integer data in xmm registers. Which results in pretty abysmal code. */
579 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
581 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
582 /* Some CPU cores are not able to predict more than 4 branch instructions in
583 the 16 byte window. */
584 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
585 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
586 const int x86_use_bt = m_ATHLON_K8;
587 /* Compare and exchange was added for 80486. */
588 const int x86_cmpxchg = ~m_386;
589 /* Exchange and add was added for 80486. */
590 const int x86_xadd = ~m_386;
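/* Each mask above is tested against the bit of the processor selected
   by -mtune (or -march, for the architecture-dependent flags); i386.h
   wraps them in TARGET_* convenience macros, schematically
     #define TARGET_USE_LEAVE (x86_use_leave & (1 << ix86_tune))
   so a feature is enabled whenever the tuned-for CPU's bit is set.  */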
592 /* In case the average insn count for single function invocation is
593 lower than this constant, emit fast (but longer) prologue and
594 epilogue code. */
595 #define FAST_PROLOGUE_INSN_COUNT 20
597 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
598 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
599 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
600 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
602 /* Array of the smallest class containing reg number REGNO, indexed by
603 REGNO. Used by REGNO_REG_CLASS in i386.h. */
605 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
607 /* ax, dx, cx, bx */
608 AREG, DREG, CREG, BREG,
609 /* si, di, bp, sp */
610 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
611 /* FP registers */
612 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
613 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
614 /* arg pointer */
615 NON_Q_REGS,
616 /* flags, fpsr, dirflag, frame */
617 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
618 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
619 SSE_REGS, SSE_REGS,
620 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
621 MMX_REGS, MMX_REGS,
622 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
623 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
624 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
625 SSE_REGS, SSE_REGS,
628 /* The "default" register map used in 32bit mode. */
630 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
632 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
633 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
634 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
635 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
636 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
637 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
638 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
641 static int const x86_64_int_parameter_registers[6] =
643 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
644 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
647 static int const x86_64_int_return_registers[4] =
649 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
652 /* The "default" register map used in 64bit mode. */
653 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
655 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
656 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
657 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
658 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
659 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
660 8,9,10,11,12,13,14,15, /* extended integer registers */
661 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
664 /* Define the register numbers to be used in Dwarf debugging information.
665 The SVR4 reference port C compiler uses the following register numbers
666 in its Dwarf output code:
667 0 for %eax (gcc regno = 0)
668 1 for %ecx (gcc regno = 2)
669 2 for %edx (gcc regno = 1)
670 3 for %ebx (gcc regno = 3)
671 4 for %esp (gcc regno = 7)
672 5 for %ebp (gcc regno = 6)
673 6 for %esi (gcc regno = 4)
674 7 for %edi (gcc regno = 5)
675 The following three DWARF register numbers are never generated by
676 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
677 believes these numbers have these meanings.
678 8 for %eip (no gcc equivalent)
679 9 for %eflags (gcc regno = 17)
680 10 for %trapno (no gcc equivalent)
681 It is not at all clear how we should number the FP stack registers
682 for the x86 architecture. If the version of SDB on x86/svr4 were
683 a bit less brain dead with respect to floating-point then we would
684 have a precedent to follow with respect to DWARF register numbers
685 for x86 FP registers, but the SDB on x86/svr4 is so completely
686 broken with respect to FP registers that it is hardly worth thinking
687 of it as something to strive for compatibility with.
688 The version of x86/svr4 SDB I have at the moment does (partially)
689 seem to believe that DWARF register number 11 is associated with
690 the x86 register %st(0), but that's about all. Higher DWARF
691 register numbers don't seem to be associated with anything in
692 particular, and even for DWARF regno 11, SDB only seems to under-
693 stand that it should say that a variable lives in %st(0) (when
694 asked via an `=' command) if we said it was in DWARF regno 11,
695 but SDB still prints garbage when asked for the value of the
696 variable in question (via a `/' command).
697 (Also note that the labels SDB prints for various FP stack regs
698 when doing an `x' command are all wrong.)
699 Note that these problems generally don't affect the native SVR4
700 C compiler because it doesn't allow the use of -O with -g and
701 because when it is *not* optimizing, it allocates a memory
702 location for each floating-point variable, and the memory
703 location is what gets described in the DWARF AT_location
704 attribute for the variable in question.
705 Regardless of the severe mental illness of the x86/svr4 SDB, we
706 do something sensible here and we use the following DWARF
707 register numbers. Note that these are all stack-top-relative
708 numbers.
709 11 for %st(0) (gcc regno = 8)
710 12 for %st(1) (gcc regno = 9)
711 13 for %st(2) (gcc regno = 10)
712 14 for %st(3) (gcc regno = 11)
713 15 for %st(4) (gcc regno = 12)
714 16 for %st(5) (gcc regno = 13)
715 17 for %st(6) (gcc regno = 14)
716 18 for %st(7) (gcc regno = 15)
718 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
720 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
721 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
722 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
723 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
724 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
725 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
726 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
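/* Reading the table with the numbering explained above: entry 6 is 5
   and entry 7 is 4, i.e. GCC regno 6 (%ebp) is emitted as DWARF
   register 5 and regno 7 (%esp) as DWARF register 4, while the FP
   stack registers (GCC regnos 8-15) map to DWARF 11-18.  */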
729 /* Test and compare insns in i386.md store the information needed to
730 generate branch and scc insns here. */
732 rtx ix86_compare_op0 = NULL_RTX;
733 rtx ix86_compare_op1 = NULL_RTX;
734 rtx ix86_compare_emitted = NULL_RTX;
736 /* Size of the register save area. */
737 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
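/* With the usual 64-bit values (REGPARM_MAX == 6, UNITS_PER_WORD == 8,
   SSE_REGPARM_MAX == 8) this works out to 6*8 + 8*16 = 176 bytes, the
   size of the register save area that va_start spills to.  */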
739 /* Define the structure for the machine field in struct function. */
741 struct stack_local_entry GTY(())
743 unsigned short mode;
744 unsigned short n;
745 rtx rtl;
746 struct stack_local_entry *next;
749 /* Structure describing stack frame layout.
750 Stack grows downward:
752 [arguments]
753 <- ARG_POINTER
754 saved pc
756 saved frame pointer if frame_pointer_needed
757 <- HARD_FRAME_POINTER
758 [saved regs]
760 [padding1] \
762 [va_arg registers] (
763 > to_allocate <- FRAME_POINTER
764 [frame] (
766 [padding2] /
768 struct ix86_frame
770 int nregs;
771 int padding1;
772 int va_arg_size;
773 HOST_WIDE_INT frame;
774 int padding2;
775 int outgoing_arguments_size;
776 int red_zone_size;
778 HOST_WIDE_INT to_allocate;
779 /* The offsets relative to ARG_POINTER. */
780 HOST_WIDE_INT frame_pointer_offset;
781 HOST_WIDE_INT hard_frame_pointer_offset;
782 HOST_WIDE_INT stack_pointer_offset;
784 /* When save_regs_using_mov is set, emit prologue using
785 move instead of push instructions. */
786 bool save_regs_using_mov;
789 /* Code model option. */
790 enum cmodel ix86_cmodel;
791 /* Asm dialect. */
792 enum asm_dialect ix86_asm_dialect = ASM_ATT;
793 /* TLS dialect. */
794 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
796 /* Which unit we are generating floating point math for. */
797 enum fpmath_unit ix86_fpmath;
799 /* Which CPU we are scheduling for. */
800 enum processor_type ix86_tune;
801 /* Which instruction set architecture to use. */
802 enum processor_type ix86_arch;
804 /* True if the SSE prefetch instruction is not a NOP. */
805 int x86_prefetch_sse;
807 /* ix86_regparm_string as a number */
808 static int ix86_regparm;
810 /* Preferred alignment for stack boundary in bits. */
811 unsigned int ix86_preferred_stack_boundary;
813 /* Values 1-5: see jump.c */
814 int ix86_branch_cost;
816 /* Variables which are this size or smaller are put in the data/bss
817 or ldata/lbss sections. */
819 int ix86_section_threshold = 65536;
821 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
822 char internal_label_prefix[16];
823 int internal_label_prefix_len;
825 static bool ix86_handle_option (size_t, const char *, int);
826 static void output_pic_addr_const (FILE *, rtx, int);
827 static void put_condition_code (enum rtx_code, enum machine_mode,
828 int, int, FILE *);
829 static const char *get_some_local_dynamic_name (void);
830 static int get_some_local_dynamic_name_1 (rtx *, void *);
831 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
832 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
833 rtx *);
834 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
835 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
836 enum machine_mode);
837 static rtx get_thread_pointer (int);
838 static rtx legitimize_tls_address (rtx, enum tls_model, int);
839 static void get_pc_thunk_name (char [32], unsigned int);
840 static rtx gen_push (rtx);
841 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
842 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
843 static struct machine_function * ix86_init_machine_status (void);
844 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
845 static int ix86_nsaved_regs (void);
846 static void ix86_emit_save_regs (void);
847 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
848 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
849 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
850 static HOST_WIDE_INT ix86_GOT_alias_set (void);
851 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
852 static rtx ix86_expand_aligntest (rtx, int);
853 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
854 static int ix86_issue_rate (void);
855 static int ix86_adjust_cost (rtx, rtx, rtx, int);
856 static int ia32_multipass_dfa_lookahead (void);
857 static void ix86_init_mmx_sse_builtins (void);
858 static rtx x86_this_parameter (tree);
859 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
860 HOST_WIDE_INT, tree);
861 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
862 static void x86_file_start (void);
863 static void ix86_reorg (void);
864 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
865 static tree ix86_build_builtin_va_list (void);
866 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
867 tree, int *, int);
868 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
869 static bool ix86_vector_mode_supported_p (enum machine_mode);
871 static int ix86_address_cost (rtx);
872 static bool ix86_cannot_force_const_mem (rtx);
873 static rtx ix86_delegitimize_address (rtx);
875 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
877 struct builtin_description;
878 static rtx ix86_expand_sse_comi (const struct builtin_description *,
879 tree, rtx);
880 static rtx ix86_expand_sse_compare (const struct builtin_description *,
881 tree, rtx);
882 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
883 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
884 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
885 static rtx ix86_expand_store_builtin (enum insn_code, tree);
886 static rtx safe_vector_operand (rtx, enum machine_mode);
887 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
888 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
889 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
890 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
891 static int ix86_fp_comparison_cost (enum rtx_code code);
892 static unsigned int ix86_select_alt_pic_regnum (void);
893 static int ix86_save_reg (unsigned int, int);
894 static void ix86_compute_frame_layout (struct ix86_frame *);
895 static int ix86_comp_type_attributes (tree, tree);
896 static int ix86_function_regparm (tree, tree);
897 const struct attribute_spec ix86_attribute_table[];
898 static bool ix86_function_ok_for_sibcall (tree, tree);
899 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
900 static int ix86_value_regno (enum machine_mode, tree, tree);
901 static bool contains_128bit_aligned_vector_p (tree);
902 static rtx ix86_struct_value_rtx (tree, int);
903 static bool ix86_ms_bitfield_layout_p (tree);
904 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
905 static int extended_reg_mentioned_1 (rtx *, void *);
906 static bool ix86_rtx_costs (rtx, int, int, int *);
907 static int min_insn_size (rtx);
908 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
909 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
910 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
911 tree, bool);
912 static void ix86_init_builtins (void);
913 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
914 static const char *ix86_mangle_fundamental_type (tree);
915 static tree ix86_stack_protect_fail (void);
916 static rtx ix86_internal_arg_pointer (void);
917 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
919 /* This function is only used on Solaris. */
920 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
921 ATTRIBUTE_UNUSED;
923 /* Register class used for passing given 64bit part of the argument.
924 These represent classes as documented by the PS ABI, with the exception
925 of SSESF, SSEDF classes, that are basically SSE class, just gcc will
926 use SF or DFmode move instead of DImode to avoid reformatting penalties.
928 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
929 whenever possible (upper half does contain padding).
931 enum x86_64_reg_class
933 X86_64_NO_CLASS,
934 X86_64_INTEGER_CLASS,
935 X86_64_INTEGERSI_CLASS,
936 X86_64_SSE_CLASS,
937 X86_64_SSESF_CLASS,
938 X86_64_SSEDF_CLASS,
939 X86_64_SSEUP_CLASS,
940 X86_64_X87_CLASS,
941 X86_64_X87UP_CLASS,
942 X86_64_COMPLEX_X87_CLASS,
943 X86_64_MEMORY_CLASS
945 static const char * const x86_64_reg_class_name[] = {
946 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
947 "sseup", "x87", "x87up", "cplx87", "no"
950 #define MAX_CLASSES 4
952 /* Table of constants used by fldpi, fldln2, etc.... */
953 static REAL_VALUE_TYPE ext_80387_constants_table [5];
954 static bool ext_80387_constants_init = 0;
955 static void init_ext_80387_constants (void);
956 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
957 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
958 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
959 static section *x86_64_elf_select_section (tree decl, int reloc,
960 unsigned HOST_WIDE_INT align)
961 ATTRIBUTE_UNUSED;
963 /* Initialize the GCC target structure. */
964 #undef TARGET_ATTRIBUTE_TABLE
965 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
966 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
967 # undef TARGET_MERGE_DECL_ATTRIBUTES
968 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
969 #endif
971 #undef TARGET_COMP_TYPE_ATTRIBUTES
972 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
974 #undef TARGET_INIT_BUILTINS
975 #define TARGET_INIT_BUILTINS ix86_init_builtins
976 #undef TARGET_EXPAND_BUILTIN
977 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
979 #undef TARGET_ASM_FUNCTION_EPILOGUE
980 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
982 #undef TARGET_ENCODE_SECTION_INFO
983 #ifndef SUBTARGET_ENCODE_SECTION_INFO
984 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
985 #else
986 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
987 #endif
989 #undef TARGET_ASM_OPEN_PAREN
990 #define TARGET_ASM_OPEN_PAREN ""
991 #undef TARGET_ASM_CLOSE_PAREN
992 #define TARGET_ASM_CLOSE_PAREN ""
994 #undef TARGET_ASM_ALIGNED_HI_OP
995 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
996 #undef TARGET_ASM_ALIGNED_SI_OP
997 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
998 #ifdef ASM_QUAD
999 #undef TARGET_ASM_ALIGNED_DI_OP
1000 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1001 #endif
1003 #undef TARGET_ASM_UNALIGNED_HI_OP
1004 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1005 #undef TARGET_ASM_UNALIGNED_SI_OP
1006 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1007 #undef TARGET_ASM_UNALIGNED_DI_OP
1008 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1010 #undef TARGET_SCHED_ADJUST_COST
1011 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1012 #undef TARGET_SCHED_ISSUE_RATE
1013 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1014 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1015 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1016 ia32_multipass_dfa_lookahead
1018 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1019 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1021 #ifdef HAVE_AS_TLS
1022 #undef TARGET_HAVE_TLS
1023 #define TARGET_HAVE_TLS true
1024 #endif
1025 #undef TARGET_CANNOT_FORCE_CONST_MEM
1026 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1028 #undef TARGET_DELEGITIMIZE_ADDRESS
1029 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1031 #undef TARGET_MS_BITFIELD_LAYOUT_P
1032 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1034 #if TARGET_MACHO
1035 #undef TARGET_BINDS_LOCAL_P
1036 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1037 #endif
1039 #undef TARGET_ASM_OUTPUT_MI_THUNK
1040 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1041 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1042 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1044 #undef TARGET_ASM_FILE_START
1045 #define TARGET_ASM_FILE_START x86_file_start
1047 #undef TARGET_DEFAULT_TARGET_FLAGS
1048 #define TARGET_DEFAULT_TARGET_FLAGS \
1049 (TARGET_DEFAULT \
1050 | TARGET_64BIT_DEFAULT \
1051 | TARGET_SUBTARGET_DEFAULT \
1052 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1054 #undef TARGET_HANDLE_OPTION
1055 #define TARGET_HANDLE_OPTION ix86_handle_option
1057 #undef TARGET_RTX_COSTS
1058 #define TARGET_RTX_COSTS ix86_rtx_costs
1059 #undef TARGET_ADDRESS_COST
1060 #define TARGET_ADDRESS_COST ix86_address_cost
1062 #undef TARGET_FIXED_CONDITION_CODE_REGS
1063 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1064 #undef TARGET_CC_MODES_COMPATIBLE
1065 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1067 #undef TARGET_MACHINE_DEPENDENT_REORG
1068 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1070 #undef TARGET_BUILD_BUILTIN_VA_LIST
1071 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1073 #undef TARGET_MD_ASM_CLOBBERS
1074 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1076 #undef TARGET_PROMOTE_PROTOTYPES
1077 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1078 #undef TARGET_STRUCT_VALUE_RTX
1079 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1080 #undef TARGET_SETUP_INCOMING_VARARGS
1081 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1082 #undef TARGET_MUST_PASS_IN_STACK
1083 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1084 #undef TARGET_PASS_BY_REFERENCE
1085 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1086 #undef TARGET_INTERNAL_ARG_POINTER
1087 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1088 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1089 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1091 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1092 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1094 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1095 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1096 #undef TARGET_VECTORIZE_BUILTIN_EXTRACT_EVEN
1097 #define TARGET_VECTORIZE_BUILTIN_EXTRACT_EVEN \
1098 interleave_vectorize_builtin_extract_even
1099 #undef TARGET_VECTORIZE_BUILTIN_EXTRACT_ODD
1100 #define TARGET_VECTORIZE_BUILTIN_EXTRACT_ODD \
1101 interleave_vectorize_builtin_extract_odd
1103 #ifdef HAVE_AS_TLS
1104 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1105 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1106 #endif
1108 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1109 #undef TARGET_INSERT_ATTRIBUTES
1110 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1111 #endif
1113 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1114 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1116 #undef TARGET_STACK_PROTECT_FAIL
1117 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1119 #undef TARGET_FUNCTION_VALUE
1120 #define TARGET_FUNCTION_VALUE ix86_function_value
1122 struct gcc_target targetm = TARGET_INITIALIZER;
1125 /* The svr4 ABI for the i386 says that records and unions are returned
1126 in memory. */
1127 #ifndef DEFAULT_PCC_STRUCT_RETURN
1128 #define DEFAULT_PCC_STRUCT_RETURN 1
1129 #endif
1131 /* Implement TARGET_HANDLE_OPTION. */
1133 static bool
1134 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1136 switch (code)
1138 case OPT_m3dnow:
1139 if (!value)
1141 target_flags &= ~MASK_3DNOW_A;
1142 target_flags_explicit |= MASK_3DNOW_A;
1144 return true;
1146 case OPT_mmmx:
1147 if (!value)
1149 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1150 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1152 return true;
1154 case OPT_msse:
1155 if (!value)
1157 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1158 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1160 return true;
1162 case OPT_msse2:
1163 if (!value)
1165 target_flags &= ~MASK_SSE3;
1166 target_flags_explicit |= MASK_SSE3;
1168 return true;
1170 default:
1171 return true;
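/* The effect is that disabling a base ISA also disables its supersets
   and records them as explicitly chosen, so a later -march cannot
   silently re-enable them: e.g. "-march=pentium4 -mno-sse" really ends
   up without SSE2/SSE3 as well, because override_options below checks
   target_flags_explicit before applying the -march defaults.  */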
1175 /* Sometimes certain combinations of command options do not make
1176 sense on a particular target machine. You can define a macro
1177 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1178 defined, is executed once just after all the command options have
1179 been parsed.
1181 Don't use this macro to turn on various extra optimizations for
1182 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1184 void
1185 override_options (void)
1187 int i;
1188 int ix86_tune_defaulted = 0;
1190 /* Comes from final.c -- no real reason to change it. */
1191 #define MAX_CODE_ALIGN 16
1193 static struct ptt
1195 const struct processor_costs *cost; /* Processor costs */
1196 const int target_enable; /* Target flags to enable. */
1197 const int target_disable; /* Target flags to disable. */
1198 const int align_loop; /* Default alignments. */
1199 const int align_loop_max_skip;
1200 const int align_jump;
1201 const int align_jump_max_skip;
1202 const int align_func;
1204 const processor_target_table[PROCESSOR_max] =
1206 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1207 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1208 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1209 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1210 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1211 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1212 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1213 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1214 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1217 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1218 static struct pta
1220 const char *const name; /* processor name or nickname. */
1221 const enum processor_type processor;
1222 const enum pta_flags
1224 PTA_SSE = 1,
1225 PTA_SSE2 = 2,
1226 PTA_SSE3 = 4,
1227 PTA_MMX = 8,
1228 PTA_PREFETCH_SSE = 16,
1229 PTA_3DNOW = 32,
1230 PTA_3DNOW_A = 64,
1231 PTA_64BIT = 128
1232 } flags;
1234 const processor_alias_table[] =
1236 {"i386", PROCESSOR_I386, 0},
1237 {"i486", PROCESSOR_I486, 0},
1238 {"i586", PROCESSOR_PENTIUM, 0},
1239 {"pentium", PROCESSOR_PENTIUM, 0},
1240 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1241 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1242 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1243 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1244 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1245 {"i686", PROCESSOR_PENTIUMPRO, 0},
1246 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1247 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1248 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1249 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1250 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1251 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1252 | PTA_MMX | PTA_PREFETCH_SSE},
1253 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1254 | PTA_MMX | PTA_PREFETCH_SSE},
1255 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1256 | PTA_MMX | PTA_PREFETCH_SSE},
1257 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1258 | PTA_MMX | PTA_PREFETCH_SSE},
1259 {"k6", PROCESSOR_K6, PTA_MMX},
1260 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1261 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1262 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1263 | PTA_3DNOW_A},
1264 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1265 | PTA_3DNOW | PTA_3DNOW_A},
1266 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1267 | PTA_3DNOW_A | PTA_SSE},
1268 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1269 | PTA_3DNOW_A | PTA_SSE},
1270 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1271 | PTA_3DNOW_A | PTA_SSE},
1272 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1273 | PTA_SSE | PTA_SSE2 },
1274 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1275 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1276 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1277 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1278 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1279 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1280 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1281 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1284 int const pta_size = ARRAY_SIZE (processor_alias_table);
1286 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1287 SUBTARGET_OVERRIDE_OPTIONS;
1288 #endif
1290 /* Set the default values for switches whose default depends on TARGET_64BIT
1291 in case they weren't overwritten by command line options. */
1292 if (TARGET_64BIT)
1294 if (flag_omit_frame_pointer == 2)
1295 flag_omit_frame_pointer = 1;
1296 if (flag_asynchronous_unwind_tables == 2)
1297 flag_asynchronous_unwind_tables = 1;
1298 if (flag_pcc_struct_return == 2)
1299 flag_pcc_struct_return = 0;
1301 else
1303 if (flag_omit_frame_pointer == 2)
1304 flag_omit_frame_pointer = 0;
1305 if (flag_asynchronous_unwind_tables == 2)
1306 flag_asynchronous_unwind_tables = 0;
1307 if (flag_pcc_struct_return == 2)
1308 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1311 if (!ix86_tune_string && ix86_arch_string)
1312 ix86_tune_string = ix86_arch_string;
1313 if (!ix86_tune_string)
1315 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1316 ix86_tune_defaulted = 1;
1318 if (!ix86_arch_string)
1319 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1321 if (ix86_cmodel_string != 0)
1323 if (!strcmp (ix86_cmodel_string, "small"))
1324 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1325 else if (!strcmp (ix86_cmodel_string, "medium"))
1326 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1327 else if (flag_pic)
1328 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1329 else if (!strcmp (ix86_cmodel_string, "32"))
1330 ix86_cmodel = CM_32;
1331 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1332 ix86_cmodel = CM_KERNEL;
1333 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1334 ix86_cmodel = CM_LARGE;
1335 else
1336 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1338 else
1340 ix86_cmodel = CM_32;
1341 if (TARGET_64BIT)
1342 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1344 if (ix86_asm_string != 0)
1346 if (! TARGET_MACHO
1347 && !strcmp (ix86_asm_string, "intel"))
1348 ix86_asm_dialect = ASM_INTEL;
1349 else if (!strcmp (ix86_asm_string, "att"))
1350 ix86_asm_dialect = ASM_ATT;
1351 else
1352 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1354 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1355 error ("code model %qs not supported in the %s bit mode",
1356 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1357 if (ix86_cmodel == CM_LARGE)
1358 sorry ("code model %<large%> not supported yet");
1359 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1360 sorry ("%i-bit mode not compiled in",
1361 (target_flags & MASK_64BIT) ? 64 : 32);
1363 for (i = 0; i < pta_size; i++)
1364 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1366 ix86_arch = processor_alias_table[i].processor;
1367 /* Default cpu tuning to the architecture. */
1368 ix86_tune = ix86_arch;
1369 if (processor_alias_table[i].flags & PTA_MMX
1370 && !(target_flags_explicit & MASK_MMX))
1371 target_flags |= MASK_MMX;
1372 if (processor_alias_table[i].flags & PTA_3DNOW
1373 && !(target_flags_explicit & MASK_3DNOW))
1374 target_flags |= MASK_3DNOW;
1375 if (processor_alias_table[i].flags & PTA_3DNOW_A
1376 && !(target_flags_explicit & MASK_3DNOW_A))
1377 target_flags |= MASK_3DNOW_A;
1378 if (processor_alias_table[i].flags & PTA_SSE
1379 && !(target_flags_explicit & MASK_SSE))
1380 target_flags |= MASK_SSE;
1381 if (processor_alias_table[i].flags & PTA_SSE2
1382 && !(target_flags_explicit & MASK_SSE2))
1383 target_flags |= MASK_SSE2;
1384 if (processor_alias_table[i].flags & PTA_SSE3
1385 && !(target_flags_explicit & MASK_SSE3))
1386 target_flags |= MASK_SSE3;
1387 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1388 x86_prefetch_sse = true;
1389 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1390 error ("CPU you selected does not support x86-64 "
1391 "instruction set");
1392 break;
1395 if (i == pta_size)
1396 error ("bad value (%s) for -march= switch", ix86_arch_string);
1398 for (i = 0; i < pta_size; i++)
1399 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1401 ix86_tune = processor_alias_table[i].processor;
1402 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1404 if (ix86_tune_defaulted)
1406 ix86_tune_string = "x86-64";
1407 for (i = 0; i < pta_size; i++)
1408 if (! strcmp (ix86_tune_string,
1409 processor_alias_table[i].name))
1410 break;
1411 ix86_tune = processor_alias_table[i].processor;
1413 else
1414 error ("CPU you selected does not support x86-64 "
1415 "instruction set");
1417 /* Intel CPUs have always interpreted SSE prefetch instructions as
1418 NOPs; so, we can enable SSE prefetch instructions even when
1419 -mtune (rather than -march) points us to a processor that has them.
1420 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1421 higher processors. */
1422 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1423 x86_prefetch_sse = true;
1424 break;
1426 if (i == pta_size)
1427 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1429 if (optimize_size)
1430 ix86_cost = &size_cost;
1431 else
1432 ix86_cost = processor_target_table[ix86_tune].cost;
1433 target_flags |= processor_target_table[ix86_tune].target_enable;
1434 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1436 /* Arrange to set up i386_stack_locals for all functions. */
1437 init_machine_status = ix86_init_machine_status;
1439 /* Validate -mregparm= value. */
1440 if (ix86_regparm_string)
1442 i = atoi (ix86_regparm_string);
1443 if (i < 0 || i > REGPARM_MAX)
1444 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1445 else
1446 ix86_regparm = i;
1448 else
1449 if (TARGET_64BIT)
1450 ix86_regparm = REGPARM_MAX;
1452 /* If the user has provided any of the -malign-* options,
1453 warn and use that value only if -falign-* is not set.
1454 Remove this code in GCC 3.2 or later. */
1455 if (ix86_align_loops_string)
1457 warning (0, "-malign-loops is obsolete, use -falign-loops");
1458 if (align_loops == 0)
1460 i = atoi (ix86_align_loops_string);
1461 if (i < 0 || i > MAX_CODE_ALIGN)
1462 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1463 else
1464 align_loops = 1 << i;
1468 if (ix86_align_jumps_string)
1470 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
1471 if (align_jumps == 0)
1473 i = atoi (ix86_align_jumps_string);
1474 if (i < 0 || i > MAX_CODE_ALIGN)
1475 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1476 else
1477 align_jumps = 1 << i;
1481 if (ix86_align_funcs_string)
1483 warning (0, "-malign-functions is obsolete, use -falign-functions");
1484 if (align_functions == 0)
1486 i = atoi (ix86_align_funcs_string);
1487 if (i < 0 || i > MAX_CODE_ALIGN)
1488 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1489 else
1490 align_functions = 1 << i;
1494 /* Default align_* from the processor table. */
1495 if (align_loops == 0)
1497 align_loops = processor_target_table[ix86_tune].align_loop;
1498 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1500 if (align_jumps == 0)
1502 align_jumps = processor_target_table[ix86_tune].align_jump;
1503 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1505 if (align_functions == 0)
1507 align_functions = processor_target_table[ix86_tune].align_func;
1510 /* Validate -mpreferred-stack-boundary= value, or provide default.
1511 The default of 128 bits is for Pentium III's SSE __m128, but we
1512 don't want additional code to keep the stack aligned when
1513 optimizing for code size. */
1514 ix86_preferred_stack_boundary = (optimize_size
1515 ? TARGET_64BIT ? 128 : 32
1516 : 128);
1517 if (ix86_preferred_stack_boundary_string)
1519 i = atoi (ix86_preferred_stack_boundary_string);
1520 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1521 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1522 TARGET_64BIT ? 4 : 2);
1523 else
1524 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
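      /* For example, -mpreferred-stack-boundary=4 yields (1 << 4) * 8 = 128 bits,
	 i.e. a 16-byte boundary; the 32-bit minimum of 2 corresponds to 4 bytes
	 and the maximum of 12 to a 4096-byte boundary.  */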
1527 /* Validate -mbranch-cost= value, or provide default. */
1528 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1529 if (ix86_branch_cost_string)
1531 i = atoi (ix86_branch_cost_string);
1532 if (i < 0 || i > 5)
1533 error ("-mbranch-cost=%d is not between 0 and 5", i);
1534 else
1535 ix86_branch_cost = i;
1537 if (ix86_section_threshold_string)
1539 i = atoi (ix86_section_threshold_string);
1540 if (i < 0)
1541 error ("-mlarge-data-threshold=%d is negative", i);
1542 else
1543 ix86_section_threshold = i;
1546 if (ix86_tls_dialect_string)
1548 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1549 ix86_tls_dialect = TLS_DIALECT_GNU;
1550 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1551 ix86_tls_dialect = TLS_DIALECT_SUN;
1552 else
1553 error ("bad value (%s) for -mtls-dialect= switch",
1554 ix86_tls_dialect_string);
1557 /* Keep nonleaf frame pointers. */
1558 if (flag_omit_frame_pointer)
1559 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1560 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1561 flag_omit_frame_pointer = 1;
1563 /* If we're doing fast math, we don't care about comparison order
1564 wrt NaNs. This lets us use a shorter comparison sequence. */
1565 if (flag_unsafe_math_optimizations)
1566 target_flags &= ~MASK_IEEE_FP;
1568 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1569 since the insns won't need emulation. */
1570 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1571 target_flags &= ~MASK_NO_FANCY_MATH_387;
1573 /* Likewise, if the target doesn't have a 387, or we've specified
1574 software floating point, don't use 387 inline intrinsics. */
1575 if (!TARGET_80387)
1576 target_flags |= MASK_NO_FANCY_MATH_387;
1578 /* Turn on SSE2 builtins for -msse3. */
1579 if (TARGET_SSE3)
1580 target_flags |= MASK_SSE2;
1582 /* Turn on SSE builtins for -msse2. */
1583 if (TARGET_SSE2)
1584 target_flags |= MASK_SSE;
1586 /* Turn on MMX builtins for -msse. */
1587 if (TARGET_SSE)
1589 target_flags |= MASK_MMX & ~target_flags_explicit;
1590 x86_prefetch_sse = true;
1593 /* Turn on MMX builtins for 3Dnow. */
1594 if (TARGET_3DNOW)
1595 target_flags |= MASK_MMX;
1597 if (TARGET_64BIT)
1599 if (TARGET_ALIGN_DOUBLE)
1600 	error ("-malign-double makes no sense in 64-bit mode");
1601 if (TARGET_RTD)
1602 	error ("-mrtd calling convention not supported in 64-bit mode");
1604 /* Enable by default the SSE and MMX builtins. Do allow the user to
1605 explicitly disable any of these. In particular, disabling SSE and
1606 MMX for kernel code is extremely useful. */
1607 target_flags
1608 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1609 & ~target_flags_explicit);
1611 else
1613 	 /* The i386 ABI does not specify a red zone.  It still makes sense to use it
1614 	    when the programmer takes care to keep the stack from being destroyed.  */
1615 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1616 target_flags |= MASK_NO_RED_ZONE;
1619 /* Accept -msseregparm only if at least SSE support is enabled. */
1620 if (TARGET_SSEREGPARM
1621 && ! TARGET_SSE)
1622 error ("-msseregparm used without SSE enabled");
1624 ix86_fpmath = TARGET_FPMATH_DEFAULT;
1626 if (ix86_fpmath_string != 0)
1628 if (! strcmp (ix86_fpmath_string, "387"))
1629 ix86_fpmath = FPMATH_387;
1630 else if (! strcmp (ix86_fpmath_string, "sse"))
1632 if (!TARGET_SSE)
1634 	      warning (0, "SSE instruction set disabled, using 387 arithmetic");
1635 ix86_fpmath = FPMATH_387;
1637 else
1638 ix86_fpmath = FPMATH_SSE;
1640 else if (! strcmp (ix86_fpmath_string, "387,sse")
1641 || ! strcmp (ix86_fpmath_string, "sse,387"))
1643 if (!TARGET_SSE)
1645 	      warning (0, "SSE instruction set disabled, using 387 arithmetic");
1646 ix86_fpmath = FPMATH_387;
1648 else if (!TARGET_80387)
1650 	      warning (0, "387 instruction set disabled, using SSE arithmetic");
1651 ix86_fpmath = FPMATH_SSE;
1653 else
1654 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1656 else
1657 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1660 /* If the i387 is disabled, then do not return values in it. */
1661 if (!TARGET_80387)
1662 target_flags &= ~MASK_FLOAT_RETURNS;
1664 if ((x86_accumulate_outgoing_args & TUNEMASK)
1665 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1666 && !optimize_size)
1667 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1669 /* ??? Unwind info is not correct around the CFG unless either a frame
1670 pointer is present or M_A_O_A is set. Fixing this requires rewriting
1671 unwind info generation to be aware of the CFG and propagating states
1672 around edges. */
1673 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
1674 || flag_exceptions || flag_non_call_exceptions)
1675 && flag_omit_frame_pointer
1676 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
1678 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1679 warning (0, "unwind tables currently require either a frame pointer "
1680 "or -maccumulate-outgoing-args for correctness");
1681 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1684 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1686 char *p;
1687 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1688 p = strchr (internal_label_prefix, 'X');
1689 internal_label_prefix_len = p - internal_label_prefix;
1690 *p = '\0';
1693 	 /* When the scheduling description is not available, disable the scheduler
1694 	    pass so it won't slow down compilation and make x87 code slower.  */
1695 if (!TARGET_SCHEDULE)
1696 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1699 	/* Switch to the appropriate section for output of DECL.
1700 DECL is either a `VAR_DECL' node or a constant of some sort.
1701 RELOC indicates whether forming the initial value of DECL requires
1702 link-time relocations. */
1704 static section *
1705 x86_64_elf_select_section (tree decl, int reloc,
1706 unsigned HOST_WIDE_INT align)
1708 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1709 && ix86_in_large_data_p (decl))
1711 const char *sname = NULL;
1712 unsigned int flags = SECTION_WRITE;
1713 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1715 case SECCAT_DATA:
1716 sname = ".ldata";
1717 break;
1718 case SECCAT_DATA_REL:
1719 sname = ".ldata.rel";
1720 break;
1721 case SECCAT_DATA_REL_LOCAL:
1722 sname = ".ldata.rel.local";
1723 break;
1724 case SECCAT_DATA_REL_RO:
1725 sname = ".ldata.rel.ro";
1726 break;
1727 case SECCAT_DATA_REL_RO_LOCAL:
1728 sname = ".ldata.rel.ro.local";
1729 break;
1730 case SECCAT_BSS:
1731 sname = ".lbss";
1732 flags |= SECTION_BSS;
1733 break;
1734 case SECCAT_RODATA:
1735 case SECCAT_RODATA_MERGE_STR:
1736 case SECCAT_RODATA_MERGE_STR_INIT:
1737 case SECCAT_RODATA_MERGE_CONST:
1738 sname = ".lrodata";
1739 flags = 0;
1740 break;
1741 case SECCAT_SRODATA:
1742 case SECCAT_SDATA:
1743 case SECCAT_SBSS:
1744 gcc_unreachable ();
1745 case SECCAT_TEXT:
1746 case SECCAT_TDATA:
1747 case SECCAT_TBSS:
1748 	  /* We don't split these for the medium model.  Place them into
1749 	     default sections and hope for the best.  */
1750 break;
1752 if (sname)
1754 /* We might get called with string constants, but get_named_section
1755 doesn't like them as they are not DECLs. Also, we need to set
1756 flags in that case. */
1757 if (!DECL_P (decl))
1758 return get_section (sname, flags, NULL);
1759 return get_named_section (decl, sname, reloc);
1762 return default_elf_select_section (decl, reloc, align);
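   /* For example, with -mcmodel=medium a writable global larger than
      -mlarge-data-threshold bytes is placed in .ldata (or .lbss when
      uninitialized) instead of the default .data/.bss sections.  */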
1765 /* Build up a unique section name, expressed as a
1766 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
1767 RELOC indicates whether the initial value of EXP requires
1768 link-time relocations. */
1770 static void
1771 x86_64_elf_unique_section (tree decl, int reloc)
1773 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1774 && ix86_in_large_data_p (decl))
1776 const char *prefix = NULL;
1777 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
1778 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
1780 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1782 case SECCAT_DATA:
1783 case SECCAT_DATA_REL:
1784 case SECCAT_DATA_REL_LOCAL:
1785 case SECCAT_DATA_REL_RO:
1786 case SECCAT_DATA_REL_RO_LOCAL:
1787 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
1788 break;
1789 case SECCAT_BSS:
1790 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
1791 break;
1792 case SECCAT_RODATA:
1793 case SECCAT_RODATA_MERGE_STR:
1794 case SECCAT_RODATA_MERGE_STR_INIT:
1795 case SECCAT_RODATA_MERGE_CONST:
1796 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
1797 break;
1798 case SECCAT_SRODATA:
1799 case SECCAT_SDATA:
1800 case SECCAT_SBSS:
1801 gcc_unreachable ();
1802 case SECCAT_TEXT:
1803 case SECCAT_TDATA:
1804 case SECCAT_TBSS:
1805 	  /* We don't split these for the medium model.  Place them into
1806 	     default sections and hope for the best.  */
1807 break;
1809 if (prefix)
1811 const char *name;
1812 size_t nlen, plen;
1813 char *string;
1814 plen = strlen (prefix);
1816 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1817 name = targetm.strip_name_encoding (name);
1818 nlen = strlen (name);
1820 string = alloca (nlen + plen + 1);
1821 memcpy (string, prefix, plen);
1822 memcpy (string + plen, name, nlen + 1);
1824 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
1825 return;
1828 default_unique_section (decl, reloc);
1831 #ifdef COMMON_ASM_OP
1832 /* This says how to output assembler code to declare an
1833 uninitialized external linkage data object.
1835 	 For the x86-64 medium model we need to use the .largecomm directive
1836 	 for large objects.  */
1837 void
1838 x86_elf_aligned_common (FILE *file,
1839 const char *name, unsigned HOST_WIDE_INT size,
1840 int align)
1842 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1843 && size > (unsigned int)ix86_section_threshold)
1844 fprintf (file, ".largecomm\t");
1845 else
1846 fprintf (file, "%s", COMMON_ASM_OP);
1847 assemble_name (file, name);
1848 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
1849 size, align / BITS_PER_UNIT);
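   /* For example, a 4096-byte object aligned to 32 bytes in the medium model
      produces a line of the form ".largecomm  name,4096,32"; smaller objects
      are emitted with the usual COMMON_ASM_OP (typically ".comm") instead.  */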
1852 /* Utility function for targets to use in implementing
1853 ASM_OUTPUT_ALIGNED_BSS. */
1855 void
1856 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
1857 const char *name, unsigned HOST_WIDE_INT size,
1858 int align)
1860 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1861 && size > (unsigned int)ix86_section_threshold)
1862 switch_to_section (get_named_section (decl, ".lbss", 0));
1863 else
1864 switch_to_section (bss_section);
1865 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
1866 #ifdef ASM_DECLARE_OBJECT_NAME
1867 last_assemble_variable_decl = decl;
1868 ASM_DECLARE_OBJECT_NAME (file, name, decl);
1869 #else
1870 	 /* The standard thing is just to output a label for the object.  */
1871 ASM_OUTPUT_LABEL (file, name);
1872 #endif /* ASM_DECLARE_OBJECT_NAME */
1873 ASM_OUTPUT_SKIP (file, size ? size : 1);
1875 #endif
1877 void
1878 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1880 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1881 make the problem with not enough registers even worse. */
1882 #ifdef INSN_SCHEDULING
1883 if (level > 1)
1884 flag_schedule_insns = 0;
1885 #endif
1887 if (TARGET_MACHO)
1888 /* The Darwin libraries never set errno, so we might as well
1889 avoid calling them when that's the only reason we would. */
1890 flag_errno_math = 0;
1892 	 /* The default values of these switches depend on TARGET_64BIT, which is
1893 	    not known at this moment.  Mark these values with 2 and let the user
1894 	    override them.  In case there is no command line option specifying them,
1895 	    we will set the defaults in override_options.  */
1896 if (optimize >= 1)
1897 flag_omit_frame_pointer = 2;
1898 flag_pcc_struct_return = 2;
1899 flag_asynchronous_unwind_tables = 2;
1900 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1901 SUBTARGET_OPTIMIZATION_OPTIONS;
1902 #endif
1905 /* Table of valid machine attributes. */
1906 const struct attribute_spec ix86_attribute_table[] =
1908 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1909 /* Stdcall attribute says callee is responsible for popping arguments
1910 if they are not variable. */
1911 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1912 /* Fastcall attribute says callee is responsible for popping arguments
1913 if they are not variable. */
1914 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1915 /* Cdecl attribute says the callee is a normal C declaration */
1916 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1917 /* Regparm attribute specifies how many integer arguments are to be
1918 passed in registers. */
1919 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
1920 /* Sseregparm attribute says we are using x86_64 calling conventions
1921 for FP arguments. */
1922 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1923 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1924 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1925 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1926 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1927 #endif
1928 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1929 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1930 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1931 SUBTARGET_ATTRIBUTE_TABLE,
1932 #endif
1933 { NULL, 0, 0, false, false, false, NULL }
1936 /* Decide whether we can make a sibling call to a function. DECL is the
1937 declaration of the function being targeted by the call and EXP is the
1938 CALL_EXPR representing the call. */
1940 static bool
1941 ix86_function_ok_for_sibcall (tree decl, tree exp)
1943 tree func;
1944 rtx a, b;
1946 /* If we are generating position-independent code, we cannot sibcall
1947 optimize any indirect call, or a direct call to a global function,
1948 as the PLT requires %ebx be live. */
1949 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1950 return false;
1952 if (decl)
1953 func = decl;
1954 else
1956 func = TREE_TYPE (TREE_OPERAND (exp, 0));
1957 if (POINTER_TYPE_P (func))
1958 func = TREE_TYPE (func);
1961 	 /* Check that the return value locations are the same.  For example,
1962 if we are returning floats on the 80387 register stack, we cannot
1963 make a sibcall from a function that doesn't return a float to a
1964 function that does or, conversely, from a function that does return
1965 a float to a function that doesn't; the necessary stack adjustment
1966 would not be executed. This is also the place we notice
1967 differences in the return value ABI. Note that it is ok for one
1968 of the functions to have void return type as long as the return
1969 value of the other is passed in a register. */
1970 a = ix86_function_value (TREE_TYPE (exp), func, false);
1971 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
1972 cfun->decl, false);
1973 if (STACK_REG_P (a) || STACK_REG_P (b))
1975 if (!rtx_equal_p (a, b))
1976 return false;
1978 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1980 else if (!rtx_equal_p (a, b))
1981 return false;
1983 /* If this call is indirect, we'll need to be able to use a call-clobbered
1984 register for the address of the target function. Make sure that all
1985 such registers are not used for passing parameters. */
1986 if (!decl && !TARGET_64BIT)
1988 tree type;
1990 /* We're looking at the CALL_EXPR, we need the type of the function. */
1991 type = TREE_OPERAND (exp, 0); /* pointer expression */
1992 type = TREE_TYPE (type); /* pointer type */
1993 type = TREE_TYPE (type); /* function type */
1995 if (ix86_function_regparm (type, NULL) >= 3)
1997 /* ??? Need to count the actual number of registers to be used,
1998 not the possible number of registers. Fix later. */
1999 return false;
2003 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2004 /* Dllimport'd functions are also called indirectly. */
2005 if (decl && DECL_DLLIMPORT_P (decl)
2006 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2007 return false;
2008 #endif
2010 	 /* If we force-aligned the stack, then sibcalling would unalign the
2011 stack, which may break the called function. */
2012 if (cfun->machine->force_align_arg_pointer)
2013 return false;
2015 /* Otherwise okay. That also includes certain types of indirect calls. */
2016 return true;
2019 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2020 calling convention attributes;
2021 arguments as in struct attribute_spec.handler. */
2023 static tree
2024 ix86_handle_cconv_attribute (tree *node, tree name,
2025 tree args,
2026 int flags ATTRIBUTE_UNUSED,
2027 bool *no_add_attrs)
2029 if (TREE_CODE (*node) != FUNCTION_TYPE
2030 && TREE_CODE (*node) != METHOD_TYPE
2031 && TREE_CODE (*node) != FIELD_DECL
2032 && TREE_CODE (*node) != TYPE_DECL)
2034 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2035 IDENTIFIER_POINTER (name));
2036 *no_add_attrs = true;
2037 return NULL_TREE;
2040 /* Can combine regparm with all attributes but fastcall. */
2041 if (is_attribute_p ("regparm", name))
2043 tree cst;
2045 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2047 error ("fastcall and regparm attributes are not compatible");
2050 cst = TREE_VALUE (args);
2051 if (TREE_CODE (cst) != INTEGER_CST)
2053 warning (OPT_Wattributes,
2054 "%qs attribute requires an integer constant argument",
2055 IDENTIFIER_POINTER (name));
2056 *no_add_attrs = true;
2058 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2060 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2061 IDENTIFIER_POINTER (name), REGPARM_MAX);
2062 *no_add_attrs = true;
2065 return NULL_TREE;
2068 if (TARGET_64BIT)
2070 warning (OPT_Wattributes, "%qs attribute ignored",
2071 IDENTIFIER_POINTER (name));
2072 *no_add_attrs = true;
2073 return NULL_TREE;
2076 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2077 if (is_attribute_p ("fastcall", name))
2079 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2081 error ("fastcall and cdecl attributes are not compatible");
2083 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2085 error ("fastcall and stdcall attributes are not compatible");
2087 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2089 error ("fastcall and regparm attributes are not compatible");
2093 /* Can combine stdcall with fastcall (redundant), regparm and
2094 sseregparm. */
2095 else if (is_attribute_p ("stdcall", name))
2097 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2099 error ("stdcall and cdecl attributes are not compatible");
2101 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2103 error ("stdcall and fastcall attributes are not compatible");
2107 /* Can combine cdecl with regparm and sseregparm. */
2108 else if (is_attribute_p ("cdecl", name))
2110 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2112 error ("stdcall and cdecl attributes are not compatible");
2114 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2116 error ("fastcall and cdecl attributes are not compatible");
2120 /* Can combine sseregparm with all attributes. */
2122 return NULL_TREE;
2125 /* Return 0 if the attributes for two types are incompatible, 1 if they
2126 are compatible, and 2 if they are nearly compatible (which causes a
2127 warning to be generated). */
2129 static int
2130 ix86_comp_type_attributes (tree type1, tree type2)
2132 /* Check for mismatch of non-default calling convention. */
2133 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2135 if (TREE_CODE (type1) != FUNCTION_TYPE)
2136 return 1;
2138 /* Check for mismatched fastcall/regparm types. */
2139 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2140 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2141 || (ix86_function_regparm (type1, NULL)
2142 != ix86_function_regparm (type2, NULL)))
2143 return 0;
2145 /* Check for mismatched sseregparm types. */
2146 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2147 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2148 return 0;
2150 /* Check for mismatched return types (cdecl vs stdcall). */
2151 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2152 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2153 return 0;
2155 return 1;
2158 /* Return the regparm value for a function with the indicated TYPE and DECL.
2159 	 DECL may be NULL when calling the function indirectly
2160 or considering a libcall. */
2162 static int
2163 ix86_function_regparm (tree type, tree decl)
2165 tree attr;
2166 int regparm = ix86_regparm;
2167 bool user_convention = false;
2169 if (!TARGET_64BIT)
2171 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2172 if (attr)
2174 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2175 user_convention = true;
2178 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2180 regparm = 2;
2181 user_convention = true;
2184 /* Use register calling convention for local functions when possible. */
2185 if (!TARGET_64BIT && !user_convention && decl
2186 && flag_unit_at_a_time && !profile_flag)
2188 struct cgraph_local_info *i = cgraph_local_info (decl);
2189 if (i && i->local)
2191 int local_regparm, globals = 0, regno;
2193 /* Make sure no regparm register is taken by a global register
2194 variable. */
2195 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2196 if (global_regs[local_regparm])
2197 break;
2198 	   /* We can't use regparm(3) for nested functions as these use the
2199 	      static chain pointer in the third argument.  */
2200 if (local_regparm == 3
2201 && decl_function_context (decl)
2202 && !DECL_NO_STATIC_CHAIN (decl))
2203 local_regparm = 2;
2204 	   /* Each global register variable increases register pressure, so the
2205 	      more global reg vars there are, the less the regparm optimization
2206 	      can be used, unless requested by the user explicitly.  */
2207 for (regno = 0; regno < 6; regno++)
2208 if (global_regs[regno])
2209 globals++;
2210 local_regparm
2211 = globals < local_regparm ? local_regparm - globals : 0;
2213 if (local_regparm > regparm)
2214 regparm = local_regparm;
2218 return regparm;
2221 	/* Return 1 or 2 if we can pass up to 8 SFmode (1) or both SFmode and
2222 	   DFmode (2) arguments in SSE registers for a function with the indicated
2223 	   TYPE and DECL.  DECL may be NULL when calling the function indirectly
2224 	   or considering a libcall.  Otherwise return 0.  */
2226 static int
2227 ix86_function_sseregparm (tree type, tree decl)
2229 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2230 by the sseregparm attribute. */
2231 if (TARGET_SSEREGPARM
2232 || (type
2233 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2235 if (!TARGET_SSE)
2237 if (decl)
2238 error ("Calling %qD with attribute sseregparm without "
2239 "SSE/SSE2 enabled", decl);
2240 else
2241 error ("Calling %qT with attribute sseregparm without "
2242 "SSE/SSE2 enabled", type);
2243 return 0;
2246 return 2;
2249 	 /* For local functions, pass SFmode (and DFmode for SSE2) arguments in
2250 	    SSE registers even in 32-bit mode, and not just 3 but up to
2251 	    8 SSE arguments in registers.  */
2252 if (!TARGET_64BIT && decl
2253 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2255 struct cgraph_local_info *i = cgraph_local_info (decl);
2256 if (i && i->local)
2257 return TARGET_SSE2 ? 2 : 1;
2260 return 0;
2263 /* Return true if EAX is live at the start of the function. Used by
2264 ix86_expand_prologue to determine if we need special help before
2265 calling allocate_stack_worker. */
2267 static bool
2268 ix86_eax_live_at_start_p (void)
2270 /* Cheat. Don't bother working forward from ix86_function_regparm
2271 to the function type to whether an actual argument is located in
2272 eax. Instead just look at cfg info, which is still close enough
2273 to correct at this point. This gives false positives for broken
2274 functions that might use uninitialized data that happens to be
2275 allocated in eax, but who cares? */
2276 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2279 /* Value is the number of bytes of arguments automatically
2280 popped when returning from a subroutine call.
2281 FUNDECL is the declaration node of the function (as a tree),
2282 FUNTYPE is the data type of the function (as a tree),
2283 or for a library call it is an identifier node for the subroutine name.
2284 SIZE is the number of bytes of arguments passed on the stack.
2286 On the 80386, the RTD insn may be used to pop them if the number
2287 of args is fixed, but if the number is variable then the caller
2288 must pop them all. RTD can't be used for library calls now
2289 because the library is compiled with the Unix compiler.
2290 Use of RTD is a selectable option, since it is incompatible with
2291 standard Unix calling sequences. If the option is not selected,
2292 the caller must always pop the args.
2294 The attribute stdcall is equivalent to RTD on a per module basis. */
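/* For example, a stdcall function taking two int arguments pops 8 bytes
   (the callee returns with "ret 8"), while a cdecl or variable-argument
   function returns 0 here and the caller does the cleanup.  */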
2297 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2299 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2301 /* Cdecl functions override -mrtd, and never pop the stack. */
2302 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2304 /* Stdcall and fastcall functions will pop the stack if not
2305 variable args. */
2306 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2307 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2308 rtd = 1;
2310 if (rtd
2311 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2312 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2313 == void_type_node)))
2314 return size;
2317 /* Lose any fake structure return argument if it is passed on the stack. */
2318 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2319 && !TARGET_64BIT
2320 && !KEEP_AGGREGATE_RETURN_POINTER)
2322 int nregs = ix86_function_regparm (funtype, fundecl);
2324 if (!nregs)
2325 return GET_MODE_SIZE (Pmode);
2328 return 0;
2331 /* Argument support functions. */
2333 /* Return true when register may be used to pass function parameters. */
2334 bool
2335 ix86_function_arg_regno_p (int regno)
2337 int i;
2338 if (!TARGET_64BIT)
2339 return (regno < REGPARM_MAX
2340 || (TARGET_MMX && MMX_REGNO_P (regno)
2341 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2342 || (TARGET_SSE && SSE_REGNO_P (regno)
2343 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2345 if (TARGET_SSE && SSE_REGNO_P (regno)
2346 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2347 return true;
2348 /* RAX is used as hidden argument to va_arg functions. */
2349 if (!regno)
2350 return true;
2351 for (i = 0; i < REGPARM_MAX; i++)
2352 if (regno == x86_64_int_parameter_registers[i])
2353 return true;
2354 return false;
2357 	/* Return true if we do not know how to pass TYPE solely in registers.  */
2359 static bool
2360 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2362 if (must_pass_in_stack_var_size_or_pad (mode, type))
2363 return true;
2365 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2366 The layout_type routine is crafty and tries to trick us into passing
2367 currently unsupported vector types on the stack by using TImode. */
2368 return (!TARGET_64BIT && mode == TImode
2369 && type && TREE_CODE (type) != VECTOR_TYPE);
2372 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2373 for a call to a function whose data type is FNTYPE.
2374 For a library call, FNTYPE is 0. */
2376 void
2377 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2378 tree fntype, /* tree ptr for function decl */
2379 rtx libname, /* SYMBOL_REF of library name or 0 */
2380 tree fndecl)
2382 static CUMULATIVE_ARGS zero_cum;
2383 tree param, next_param;
2385 if (TARGET_DEBUG_ARG)
2387 fprintf (stderr, "\ninit_cumulative_args (");
2388 if (fntype)
2389 fprintf (stderr, "fntype code = %s, ret code = %s",
2390 tree_code_name[(int) TREE_CODE (fntype)],
2391 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2392 else
2393 fprintf (stderr, "no fntype");
2395 if (libname)
2396 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2399 *cum = zero_cum;
2401 /* Set up the number of registers to use for passing arguments. */
2402 cum->nregs = ix86_regparm;
2403 if (TARGET_SSE)
2404 cum->sse_nregs = SSE_REGPARM_MAX;
2405 if (TARGET_MMX)
2406 cum->mmx_nregs = MMX_REGPARM_MAX;
2407 cum->warn_sse = true;
2408 cum->warn_mmx = true;
2409 cum->maybe_vaarg = false;
2411 /* Use ecx and edx registers if function has fastcall attribute,
2412 else look for regparm information. */
2413 if (fntype && !TARGET_64BIT)
2415 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2417 cum->nregs = 2;
2418 cum->fastcall = 1;
2420 else
2421 cum->nregs = ix86_function_regparm (fntype, fndecl);
2424 /* Set up the number of SSE registers used for passing SFmode
2425 and DFmode arguments. Warn for mismatching ABI. */
2426 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2428 /* Determine if this function has variable arguments. This is
2429 	 indicated by the last argument being 'void_type_node' if there
2430 are no variable arguments. If there are variable arguments, then
2431 we won't pass anything in registers in 32-bit mode. */
2433 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2435 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2436 param != 0; param = next_param)
2438 next_param = TREE_CHAIN (param);
2439 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2441 if (!TARGET_64BIT)
2443 cum->nregs = 0;
2444 cum->sse_nregs = 0;
2445 cum->mmx_nregs = 0;
2446 cum->warn_sse = 0;
2447 cum->warn_mmx = 0;
2448 cum->fastcall = 0;
2449 cum->float_in_sse = 0;
2451 cum->maybe_vaarg = true;
2455 if ((!fntype && !libname)
2456 || (fntype && !TYPE_ARG_TYPES (fntype)))
2457 cum->maybe_vaarg = true;
2459 if (TARGET_DEBUG_ARG)
2460 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2462 return;
2465 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2466 But in the case of vector types, it is some vector mode.
2468 When we have only some of our vector isa extensions enabled, then there
2469 are some modes for which vector_mode_supported_p is false. For these
2470 modes, the generic vector support in gcc will choose some non-vector mode
2471 in order to implement the type. By computing the natural mode, we'll
2472 select the proper ABI location for the operand and not depend on whatever
2473 the middle-end decides to do with these vector types. */
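/* For example, a 16-byte GCC vector of four floats maps to V4SFmode here
   even when SSE is disabled, so its ABI location does not depend on the
   fallback mode the middle-end would otherwise pick.  */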
2475 static enum machine_mode
2476 type_natural_mode (tree type)
2478 enum machine_mode mode = TYPE_MODE (type);
2480 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2482 HOST_WIDE_INT size = int_size_in_bytes (type);
2483 if ((size == 8 || size == 16)
2484 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2485 && TYPE_VECTOR_SUBPARTS (type) > 1)
2487 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2489 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2490 mode = MIN_MODE_VECTOR_FLOAT;
2491 else
2492 mode = MIN_MODE_VECTOR_INT;
2494 /* Get the mode which has this inner mode and number of units. */
2495 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2496 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2497 && GET_MODE_INNER (mode) == innermode)
2498 return mode;
2500 gcc_unreachable ();
2504 return mode;
2507 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2508 this may not agree with the mode that the type system has chosen for the
2509 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2510 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2512 static rtx
2513 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2514 unsigned int regno)
2516 rtx tmp;
2518 if (orig_mode != BLKmode)
2519 tmp = gen_rtx_REG (orig_mode, regno);
2520 else
2522 tmp = gen_rtx_REG (mode, regno);
2523 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2524 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2527 return tmp;
2530 	/* x86-64 register passing implementation.  See the x86-64 ABI for details.  The goal
2531 	   of this code is to classify each 8-byte chunk of an incoming argument by
2532 	   register class and assign registers accordingly.  */
2534 /* Return the union class of CLASS1 and CLASS2.
2535 See the x86-64 PS ABI for details. */
2537 static enum x86_64_reg_class
2538 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2540 /* Rule #1: If both classes are equal, this is the resulting class. */
2541 if (class1 == class2)
2542 return class1;
2544 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2545 the other class. */
2546 if (class1 == X86_64_NO_CLASS)
2547 return class2;
2548 if (class2 == X86_64_NO_CLASS)
2549 return class1;
2551 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2552 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2553 return X86_64_MEMORY_CLASS;
2555 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2556 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2557 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2558 return X86_64_INTEGERSI_CLASS;
2559 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2560 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2561 return X86_64_INTEGER_CLASS;
2563 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2564 MEMORY is used. */
2565 if (class1 == X86_64_X87_CLASS
2566 || class1 == X86_64_X87UP_CLASS
2567 || class1 == X86_64_COMPLEX_X87_CLASS
2568 || class2 == X86_64_X87_CLASS
2569 || class2 == X86_64_X87UP_CLASS
2570 || class2 == X86_64_COMPLEX_X87_CLASS)
2571 return X86_64_MEMORY_CLASS;
2573 /* Rule #6: Otherwise class SSE is used. */
2574 return X86_64_SSE_CLASS;
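/* For example, merging X86_64_INTEGERSI_CLASS with X86_64_SSESF_CLASS gives
   X86_64_INTEGERSI_CLASS (rule #4), while merging X86_64_SSE_CLASS with
   X86_64_X87_CLASS gives X86_64_MEMORY_CLASS (rule #5).  */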
2577 /* Classify the argument of type TYPE and mode MODE.
2578 CLASSES will be filled by the register class used to pass each word
2579 of the operand. The number of words is returned. In case the parameter
2580 should be passed in memory, 0 is returned. As a special case for zero
2581 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2583 	 BIT_OFFSET is used internally for handling records; it specifies the
2584 	 offset in bits, modulo 256, to avoid overflow cases.
2586 See the x86-64 PS ABI for details.
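   For example, under these rules struct { double d; int i; } occupies two
   8-byte words classified as X86_64_SSEDF_CLASS and X86_64_INTEGER_CLASS,
   so it is passed in one SSE register and one integer register.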
2589 static int
2590 classify_argument (enum machine_mode mode, tree type,
2591 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2593 HOST_WIDE_INT bytes =
2594 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2595 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2597 /* Variable sized entities are always passed/returned in memory. */
2598 if (bytes < 0)
2599 return 0;
2601 if (mode != VOIDmode
2602 && targetm.calls.must_pass_in_stack (mode, type))
2603 return 0;
2605 if (type && AGGREGATE_TYPE_P (type))
2607 int i;
2608 tree field;
2609 enum x86_64_reg_class subclasses[MAX_CLASSES];
2611 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2612 if (bytes > 16)
2613 return 0;
2615 for (i = 0; i < words; i++)
2616 classes[i] = X86_64_NO_CLASS;
2618 	 /* Zero-sized arrays or structures are NO_CLASS.  We return 0 to
2619 	    signal the memory class, so handle them as a special case.  */
2620 if (!words)
2622 classes[0] = X86_64_NO_CLASS;
2623 return 1;
2626 	 /* Classify each field of the record and merge the classes.  */
2627 switch (TREE_CODE (type))
2629 case RECORD_TYPE:
2630 	 /* For classes, first merge in the fields of the base classes.  */
2631 if (TYPE_BINFO (type))
2633 tree binfo, base_binfo;
2634 int basenum;
2636 for (binfo = TYPE_BINFO (type), basenum = 0;
2637 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2639 int num;
2640 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2641 tree type = BINFO_TYPE (base_binfo);
2643 num = classify_argument (TYPE_MODE (type),
2644 type, subclasses,
2645 (offset + bit_offset) % 256);
2646 if (!num)
2647 return 0;
2648 for (i = 0; i < num; i++)
2650 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2651 classes[i + pos] =
2652 merge_classes (subclasses[i], classes[i + pos]);
2656 	 /* And now merge the fields of the structure.  */
2657 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2659 if (TREE_CODE (field) == FIELD_DECL)
2661 int num;
2663 /* Bitfields are always classified as integer. Handle them
2664 early, since later code would consider them to be
2665 misaligned integers. */
2666 if (DECL_BIT_FIELD (field))
2668 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2669 i < ((int_bit_position (field) + (bit_offset % 64))
2670 + tree_low_cst (DECL_SIZE (field), 0)
2671 + 63) / 8 / 8; i++)
2672 classes[i] =
2673 merge_classes (X86_64_INTEGER_CLASS,
2674 classes[i]);
2676 else
2678 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2679 TREE_TYPE (field), subclasses,
2680 (int_bit_position (field)
2681 + bit_offset) % 256);
2682 if (!num)
2683 return 0;
2684 for (i = 0; i < num; i++)
2686 int pos =
2687 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2688 classes[i + pos] =
2689 merge_classes (subclasses[i], classes[i + pos]);
2694 break;
2696 case ARRAY_TYPE:
2697 /* Arrays are handled as small records. */
2699 int num;
2700 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2701 TREE_TYPE (type), subclasses, bit_offset);
2702 if (!num)
2703 return 0;
2705 /* The partial classes are now full classes. */
2706 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2707 subclasses[0] = X86_64_SSE_CLASS;
2708 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2709 subclasses[0] = X86_64_INTEGER_CLASS;
2711 for (i = 0; i < words; i++)
2712 classes[i] = subclasses[i % num];
2714 break;
2716 case UNION_TYPE:
2717 case QUAL_UNION_TYPE:
2718 /* Unions are similar to RECORD_TYPE but offset is always 0.
2721 /* Unions are not derived. */
2722 gcc_assert (!TYPE_BINFO (type)
2723 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2724 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2726 if (TREE_CODE (field) == FIELD_DECL)
2728 int num;
2729 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2730 TREE_TYPE (field), subclasses,
2731 bit_offset);
2732 if (!num)
2733 return 0;
2734 for (i = 0; i < num; i++)
2735 classes[i] = merge_classes (subclasses[i], classes[i]);
2738 break;
2740 default:
2741 gcc_unreachable ();
2744 /* Final merger cleanup. */
2745 for (i = 0; i < words; i++)
2747 /* If one class is MEMORY, everything should be passed in
2748 memory. */
2749 if (classes[i] == X86_64_MEMORY_CLASS)
2750 return 0;
2752 /* The X86_64_SSEUP_CLASS should be always preceded by
2753 X86_64_SSE_CLASS. */
2754 if (classes[i] == X86_64_SSEUP_CLASS
2755 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2756 classes[i] = X86_64_SSE_CLASS;
2758 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2759 if (classes[i] == X86_64_X87UP_CLASS
2760 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2761 classes[i] = X86_64_SSE_CLASS;
2763 return words;
2766 	/* Compute the alignment needed.  We align all types to their natural
2767 	   boundaries, with the exception of XFmode, which is aligned to 64 bits.  */
2768 if (mode != VOIDmode && mode != BLKmode)
2770 int mode_alignment = GET_MODE_BITSIZE (mode);
2772 if (mode == XFmode)
2773 mode_alignment = 128;
2774 else if (mode == XCmode)
2775 mode_alignment = 256;
2776 if (COMPLEX_MODE_P (mode))
2777 mode_alignment /= 2;
2778 /* Misaligned fields are always returned in memory. */
2779 if (bit_offset % mode_alignment)
2780 return 0;
2783 /* for V1xx modes, just use the base mode */
2784 if (VECTOR_MODE_P (mode)
2785 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2786 mode = GET_MODE_INNER (mode);
2788 /* Classification of atomic types. */
2789 switch (mode)
2791 case DImode:
2792 case SImode:
2793 case HImode:
2794 case QImode:
2795 case CSImode:
2796 case CHImode:
2797 case CQImode:
2798 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2799 classes[0] = X86_64_INTEGERSI_CLASS;
2800 else
2801 classes[0] = X86_64_INTEGER_CLASS;
2802 return 1;
2803 case CDImode:
2804 case TImode:
2805 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2806 return 2;
2807 case CTImode:
2808 return 0;
2809 case SFmode:
2810 if (!(bit_offset % 64))
2811 classes[0] = X86_64_SSESF_CLASS;
2812 else
2813 classes[0] = X86_64_SSE_CLASS;
2814 return 1;
2815 case DFmode:
2816 classes[0] = X86_64_SSEDF_CLASS;
2817 return 1;
2818 case XFmode:
2819 classes[0] = X86_64_X87_CLASS;
2820 classes[1] = X86_64_X87UP_CLASS;
2821 return 2;
2822 case TFmode:
2823 classes[0] = X86_64_SSE_CLASS;
2824 classes[1] = X86_64_SSEUP_CLASS;
2825 return 2;
2826 case SCmode:
2827 classes[0] = X86_64_SSE_CLASS;
2828 return 1;
2829 case DCmode:
2830 classes[0] = X86_64_SSEDF_CLASS;
2831 classes[1] = X86_64_SSEDF_CLASS;
2832 return 2;
2833 case XCmode:
2834 classes[0] = X86_64_COMPLEX_X87_CLASS;
2835 return 1;
2836 case TCmode:
2837 	 /* This mode is larger than 16 bytes.  */
2838 return 0;
2839 case V4SFmode:
2840 case V4SImode:
2841 case V16QImode:
2842 case V8HImode:
2843 case V2DFmode:
2844 case V2DImode:
2845 classes[0] = X86_64_SSE_CLASS;
2846 classes[1] = X86_64_SSEUP_CLASS;
2847 return 2;
2848 case V2SFmode:
2849 case V2SImode:
2850 case V4HImode:
2851 case V8QImode:
2852 classes[0] = X86_64_SSE_CLASS;
2853 return 1;
2854 case BLKmode:
2855 case VOIDmode:
2856 return 0;
2857 default:
2858 gcc_assert (VECTOR_MODE_P (mode));
2860 if (bytes > 16)
2861 return 0;
2863 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2865 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2866 classes[0] = X86_64_INTEGERSI_CLASS;
2867 else
2868 classes[0] = X86_64_INTEGER_CLASS;
2869 classes[1] = X86_64_INTEGER_CLASS;
2870 return 1 + (bytes > 8);
2874 	/* Examine the argument and set the number of registers required in each
2875 	   class.  Return 0 iff the parameter should be passed in memory.  */
2876 static int
2877 examine_argument (enum machine_mode mode, tree type, int in_return,
2878 int *int_nregs, int *sse_nregs)
2880 enum x86_64_reg_class class[MAX_CLASSES];
2881 int n = classify_argument (mode, type, class, 0);
2883 *int_nregs = 0;
2884 *sse_nregs = 0;
2885 if (!n)
2886 return 0;
2887 for (n--; n >= 0; n--)
2888 switch (class[n])
2890 case X86_64_INTEGER_CLASS:
2891 case X86_64_INTEGERSI_CLASS:
2892 (*int_nregs)++;
2893 break;
2894 case X86_64_SSE_CLASS:
2895 case X86_64_SSESF_CLASS:
2896 case X86_64_SSEDF_CLASS:
2897 (*sse_nregs)++;
2898 break;
2899 case X86_64_NO_CLASS:
2900 case X86_64_SSEUP_CLASS:
2901 break;
2902 case X86_64_X87_CLASS:
2903 case X86_64_X87UP_CLASS:
2904 if (!in_return)
2905 return 0;
2906 break;
2907 case X86_64_COMPLEX_X87_CLASS:
2908 return in_return ? 2 : 0;
2909 case X86_64_MEMORY_CLASS:
2910 gcc_unreachable ();
2912 return 1;
2915 /* Construct container for the argument used by GCC interface. See
2916 FUNCTION_ARG for the detailed description. */
2918 static rtx
2919 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2920 tree type, int in_return, int nintregs, int nsseregs,
2921 const int *intreg, int sse_regno)
2923 enum machine_mode tmpmode;
2924 int bytes =
2925 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2926 enum x86_64_reg_class class[MAX_CLASSES];
2927 int n;
2928 int i;
2929 int nexps = 0;
2930 int needed_sseregs, needed_intregs;
2931 rtx exp[MAX_CLASSES];
2932 rtx ret;
2934 n = classify_argument (mode, type, class, 0);
2935 if (TARGET_DEBUG_ARG)
2937 if (!n)
2938 fprintf (stderr, "Memory class\n");
2939 else
2941 fprintf (stderr, "Classes:");
2942 for (i = 0; i < n; i++)
2944 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2946 fprintf (stderr, "\n");
2949 if (!n)
2950 return NULL;
2951 if (!examine_argument (mode, type, in_return, &needed_intregs,
2952 &needed_sseregs))
2953 return NULL;
2954 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2955 return NULL;
2957 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2958 some less clueful developer tries to use floating-point anyway. */
2959 if (needed_sseregs && !TARGET_SSE)
2961 static bool issued_error;
2962 if (!issued_error)
2964 issued_error = true;
2965 if (in_return)
2966 error ("SSE register return with SSE disabled");
2967 else
2968 error ("SSE register argument with SSE disabled");
2970 return NULL;
2973 	 /* First construct simple cases.  Avoid SCmode, since we want to use a
2974 	    single register to pass this type.  */
2975 if (n == 1 && mode != SCmode)
2976 switch (class[0])
2978 case X86_64_INTEGER_CLASS:
2979 case X86_64_INTEGERSI_CLASS:
2980 return gen_rtx_REG (mode, intreg[0]);
2981 case X86_64_SSE_CLASS:
2982 case X86_64_SSESF_CLASS:
2983 case X86_64_SSEDF_CLASS:
2984 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2985 case X86_64_X87_CLASS:
2986 case X86_64_COMPLEX_X87_CLASS:
2987 return gen_rtx_REG (mode, FIRST_STACK_REG);
2988 case X86_64_NO_CLASS:
2989 /* Zero sized array, struct or class. */
2990 return NULL;
2991 default:
2992 gcc_unreachable ();
2994 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2995 && mode != BLKmode)
2996 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2997 if (n == 2
2998 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2999 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3000 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3001 && class[1] == X86_64_INTEGER_CLASS
3002 && (mode == CDImode || mode == TImode || mode == TFmode)
3003 && intreg[0] + 1 == intreg[1])
3004 return gen_rtx_REG (mode, intreg[0]);
3006 /* Otherwise figure out the entries of the PARALLEL. */
3007 for (i = 0; i < n; i++)
3009 switch (class[i])
3011 case X86_64_NO_CLASS:
3012 break;
3013 case X86_64_INTEGER_CLASS:
3014 case X86_64_INTEGERSI_CLASS:
3015 /* Merge TImodes on aligned occasions here too. */
3016 if (i * 8 + 8 > bytes)
3017 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3018 else if (class[i] == X86_64_INTEGERSI_CLASS)
3019 tmpmode = SImode;
3020 else
3021 tmpmode = DImode;
3022 /* We've requested 24 bytes we don't have mode for. Use DImode. */
3023 if (tmpmode == BLKmode)
3024 tmpmode = DImode;
3025 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3026 gen_rtx_REG (tmpmode, *intreg),
3027 GEN_INT (i*8));
3028 intreg++;
3029 break;
3030 case X86_64_SSESF_CLASS:
3031 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3032 gen_rtx_REG (SFmode,
3033 SSE_REGNO (sse_regno)),
3034 GEN_INT (i*8));
3035 sse_regno++;
3036 break;
3037 case X86_64_SSEDF_CLASS:
3038 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3039 gen_rtx_REG (DFmode,
3040 SSE_REGNO (sse_regno)),
3041 GEN_INT (i*8));
3042 sse_regno++;
3043 break;
3044 case X86_64_SSE_CLASS:
3045 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3046 tmpmode = TImode;
3047 else
3048 tmpmode = DImode;
3049 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3050 gen_rtx_REG (tmpmode,
3051 SSE_REGNO (sse_regno)),
3052 GEN_INT (i*8));
3053 if (tmpmode == TImode)
3054 i++;
3055 sse_regno++;
3056 break;
3057 default:
3058 gcc_unreachable ();
3062 /* Empty aligned struct, union or class. */
3063 if (nexps == 0)
3064 return NULL;
3066 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3067 for (i = 0; i < nexps; i++)
3068 XVECEXP (ret, 0, i) = exp [i];
3069 return ret;
3072 /* Update the data in CUM to advance over an argument
3073 of mode MODE and data type TYPE.
3074 (TYPE is null for libcalls where that information may not be available.) */
3076 void
3077 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3078 tree type, int named)
3080 int bytes =
3081 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3082 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3084 if (type)
3085 mode = type_natural_mode (type);
3087 if (TARGET_DEBUG_ARG)
3088 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3089 "mode=%s, named=%d)\n\n",
3090 words, cum->words, cum->nregs, cum->sse_nregs,
3091 GET_MODE_NAME (mode), named);
3093 if (TARGET_64BIT)
3095 int int_nregs, sse_nregs;
3096 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3097 cum->words += words;
3098 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3100 cum->nregs -= int_nregs;
3101 cum->sse_nregs -= sse_nregs;
3102 cum->regno += int_nregs;
3103 cum->sse_regno += sse_nregs;
3105 else
3106 cum->words += words;
3108 else
3110 switch (mode)
3112 default:
3113 break;
3115 case BLKmode:
3116 if (bytes < 0)
3117 break;
3118 /* FALLTHRU */
3120 case DImode:
3121 case SImode:
3122 case HImode:
3123 case QImode:
3124 cum->words += words;
3125 cum->nregs -= words;
3126 cum->regno += words;
3128 if (cum->nregs <= 0)
3130 cum->nregs = 0;
3131 cum->regno = 0;
3133 break;
3135 case DFmode:
3136 if (cum->float_in_sse < 2)
3137 break;
3138 case SFmode:
3139 if (cum->float_in_sse < 1)
3140 break;
3141 /* FALLTHRU */
3143 case TImode:
3144 case V16QImode:
3145 case V8HImode:
3146 case V4SImode:
3147 case V2DImode:
3148 case V4SFmode:
3149 case V2DFmode:
3150 if (!type || !AGGREGATE_TYPE_P (type))
3152 cum->sse_words += words;
3153 cum->sse_nregs -= 1;
3154 cum->sse_regno += 1;
3155 if (cum->sse_nregs <= 0)
3157 cum->sse_nregs = 0;
3158 cum->sse_regno = 0;
3161 break;
3163 case V8QImode:
3164 case V4HImode:
3165 case V2SImode:
3166 case V2SFmode:
3167 if (!type || !AGGREGATE_TYPE_P (type))
3169 cum->mmx_words += words;
3170 cum->mmx_nregs -= 1;
3171 cum->mmx_regno += 1;
3172 if (cum->mmx_nregs <= 0)
3174 cum->mmx_nregs = 0;
3175 cum->mmx_regno = 0;
3178 break;
3183 /* Define where to put the arguments to a function.
3184 Value is zero to push the argument on the stack,
3185 or a hard register in which to store the argument.
3187 MODE is the argument's machine mode.
3188 TYPE is the data type of the argument (as a tree).
3189 This is null for libcalls where that information may
3190 not be available.
3191 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3192 the preceding args and about the function being called.
3193 NAMED is nonzero if this argument is a named parameter
3194 (otherwise it is an extra parameter matching an ellipsis). */
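/* For example, on ia32 with -mregparm=3 the first three word-sized integer
   arguments go in EAX, EDX and ECX, while with the fastcall convention the
   first two go in ECX and EDX (handled in the 32-bit cases below).  */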
3197 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3198 tree type, int named)
3200 enum machine_mode mode = orig_mode;
3201 rtx ret = NULL_RTX;
3202 int bytes =
3203 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3204 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3205 static bool warnedsse, warnedmmx;
3207 /* To simplify the code below, represent vector types with a vector mode
3208 even if MMX/SSE are not active. */
3209 if (type && TREE_CODE (type) == VECTOR_TYPE)
3210 mode = type_natural_mode (type);
3212 /* Handle a hidden AL argument containing number of registers for varargs
3213 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
3214 any AL settings. */
3215 if (mode == VOIDmode)
3217 if (TARGET_64BIT)
3218 return GEN_INT (cum->maybe_vaarg
3219 ? (cum->sse_nregs < 0
3220 ? SSE_REGPARM_MAX
3221 : cum->sse_regno)
3222 : -1);
3223 else
3224 return constm1_rtx;
3226 if (TARGET_64BIT)
3227 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3228 cum->sse_nregs,
3229 &x86_64_int_parameter_registers [cum->regno],
3230 cum->sse_regno);
3231 else
3232 switch (mode)
3234 /* For now, pass fp/complex values on the stack. */
3235 default:
3236 break;
3238 case BLKmode:
3239 if (bytes < 0)
3240 break;
3241 /* FALLTHRU */
3242 case DImode:
3243 case SImode:
3244 case HImode:
3245 case QImode:
3246 if (words <= cum->nregs)
3248 int regno = cum->regno;
3250 /* Fastcall allocates the first two DWORD (SImode) or
3251 smaller arguments to ECX and EDX. */
3252 if (cum->fastcall)
3254 if (mode == BLKmode || mode == DImode)
3255 break;
3257 	      /* ECX, not EAX, is the first allocated register.  */
3258 if (regno == 0)
3259 regno = 2;
3261 ret = gen_rtx_REG (mode, regno);
3263 break;
3264 case DFmode:
3265 if (cum->float_in_sse < 2)
3266 break;
3267 case SFmode:
3268 if (cum->float_in_sse < 1)
3269 break;
3270 /* FALLTHRU */
3271 case TImode:
3272 case V16QImode:
3273 case V8HImode:
3274 case V4SImode:
3275 case V2DImode:
3276 case V4SFmode:
3277 case V2DFmode:
3278 if (!type || !AGGREGATE_TYPE_P (type))
3280 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3282 warnedsse = true;
3283 warning (0, "SSE vector argument without SSE enabled "
3284 "changes the ABI");
3286 if (cum->sse_nregs)
3287 ret = gen_reg_or_parallel (mode, orig_mode,
3288 cum->sse_regno + FIRST_SSE_REG);
3290 break;
3291 case V8QImode:
3292 case V4HImode:
3293 case V2SImode:
3294 case V2SFmode:
3295 if (!type || !AGGREGATE_TYPE_P (type))
3297 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3299 warnedmmx = true;
3300 warning (0, "MMX vector argument without MMX enabled "
3301 "changes the ABI");
3303 if (cum->mmx_nregs)
3304 ret = gen_reg_or_parallel (mode, orig_mode,
3305 cum->mmx_regno + FIRST_MMX_REG);
3307 break;
3310 if (TARGET_DEBUG_ARG)
3312 fprintf (stderr,
3313 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3314 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3316 if (ret)
3317 print_simple_rtl (stderr, ret);
3318 else
3319 fprintf (stderr, ", stack");
3321 fprintf (stderr, " )\n");
3324 return ret;
3327 /* A C expression that indicates when an argument must be passed by
3328 reference. If nonzero for an argument, a copy of that argument is
3329 made in memory and a pointer to the argument is passed instead of
3330 the argument itself. The pointer is passed in whatever way is
3331 appropriate for passing a pointer to that type. */
3333 static bool
3334 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3335 enum machine_mode mode ATTRIBUTE_UNUSED,
3336 tree type, bool named ATTRIBUTE_UNUSED)
3338 if (!TARGET_64BIT)
3339 return 0;
3341 if (type && int_size_in_bytes (type) == -1)
3343 if (TARGET_DEBUG_ARG)
3344 fprintf (stderr, "function_arg_pass_by_reference\n");
3345 return 1;
3348 return 0;
3351 	/* Return true when TYPE should be 128-bit aligned for the 32-bit argument
3352 	   passing ABI.  Only called if TARGET_SSE.  */
3353 static bool
3354 contains_128bit_aligned_vector_p (tree type)
3356 enum machine_mode mode = TYPE_MODE (type);
3357 if (SSE_REG_MODE_P (mode)
3358 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3359 return true;
3360 if (TYPE_ALIGN (type) < 128)
3361 return false;
3363 if (AGGREGATE_TYPE_P (type))
3365 /* Walk the aggregates recursively. */
3366 switch (TREE_CODE (type))
3368 case RECORD_TYPE:
3369 case UNION_TYPE:
3370 case QUAL_UNION_TYPE:
3372 tree field;
3374 if (TYPE_BINFO (type))
3376 tree binfo, base_binfo;
3377 int i;
3379 for (binfo = TYPE_BINFO (type), i = 0;
3380 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3381 if (contains_128bit_aligned_vector_p
3382 (BINFO_TYPE (base_binfo)))
3383 return true;
3385 	 /* And now merge the fields of the structure.  */
3386 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3388 if (TREE_CODE (field) == FIELD_DECL
3389 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3390 return true;
3392 break;
3395 case ARRAY_TYPE:
3396 	 /* Just in case some language passes arrays by value.  */
3397 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3398 return true;
3399 break;
3401 default:
3402 gcc_unreachable ();
3405 return false;
3408 /* Gives the alignment boundary, in bits, of an argument with the
3409 specified mode and type. */
3412 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3414 int align;
3415 if (type)
3416 align = TYPE_ALIGN (type);
3417 else
3418 align = GET_MODE_ALIGNMENT (mode);
3419 if (align < PARM_BOUNDARY)
3420 align = PARM_BOUNDARY;
3421 if (!TARGET_64BIT)
3423 /* The i386 ABI defines all arguments to be 4-byte aligned. We have to
3424 make an exception for SSE modes since these require 128-bit
3425 alignment.
3427 The handling here differs from field_alignment. ICC aligns MMX
3428 arguments to 4 byte boundaries, while structure fields are aligned
3429 to 8 byte boundaries. */
3430 if (!TARGET_SSE)
3431 align = PARM_BOUNDARY;
3432 else if (!type)
3434 if (!SSE_REG_MODE_P (mode))
3435 align = PARM_BOUNDARY;
3437 else
3439 if (!contains_128bit_aligned_vector_p (type))
3440 align = PARM_BOUNDARY;
3443 if (align > 128)
3444 align = 128;
3445 return align;
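/* A minimal illustration of the boundaries computed above on ia32,
   assuming the usual PARM_BOUNDARY of 32 bits (a sketch, not part of
   the original sources):

     int or double argument                   -> 32-bit boundary
     __m128 (V4SFmode) argument, TARGET_SSE   -> 128-bit boundary
     __m128 argument without TARGET_SSE       -> 32-bit boundary

   Nothing is ever aligned beyond 128 bits because of the final clamp.  */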
3448 /* Return true if N is a possible register number of function value. */
3449 bool
3450 ix86_function_value_regno_p (int regno)
3452 if (regno == 0
3453 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3454 || (regno == FIRST_SSE_REG && TARGET_SSE))
3455 return true;
3457 if (!TARGET_64BIT
3458 && (regno == FIRST_MMX_REG && TARGET_MMX))
3459 return true;
3461 return false;
3464 /* Define how to find the value returned by a function.
3465 VALTYPE is the data type of the value (as a tree).
3466 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3467 otherwise, FUNC is 0. */
3469 ix86_function_value (tree valtype, tree fntype_or_decl,
3470 bool outgoing ATTRIBUTE_UNUSED)
3472 enum machine_mode natmode = type_natural_mode (valtype);
3474 if (TARGET_64BIT)
3476 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3477 1, REGPARM_MAX, SSE_REGPARM_MAX,
3478 x86_64_int_return_registers, 0);
3479 /* For zero-sized structures, construct_container returns NULL, but we
3480 need to keep the rest of the compiler happy by returning a meaningful value. */
3481 if (!ret)
3482 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3483 return ret;
3485 else
3487 tree fn = NULL_TREE, fntype;
3488 if (fntype_or_decl
3489 && DECL_P (fntype_or_decl))
3490 fn = fntype_or_decl;
3491 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
3492 return gen_rtx_REG (TYPE_MODE (valtype),
3493 ix86_value_regno (natmode, fn, fntype));
3497 /* Return nonzero iff TYPE is returned in memory. */
3499 ix86_return_in_memory (tree type)
3501 int needed_intregs, needed_sseregs, size;
3502 enum machine_mode mode = type_natural_mode (type);
3504 if (TARGET_64BIT)
3505 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3507 if (mode == BLKmode)
3508 return 1;
3510 size = int_size_in_bytes (type);
3512 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3513 return 0;
3515 if (VECTOR_MODE_P (mode) || mode == TImode)
3517 /* User-created vectors small enough to fit in EAX. */
3518 if (size < 8)
3519 return 0;
3521 /* MMX/3dNow values are returned in MM0,
3522 except when it doesn't exist. */
3523 if (size == 8)
3524 return (TARGET_MMX ? 0 : 1);
3526 /* SSE values are returned in XMM0, except when it doesn't exist. */
3527 if (size == 16)
3528 return (TARGET_SSE ? 0 : 1);
3531 if (mode == XFmode)
3532 return 0;
3534 if (size > 12)
3535 return 1;
3536 return 0;
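/* Illustrative summary of the ia32 decisions above (a sketch, not part
   of the original sources): an 8-byte __m64 value is returned in %mm0
   only when TARGET_MMX, a 16-byte __m128 value in %xmm0 only when
   TARGET_SSE, a long double (XFmode) always comes back in registers,
   and roughly anything else larger than 12 bytes is returned in memory.  */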
3539 /* When returning SSE vector types, we have a choice of either
3540 (1) being ABI incompatible with a -march switch, or
3541 (2) generating an error.
3542 Given no good solution, I think the safest thing is one warning.
3543 The user won't be able to use -Werror, but....
3545 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3546 called in response to actually generating a caller or callee that
3547 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3548 via aggregate_value_p for general type probing from tree-ssa. */
3550 static rtx
3551 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3553 static bool warnedsse, warnedmmx;
3555 if (type)
3557 /* Look at the return type of the function, not the function type. */
3558 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3560 if (!TARGET_SSE && !warnedsse)
3562 if (mode == TImode
3563 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3565 warnedsse = true;
3566 warning (0, "SSE vector return without SSE enabled "
3567 "changes the ABI");
3571 if (!TARGET_MMX && !warnedmmx)
3573 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3575 warnedmmx = true;
3576 warning (0, "MMX vector return without MMX enabled "
3577 "changes the ABI");
3582 return NULL;
3585 /* Define how to find the value returned by a library function
3586 assuming the value has mode MODE. */
3588 ix86_libcall_value (enum machine_mode mode)
3590 if (TARGET_64BIT)
3592 switch (mode)
3594 case SFmode:
3595 case SCmode:
3596 case DFmode:
3597 case DCmode:
3598 case TFmode:
3599 return gen_rtx_REG (mode, FIRST_SSE_REG);
3600 case XFmode:
3601 case XCmode:
3602 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3603 case TCmode:
3604 return NULL;
3605 default:
3606 return gen_rtx_REG (mode, 0);
3609 else
3610 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
3613 /* Given a mode, return the register to use for a return value. */
3615 static int
3616 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
3618 gcc_assert (!TARGET_64BIT);
3620 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3621 we prevent this case when mmx is not available. */
3622 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3623 return FIRST_MMX_REG;
3625 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3626 we prevent this case when sse is not available. */
3627 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3628 return FIRST_SSE_REG;
3630 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3631 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
3632 return 0;
3634 /* Floating point return values in %st(0), except for local functions when
3635 SSE math is enabled or for functions with sseregparm attribute. */
3636 if ((func || fntype)
3637 && (mode == SFmode || mode == DFmode))
3639 int sse_level = ix86_function_sseregparm (fntype, func);
3640 if ((sse_level >= 1 && mode == SFmode)
3641 || (sse_level == 2 && mode == DFmode))
3642 return FIRST_SSE_REG;
3645 return FIRST_FLOAT_REG;
3648 /* Create the va_list data type. */
3650 static tree
3651 ix86_build_builtin_va_list (void)
3653 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3655 /* For i386 we use a plain pointer to the argument area. */
3656 if (!TARGET_64BIT)
3657 return build_pointer_type (char_type_node);
3659 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3660 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3662 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3663 unsigned_type_node);
3664 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3665 unsigned_type_node);
3666 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3667 ptr_type_node);
3668 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3669 ptr_type_node);
3671 va_list_gpr_counter_field = f_gpr;
3672 va_list_fpr_counter_field = f_fpr;
3674 DECL_FIELD_CONTEXT (f_gpr) = record;
3675 DECL_FIELD_CONTEXT (f_fpr) = record;
3676 DECL_FIELD_CONTEXT (f_ovf) = record;
3677 DECL_FIELD_CONTEXT (f_sav) = record;
3679 TREE_CHAIN (record) = type_decl;
3680 TYPE_NAME (record) = type_decl;
3681 TYPE_FIELDS (record) = f_gpr;
3682 TREE_CHAIN (f_gpr) = f_fpr;
3683 TREE_CHAIN (f_fpr) = f_ovf;
3684 TREE_CHAIN (f_ovf) = f_sav;
3686 layout_type (record);
3688 /* The correct type is an array type of one element. */
3689 return build_array_type (record, build_index_type (size_zero_node));
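/* For reference, a sketch of what the record built above corresponds to
   at the C level on x86-64 (illustrative only; the real type is built
   from the tree nodes above):

     typedef struct __va_list_tag
     {
       unsigned int gp_offset;         bytes already consumed from the
                                       integer part of reg_save_area
       unsigned int fp_offset;         likewise for the SSE part
       void *overflow_arg_area;        next argument passed on the stack
       void *reg_save_area;            start of the register save area
     } __va_list_tag;
     typedef __va_list_tag va_list[1];

   On ia32, va_list is just the char * returned early above.  */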
3692 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3694 static void
3695 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3696 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3697 int no_rtl)
3699 CUMULATIVE_ARGS next_cum;
3700 rtx save_area = NULL_RTX, mem;
3701 rtx label;
3702 rtx label_ref;
3703 rtx tmp_reg;
3704 rtx nsse_reg;
3705 int set;
3706 tree fntype;
3707 int stdarg_p;
3708 int i;
3710 if (!TARGET_64BIT)
3711 return;
3713 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3714 return;
3716 /* Indicate that we need to allocate space on the stack for the varargs save area. */
3717 ix86_save_varrargs_registers = 1;
3719 cfun->stack_alignment_needed = 128;
3721 fntype = TREE_TYPE (current_function_decl);
3722 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3723 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3724 != void_type_node));
3726 /* For varargs, we do not want to skip the dummy va_dcl argument.
3727 For stdargs, we do want to skip the last named argument. */
3728 next_cum = *cum;
3729 if (stdarg_p)
3730 function_arg_advance (&next_cum, mode, type, 1);
3732 if (!no_rtl)
3733 save_area = frame_pointer_rtx;
3735 set = get_varargs_alias_set ();
3737 for (i = next_cum.regno;
3738 i < ix86_regparm
3739 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3740 i++)
3742 mem = gen_rtx_MEM (Pmode,
3743 plus_constant (save_area, i * UNITS_PER_WORD));
3744 MEM_NOTRAP_P (mem) = 1;
3745 set_mem_alias_set (mem, set);
3746 emit_move_insn (mem, gen_rtx_REG (Pmode,
3747 x86_64_int_parameter_registers[i]));
3750 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3752 /* Now emit code to save SSE registers. The AX parameter contains the number
3753 of SSE parameter registers used to call this function. We use the
3754 sse_prologue_save insn template, which produces a computed jump across
3755 the SSE saves. We need some preparation work to get this working. */
3757 label = gen_label_rtx ();
3758 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3760 /* Compute the address to jump to:
3761 label - 4*eax + nnamed_sse_arguments*4 */
3762 tmp_reg = gen_reg_rtx (Pmode);
3763 nsse_reg = gen_reg_rtx (Pmode);
3764 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3765 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3766 gen_rtx_MULT (Pmode, nsse_reg,
3767 GEN_INT (4))));
3768 if (next_cum.sse_regno)
3769 emit_move_insn
3770 (nsse_reg,
3771 gen_rtx_CONST (DImode,
3772 gen_rtx_PLUS (DImode,
3773 label_ref,
3774 GEN_INT (next_cum.sse_regno * 4))));
3775 else
3776 emit_move_insn (nsse_reg, label_ref);
3777 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3779 /* Compute the address of the memory block we save into. We always use a pointer
3780 pointing 127 bytes after the first byte to store - this is needed to keep
3781 the instruction size limited to 4 bytes. */
3782 tmp_reg = gen_reg_rtx (Pmode);
3783 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3784 plus_constant (save_area,
3785 8 * REGPARM_MAX + 127)));
3786 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3787 MEM_NOTRAP_P (mem) = 1;
3788 set_mem_alias_set (mem, set);
3789 set_mem_align (mem, BITS_PER_WORD);
3791 /* And finally do the dirty job! */
3792 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3793 GEN_INT (next_cum.sse_regno), label));
3798 /* Implement va_start. */
3800 void
3801 ix86_va_start (tree valist, rtx nextarg)
3803 HOST_WIDE_INT words, n_gpr, n_fpr;
3804 tree f_gpr, f_fpr, f_ovf, f_sav;
3805 tree gpr, fpr, ovf, sav, t;
3807 /* Only 64bit target needs something special. */
3808 if (!TARGET_64BIT)
3810 std_expand_builtin_va_start (valist, nextarg);
3811 return;
3814 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3815 f_fpr = TREE_CHAIN (f_gpr);
3816 f_ovf = TREE_CHAIN (f_fpr);
3817 f_sav = TREE_CHAIN (f_ovf);
3819 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3820 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3821 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3822 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3823 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3825 /* Count number of gp and fp argument registers used. */
3826 words = current_function_args_info.words;
3827 n_gpr = current_function_args_info.regno;
3828 n_fpr = current_function_args_info.sse_regno;
3830 if (TARGET_DEBUG_ARG)
3831 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3832 (int) words, (int) n_gpr, (int) n_fpr);
3834 if (cfun->va_list_gpr_size)
3836 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3837 build_int_cst (NULL_TREE, n_gpr * 8));
3838 TREE_SIDE_EFFECTS (t) = 1;
3839 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3842 if (cfun->va_list_fpr_size)
3844 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3845 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3846 TREE_SIDE_EFFECTS (t) = 1;
3847 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3850 /* Find the overflow area. */
3851 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3852 if (words != 0)
3853 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), t,
3854 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3855 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3856 TREE_SIDE_EFFECTS (t) = 1;
3857 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3859 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3861 /* Find the register save area.
3862 The function prologue saves it right above the stack frame. */
3863 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3864 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3865 TREE_SIDE_EFFECTS (t) = 1;
3866 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
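/* A sketch of the register save area set up by the prologue and indexed
   above, assuming the 64-bit values REGPARM_MAX == 6 and
   SSE_REGPARM_MAX == 8 (illustrative):

     reg_save_area +   0 .. +  47   six integer registers, 8 bytes each;
                                    gp_offset counts into this part
     reg_save_area +  48 .. + 175   eight SSE registers, 16 bytes each;
                                    hence fp_offset starts at 8*REGPARM_MAX  */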
3870 /* Implement va_arg. */
3872 tree
3873 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3875 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3876 tree f_gpr, f_fpr, f_ovf, f_sav;
3877 tree gpr, fpr, ovf, sav, t;
3878 int size, rsize;
3879 tree lab_false, lab_over = NULL_TREE;
3880 tree addr, t2;
3881 rtx container;
3882 int indirect_p = 0;
3883 tree ptrtype;
3884 enum machine_mode nat_mode;
3886 /* Only 64bit target needs something special. */
3887 if (!TARGET_64BIT)
3888 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3890 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3891 f_fpr = TREE_CHAIN (f_gpr);
3892 f_ovf = TREE_CHAIN (f_fpr);
3893 f_sav = TREE_CHAIN (f_ovf);
3895 valist = build_va_arg_indirect_ref (valist);
3896 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3897 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3898 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3899 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3901 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3902 if (indirect_p)
3903 type = build_pointer_type (type);
3904 size = int_size_in_bytes (type);
3905 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3907 nat_mode = type_natural_mode (type);
3908 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3909 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3911 /* Pull the value out of the saved registers. */
3913 addr = create_tmp_var (ptr_type_node, "addr");
3914 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3916 if (container)
3918 int needed_intregs, needed_sseregs;
3919 bool need_temp;
3920 tree int_addr, sse_addr;
3922 lab_false = create_artificial_label ();
3923 lab_over = create_artificial_label ();
3925 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3927 need_temp = (!REG_P (container)
3928 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3929 || TYPE_ALIGN (type) > 128));
3931 /* In case we are passing a structure, verify that it is a consecutive block
3932 in the register save area. If not, we need to do moves. */
3933 if (!need_temp && !REG_P (container))
3935 /* Verify that all registers are strictly consecutive */
3936 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3938 int i;
3940 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3942 rtx slot = XVECEXP (container, 0, i);
3943 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3944 || INTVAL (XEXP (slot, 1)) != i * 16)
3945 need_temp = 1;
3948 else
3950 int i;
3952 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3954 rtx slot = XVECEXP (container, 0, i);
3955 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3956 || INTVAL (XEXP (slot, 1)) != i * 8)
3957 need_temp = 1;
3961 if (!need_temp)
3963 int_addr = addr;
3964 sse_addr = addr;
3966 else
3968 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3969 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3970 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3971 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3974 /* First ensure that we fit completely in registers. */
3975 if (needed_intregs)
3977 t = build_int_cst (TREE_TYPE (gpr),
3978 (REGPARM_MAX - needed_intregs + 1) * 8);
3979 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3980 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3981 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3982 gimplify_and_add (t, pre_p);
3984 if (needed_sseregs)
3986 t = build_int_cst (TREE_TYPE (fpr),
3987 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3988 + REGPARM_MAX * 8);
3989 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3990 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3991 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3992 gimplify_and_add (t, pre_p);
3995 /* Compute index to start of area used for integer regs. */
3996 if (needed_intregs)
3998 /* int_addr = gpr + sav; */
3999 t = fold_convert (ptr_type_node, gpr);
4000 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4001 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
4002 gimplify_and_add (t, pre_p);
4004 if (needed_sseregs)
4006 /* sse_addr = fpr + sav; */
4007 t = fold_convert (ptr_type_node, fpr);
4008 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4009 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
4010 gimplify_and_add (t, pre_p);
4012 if (need_temp)
4014 int i;
4015 tree temp = create_tmp_var (type, "va_arg_tmp");
4017 /* addr = &temp; */
4018 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4019 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
4020 gimplify_and_add (t, pre_p);
4022 for (i = 0; i < XVECLEN (container, 0); i++)
4024 rtx slot = XVECEXP (container, 0, i);
4025 rtx reg = XEXP (slot, 0);
4026 enum machine_mode mode = GET_MODE (reg);
4027 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4028 tree addr_type = build_pointer_type (piece_type);
4029 tree src_addr, src;
4030 int src_offset;
4031 tree dest_addr, dest;
4033 if (SSE_REGNO_P (REGNO (reg)))
4035 src_addr = sse_addr;
4036 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4038 else
4040 src_addr = int_addr;
4041 src_offset = REGNO (reg) * 8;
4043 src_addr = fold_convert (addr_type, src_addr);
4044 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4045 size_int (src_offset)));
4046 src = build_va_arg_indirect_ref (src_addr);
4048 dest_addr = fold_convert (addr_type, addr);
4049 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4050 size_int (INTVAL (XEXP (slot, 1)))));
4051 dest = build_va_arg_indirect_ref (dest_addr);
4053 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
4054 gimplify_and_add (t, pre_p);
4058 if (needed_intregs)
4060 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4061 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4062 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
4063 gimplify_and_add (t, pre_p);
4065 if (needed_sseregs)
4067 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4068 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4069 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
4070 gimplify_and_add (t, pre_p);
4073 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4074 gimplify_and_add (t, pre_p);
4076 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4077 append_to_statement_list (t, pre_p);
4080 /* ... otherwise out of the overflow area. */
4082 /* Care for on-stack alignment if needed. */
4083 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
4084 t = ovf;
4085 else
4087 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4088 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4089 build_int_cst (TREE_TYPE (ovf), align - 1));
4090 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4091 build_int_cst (TREE_TYPE (t), -align));
4093 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4095 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
4096 gimplify_and_add (t2, pre_p);
4098 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4099 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4100 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
4101 gimplify_and_add (t, pre_p);
4103 if (container)
4105 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4106 append_to_statement_list (t, pre_p);
4109 ptrtype = build_pointer_type (type);
4110 addr = fold_convert (ptrtype, addr);
4112 if (indirect_p)
4113 addr = build_va_arg_indirect_ref (addr);
4114 return build_va_arg_indirect_ref (addr);
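/* A rough sketch of what the code gimplified above amounts to for a
   single integer argument (hypothetical pseudo-C, not emitted verbatim):

     if (ap->gp_offset >= 6 * 8)          all six GPR slots used up
       goto lab_false;
     addr = ap->reg_save_area + ap->gp_offset;
     ap->gp_offset += 8;
     goto lab_over;
   lab_false:
     addr = align (ap->overflow_arg_area);
     ap->overflow_arg_area = addr + 8;
   lab_over:
     result = *(int *) addr;
*/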
4117 /* Return nonzero if OPNUM's MEM should be matched
4118 in movabs* patterns. */
4121 ix86_check_movabs (rtx insn, int opnum)
4123 rtx set, mem;
4125 set = PATTERN (insn);
4126 if (GET_CODE (set) == PARALLEL)
4127 set = XVECEXP (set, 0, 0);
4128 gcc_assert (GET_CODE (set) == SET);
4129 mem = XEXP (set, opnum);
4130 while (GET_CODE (mem) == SUBREG)
4131 mem = SUBREG_REG (mem);
4132 gcc_assert (GET_CODE (mem) == MEM);
4133 return (volatile_ok || !MEM_VOLATILE_P (mem));
4136 /* Initialize the table of extra 80387 mathematical constants. */
4138 static void
4139 init_ext_80387_constants (void)
4141 static const char * cst[5] =
4143 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4144 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4145 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4146 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4147 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4149 int i;
4151 for (i = 0; i < 5; i++)
4153 real_from_string (&ext_80387_constants_table[i], cst[i]);
4154 /* Ensure each constant is rounded to XFmode precision. */
4155 real_convert (&ext_80387_constants_table[i],
4156 XFmode, &ext_80387_constants_table[i]);
4159 ext_80387_constants_init = 1;
4162 /* Return true if the constant is something that can be loaded with
4163 a special instruction. */
4166 standard_80387_constant_p (rtx x)
4168 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4169 return -1;
4171 if (x == CONST0_RTX (GET_MODE (x)))
4172 return 1;
4173 if (x == CONST1_RTX (GET_MODE (x)))
4174 return 2;
4176 /* For XFmode constants, try to find a special 80387 instruction when
4177 optimizing for size or on those CPUs that benefit from them. */
4178 if (GET_MODE (x) == XFmode
4179 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4181 REAL_VALUE_TYPE r;
4182 int i;
4184 if (! ext_80387_constants_init)
4185 init_ext_80387_constants ();
4187 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4188 for (i = 0; i < 5; i++)
4189 if (real_identical (&r, &ext_80387_constants_table[i]))
4190 return i + 3;
4193 return 0;
4196 /* Return the opcode of the special instruction to be used to load
4197 the constant X. */
4199 const char *
4200 standard_80387_constant_opcode (rtx x)
4202 switch (standard_80387_constant_p (x))
4204 case 1:
4205 return "fldz";
4206 case 2:
4207 return "fld1";
4208 case 3:
4209 return "fldlg2";
4210 case 4:
4211 return "fldln2";
4212 case 5:
4213 return "fldl2e";
4214 case 6:
4215 return "fldl2t";
4216 case 7:
4217 return "fldpi";
4218 default:
4219 gcc_unreachable ();
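/* Example (illustrative): standard_80387_constant_p returns 2 for
   CONST1_RTX (XFmode), so the switch above emits "fld1"; the value of pi
   from ext_80387_constants_table yields 7 and thus "fldpi".  */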
4223 /* Return the CONST_DOUBLE representing the 80387 constant that is
4224 loaded by the specified special instruction. The argument IDX
4225 matches the return value from standard_80387_constant_p. */
4228 standard_80387_constant_rtx (int idx)
4230 int i;
4232 if (! ext_80387_constants_init)
4233 init_ext_80387_constants ();
4235 switch (idx)
4237 case 3:
4238 case 4:
4239 case 5:
4240 case 6:
4241 case 7:
4242 i = idx - 3;
4243 break;
4245 default:
4246 gcc_unreachable ();
4249 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4250 XFmode);
4253 /* Return 1 if X is FP constant we can load to SSE register w/o using memory.
4256 standard_sse_constant_p (rtx x)
4258 if (x == const0_rtx)
4259 return 1;
4260 return (x == CONST0_RTX (GET_MODE (x)));
4263 /* Returns 1 if OP contains a symbol reference */
4266 symbolic_reference_mentioned_p (rtx op)
4268 const char *fmt;
4269 int i;
4271 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4272 return 1;
4274 fmt = GET_RTX_FORMAT (GET_CODE (op));
4275 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4277 if (fmt[i] == 'E')
4279 int j;
4281 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4282 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4283 return 1;
4286 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4287 return 1;
4290 return 0;
4293 /* Return 1 if it is appropriate to emit `ret' instructions in the
4294 body of a function. Do this only if the epilogue is simple, needing a
4295 couple of insns. Prior to reloading, we can't tell how many registers
4296 must be saved, so return 0 then. Return 0 if there is no frame
4297 marker to de-allocate. */
4300 ix86_can_use_return_insn_p (void)
4302 struct ix86_frame frame;
4304 if (! reload_completed || frame_pointer_needed)
4305 return 0;
4307 /* Don't allow more than 32k of pop, since that's all we can do
4308 with one instruction. */
4309 if (current_function_pops_args
4310 && current_function_args_size >= 32768)
4311 return 0;
4313 ix86_compute_frame_layout (&frame);
4314 return frame.to_allocate == 0 && frame.nregs == 0;
4317 /* Value should be nonzero if functions must have frame pointers.
4318 Zero means the frame pointer need not be set up (and parms may
4319 be accessed via the stack pointer) in functions that seem suitable. */
4322 ix86_frame_pointer_required (void)
4324 /* If we accessed previous frames, then the generated code expects
4325 to be able to access the saved ebp value in our frame. */
4326 if (cfun->machine->accesses_prev_frame)
4327 return 1;
4329 /* Several x86 OSes need a frame pointer for other reasons,
4330 usually pertaining to setjmp. */
4331 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4332 return 1;
4334 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4335 the frame pointer by default. Turn it back on now if we've not
4336 got a leaf function. */
4337 if (TARGET_OMIT_LEAF_FRAME_POINTER
4338 && (!current_function_is_leaf))
4339 return 1;
4341 if (current_function_profile)
4342 return 1;
4344 return 0;
4347 /* Record that the current function accesses previous call frames. */
4349 void
4350 ix86_setup_frame_addresses (void)
4352 cfun->machine->accesses_prev_frame = 1;
4355 #if defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)
4356 # define USE_HIDDEN_LINKONCE 1
4357 #else
4358 # define USE_HIDDEN_LINKONCE 0
4359 #endif
4361 static int pic_labels_used;
4363 /* Fills in the label name that should be used for a pc thunk for
4364 the given register. */
4366 static void
4367 get_pc_thunk_name (char name[32], unsigned int regno)
4369 if (USE_HIDDEN_LINKONCE)
4370 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4371 else
4372 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4376 /* This function generates code for -fpic that loads %ebx with
4377 the return address of the caller and then returns. */
4379 void
4380 ix86_file_end (void)
4382 rtx xops[2];
4383 int regno;
4385 for (regno = 0; regno < 8; ++regno)
4387 char name[32];
4389 if (! ((pic_labels_used >> regno) & 1))
4390 continue;
4392 get_pc_thunk_name (name, regno);
4394 if (USE_HIDDEN_LINKONCE)
4396 tree decl;
4398 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4399 error_mark_node);
4400 TREE_PUBLIC (decl) = 1;
4401 TREE_STATIC (decl) = 1;
4402 DECL_ONE_ONLY (decl) = 1;
4404 (*targetm.asm_out.unique_section) (decl, 0);
4405 switch_to_section (get_named_section (decl, NULL, 0));
4407 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4408 fputs ("\t.hidden\t", asm_out_file);
4409 assemble_name (asm_out_file, name);
4410 fputc ('\n', asm_out_file);
4411 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4413 else
4415 switch_to_section (text_section);
4416 ASM_OUTPUT_LABEL (asm_out_file, name);
4419 xops[0] = gen_rtx_REG (SImode, regno);
4420 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4421 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4422 output_asm_insn ("ret", xops);
4425 if (NEED_INDICATE_EXEC_STACK)
4426 file_end_indicate_exec_stack ();
4429 /* Emit code for the SET_GOT patterns. */
4431 const char *
4432 output_set_got (rtx dest)
4434 rtx xops[3];
4436 xops[0] = dest;
4437 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4439 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4441 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4443 if (!flag_pic)
4444 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4445 else
4446 output_asm_insn ("call\t%a2", xops);
4448 #if TARGET_MACHO
4449 /* Output the "canonical" label name ("Lxx$pb") here too. This
4450 is what will be referred to by the Mach-O PIC subsystem. */
4451 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4452 #endif
4453 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4454 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4456 if (flag_pic)
4457 output_asm_insn ("pop{l}\t%0", xops);
4459 else
4461 char name[32];
4462 get_pc_thunk_name (name, REGNO (dest));
4463 pic_labels_used |= 1 << REGNO (dest);
4465 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4466 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4467 output_asm_insn ("call\t%X2", xops);
4470 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4471 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4472 else if (!TARGET_MACHO)
4473 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
4475 return "";
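/* For illustration, the sequences emitted above correspond roughly to
   the following (labels are made up):

   -fpic with TARGET_DEEP_BRANCH_PREDICTION:

        call    __i686.get_pc_thunk.bx
        addl    $_GLOBAL_OFFSET_TABLE_, %ebx

   -fpic without deep branch prediction:

        call    .L2
   .L2: popl    %ebx
        addl    $_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx
*/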
4478 /* Generate a "push" pattern for input ARG. */
4480 static rtx
4481 gen_push (rtx arg)
4483 return gen_rtx_SET (VOIDmode,
4484 gen_rtx_MEM (Pmode,
4485 gen_rtx_PRE_DEC (Pmode,
4486 stack_pointer_rtx)),
4487 arg);
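/* The RTL produced above for pushing, e.g., %ebp on ia32 is simply
   (illustrative):

     (set (mem:SI (pre_dec:SI (reg:SI 7 sp)))
          (reg:SI 6 bp))
*/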
4490 /* Return >= 0 if there is an unused call-clobbered register available
4491 for the entire function. */
4493 static unsigned int
4494 ix86_select_alt_pic_regnum (void)
4496 if (current_function_is_leaf && !current_function_profile)
4498 int i;
4499 for (i = 2; i >= 0; --i)
4500 if (!regs_ever_live[i])
4501 return i;
4504 return INVALID_REGNUM;
4507 /* Return 1 if we need to save REGNO. */
4508 static int
4509 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4511 if (pic_offset_table_rtx
4512 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4513 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4514 || current_function_profile
4515 || current_function_calls_eh_return
4516 || current_function_uses_const_pool))
4518 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4519 return 0;
4520 return 1;
4523 if (current_function_calls_eh_return && maybe_eh_return)
4525 unsigned i;
4526 for (i = 0; ; i++)
4528 unsigned test = EH_RETURN_DATA_REGNO (i);
4529 if (test == INVALID_REGNUM)
4530 break;
4531 if (test == regno)
4532 return 1;
4536 if (cfun->machine->force_align_arg_pointer
4537 && regno == REGNO (cfun->machine->force_align_arg_pointer))
4538 return 1;
4540 return (regs_ever_live[regno]
4541 && !call_used_regs[regno]
4542 && !fixed_regs[regno]
4543 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4546 /* Return number of registers to be saved on the stack. */
4548 static int
4549 ix86_nsaved_regs (void)
4551 int nregs = 0;
4552 int regno;
4554 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4555 if (ix86_save_reg (regno, true))
4556 nregs++;
4557 return nregs;
4560 /* Return the offset between two registers, one to be eliminated, and the other
4561 its replacement, at the start of a routine. */
4563 HOST_WIDE_INT
4564 ix86_initial_elimination_offset (int from, int to)
4566 struct ix86_frame frame;
4567 ix86_compute_frame_layout (&frame);
4569 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4570 return frame.hard_frame_pointer_offset;
4571 else if (from == FRAME_POINTER_REGNUM
4572 && to == HARD_FRAME_POINTER_REGNUM)
4573 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4574 else
4576 gcc_assert (to == STACK_POINTER_REGNUM);
4578 if (from == ARG_POINTER_REGNUM)
4579 return frame.stack_pointer_offset;
4581 gcc_assert (from == FRAME_POINTER_REGNUM);
4582 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4586 /* Fill structure ix86_frame about frame of currently computed function. */
4588 static void
4589 ix86_compute_frame_layout (struct ix86_frame *frame)
4591 HOST_WIDE_INT total_size;
4592 unsigned int stack_alignment_needed;
4593 HOST_WIDE_INT offset;
4594 unsigned int preferred_alignment;
4595 HOST_WIDE_INT size = get_frame_size ();
4597 frame->nregs = ix86_nsaved_regs ();
4598 total_size = size;
4600 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4601 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4603 /* During reload iteration the number of registers saved can change.
4604 Recompute the value as needed. Do not recompute when the number of registers
4605 didn't change, as reload does multiple calls to the function and does not
4606 expect the decision to change within a single iteration. */
4607 if (!optimize_size
4608 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4610 int count = frame->nregs;
4612 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4613 /* The fast prologue uses move instead of push to save registers. This
4614 is significantly longer, but also executes faster as modern hardware
4615 can execute the moves in parallel, but can't do that for push/pop.
4617 Be careful about choosing which prologue to emit: when the function takes
4618 many instructions to execute, we may use the slow version, and likewise
4619 when the function is known to be outside a hot spot (this is known with
4620 feedback only). Weight the size of the function by the number of registers
4621 to save, as it is cheap to use one or two push instructions but very
4622 slow to use many of them. */
4623 if (count)
4624 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4625 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4626 || (flag_branch_probabilities
4627 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4628 cfun->machine->use_fast_prologue_epilogue = false;
4629 else
4630 cfun->machine->use_fast_prologue_epilogue
4631 = !expensive_function_p (count);
4633 if (TARGET_PROLOGUE_USING_MOVE
4634 && cfun->machine->use_fast_prologue_epilogue)
4635 frame->save_regs_using_mov = true;
4636 else
4637 frame->save_regs_using_mov = false;
4640 /* Skip return address and saved base pointer. */
4641 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4643 frame->hard_frame_pointer_offset = offset;
4645 /* Do some sanity checking of stack_alignment_needed and
4646 preferred_alignment, since the i386 port is the only one using these
4647 features, and they may break easily. */
4649 gcc_assert (!size || stack_alignment_needed);
4650 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4651 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4652 gcc_assert (stack_alignment_needed
4653 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4655 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4656 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4658 /* Register save area */
4659 offset += frame->nregs * UNITS_PER_WORD;
4661 /* Va-arg area */
4662 if (ix86_save_varrargs_registers)
4664 offset += X86_64_VARARGS_SIZE;
4665 frame->va_arg_size = X86_64_VARARGS_SIZE;
4667 else
4668 frame->va_arg_size = 0;
4670 /* Align start of frame for local function. */
4671 frame->padding1 = ((offset + stack_alignment_needed - 1)
4672 & -stack_alignment_needed) - offset;
4674 offset += frame->padding1;
4676 /* Frame pointer points here. */
4677 frame->frame_pointer_offset = offset;
4679 offset += size;
4681 /* Add outgoing arguments area. Can be skipped if we eliminated
4682 all the function calls as dead code.
4683 Skipping is however impossible when the function calls alloca. The alloca
4684 expander assumes that the last current_function_outgoing_args_size bytes
4685 of the stack frame are unused. */
4686 if (ACCUMULATE_OUTGOING_ARGS
4687 && (!current_function_is_leaf || current_function_calls_alloca))
4689 offset += current_function_outgoing_args_size;
4690 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4692 else
4693 frame->outgoing_arguments_size = 0;
4695 /* Align stack boundary. Only needed if we're calling another function
4696 or using alloca. */
4697 if (!current_function_is_leaf || current_function_calls_alloca)
4698 frame->padding2 = ((offset + preferred_alignment - 1)
4699 & -preferred_alignment) - offset;
4700 else
4701 frame->padding2 = 0;
4703 offset += frame->padding2;
4705 /* We've reached end of stack frame. */
4706 frame->stack_pointer_offset = offset;
4708 /* Size the prologue needs to allocate. */
4709 frame->to_allocate =
4710 (size + frame->padding1 + frame->padding2
4711 + frame->outgoing_arguments_size + frame->va_arg_size);
4713 if ((!frame->to_allocate && frame->nregs <= 1)
4714 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4715 frame->save_regs_using_mov = false;
4717 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4718 && current_function_is_leaf)
4720 frame->red_zone_size = frame->to_allocate;
4721 if (frame->save_regs_using_mov)
4722 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4723 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4724 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4726 else
4727 frame->red_zone_size = 0;
4728 frame->to_allocate -= frame->red_zone_size;
4729 frame->stack_pointer_offset -= frame->red_zone_size;
4730 #if 0
4731 fprintf (stderr, "nregs: %i\n", frame->nregs);
4732 fprintf (stderr, "size: %i\n", size);
4733 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4734 fprintf (stderr, "padding1: %i\n", frame->padding1);
4735 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4736 fprintf (stderr, "padding2: %i\n", frame->padding2);
4737 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4738 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4739 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4740 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4741 frame->hard_frame_pointer_offset);
4742 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4743 #endif
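/* For orientation, the layout computed above, from higher to lower
   addresses (a sketch; the exact paddings depend on the alignment
   computations):

     return address
     saved %ebp (if frame_pointer_needed)      hard_frame_pointer_offset
     register save area (frame->nregs words)
     va-arg save area (if any)
     padding1                                  frame_pointer_offset
     local variables (get_frame_size ())
     outgoing argument area (if accumulated)
     padding2                                  stack_pointer_offset  */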
4746 /* Emit code to save registers in the prologue. */
4748 static void
4749 ix86_emit_save_regs (void)
4751 unsigned int regno;
4752 rtx insn;
4754 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
4755 if (ix86_save_reg (regno, true))
4757 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4758 RTX_FRAME_RELATED_P (insn) = 1;
4762 /* Emit code to save registers using MOV insns. The first register
4763 is saved at POINTER + OFFSET. */
4764 static void
4765 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4767 unsigned int regno;
4768 rtx insn;
4770 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4771 if (ix86_save_reg (regno, true))
4773 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4774 Pmode, offset),
4775 gen_rtx_REG (Pmode, regno));
4776 RTX_FRAME_RELATED_P (insn) = 1;
4777 offset += UNITS_PER_WORD;
4781 /* Expand prologue or epilogue stack adjustment.
4782 The pattern exists to put a dependency on all ebp-based memory accesses.
4783 STYLE should be negative if instructions should be marked as frame related,
4784 zero if %r11 register is live and cannot be freely used and positive
4785 otherwise. */
4787 static void
4788 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4790 rtx insn;
4792 if (! TARGET_64BIT)
4793 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4794 else if (x86_64_immediate_operand (offset, DImode))
4795 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4796 else
4798 rtx r11;
4799 /* r11 is used by indirect sibcall return as well, set before the
4800 epilogue and used after the epilogue. ATM indirect sibcall
4801 shouldn't be used together with huge frame sizes in one
4802 function because of the frame_size check in sibcall.c. */
4803 gcc_assert (style);
4804 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4805 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4806 if (style < 0)
4807 RTX_FRAME_RELATED_P (insn) = 1;
4808 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4809 offset));
4811 if (style < 0)
4812 RTX_FRAME_RELATED_P (insn) = 1;
4815 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
4817 static rtx
4818 ix86_internal_arg_pointer (void)
4820 if (FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
4821 && DECL_NAME (current_function_decl)
4822 && MAIN_NAME_P (DECL_NAME (current_function_decl))
4823 && DECL_FILE_SCOPE_P (current_function_decl))
4825 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
4826 return copy_to_reg (cfun->machine->force_align_arg_pointer);
4828 else
4829 return virtual_incoming_args_rtx;
4832 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
4833 This is called from dwarf2out.c to emit call frame instructions
4834 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
4835 static void
4836 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
4838 rtx unspec = SET_SRC (pattern);
4839 gcc_assert (GET_CODE (unspec) == UNSPEC);
4841 switch (index)
4843 case UNSPEC_REG_SAVE:
4844 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
4845 SET_DEST (pattern));
4846 break;
4847 case UNSPEC_DEF_CFA:
4848 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
4849 INTVAL (XVECEXP (unspec, 0, 0)));
4850 break;
4851 default:
4852 gcc_unreachable ();
4856 /* Expand the prologue into a bunch of separate insns. */
4858 void
4859 ix86_expand_prologue (void)
4861 rtx insn;
4862 bool pic_reg_used;
4863 struct ix86_frame frame;
4864 HOST_WIDE_INT allocate;
4866 ix86_compute_frame_layout (&frame);
4868 if (cfun->machine->force_align_arg_pointer)
4870 rtx x, y;
4872 /* Grab the argument pointer. */
4873 x = plus_constant (stack_pointer_rtx, 4);
4874 y = cfun->machine->force_align_arg_pointer;
4875 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
4876 RTX_FRAME_RELATED_P (insn) = 1;
4878 /* The unwind info consists of two parts: install the fafp as the cfa,
4879 and record the fafp as the "save register" of the stack pointer.
4880 The latter is there so that the unwinder can see where it
4881 should restore the stack pointer across the and insn. */
4882 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
4883 x = gen_rtx_SET (VOIDmode, y, x);
4884 RTX_FRAME_RELATED_P (x) = 1;
4885 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
4886 UNSPEC_REG_SAVE);
4887 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
4888 RTX_FRAME_RELATED_P (y) = 1;
4889 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
4890 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
4891 REG_NOTES (insn) = x;
4893 /* Align the stack. */
4894 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
4895 GEN_INT (-16)));
4897 /* And here we cheat like madmen with the unwind info. We force the
4898 cfa register back to sp+4, which is exactly what it was at the
4899 start of the function. Re-pushing the return address results in
4900 the return at the same spot relative to the cfa, and thus is
4901 correct wrt the unwind info. */
4902 x = cfun->machine->force_align_arg_pointer;
4903 x = gen_frame_mem (Pmode, plus_constant (x, -4));
4904 insn = emit_insn (gen_push (x));
4905 RTX_FRAME_RELATED_P (insn) = 1;
4907 x = GEN_INT (4);
4908 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
4909 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
4910 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
4911 REG_NOTES (insn) = x;
4914 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4915 slower on all targets. Also sdb doesn't like it. */
4917 if (frame_pointer_needed)
4919 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4920 RTX_FRAME_RELATED_P (insn) = 1;
4922 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4923 RTX_FRAME_RELATED_P (insn) = 1;
4926 allocate = frame.to_allocate;
4928 if (!frame.save_regs_using_mov)
4929 ix86_emit_save_regs ();
4930 else
4931 allocate += frame.nregs * UNITS_PER_WORD;
4933 /* When using the red zone we may start register saving before allocating
4934 the stack frame, saving one cycle of the prologue. */
4935 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4936 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4937 : stack_pointer_rtx,
4938 -frame.nregs * UNITS_PER_WORD);
4940 if (allocate == 0)
4942 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4943 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4944 GEN_INT (-allocate), -1);
4945 else
4947 /* Only valid for Win32. */
4948 rtx eax = gen_rtx_REG (SImode, 0);
4949 bool eax_live = ix86_eax_live_at_start_p ();
4950 rtx t;
4952 gcc_assert (!TARGET_64BIT);
4954 if (eax_live)
4956 emit_insn (gen_push (eax));
4957 allocate -= 4;
4960 emit_move_insn (eax, GEN_INT (allocate));
4962 insn = emit_insn (gen_allocate_stack_worker (eax));
4963 RTX_FRAME_RELATED_P (insn) = 1;
4964 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4965 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4966 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4967 t, REG_NOTES (insn));
4969 if (eax_live)
4971 if (frame_pointer_needed)
4972 t = plus_constant (hard_frame_pointer_rtx,
4973 allocate
4974 - frame.to_allocate
4975 - frame.nregs * UNITS_PER_WORD);
4976 else
4977 t = plus_constant (stack_pointer_rtx, allocate);
4978 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4982 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4984 if (!frame_pointer_needed || !frame.to_allocate)
4985 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4986 else
4987 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4988 -frame.nregs * UNITS_PER_WORD);
4991 pic_reg_used = false;
4992 if (pic_offset_table_rtx
4993 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4994 || current_function_profile))
4996 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4998 if (alt_pic_reg_used != INVALID_REGNUM)
4999 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5001 pic_reg_used = true;
5004 if (pic_reg_used)
5006 if (TARGET_64BIT)
5007 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5008 else
5009 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5011 /* Even with accurate pre-reload life analysis, we can wind up
5012 deleting all references to the pic register after reload.
5013 Consider if cross-jumping unifies two sides of a branch
5014 controlled by a comparison vs the only read from a global.
5015 In which case, allow the set_got to be deleted, though we're
5016 too late to do anything about the ebx save in the prologue. */
5017 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5020 /* Prevent function calls from being scheduled before the call to mcount.
5021 In the pic_reg_used case, make sure that the got load isn't deleted. */
5022 if (current_function_profile)
5023 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
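/* As a rough example, for an ordinary ia32 function with a frame pointer
   the insns emitted above correspond to something like (sketch only):

        pushl   %ebp
        movl    %esp, %ebp
        pushl   %ebx                    saved registers, or movs when
                                        save_regs_using_mov is in effect
        subl    $to_allocate, %esp
        [PIC register setup / profiling blockage if needed]
*/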
5026 /* Emit code to restore saved registers using MOV insns. First register
5027 is restored from POINTER + OFFSET. */
5028 static void
5029 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5030 int maybe_eh_return)
5032 int regno;
5033 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5035 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5036 if (ix86_save_reg (regno, maybe_eh_return))
5038 /* Ensure that adjust_address won't be forced to produce a pointer
5039 out of the range allowed by the x86-64 instruction set. */
5040 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5042 rtx r11;
5044 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
5045 emit_move_insn (r11, GEN_INT (offset));
5046 emit_insn (gen_adddi3 (r11, r11, pointer));
5047 base_address = gen_rtx_MEM (Pmode, r11);
5048 offset = 0;
5050 emit_move_insn (gen_rtx_REG (Pmode, regno),
5051 adjust_address (base_address, Pmode, offset));
5052 offset += UNITS_PER_WORD;
5056 /* Restore function stack, frame, and registers. */
5058 void
5059 ix86_expand_epilogue (int style)
5061 int regno;
5062 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5063 struct ix86_frame frame;
5064 HOST_WIDE_INT offset;
5066 ix86_compute_frame_layout (&frame);
5068 /* Calculate start of saved registers relative to ebp. Special care
5069 must be taken for the normal return case of a function using
5070 eh_return: the eax and edx registers are marked as saved, but not
5071 restored along this path. */
5072 offset = frame.nregs;
5073 if (current_function_calls_eh_return && style != 2)
5074 offset -= 2;
5075 offset *= -UNITS_PER_WORD;
5077 /* If we're only restoring one register and sp is not valid then
5078 use a move instruction to restore the register, since it's
5079 less work than reloading sp and popping the register.
5081 The default code results in a stack adjustment using an add/lea instruction,
5082 while this code results in a LEAVE instruction (or discrete equivalent),
5083 so it is profitable in some other cases as well, especially when there
5084 are no registers to restore. We also use this code when TARGET_USE_LEAVE
5085 and there is exactly one register to pop. This heuristic may need some
5086 tuning in the future. */
5087 if ((!sp_valid && frame.nregs <= 1)
5088 || (TARGET_EPILOGUE_USING_MOVE
5089 && cfun->machine->use_fast_prologue_epilogue
5090 && (frame.nregs > 1 || frame.to_allocate))
5091 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5092 || (frame_pointer_needed && TARGET_USE_LEAVE
5093 && cfun->machine->use_fast_prologue_epilogue
5094 && frame.nregs == 1)
5095 || current_function_calls_eh_return)
5097 /* Restore registers. We can use ebp or esp to address the memory
5098 locations. If both are available, default to ebp, since offsets
5099 are known to be small. The only exception is esp pointing directly to the
5100 end of the block of saved registers, where we may simplify the addressing
5101 mode. */
5103 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5104 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5105 frame.to_allocate, style == 2);
5106 else
5107 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5108 offset, style == 2);
5110 /* eh_return epilogues need %ecx added to the stack pointer. */
5111 if (style == 2)
5113 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5115 if (frame_pointer_needed)
5117 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5118 tmp = plus_constant (tmp, UNITS_PER_WORD);
5119 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5121 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5122 emit_move_insn (hard_frame_pointer_rtx, tmp);
5124 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5125 const0_rtx, style);
5127 else
5129 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5130 tmp = plus_constant (tmp, (frame.to_allocate
5131 + frame.nregs * UNITS_PER_WORD));
5132 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5135 else if (!frame_pointer_needed)
5136 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5137 GEN_INT (frame.to_allocate
5138 + frame.nregs * UNITS_PER_WORD),
5139 style);
5140 /* If not an i386, mov & pop is faster than "leave". */
5141 else if (TARGET_USE_LEAVE || optimize_size
5142 || !cfun->machine->use_fast_prologue_epilogue)
5143 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5144 else
5146 pro_epilogue_adjust_stack (stack_pointer_rtx,
5147 hard_frame_pointer_rtx,
5148 const0_rtx, style);
5149 if (TARGET_64BIT)
5150 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5151 else
5152 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5155 else
5157 /* First step is to deallocate the stack frame so that we can
5158 pop the registers. */
5159 if (!sp_valid)
5161 gcc_assert (frame_pointer_needed);
5162 pro_epilogue_adjust_stack (stack_pointer_rtx,
5163 hard_frame_pointer_rtx,
5164 GEN_INT (offset), style);
5166 else if (frame.to_allocate)
5167 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5168 GEN_INT (frame.to_allocate), style);
5170 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5171 if (ix86_save_reg (regno, false))
5173 if (TARGET_64BIT)
5174 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5175 else
5176 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5178 if (frame_pointer_needed)
5180 /* The leave instruction results in shorter dependency chains on CPUs that are
5181 able to grok it fast. */
5182 if (TARGET_USE_LEAVE)
5183 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5184 else if (TARGET_64BIT)
5185 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5186 else
5187 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5191 if (cfun->machine->force_align_arg_pointer)
5193 emit_insn (gen_addsi3 (stack_pointer_rtx,
5194 cfun->machine->force_align_arg_pointer,
5195 GEN_INT (-4)));
5198 /* Sibcall epilogues don't want a return instruction. */
5199 if (style == 0)
5200 return;
5202 if (current_function_pops_args && current_function_args_size)
5204 rtx popc = GEN_INT (current_function_pops_args);
5206 /* i386 can only pop 64K bytes. If asked to pop more, pop
5207 return address, do explicit add, and jump indirectly to the
5208 caller. */
5210 if (current_function_pops_args >= 65536)
5212 rtx ecx = gen_rtx_REG (SImode, 2);
5214 /* There is no "pascal" calling convention in 64bit ABI. */
5215 gcc_assert (!TARGET_64BIT);
5217 emit_insn (gen_popsi1 (ecx));
5218 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5219 emit_jump_insn (gen_return_indirect_internal (ecx));
5221 else
5222 emit_jump_insn (gen_return_pop_internal (popc));
5224 else
5225 emit_jump_insn (gen_return_internal ());
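/* The common shapes produced above are, roughly (sketch only):

        addl    $to_allocate, %esp      deallocate locals
        popl    %ebx                    restore saved registers
        popl    %ebp                    or a single "leave" when profitable
        ret                             or "ret $N" for callee-pop conventions
*/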
5228 /* Reset from the function's potential modifications. */
5230 static void
5231 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5232 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5234 if (pic_offset_table_rtx)
5235 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5238 /* Extract the parts of an RTL expression that is a valid memory address
5239 for an instruction. Return 0 if the structure of the address is
5240 grossly off. Return -1 if the address contains ASHIFT, so it is not
5241 strictly valid, but still used for computing length of lea instruction. */
5244 ix86_decompose_address (rtx addr, struct ix86_address *out)
5246 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5247 rtx base_reg, index_reg;
5248 HOST_WIDE_INT scale = 1;
5249 rtx scale_rtx = NULL_RTX;
5250 int retval = 1;
5251 enum ix86_address_seg seg = SEG_DEFAULT;
5253 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5254 base = addr;
5255 else if (GET_CODE (addr) == PLUS)
5257 rtx addends[4], op;
5258 int n = 0, i;
5260 op = addr;
5263 if (n >= 4)
5264 return 0;
5265 addends[n++] = XEXP (op, 1);
5266 op = XEXP (op, 0);
5268 while (GET_CODE (op) == PLUS);
5269 if (n >= 4)
5270 return 0;
5271 addends[n] = op;
5273 for (i = n; i >= 0; --i)
5275 op = addends[i];
5276 switch (GET_CODE (op))
5278 case MULT:
5279 if (index)
5280 return 0;
5281 index = XEXP (op, 0);
5282 scale_rtx = XEXP (op, 1);
5283 break;
5285 case UNSPEC:
5286 if (XINT (op, 1) == UNSPEC_TP
5287 && TARGET_TLS_DIRECT_SEG_REFS
5288 && seg == SEG_DEFAULT)
5289 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5290 else
5291 return 0;
5292 break;
5294 case REG:
5295 case SUBREG:
5296 if (!base)
5297 base = op;
5298 else if (!index)
5299 index = op;
5300 else
5301 return 0;
5302 break;
5304 case CONST:
5305 case CONST_INT:
5306 case SYMBOL_REF:
5307 case LABEL_REF:
5308 if (disp)
5309 return 0;
5310 disp = op;
5311 break;
5313 default:
5314 return 0;
5318 else if (GET_CODE (addr) == MULT)
5320 index = XEXP (addr, 0); /* index*scale */
5321 scale_rtx = XEXP (addr, 1);
5323 else if (GET_CODE (addr) == ASHIFT)
5325 rtx tmp;
5327 /* We're called for lea too, which implements ashift on occasion. */
5328 index = XEXP (addr, 0);
5329 tmp = XEXP (addr, 1);
5330 if (GET_CODE (tmp) != CONST_INT)
5331 return 0;
5332 scale = INTVAL (tmp);
5333 if ((unsigned HOST_WIDE_INT) scale > 3)
5334 return 0;
5335 scale = 1 << scale;
5336 retval = -1;
5338 else
5339 disp = addr; /* displacement */
5341 /* Extract the integral value of scale. */
5342 if (scale_rtx)
5344 if (GET_CODE (scale_rtx) != CONST_INT)
5345 return 0;
5346 scale = INTVAL (scale_rtx);
5349 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
5350 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
5352 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
5353 if (base_reg && index_reg && scale == 1
5354 && (index_reg == arg_pointer_rtx
5355 || index_reg == frame_pointer_rtx
5356 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
5358 rtx tmp;
5359 tmp = base, base = index, index = tmp;
5360 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
5363 /* Special case: %ebp cannot be encoded as a base without a displacement. */
5364 if ((base_reg == hard_frame_pointer_rtx
5365 || base_reg == frame_pointer_rtx
5366 || base_reg == arg_pointer_rtx) && !disp)
5367 disp = const0_rtx;
5369 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
5370 Avoid this by transforming it to [%esi+0]. */
5371 if (ix86_tune == PROCESSOR_K6 && !optimize_size
5372 && base_reg && !index_reg && !disp
5373 && REG_P (base_reg)
5374 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
5375 disp = const0_rtx;
5377 /* Special case: encode reg+reg instead of reg*2. */
5378 if (!base && index && scale && scale == 2)
5379 base = index, base_reg = index_reg, scale = 1;
5381 /* Special case: scaling cannot be encoded without base or displacement. */
5382 if (!base && !disp && index && scale != 1)
5383 disp = const0_rtx;
5385 out->base = base;
5386 out->index = index;
5387 out->disp = disp;
5388 out->scale = scale;
5389 out->seg = seg;
5391 return retval;
5394 /* Return cost of the memory address x.
5395 For i386, it is better to use a complex address than to let gcc copy
5396 the address into a reg and make a new pseudo. But not if the address
5397 requires two regs - that would mean more pseudos with longer
5398 lifetimes. */
5399 static int
5400 ix86_address_cost (rtx x)
5402 struct ix86_address parts;
5403 int cost = 1;
5404 int ok = ix86_decompose_address (x, &parts);
5406 gcc_assert (ok);
5408 if (parts.base && GET_CODE (parts.base) == SUBREG)
5409 parts.base = SUBREG_REG (parts.base);
5410 if (parts.index && GET_CODE (parts.index) == SUBREG)
5411 parts.index = SUBREG_REG (parts.index);
5413 /* More complex memory references are better. */
5414 if (parts.disp && parts.disp != const0_rtx)
5415 cost--;
5416 if (parts.seg != SEG_DEFAULT)
5417 cost--;
5419 /* Attempt to minimize the number of registers in the address. */
5420 if ((parts.base
5421 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5422 || (parts.index
5423 && (!REG_P (parts.index)
5424 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5425 cost++;
5427 if (parts.base
5428 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5429 && parts.index
5430 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5431 && parts.base != parts.index)
5432 cost++;
5434 /* The AMD K6 doesn't like addresses with the ModR/M byte set to 00_xxx_100b,
5435 since its predecode logic can't detect the length of such instructions
5436 and they degenerate to vector decoding. Increase the cost of such
5437 addresses here. The penalty is at least 2 cycles. It may be worthwhile
5438 to split such addresses or even refuse them entirely.
5440 The following addressing modes are affected:
5441 [base+scale*index]
5442 [scale*index+disp]
5443 [base+index]
5445 The first and last case may be avoidable by explicitly coding a zero
5446 displacement in the memory address, but I don't have an AMD K6 machine
5447 handy to check this theory. */
5449 if (TARGET_K6
5450 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5451 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5452 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5453 cost += 10;
5455 return cost;
5458 /* If X is a machine specific address (i.e. a symbol or label being
5459 referenced as a displacement from the GOT implemented using an
5460 UNSPEC), then return the base term. Otherwise return X. */
5463 ix86_find_base_term (rtx x)
5465 rtx term;
5467 if (TARGET_64BIT)
5469 if (GET_CODE (x) != CONST)
5470 return x;
5471 term = XEXP (x, 0);
5472 if (GET_CODE (term) == PLUS
5473 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5474 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5475 term = XEXP (term, 0);
5476 if (GET_CODE (term) != UNSPEC
5477 || XINT (term, 1) != UNSPEC_GOTPCREL)
5478 return x;
5480 term = XVECEXP (term, 0, 0);
5482 if (GET_CODE (term) != SYMBOL_REF
5483 && GET_CODE (term) != LABEL_REF)
5484 return x;
5486 return term;
5489 term = ix86_delegitimize_address (x);
5491 if (GET_CODE (term) != SYMBOL_REF
5492 && GET_CODE (term) != LABEL_REF)
5493 return x;
5495 return term;
5498 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
5499 this is used to form addresses to local data when -fPIC is in
5500 use. */
5502 static bool
5503 darwin_local_data_pic (rtx disp)
5505 if (GET_CODE (disp) == MINUS)
5507 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5508 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5509 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5511 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5512 if (! strcmp (sym_name, "<pic base>"))
5513 return true;
5517 return false;
5520 /* Determine if a given RTX is a valid constant. We already know this
5521 satisfies CONSTANT_P. */
5523 bool
5524 legitimate_constant_p (rtx x)
5526 switch (GET_CODE (x))
5528 case CONST:
5529 x = XEXP (x, 0);
5531 if (GET_CODE (x) == PLUS)
5533 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5534 return false;
5535 x = XEXP (x, 0);
5538 if (TARGET_MACHO && darwin_local_data_pic (x))
5539 return true;
5541 /* Only some unspecs are valid as "constants". */
5542 if (GET_CODE (x) == UNSPEC)
5543 switch (XINT (x, 1))
5545 case UNSPEC_GOTOFF:
5546 return TARGET_64BIT;
5547 case UNSPEC_TPOFF:
5548 case UNSPEC_NTPOFF:
5549 x = XVECEXP (x, 0, 0);
5550 return (GET_CODE (x) == SYMBOL_REF
5551 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5552 case UNSPEC_DTPOFF:
5553 x = XVECEXP (x, 0, 0);
5554 return (GET_CODE (x) == SYMBOL_REF
5555 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
5556 default:
5557 return false;
5560 /* We must have drilled down to a symbol. */
5561 if (GET_CODE (x) == LABEL_REF)
5562 return true;
5563 if (GET_CODE (x) != SYMBOL_REF)
5564 return false;
5565 /* FALLTHRU */
5567 case SYMBOL_REF:
5568 /* TLS symbols are never valid. */
5569 if (SYMBOL_REF_TLS_MODEL (x))
5570 return false;
5571 break;
5573 default:
5574 break;
5577 /* Otherwise we handle everything else in the move patterns. */
5578 return true;
5581 /* Determine if it's legal to put X into the constant pool. This
5582 is not possible for the address of thread-local symbols, which
5583 is checked above. */
5585 static bool
5586 ix86_cannot_force_const_mem (rtx x)
5588 return !legitimate_constant_p (x);
5591 /* Determine if a given RTX is a valid constant address. */
5593 bool
5594 constant_address_p (rtx x)
5596 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5599 /* Nonzero if the constant value X is a legitimate general operand
5600 when generating PIC code. It is given that flag_pic is on and
5601 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5603 bool
5604 legitimate_pic_operand_p (rtx x)
5606 rtx inner;
5608 switch (GET_CODE (x))
5610 case CONST:
5611 inner = XEXP (x, 0);
5612 if (GET_CODE (inner) == PLUS
5613 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
5614 inner = XEXP (inner, 0);
5616 /* Only some unspecs are valid as "constants". */
5617 if (GET_CODE (inner) == UNSPEC)
5618 switch (XINT (inner, 1))
5620 case UNSPEC_GOTOFF:
5621 return TARGET_64BIT;
5622 case UNSPEC_TPOFF:
5623 x = XVECEXP (inner, 0, 0);
5624 return (GET_CODE (x) == SYMBOL_REF
5625 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5626 default:
5627 return false;
5629 /* FALLTHRU */
5631 case SYMBOL_REF:
5632 case LABEL_REF:
5633 return legitimate_pic_address_disp_p (x);
5635 default:
5636 return true;
5640 /* Determine if a given CONST RTX is a valid memory displacement
5641 in PIC mode. */
5644 legitimate_pic_address_disp_p (rtx disp)
5646 bool saw_plus;
5648 /* In 64bit mode we can allow direct addresses of symbols and labels
5649 when they are not dynamic symbols. */
5650 if (TARGET_64BIT)
5652 rtx op0 = disp, op1;
5654 switch (GET_CODE (disp))
5656 case LABEL_REF:
5657 return true;
5659 case CONST:
5660 if (GET_CODE (XEXP (disp, 0)) != PLUS)
5661 break;
5662 op0 = XEXP (XEXP (disp, 0), 0);
5663 op1 = XEXP (XEXP (disp, 0), 1);
5664 if (GET_CODE (op1) != CONST_INT
5665 || INTVAL (op1) >= 16*1024*1024
5666 || INTVAL (op1) < -16*1024*1024)
5667 break;
5668 if (GET_CODE (op0) == LABEL_REF)
5669 return true;
5670 if (GET_CODE (op0) != SYMBOL_REF)
5671 break;
5672 /* FALLTHRU */
5674 case SYMBOL_REF:
5675 /* TLS references should always be enclosed in UNSPEC. */
5676 if (SYMBOL_REF_TLS_MODEL (op0))
5677 return false;
5678 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
5679 return true;
5680 break;
5682 default:
5683 break;
5686 if (GET_CODE (disp) != CONST)
5687 return 0;
5688 disp = XEXP (disp, 0);
5690 if (TARGET_64BIT)
5692 /* It is unsafe to allow PLUS expressions here; this limits the allowed
5693 distance of GOT tables. We should not need these anyway. */
5694 if (GET_CODE (disp) != UNSPEC
5695 || (XINT (disp, 1) != UNSPEC_GOTPCREL
5696 && XINT (disp, 1) != UNSPEC_GOTOFF))
5697 return 0;
5699 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5700 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5701 return 0;
5702 return 1;
5705 saw_plus = false;
5706 if (GET_CODE (disp) == PLUS)
5708 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5709 return 0;
5710 disp = XEXP (disp, 0);
5711 saw_plus = true;
5714 if (TARGET_MACHO && darwin_local_data_pic (disp))
5715 return 1;
5717 if (GET_CODE (disp) != UNSPEC)
5718 return 0;
5720 switch (XINT (disp, 1))
5722 case UNSPEC_GOT:
5723 if (saw_plus)
5724 return false;
5725 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5726 case UNSPEC_GOTOFF:
5727 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
5728 While the ABI also specifies a 32bit relocation, we don't produce it in
5729 the small PIC model at all. */
5730 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5731 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5732 && !TARGET_64BIT)
5733 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5734 return false;
5735 case UNSPEC_GOTTPOFF:
5736 case UNSPEC_GOTNTPOFF:
5737 case UNSPEC_INDNTPOFF:
5738 if (saw_plus)
5739 return false;
5740 disp = XVECEXP (disp, 0, 0);
5741 return (GET_CODE (disp) == SYMBOL_REF
5742 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
5743 case UNSPEC_NTPOFF:
5744 disp = XVECEXP (disp, 0, 0);
5745 return (GET_CODE (disp) == SYMBOL_REF
5746 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
5747 case UNSPEC_DTPOFF:
5748 disp = XVECEXP (disp, 0, 0);
5749 return (GET_CODE (disp) == SYMBOL_REF
5750 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
5753 return 0;
5756 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5757 memory address for an instruction. The MODE argument is the machine mode
5758 for the MEM expression that wants to use this address.
5760 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5761 convert common non-canonical forms to canonical form so that they will
5762 be recognized. */
5765 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5767 struct ix86_address parts;
5768 rtx base, index, disp;
5769 HOST_WIDE_INT scale;
5770 const char *reason = NULL;
5771 rtx reason_rtx = NULL_RTX;
5773 if (TARGET_DEBUG_ADDR)
5775 fprintf (stderr,
5776 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5777 GET_MODE_NAME (mode), strict);
5778 debug_rtx (addr);
5781 if (ix86_decompose_address (addr, &parts) <= 0)
5783 reason = "decomposition failed";
5784 goto report_error;
5787 base = parts.base;
5788 index = parts.index;
5789 disp = parts.disp;
5790 scale = parts.scale;
5792 /* Validate base register.
5794 Don't allow SUBREG's that span more than a word here. It can lead to spill
5795 failures when the base is one word out of a two word structure, which is
5796 represented internally as a DImode int. */
5798 if (base)
5800 rtx reg;
5801 reason_rtx = base;
5803 if (REG_P (base))
5804 reg = base;
5805 else if (GET_CODE (base) == SUBREG
5806 && REG_P (SUBREG_REG (base))
5807 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5808 <= UNITS_PER_WORD)
5809 reg = SUBREG_REG (base);
5810 else
5812 reason = "base is not a register";
5813 goto report_error;
5816 if (GET_MODE (base) != Pmode)
5818 reason = "base is not in Pmode";
5819 goto report_error;
5822 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5823 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5825 reason = "base is not valid";
5826 goto report_error;
5830 /* Validate index register.
5832 Don't allow SUBREG's that span more than a word here -- same as above. */
5834 if (index)
5836 rtx reg;
5837 reason_rtx = index;
5839 if (REG_P (index))
5840 reg = index;
5841 else if (GET_CODE (index) == SUBREG
5842 && REG_P (SUBREG_REG (index))
5843 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5844 <= UNITS_PER_WORD)
5845 reg = SUBREG_REG (index);
5846 else
5848 reason = "index is not a register";
5849 goto report_error;
5852 if (GET_MODE (index) != Pmode)
5854 reason = "index is not in Pmode";
5855 goto report_error;
5858 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5859 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5861 reason = "index is not valid";
5862 goto report_error;
5866 /* Validate scale factor. */
5867 if (scale != 1)
5869 reason_rtx = GEN_INT (scale);
5870 if (!index)
5872 reason = "scale without index";
5873 goto report_error;
5876 if (scale != 2 && scale != 4 && scale != 8)
5878 reason = "scale is not a valid multiplier";
5879 goto report_error;
5883 /* Validate displacement. */
5884 if (disp)
5886 reason_rtx = disp;
5888 if (GET_CODE (disp) == CONST
5889 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5890 switch (XINT (XEXP (disp, 0), 1))
5892 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
5893 used. While the ABI also specifies 32bit relocations, we don't produce
5894 them at all and use IP relative addressing instead. */
5895 case UNSPEC_GOT:
5896 case UNSPEC_GOTOFF:
5897 gcc_assert (flag_pic);
5898 if (!TARGET_64BIT)
5899 goto is_legitimate_pic;
5900 reason = "64bit address unspec";
5901 goto report_error;
5903 case UNSPEC_GOTPCREL:
5904 gcc_assert (flag_pic);
5905 goto is_legitimate_pic;
5907 case UNSPEC_GOTTPOFF:
5908 case UNSPEC_GOTNTPOFF:
5909 case UNSPEC_INDNTPOFF:
5910 case UNSPEC_NTPOFF:
5911 case UNSPEC_DTPOFF:
5912 break;
5914 default:
5915 reason = "invalid address unspec";
5916 goto report_error;
5919 else if (flag_pic && (SYMBOLIC_CONST (disp)
5920 #if TARGET_MACHO
5921 && !machopic_operand_p (disp)
5922 #endif
5925 is_legitimate_pic:
5926 if (TARGET_64BIT && (index || base))
5928 /* foo@dtpoff(%rX) is ok. */
5929 if (GET_CODE (disp) != CONST
5930 || GET_CODE (XEXP (disp, 0)) != PLUS
5931 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5932 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5933 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5934 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5936 reason = "non-constant pic memory reference";
5937 goto report_error;
5940 else if (! legitimate_pic_address_disp_p (disp))
5942 reason = "displacement is an invalid pic construct";
5943 goto report_error;
5946 /* This code used to verify that a symbolic pic displacement
5947 includes the pic_offset_table_rtx register.
5949 While this is a good idea, unfortunately these constructs may
5950 be created by the "adds using lea" optimization for incorrect
5951 code like:
5953 int a;
5954 int foo(int i)
5956 return *(&a+i);
5959 This code is nonsensical, but results in addressing the
5960 GOT table with a pic_offset_table_rtx base. We can't
5961 just refuse it easily, since it gets matched by the
5962 "addsi3" pattern, which later gets split to an lea when the
5963 output register differs from the input. While this
5964 could be handled by a separate addsi pattern for this case
5965 that never results in an lea, disabling this test seems to be
5966 the easier and correct fix for the crash. */
5968 else if (GET_CODE (disp) != LABEL_REF
5969 && GET_CODE (disp) != CONST_INT
5970 && (GET_CODE (disp) != CONST
5971 || !legitimate_constant_p (disp))
5972 && (GET_CODE (disp) != SYMBOL_REF
5973 || !legitimate_constant_p (disp)))
5975 reason = "displacement is not constant";
5976 goto report_error;
5978 else if (TARGET_64BIT
5979 && !x86_64_immediate_operand (disp, VOIDmode))
5981 reason = "displacement is out of range";
5982 goto report_error;
5986 /* Everything looks valid. */
5987 if (TARGET_DEBUG_ADDR)
5988 fprintf (stderr, "Success.\n");
5989 return TRUE;
5991 report_error:
5992 if (TARGET_DEBUG_ADDR)
5994 fprintf (stderr, "Error: %s\n", reason);
5995 debug_rtx (reason_rtx);
5997 return FALSE;
6000 /* Return a unique alias set for the GOT. */
6002 static HOST_WIDE_INT
6003 ix86_GOT_alias_set (void)
6005 static HOST_WIDE_INT set = -1;
6006 if (set == -1)
6007 set = new_alias_set ();
6008 return set;
6011 /* Return a legitimate reference for ORIG (an address) using the
6012 register REG. If REG is 0, a new pseudo is generated.
6014 There are two types of references that must be handled:
6016 1. Global data references must load the address from the GOT, via
6017 the PIC reg. An insn is emitted to do this load, and the reg is
6018 returned.
6020 2. Static data references, constant pool addresses, and code labels
6021 compute the address as an offset from the GOT, whose base is in
6022 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6023 differentiate them from global data objects. The returned
6024 address is the PIC reg + an unspec constant.
6026 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6027 reg also appears in the address. */
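/* For illustration of the two cases above (32bit code; the symbol name is
   only an example): a global SYMBOL_REF "foo" becomes a load from

       (mem (plus pic_offset_table_rtx (const (unspec [foo] UNSPEC_GOT))))

   i.e. foo@GOT(%ebx), while a local symbol becomes the sum

       (plus pic_offset_table_rtx (const (unspec [foo] UNSPEC_GOTOFF)))

   i.e. foo@GOTOFF(%ebx), with no memory load.  */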
6029 static rtx
6030 legitimize_pic_address (rtx orig, rtx reg)
6032 rtx addr = orig;
6033 rtx new = orig;
6034 rtx base;
6036 #if TARGET_MACHO
6037 if (reg == 0)
6038 reg = gen_reg_rtx (Pmode);
6039 /* Use the generic Mach-O PIC machinery. */
6040 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6041 #endif
6043 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6044 new = addr;
6045 else if (TARGET_64BIT
6046 && ix86_cmodel != CM_SMALL_PIC
6047 && local_symbolic_operand (addr, Pmode))
6049 rtx tmpreg;
6050 /* This symbol may be referenced via a displacement from the PIC
6051 base address (@GOTOFF). */
6053 if (reload_in_progress)
6054 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6055 if (GET_CODE (addr) == CONST)
6056 addr = XEXP (addr, 0);
6057 if (GET_CODE (addr) == PLUS)
6059 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6060 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6062 else
6063 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6064 new = gen_rtx_CONST (Pmode, new);
6065 if (!reg)
6066 tmpreg = gen_reg_rtx (Pmode);
6067 else
6068 tmpreg = reg;
6069 emit_move_insn (tmpreg, new);
6071 if (reg != 0)
6073 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6074 tmpreg, 1, OPTAB_DIRECT);
6075 new = reg;
6077 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6079 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6081 /* This symbol may be referenced via a displacement from the PIC
6082 base address (@GOTOFF). */
6084 if (reload_in_progress)
6085 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6086 if (GET_CODE (addr) == CONST)
6087 addr = XEXP (addr, 0);
6088 if (GET_CODE (addr) == PLUS)
6090 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6091 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6093 else
6094 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6095 new = gen_rtx_CONST (Pmode, new);
6096 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6098 if (reg != 0)
6100 emit_move_insn (reg, new);
6101 new = reg;
6104 else if (GET_CODE (addr) == SYMBOL_REF)
6106 if (TARGET_64BIT)
6108 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6109 new = gen_rtx_CONST (Pmode, new);
6110 new = gen_const_mem (Pmode, new);
6111 set_mem_alias_set (new, ix86_GOT_alias_set ());
6113 if (reg == 0)
6114 reg = gen_reg_rtx (Pmode);
6115 /* Use gen_movsi directly, otherwise the address is loaded
6116 into a register for CSE. We don't want to CSE these addresses;
6117 instead we CSE addresses from the GOT table, so skip this. */
6118 emit_insn (gen_movsi (reg, new));
6119 new = reg;
6121 else
6123 /* This symbol must be referenced via a load from the
6124 Global Offset Table (@GOT). */
6126 if (reload_in_progress)
6127 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6128 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6129 new = gen_rtx_CONST (Pmode, new);
6130 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6131 new = gen_const_mem (Pmode, new);
6132 set_mem_alias_set (new, ix86_GOT_alias_set ());
6134 if (reg == 0)
6135 reg = gen_reg_rtx (Pmode);
6136 emit_move_insn (reg, new);
6137 new = reg;
6140 else
6142 if (GET_CODE (addr) == CONST_INT
6143 && !x86_64_immediate_operand (addr, VOIDmode))
6145 if (reg)
6147 emit_move_insn (reg, addr);
6148 new = reg;
6150 else
6151 new = force_reg (Pmode, addr);
6153 else if (GET_CODE (addr) == CONST)
6155 addr = XEXP (addr, 0);
6157 /* We must match stuff we generate before. Assume the only
6158 unspecs that can get here are ours. Not that we could do
6159 anything with them anyway.... */
6160 if (GET_CODE (addr) == UNSPEC
6161 || (GET_CODE (addr) == PLUS
6162 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6163 return orig;
6164 gcc_assert (GET_CODE (addr) == PLUS);
6166 if (GET_CODE (addr) == PLUS)
6168 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6170 /* Check first to see if this is a constant offset from a @GOTOFF
6171 symbol reference. */
6172 if (local_symbolic_operand (op0, Pmode)
6173 && GET_CODE (op1) == CONST_INT)
6175 if (!TARGET_64BIT)
6177 if (reload_in_progress)
6178 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6179 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6180 UNSPEC_GOTOFF);
6181 new = gen_rtx_PLUS (Pmode, new, op1);
6182 new = gen_rtx_CONST (Pmode, new);
6183 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6185 if (reg != 0)
6187 emit_move_insn (reg, new);
6188 new = reg;
6191 else
6193 if (INTVAL (op1) < -16*1024*1024
6194 || INTVAL (op1) >= 16*1024*1024)
6196 if (!x86_64_immediate_operand (op1, Pmode))
6197 op1 = force_reg (Pmode, op1);
6198 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6202 else
6204 base = legitimize_pic_address (XEXP (addr, 0), reg);
6205 new = legitimize_pic_address (XEXP (addr, 1),
6206 base == reg ? NULL_RTX : reg);
6208 if (GET_CODE (new) == CONST_INT)
6209 new = plus_constant (base, INTVAL (new));
6210 else
6212 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6214 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6215 new = XEXP (new, 1);
6217 new = gen_rtx_PLUS (Pmode, base, new);
6222 return new;
6225 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6227 static rtx
6228 get_thread_pointer (int to_reg)
6230 rtx tp, reg, insn;
6232 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6233 if (!to_reg)
6234 return tp;
6236 reg = gen_reg_rtx (Pmode);
6237 insn = gen_rtx_SET (VOIDmode, reg, tp);
6238 insn = emit_insn (insn);
6240 return reg;
6243 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6244 false if we expect this to be used for a memory address and true if
6245 we expect to load the address into a register. */
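/* As an illustration of the models handled below (assuming TARGET_GNU_TLS
   on ia32): TLS_MODEL_LOCAL_EXEC produces the address

       (plus <thread pointer> (const (unspec [x] UNSPEC_NTPOFF)))

   while TLS_MODEL_INITIAL_EXEC first loads the offset from memory through an
   UNSPEC_GOTNTPOFF/UNSPEC_GOTTPOFF reference and then adds it to the thread
   pointer.  The dynamic models instead emit a call through the
   tls_global_dynamic_* / tls_local_dynamic_base_* patterns.  */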
6247 static rtx
6248 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6250 rtx dest, base, off, pic;
6251 int type;
6253 switch (model)
6255 case TLS_MODEL_GLOBAL_DYNAMIC:
6256 dest = gen_reg_rtx (Pmode);
6257 if (TARGET_64BIT)
6259 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6261 start_sequence ();
6262 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
6263 insns = get_insns ();
6264 end_sequence ();
6266 emit_libcall_block (insns, dest, rax, x);
6268 else
6269 emit_insn (gen_tls_global_dynamic_32 (dest, x));
6270 break;
6272 case TLS_MODEL_LOCAL_DYNAMIC:
6273 base = gen_reg_rtx (Pmode);
6274 if (TARGET_64BIT)
6276 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
6278 start_sequence ();
6279 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
6280 insns = get_insns ();
6281 end_sequence ();
6283 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
6284 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
6285 emit_libcall_block (insns, base, rax, note);
6287 else
6288 emit_insn (gen_tls_local_dynamic_base_32 (base));
6290 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
6291 off = gen_rtx_CONST (Pmode, off);
6293 return gen_rtx_PLUS (Pmode, base, off);
6295 case TLS_MODEL_INITIAL_EXEC:
6296 if (TARGET_64BIT)
6298 pic = NULL;
6299 type = UNSPEC_GOTNTPOFF;
6301 else if (flag_pic)
6303 if (reload_in_progress)
6304 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6305 pic = pic_offset_table_rtx;
6306 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
6308 else if (!TARGET_GNU_TLS)
6310 pic = gen_reg_rtx (Pmode);
6311 emit_insn (gen_set_got (pic));
6312 type = UNSPEC_GOTTPOFF;
6314 else
6316 pic = NULL;
6317 type = UNSPEC_INDNTPOFF;
6320 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
6321 off = gen_rtx_CONST (Pmode, off);
6322 if (pic)
6323 off = gen_rtx_PLUS (Pmode, pic, off);
6324 off = gen_const_mem (Pmode, off);
6325 set_mem_alias_set (off, ix86_GOT_alias_set ());
6327 if (TARGET_64BIT || TARGET_GNU_TLS)
6329 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6330 off = force_reg (Pmode, off);
6331 return gen_rtx_PLUS (Pmode, base, off);
6333 else
6335 base = get_thread_pointer (true);
6336 dest = gen_reg_rtx (Pmode);
6337 emit_insn (gen_subsi3 (dest, base, off));
6339 break;
6341 case TLS_MODEL_LOCAL_EXEC:
6342 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
6343 (TARGET_64BIT || TARGET_GNU_TLS)
6344 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
6345 off = gen_rtx_CONST (Pmode, off);
6347 if (TARGET_64BIT || TARGET_GNU_TLS)
6349 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6350 return gen_rtx_PLUS (Pmode, base, off);
6352 else
6354 base = get_thread_pointer (true);
6355 dest = gen_reg_rtx (Pmode);
6356 emit_insn (gen_subsi3 (dest, base, off));
6358 break;
6360 default:
6361 gcc_unreachable ();
6364 return dest;
6367 /* Try machine-dependent ways of modifying an illegitimate address
6368 to be legitimate. If we find one, return the new, valid address.
6369 This macro is used in only one place: `memory_address' in explow.c.
6371 OLDX is the address as it was before break_out_memory_refs was called.
6372 In some cases it is useful to look at this to decide what needs to be done.
6374 MODE and WIN are passed so that this macro can use
6375 GO_IF_LEGITIMATE_ADDRESS.
6377 It is always safe for this macro to do nothing. It exists to recognize
6378 opportunities to optimize the output.
6380 For the 80386, we handle X+REG by loading X into a register R and
6381 using R+REG. R will go in a general reg and indexing will be used.
6382 However, if REG is a broken-out memory address or multiplication,
6383 nothing needs to be done because REG can certainly go in a general reg.
6385 When -fpic is used, special handling is needed for symbolic references.
6386 See comments by legitimize_pic_address in i386.c for details. */
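/* A small example of the canonicalizations performed below: with -fpic a
   SYMBOLIC_CONST is handed off to legitimize_pic_address, and a shift used
   as an index, e.g.

       (plus (ashift (reg) (const_int 3)) (reg))

   is rewritten as

       (plus (mult (reg) (const_int 8)) (reg))

   so that it matches the canonical base + index*scale form.  */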
6389 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
6391 int changed = 0;
6392 unsigned log;
6394 if (TARGET_DEBUG_ADDR)
6396 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
6397 GET_MODE_NAME (mode));
6398 debug_rtx (x);
6401 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
6402 if (log)
6403 return legitimize_tls_address (x, log, false);
6404 if (GET_CODE (x) == CONST
6405 && GET_CODE (XEXP (x, 0)) == PLUS
6406 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6407 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
6409 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
6410 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
6413 if (flag_pic && SYMBOLIC_CONST (x))
6414 return legitimize_pic_address (x, 0);
6416 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
6417 if (GET_CODE (x) == ASHIFT
6418 && GET_CODE (XEXP (x, 1)) == CONST_INT
6419 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
6421 changed = 1;
6422 log = INTVAL (XEXP (x, 1));
6423 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
6424 GEN_INT (1 << log));
6427 if (GET_CODE (x) == PLUS)
6429 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
6431 if (GET_CODE (XEXP (x, 0)) == ASHIFT
6432 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6433 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
6435 changed = 1;
6436 log = INTVAL (XEXP (XEXP (x, 0), 1));
6437 XEXP (x, 0) = gen_rtx_MULT (Pmode,
6438 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
6439 GEN_INT (1 << log));
6442 if (GET_CODE (XEXP (x, 1)) == ASHIFT
6443 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
6444 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
6446 changed = 1;
6447 log = INTVAL (XEXP (XEXP (x, 1), 1));
6448 XEXP (x, 1) = gen_rtx_MULT (Pmode,
6449 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
6450 GEN_INT (1 << log));
6453 /* Put multiply first if it isn't already. */
6454 if (GET_CODE (XEXP (x, 1)) == MULT)
6456 rtx tmp = XEXP (x, 0);
6457 XEXP (x, 0) = XEXP (x, 1);
6458 XEXP (x, 1) = tmp;
6459 changed = 1;
6462 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
6463 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
6464 created by virtual register instantiation, register elimination, and
6465 similar optimizations. */
6466 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
6468 changed = 1;
6469 x = gen_rtx_PLUS (Pmode,
6470 gen_rtx_PLUS (Pmode, XEXP (x, 0),
6471 XEXP (XEXP (x, 1), 0)),
6472 XEXP (XEXP (x, 1), 1));
6475 /* Canonicalize
6476 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
6477 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
6478 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
6479 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6480 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
6481 && CONSTANT_P (XEXP (x, 1)))
6483 rtx constant;
6484 rtx other = NULL_RTX;
6486 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6488 constant = XEXP (x, 1);
6489 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6491 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6493 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6494 other = XEXP (x, 1);
6496 else
6497 constant = 0;
6499 if (constant)
6501 changed = 1;
6502 x = gen_rtx_PLUS (Pmode,
6503 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6504 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6505 plus_constant (other, INTVAL (constant)));
6509 if (changed && legitimate_address_p (mode, x, FALSE))
6510 return x;
6512 if (GET_CODE (XEXP (x, 0)) == MULT)
6514 changed = 1;
6515 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6518 if (GET_CODE (XEXP (x, 1)) == MULT)
6520 changed = 1;
6521 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6524 if (changed
6525 && GET_CODE (XEXP (x, 1)) == REG
6526 && GET_CODE (XEXP (x, 0)) == REG)
6527 return x;
6529 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6531 changed = 1;
6532 x = legitimize_pic_address (x, 0);
6535 if (changed && legitimate_address_p (mode, x, FALSE))
6536 return x;
6538 if (GET_CODE (XEXP (x, 0)) == REG)
6540 rtx temp = gen_reg_rtx (Pmode);
6541 rtx val = force_operand (XEXP (x, 1), temp);
6542 if (val != temp)
6543 emit_move_insn (temp, val);
6545 XEXP (x, 1) = temp;
6546 return x;
6549 else if (GET_CODE (XEXP (x, 1)) == REG)
6551 rtx temp = gen_reg_rtx (Pmode);
6552 rtx val = force_operand (XEXP (x, 0), temp);
6553 if (val != temp)
6554 emit_move_insn (temp, val);
6556 XEXP (x, 0) = temp;
6557 return x;
6561 return x;
6564 /* Print an integer constant expression in assembler syntax. Addition
6565 and subtraction are the only arithmetic that may appear in these
6566 expressions. FILE is the stdio stream to write to, X is the rtx, and
6567 CODE is the operand print code from the output string. */
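/* For example (symbol names are illustrative), the PIC constant

       (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))

   is printed as "foo@GOTOFF", and

       (const (plus (symbol_ref "foo") (const_int 4)))

   as "4+foo", since the integer constant is emitted first for the benefit
   of some assemblers.  */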
6569 static void
6570 output_pic_addr_const (FILE *file, rtx x, int code)
6572 char buf[256];
6574 switch (GET_CODE (x))
6576 case PC:
6577 gcc_assert (flag_pic);
6578 putc ('.', file);
6579 break;
6581 case SYMBOL_REF:
6582 assemble_name (file, XSTR (x, 0));
6583 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6584 fputs ("@PLT", file);
6585 break;
6587 case LABEL_REF:
6588 x = XEXP (x, 0);
6589 /* FALLTHRU */
6590 case CODE_LABEL:
6591 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6592 assemble_name (asm_out_file, buf);
6593 break;
6595 case CONST_INT:
6596 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6597 break;
6599 case CONST:
6600 /* This used to output parentheses around the expression,
6601 but that does not work on the 386 (either ATT or BSD assembler). */
6602 output_pic_addr_const (file, XEXP (x, 0), code);
6603 break;
6605 case CONST_DOUBLE:
6606 if (GET_MODE (x) == VOIDmode)
6608 /* We can use %d if the number is <32 bits and positive. */
6609 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6610 fprintf (file, "0x%lx%08lx",
6611 (unsigned long) CONST_DOUBLE_HIGH (x),
6612 (unsigned long) CONST_DOUBLE_LOW (x));
6613 else
6614 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6616 else
6617 /* We can't handle floating point constants;
6618 PRINT_OPERAND must handle them. */
6619 output_operand_lossage ("floating constant misused");
6620 break;
6622 case PLUS:
6623 /* Some assemblers need integer constants to appear first. */
6624 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6626 output_pic_addr_const (file, XEXP (x, 0), code);
6627 putc ('+', file);
6628 output_pic_addr_const (file, XEXP (x, 1), code);
6630 else
6632 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6633 output_pic_addr_const (file, XEXP (x, 1), code);
6634 putc ('+', file);
6635 output_pic_addr_const (file, XEXP (x, 0), code);
6637 break;
6639 case MINUS:
6640 if (!TARGET_MACHO)
6641 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6642 output_pic_addr_const (file, XEXP (x, 0), code);
6643 putc ('-', file);
6644 output_pic_addr_const (file, XEXP (x, 1), code);
6645 if (!TARGET_MACHO)
6646 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6647 break;
6649 case UNSPEC:
6650 gcc_assert (XVECLEN (x, 0) == 1);
6651 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6652 switch (XINT (x, 1))
6654 case UNSPEC_GOT:
6655 fputs ("@GOT", file);
6656 break;
6657 case UNSPEC_GOTOFF:
6658 fputs ("@GOTOFF", file);
6659 break;
6660 case UNSPEC_GOTPCREL:
6661 fputs ("@GOTPCREL(%rip)", file);
6662 break;
6663 case UNSPEC_GOTTPOFF:
6664 /* FIXME: This might be @TPOFF in Sun ld too. */
6665 fputs ("@GOTTPOFF", file);
6666 break;
6667 case UNSPEC_TPOFF:
6668 fputs ("@TPOFF", file);
6669 break;
6670 case UNSPEC_NTPOFF:
6671 if (TARGET_64BIT)
6672 fputs ("@TPOFF", file);
6673 else
6674 fputs ("@NTPOFF", file);
6675 break;
6676 case UNSPEC_DTPOFF:
6677 fputs ("@DTPOFF", file);
6678 break;
6679 case UNSPEC_GOTNTPOFF:
6680 if (TARGET_64BIT)
6681 fputs ("@GOTTPOFF(%rip)", file);
6682 else
6683 fputs ("@GOTNTPOFF", file);
6684 break;
6685 case UNSPEC_INDNTPOFF:
6686 fputs ("@INDNTPOFF", file);
6687 break;
6688 default:
6689 output_operand_lossage ("invalid UNSPEC as operand");
6690 break;
6692 break;
6694 default:
6695 output_operand_lossage ("invalid expression as operand");
6699 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6700 We need to emit DTP-relative relocations. */
6702 static void
6703 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6705 fputs (ASM_LONG, file);
6706 output_addr_const (file, x);
6707 fputs ("@DTPOFF", file);
6708 switch (size)
6710 case 4:
6711 break;
6712 case 8:
6713 fputs (", 0", file);
6714 break;
6715 default:
6716 gcc_unreachable ();
6720 /* In the name of slightly smaller debug output, and to cater to
6721 general assembler lossage, recognize PIC+GOTOFF and turn it back
6722 into a direct symbol reference. */
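/* For instance, the PIC reference

       (plus (reg %ebx) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))

   is turned back into (symbol_ref "foo") by the routine below; the symbol
   name is only illustrative.  */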
6724 static rtx
6725 ix86_delegitimize_address (rtx orig_x)
6727 rtx x = orig_x, y;
6729 if (GET_CODE (x) == MEM)
6730 x = XEXP (x, 0);
6732 if (TARGET_64BIT)
6734 if (GET_CODE (x) != CONST
6735 || GET_CODE (XEXP (x, 0)) != UNSPEC
6736 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6737 || GET_CODE (orig_x) != MEM)
6738 return orig_x;
6739 return XVECEXP (XEXP (x, 0), 0, 0);
6742 if (GET_CODE (x) != PLUS
6743 || GET_CODE (XEXP (x, 1)) != CONST)
6744 return orig_x;
6746 if (GET_CODE (XEXP (x, 0)) == REG
6747 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6748 /* %ebx + GOT/GOTOFF */
6749 y = NULL;
6750 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6752 /* %ebx + %reg * scale + GOT/GOTOFF */
6753 y = XEXP (x, 0);
6754 if (GET_CODE (XEXP (y, 0)) == REG
6755 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6756 y = XEXP (y, 1);
6757 else if (GET_CODE (XEXP (y, 1)) == REG
6758 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6759 y = XEXP (y, 0);
6760 else
6761 return orig_x;
6762 if (GET_CODE (y) != REG
6763 && GET_CODE (y) != MULT
6764 && GET_CODE (y) != ASHIFT)
6765 return orig_x;
6767 else
6768 return orig_x;
6770 x = XEXP (XEXP (x, 1), 0);
6771 if (GET_CODE (x) == UNSPEC
6772 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6773 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6775 if (y)
6776 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6777 return XVECEXP (x, 0, 0);
6780 if (GET_CODE (x) == PLUS
6781 && GET_CODE (XEXP (x, 0)) == UNSPEC
6782 && GET_CODE (XEXP (x, 1)) == CONST_INT
6783 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6784 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6785 && GET_CODE (orig_x) != MEM)))
6787 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6788 if (y)
6789 return gen_rtx_PLUS (Pmode, y, x);
6790 return x;
6793 return orig_x;
6796 static void
6797 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6798 int fp, FILE *file)
6800 const char *suffix;
6802 if (mode == CCFPmode || mode == CCFPUmode)
6804 enum rtx_code second_code, bypass_code;
6805 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6806 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
6807 code = ix86_fp_compare_code_to_integer (code);
6808 mode = CCmode;
6810 if (reverse)
6811 code = reverse_condition (code);
6813 switch (code)
6815 case EQ:
6816 suffix = "e";
6817 break;
6818 case NE:
6819 suffix = "ne";
6820 break;
6821 case GT:
6822 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
6823 suffix = "g";
6824 break;
6825 case GTU:
6826 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6827 Those same assemblers have the same but opposite lossage on cmov. */
6828 gcc_assert (mode == CCmode);
6829 suffix = fp ? "nbe" : "a";
6830 break;
6831 case LT:
6832 switch (mode)
6834 case CCNOmode:
6835 case CCGOCmode:
6836 suffix = "s";
6837 break;
6839 case CCmode:
6840 case CCGCmode:
6841 suffix = "l";
6842 break;
6844 default:
6845 gcc_unreachable ();
6847 break;
6848 case LTU:
6849 gcc_assert (mode == CCmode);
6850 suffix = "b";
6851 break;
6852 case GE:
6853 switch (mode)
6855 case CCNOmode:
6856 case CCGOCmode:
6857 suffix = "ns";
6858 break;
6860 case CCmode:
6861 case CCGCmode:
6862 suffix = "ge";
6863 break;
6865 default:
6866 gcc_unreachable ();
6868 break;
6869 case GEU:
6870 /* ??? As above. */
6871 gcc_assert (mode == CCmode);
6872 suffix = fp ? "nb" : "ae";
6873 break;
6874 case LE:
6875 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
6876 suffix = "le";
6877 break;
6878 case LEU:
6879 gcc_assert (mode == CCmode);
6880 suffix = "be";
6881 break;
6882 case UNORDERED:
6883 suffix = fp ? "u" : "p";
6884 break;
6885 case ORDERED:
6886 suffix = fp ? "nu" : "np";
6887 break;
6888 default:
6889 gcc_unreachable ();
6891 fputs (suffix, file);
6894 /* Print the name of register X to FILE based on its machine mode and number.
6895 If CODE is 'w', pretend the mode is HImode.
6896 If CODE is 'b', pretend the mode is QImode.
6897 If CODE is 'k', pretend the mode is SImode.
6898 If CODE is 'q', pretend the mode is DImode.
6899 If CODE is 'h', pretend the reg is the 'high' byte register.
6900 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
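/* For example, given (reg:SI 2) (i.e. %ecx), code 'b' prints "%cl",
   code 'h' prints "%ch", code 'w' prints "%cx", code 'k' prints "%ecx"
   and, in 64bit mode, code 'q' prints "%rcx".  */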
6902 void
6903 print_reg (rtx x, int code, FILE *file)
6905 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
6906 && REGNO (x) != FRAME_POINTER_REGNUM
6907 && REGNO (x) != FLAGS_REG
6908 && REGNO (x) != FPSR_REG);
6910 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6911 putc ('%', file);
6913 if (code == 'w' || MMX_REG_P (x))
6914 code = 2;
6915 else if (code == 'b')
6916 code = 1;
6917 else if (code == 'k')
6918 code = 4;
6919 else if (code == 'q')
6920 code = 8;
6921 else if (code == 'y')
6922 code = 3;
6923 else if (code == 'h')
6924 code = 0;
6925 else
6926 code = GET_MODE_SIZE (GET_MODE (x));
6928 /* Irritatingly, AMD extended registers use a different naming convention
6929 from the normal registers. */
6930 if (REX_INT_REG_P (x))
6932 gcc_assert (TARGET_64BIT);
6933 switch (code)
6935 case 0:
6936 error ("extended registers have no high halves");
6937 break;
6938 case 1:
6939 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6940 break;
6941 case 2:
6942 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6943 break;
6944 case 4:
6945 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6946 break;
6947 case 8:
6948 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6949 break;
6950 default:
6951 error ("unsupported operand size for extended register");
6952 break;
6954 return;
6956 switch (code)
6958 case 3:
6959 if (STACK_TOP_P (x))
6961 fputs ("st(0)", file);
6962 break;
6964 /* FALLTHRU */
6965 case 8:
6966 case 4:
6967 case 12:
6968 if (! ANY_FP_REG_P (x))
6969 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6970 /* FALLTHRU */
6971 case 16:
6972 case 2:
6973 normal:
6974 fputs (hi_reg_name[REGNO (x)], file);
6975 break;
6976 case 1:
6977 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6978 goto normal;
6979 fputs (qi_reg_name[REGNO (x)], file);
6980 break;
6981 case 0:
6982 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6983 goto normal;
6984 fputs (qi_high_reg_name[REGNO (x)], file);
6985 break;
6986 default:
6987 gcc_unreachable ();
6991 /* Locate some local-dynamic symbol still in use by this function
6992 so that we can print its name in some tls_local_dynamic_base
6993 pattern. */
6995 static const char *
6996 get_some_local_dynamic_name (void)
6998 rtx insn;
7000 if (cfun->machine->some_ld_name)
7001 return cfun->machine->some_ld_name;
7003 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7004 if (INSN_P (insn)
7005 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7006 return cfun->machine->some_ld_name;
7008 gcc_unreachable ();
7011 static int
7012 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7014 rtx x = *px;
7016 if (GET_CODE (x) == SYMBOL_REF
7017 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7019 cfun->machine->some_ld_name = XSTR (x, 0);
7020 return 1;
7023 return 0;
7026 /* Meaning of CODE:
7027 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7028 C -- print opcode suffix for set/cmov insn.
7029 c -- like C, but print reversed condition
7030 F,f -- likewise, but for floating-point.
7031 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7032 otherwise nothing
7033 R -- print the prefix for register names.
7034 z -- print the opcode suffix for the size of the current operand.
7035 * -- print a star (in certain assembler syntax)
7036 A -- print an absolute memory reference.
7037 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7038 s -- print a shift double count, followed by the assembler's argument
7039 delimiter.
7040 b -- print the QImode name of the register for the indicated operand.
7041 %b0 would print %al if operands[0] is reg 0.
7042 w -- likewise, print the HImode name of the register.
7043 k -- likewise, print the SImode name of the register.
7044 q -- likewise, print the DImode name of the register.
7045 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7046 y -- print "st(0)" instead of "st" as a register.
7047 D -- print condition for SSE cmp instruction.
7048 P -- if PIC, print an @PLT suffix.
7049 X -- don't print any sort of PIC '@' suffix for a symbol.
7050 & -- print some in-use local-dynamic symbol name.
7051 H -- print a memory address offset by 8; used for sse high-parts
7054 void
7055 print_operand (FILE *file, rtx x, int code)
7057 if (code)
7059 switch (code)
7061 case '*':
7062 if (ASSEMBLER_DIALECT == ASM_ATT)
7063 putc ('*', file);
7064 return;
7066 case '&':
7067 assemble_name (file, get_some_local_dynamic_name ());
7068 return;
7070 case 'A':
7071 switch (ASSEMBLER_DIALECT)
7073 case ASM_ATT:
7074 putc ('*', file);
7075 break;
7077 case ASM_INTEL:
7078 /* Intel syntax. For absolute addresses, registers should not
7079 be surrounded by brackets. */
7080 if (GET_CODE (x) != REG)
7082 putc ('[', file);
7083 PRINT_OPERAND (file, x, 0);
7084 putc (']', file);
7085 return;
7087 break;
7089 default:
7090 gcc_unreachable ();
7093 PRINT_OPERAND (file, x, 0);
7094 return;
7097 case 'L':
7098 if (ASSEMBLER_DIALECT == ASM_ATT)
7099 putc ('l', file);
7100 return;
7102 case 'W':
7103 if (ASSEMBLER_DIALECT == ASM_ATT)
7104 putc ('w', file);
7105 return;
7107 case 'B':
7108 if (ASSEMBLER_DIALECT == ASM_ATT)
7109 putc ('b', file);
7110 return;
7112 case 'Q':
7113 if (ASSEMBLER_DIALECT == ASM_ATT)
7114 putc ('l', file);
7115 return;
7117 case 'S':
7118 if (ASSEMBLER_DIALECT == ASM_ATT)
7119 putc ('s', file);
7120 return;
7122 case 'T':
7123 if (ASSEMBLER_DIALECT == ASM_ATT)
7124 putc ('t', file);
7125 return;
7127 case 'z':
7128 /* 387 opcodes don't get size suffixes if the operands are
7129 registers. */
7130 if (STACK_REG_P (x))
7131 return;
7133 /* Likewise if using Intel opcodes. */
7134 if (ASSEMBLER_DIALECT == ASM_INTEL)
7135 return;
7137 /* Derive the opcode size suffix from the size of the operand. */
7138 switch (GET_MODE_SIZE (GET_MODE (x)))
7140 case 2:
7141 #ifdef HAVE_GAS_FILDS_FISTS
7142 putc ('s', file);
7143 #endif
7144 return;
7146 case 4:
7147 if (GET_MODE (x) == SFmode)
7149 putc ('s', file);
7150 return;
7152 else
7153 putc ('l', file);
7154 return;
7156 case 12:
7157 case 16:
7158 putc ('t', file);
7159 return;
7161 case 8:
7162 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7164 #ifdef GAS_MNEMONICS
7165 putc ('q', file);
7166 #else
7167 putc ('l', file);
7168 putc ('l', file);
7169 #endif
7171 else
7172 putc ('l', file);
7173 return;
7175 default:
7176 gcc_unreachable ();
7179 case 'b':
7180 case 'w':
7181 case 'k':
7182 case 'q':
7183 case 'h':
7184 case 'y':
7185 case 'X':
7186 case 'P':
7187 break;
7189 case 's':
7190 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7192 PRINT_OPERAND (file, x, 0);
7193 putc (',', file);
7195 return;
7197 case 'D':
7198 /* A little bit of braindamage here. The SSE compare instructions
7199 use completely different names for the comparisons than the
7200 fp conditional moves do. */
7201 switch (GET_CODE (x))
7203 case EQ:
7204 case UNEQ:
7205 fputs ("eq", file);
7206 break;
7207 case LT:
7208 case UNLT:
7209 fputs ("lt", file);
7210 break;
7211 case LE:
7212 case UNLE:
7213 fputs ("le", file);
7214 break;
7215 case UNORDERED:
7216 fputs ("unord", file);
7217 break;
7218 case NE:
7219 case LTGT:
7220 fputs ("neq", file);
7221 break;
7222 case UNGE:
7223 case GE:
7224 fputs ("nlt", file);
7225 break;
7226 case UNGT:
7227 case GT:
7228 fputs ("nle", file);
7229 break;
7230 case ORDERED:
7231 fputs ("ord", file);
7232 break;
7233 default:
7234 gcc_unreachable ();
7236 return;
7237 case 'O':
7238 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7239 if (ASSEMBLER_DIALECT == ASM_ATT)
7241 switch (GET_MODE (x))
7243 case HImode: putc ('w', file); break;
7244 case SImode:
7245 case SFmode: putc ('l', file); break;
7246 case DImode:
7247 case DFmode: putc ('q', file); break;
7248 default: gcc_unreachable ();
7250 putc ('.', file);
7252 #endif
7253 return;
7254 case 'C':
7255 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
7256 return;
7257 case 'F':
7258 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7259 if (ASSEMBLER_DIALECT == ASM_ATT)
7260 putc ('.', file);
7261 #endif
7262 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
7263 return;
7265 /* Like above, but reverse condition */
7266 case 'c':
7267 /* Check to see if argument to %c is really a constant
7268 and not a condition code which needs to be reversed. */
7269 if (!COMPARISON_P (x))
7271 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
7272 return;
7274 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
7275 return;
7276 case 'f':
7277 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7278 if (ASSEMBLER_DIALECT == ASM_ATT)
7279 putc ('.', file);
7280 #endif
7281 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
7282 return;
7284 case 'H':
7285 /* It doesn't actually matter what mode we use here, as we're
7286 only going to use this for printing. */
7287 x = adjust_address_nv (x, DImode, 8);
7288 break;
7290 case '+':
7292 rtx x;
7294 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
7295 return;
7297 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
7298 if (x)
7300 int pred_val = INTVAL (XEXP (x, 0));
7302 if (pred_val < REG_BR_PROB_BASE * 45 / 100
7303 || pred_val > REG_BR_PROB_BASE * 55 / 100)
7305 int taken = pred_val > REG_BR_PROB_BASE / 2;
7306 int cputaken = final_forward_branch_p (current_output_insn) == 0;
7308 /* Emit hints only in the case that the default branch prediction
7309 heuristics would fail. */
7310 if (taken != cputaken)
7312 /* We use 3e (DS) prefix for taken branches and
7313 2e (CS) prefix for not taken branches. */
7314 if (taken)
7315 fputs ("ds ; ", file);
7316 else
7317 fputs ("cs ; ", file);
7321 return;
7323 default:
7324 output_operand_lossage ("invalid operand code '%c'", code);
7328 if (GET_CODE (x) == REG)
7329 print_reg (x, code, file);
7331 else if (GET_CODE (x) == MEM)
7333 /* No `byte ptr' prefix for call instructions. */
7334 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
7336 const char * size;
7337 switch (GET_MODE_SIZE (GET_MODE (x)))
7339 case 1: size = "BYTE"; break;
7340 case 2: size = "WORD"; break;
7341 case 4: size = "DWORD"; break;
7342 case 8: size = "QWORD"; break;
7343 case 12: size = "XWORD"; break;
7344 case 16: size = "XMMWORD"; break;
7345 default:
7346 gcc_unreachable ();
7349 /* Check for explicit size override (codes 'b', 'w' and 'k') */
7350 if (code == 'b')
7351 size = "BYTE";
7352 else if (code == 'w')
7353 size = "WORD";
7354 else if (code == 'k')
7355 size = "DWORD";
7357 fputs (size, file);
7358 fputs (" PTR ", file);
7361 x = XEXP (x, 0);
7362 /* Avoid (%rip) for call operands. */
7363 if (CONSTANT_ADDRESS_P (x) && code == 'P'
7364 && GET_CODE (x) != CONST_INT)
7365 output_addr_const (file, x);
7366 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
7367 output_operand_lossage ("invalid constraints for operand");
7368 else
7369 output_address (x);
7372 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
7374 REAL_VALUE_TYPE r;
7375 long l;
7377 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7378 REAL_VALUE_TO_TARGET_SINGLE (r, l);
7380 if (ASSEMBLER_DIALECT == ASM_ATT)
7381 putc ('$', file);
7382 fprintf (file, "0x%08lx", l);
7385 /* These float cases don't actually occur as immediate operands. */
7386 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
7388 char dstr[30];
7390 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7391 fprintf (file, "%s", dstr);
7394 else if (GET_CODE (x) == CONST_DOUBLE
7395 && GET_MODE (x) == XFmode)
7397 char dstr[30];
7399 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7400 fprintf (file, "%s", dstr);
7403 else
7405 /* We have patterns that allow zero sets of memory, for instance.
7406 In 64-bit mode, we should probably support all 8-byte vectors,
7407 since we can in fact encode that into an immediate. */
7408 if (GET_CODE (x) == CONST_VECTOR)
7410 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
7411 x = const0_rtx;
7414 if (code != 'P')
7416 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
7418 if (ASSEMBLER_DIALECT == ASM_ATT)
7419 putc ('$', file);
7421 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
7422 || GET_CODE (x) == LABEL_REF)
7424 if (ASSEMBLER_DIALECT == ASM_ATT)
7425 putc ('$', file);
7426 else
7427 fputs ("OFFSET FLAT:", file);
7430 if (GET_CODE (x) == CONST_INT)
7431 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7432 else if (flag_pic)
7433 output_pic_addr_const (file, x, code);
7434 else
7435 output_addr_const (file, x);
7439 /* Print a memory operand whose address is ADDR. */
7441 void
7442 print_operand_address (FILE *file, rtx addr)
7444 struct ix86_address parts;
7445 rtx base, index, disp;
7446 int scale;
7447 int ok = ix86_decompose_address (addr, &parts);
7449 gcc_assert (ok);
7451 base = parts.base;
7452 index = parts.index;
7453 disp = parts.disp;
7454 scale = parts.scale;
7456 switch (parts.seg)
7458 case SEG_DEFAULT:
7459 break;
7460 case SEG_FS:
7461 case SEG_GS:
7462 if (USER_LABEL_PREFIX[0] == 0)
7463 putc ('%', file);
7464 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
7465 break;
7466 default:
7467 gcc_unreachable ();
7470 if (!base && !index)
7472 /* A displacement-only address requires special attention. */
7474 if (GET_CODE (disp) == CONST_INT)
7476 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
7478 if (USER_LABEL_PREFIX[0] == 0)
7479 putc ('%', file);
7480 fputs ("ds:", file);
7482 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
7484 else if (flag_pic)
7485 output_pic_addr_const (file, disp, 0);
7486 else
7487 output_addr_const (file, disp);
7489 /* Use one byte shorter RIP relative addressing for 64bit mode. */
7490 if (TARGET_64BIT)
7492 if (GET_CODE (disp) == CONST
7493 && GET_CODE (XEXP (disp, 0)) == PLUS
7494 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7495 disp = XEXP (XEXP (disp, 0), 0);
7496 if (GET_CODE (disp) == LABEL_REF
7497 || (GET_CODE (disp) == SYMBOL_REF
7498 && SYMBOL_REF_TLS_MODEL (disp) == 0))
7499 fputs ("(%rip)", file);
7502 else
7504 if (ASSEMBLER_DIALECT == ASM_ATT)
7506 if (disp)
7508 if (flag_pic)
7509 output_pic_addr_const (file, disp, 0);
7510 else if (GET_CODE (disp) == LABEL_REF)
7511 output_asm_label (disp);
7512 else
7513 output_addr_const (file, disp);
7516 putc ('(', file);
7517 if (base)
7518 print_reg (base, 0, file);
7519 if (index)
7521 putc (',', file);
7522 print_reg (index, 0, file);
7523 if (scale != 1)
7524 fprintf (file, ",%d", scale);
7526 putc (')', file);
7528 else
7530 rtx offset = NULL_RTX;
7532 if (disp)
7534 /* Pull out the offset of a symbol; print any symbol itself. */
7535 if (GET_CODE (disp) == CONST
7536 && GET_CODE (XEXP (disp, 0)) == PLUS
7537 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7539 offset = XEXP (XEXP (disp, 0), 1);
7540 disp = gen_rtx_CONST (VOIDmode,
7541 XEXP (XEXP (disp, 0), 0));
7544 if (flag_pic)
7545 output_pic_addr_const (file, disp, 0);
7546 else if (GET_CODE (disp) == LABEL_REF)
7547 output_asm_label (disp);
7548 else if (GET_CODE (disp) == CONST_INT)
7549 offset = disp;
7550 else
7551 output_addr_const (file, disp);
7554 putc ('[', file);
7555 if (base)
7557 print_reg (base, 0, file);
7558 if (offset)
7560 if (INTVAL (offset) >= 0)
7561 putc ('+', file);
7562 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7565 else if (offset)
7566 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7567 else
7568 putc ('0', file);
7570 if (index)
7572 putc ('+', file);
7573 print_reg (index, 0, file);
7574 if (scale != 1)
7575 fprintf (file, "*%d", scale);
7577 putc (']', file);
7582 bool
7583 output_addr_const_extra (FILE *file, rtx x)
7585 rtx op;
7587 if (GET_CODE (x) != UNSPEC)
7588 return false;
7590 op = XVECEXP (x, 0, 0);
7591 switch (XINT (x, 1))
7593 case UNSPEC_GOTTPOFF:
7594 output_addr_const (file, op);
7595 /* FIXME: This might be @TPOFF in Sun ld. */
7596 fputs ("@GOTTPOFF", file);
7597 break;
7598 case UNSPEC_TPOFF:
7599 output_addr_const (file, op);
7600 fputs ("@TPOFF", file);
7601 break;
7602 case UNSPEC_NTPOFF:
7603 output_addr_const (file, op);
7604 if (TARGET_64BIT)
7605 fputs ("@TPOFF", file);
7606 else
7607 fputs ("@NTPOFF", file);
7608 break;
7609 case UNSPEC_DTPOFF:
7610 output_addr_const (file, op);
7611 fputs ("@DTPOFF", file);
7612 break;
7613 case UNSPEC_GOTNTPOFF:
7614 output_addr_const (file, op);
7615 if (TARGET_64BIT)
7616 fputs ("@GOTTPOFF(%rip)", file);
7617 else
7618 fputs ("@GOTNTPOFF", file);
7619 break;
7620 case UNSPEC_INDNTPOFF:
7621 output_addr_const (file, op);
7622 fputs ("@INDNTPOFF", file);
7623 break;
7625 default:
7626 return false;
7629 return true;
7632 /* Split one or more DImode RTL references into pairs of SImode
7633 references. The RTL can be REG, offsettable MEM, integer constant, or
7634 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7635 split and "num" is its length. lo_half and hi_half are output arrays
7636 that parallel "operands". */
7638 void
7639 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7641 while (num--)
7643 rtx op = operands[num];
7645 /* simplify_subreg refuses to split volatile memory addresses,
7646 but we still have to handle them. */
7647 if (GET_CODE (op) == MEM)
7649 lo_half[num] = adjust_address (op, SImode, 0);
7650 hi_half[num] = adjust_address (op, SImode, 4);
7652 else
7654 lo_half[num] = simplify_gen_subreg (SImode, op,
7655 GET_MODE (op) == VOIDmode
7656 ? DImode : GET_MODE (op), 0);
7657 hi_half[num] = simplify_gen_subreg (SImode, op,
7658 GET_MODE (op) == VOIDmode
7659 ? DImode : GET_MODE (op), 4);
7663 /* Split one or more TImode RTL references into pairs of DImode
7664 references. The RTL can be REG, offsettable MEM, integer constant, or
7665 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
7666 split and "num" is its length. lo_half and hi_half are output arrays
7667 that parallel "operands". */
7669 void
7670 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7672 while (num--)
7674 rtx op = operands[num];
7676 /* simplify_subreg refuses to split volatile memory addresses, but we
7677 still have to handle them. */
7678 if (GET_CODE (op) == MEM)
7680 lo_half[num] = adjust_address (op, DImode, 0);
7681 hi_half[num] = adjust_address (op, DImode, 8);
7683 else
7685 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7686 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
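/* Editorial example (illustrative sketch, not compiled): split_di pairs a
   DImode value into SImode halves at byte offsets 0 and 4, and split_ti
   does the same with DImode halves at offsets 0 and 8.  The snippet below
   shows the equivalent lo/hi split of a plain 64-bit integer on a
   little-endian target such as x86.  */
#if 0
#include <stdint.h>

static void
split_u64_sketch (uint64_t value, uint32_t *lo, uint32_t *hi)
{
  *lo = (uint32_t) (value & 0xffffffffu);   /* bytes 0..3, like lo_half */
  *hi = (uint32_t) (value >> 32);           /* bytes 4..7, like hi_half */
}
#endif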
7691 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7692 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7693 is the expression of the binary operation. The output may either be
7694 emitted here, or returned to the caller, like all output_* functions.
7696 There is no guarantee that the operands are in the same mode, as they
7697 might be within FLOAT or FLOAT_EXTEND expressions. */
7699 #ifndef SYSV386_COMPAT
7700 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7701 wants to fix the assemblers because that causes incompatibility
7702 with gcc. No-one wants to fix gcc because that causes
7703 incompatibility with assemblers... You can use the option
7704 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7705 #define SYSV386_COMPAT 1
7706 #endif
7708 const char *
7709 output_387_binary_op (rtx insn, rtx *operands)
7711 static char buf[30];
7712 const char *p;
7713 const char *ssep;
7714 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
7716 #ifdef ENABLE_CHECKING
7717 /* Even if we do not want to check the inputs, this documents the input
7718 constraints, which helps in understanding the following code. */
7719 if (STACK_REG_P (operands[0])
7720 && ((REG_P (operands[1])
7721 && REGNO (operands[0]) == REGNO (operands[1])
7722 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7723 || (REG_P (operands[2])
7724 && REGNO (operands[0]) == REGNO (operands[2])
7725 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7726 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7727 ; /* ok */
7728 else
7729 gcc_assert (is_sse);
7730 #endif
7732 switch (GET_CODE (operands[3]))
7734 case PLUS:
7735 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7736 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7737 p = "fiadd";
7738 else
7739 p = "fadd";
7740 ssep = "add";
7741 break;
7743 case MINUS:
7744 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7745 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7746 p = "fisub";
7747 else
7748 p = "fsub";
7749 ssep = "sub";
7750 break;
7752 case MULT:
7753 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7754 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7755 p = "fimul";
7756 else
7757 p = "fmul";
7758 ssep = "mul";
7759 break;
7761 case DIV:
7762 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7763 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7764 p = "fidiv";
7765 else
7766 p = "fdiv";
7767 ssep = "div";
7768 break;
7770 default:
7771 gcc_unreachable ();
7774 if (is_sse)
7776 strcpy (buf, ssep);
7777 if (GET_MODE (operands[0]) == SFmode)
7778 strcat (buf, "ss\t{%2, %0|%0, %2}");
7779 else
7780 strcat (buf, "sd\t{%2, %0|%0, %2}");
7781 return buf;
7783 strcpy (buf, p);
7785 switch (GET_CODE (operands[3]))
7787 case MULT:
7788 case PLUS:
7789 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7791 rtx temp = operands[2];
7792 operands[2] = operands[1];
7793 operands[1] = temp;
7796 /* We now know that operands[0] == operands[1]. */
7798 if (GET_CODE (operands[2]) == MEM)
7800 p = "%z2\t%2";
7801 break;
7804 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7806 if (STACK_TOP_P (operands[0]))
7807 /* How is it that we are storing to a dead operand[2]?
7808 Well, presumably operands[1] is dead too. We can't
7809 store the result to st(0) as st(0) gets popped on this
7810 instruction. Instead store to operands[2] (which I
7811 think has to be st(1)). st(1) will be popped later.
7812 gcc <= 2.8.1 didn't have this check and generated
7813 assembly code that the Unixware assembler rejected. */
7814 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7815 else
7816 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7817 break;
7820 if (STACK_TOP_P (operands[0]))
7821 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7822 else
7823 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7824 break;
7826 case MINUS:
7827 case DIV:
7828 if (GET_CODE (operands[1]) == MEM)
7830 p = "r%z1\t%1";
7831 break;
7834 if (GET_CODE (operands[2]) == MEM)
7836 p = "%z2\t%2";
7837 break;
7840 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7842 #if SYSV386_COMPAT
7843 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7844 derived assemblers, confusingly reverse the direction of
7845 the operation for fsub{r} and fdiv{r} when the
7846 destination register is not st(0). The Intel assembler
7847 doesn't have this brain damage. Read !SYSV386_COMPAT to
7848 figure out what the hardware really does. */
7849 if (STACK_TOP_P (operands[0]))
7850 p = "{p\t%0, %2|rp\t%2, %0}";
7851 else
7852 p = "{rp\t%2, %0|p\t%0, %2}";
7853 #else
7854 if (STACK_TOP_P (operands[0]))
7855 /* As above for fmul/fadd, we can't store to st(0). */
7856 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7857 else
7858 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7859 #endif
7860 break;
7863 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7865 #if SYSV386_COMPAT
7866 if (STACK_TOP_P (operands[0]))
7867 p = "{rp\t%0, %1|p\t%1, %0}";
7868 else
7869 p = "{p\t%1, %0|rp\t%0, %1}";
7870 #else
7871 if (STACK_TOP_P (operands[0]))
7872 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7873 else
7874 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7875 #endif
7876 break;
7879 if (STACK_TOP_P (operands[0]))
7881 if (STACK_TOP_P (operands[1]))
7882 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7883 else
7884 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7885 break;
7887 else if (STACK_TOP_P (operands[1]))
7889 #if SYSV386_COMPAT
7890 p = "{\t%1, %0|r\t%0, %1}";
7891 #else
7892 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7893 #endif
7895 else
7897 #if SYSV386_COMPAT
7898 p = "{r\t%2, %0|\t%0, %2}";
7899 #else
7900 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7901 #endif
7903 break;
7905 default:
7906 gcc_unreachable ();
7909 strcat (buf, p);
7910 return buf;
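/* Editorial example (illustrative sketch, not compiled): in the SSE path
   above the template is simply the base operation name plus a precision
   suffix; the hypothetical helper below shows that composition, without
   the %-operand template that the real code appends.  */
#if 0
#include <string.h>

static const char *
sse_mnemonic_sketch (const char *base_op, int single_precision)
{
  static char buf[16];
  strcpy (buf, base_op);                        /* "add", "sub", "mul" or "div" */
  strcat (buf, single_precision ? "ss" : "sd"); /* scalar single vs. scalar double */
  return buf;                                   /* e.g. "addss" or "divsd" */
}
#endif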
7913 /* Return the mode needed for ENTITY in the optimize_mode_switching pass. */
7916 ix86_mode_needed (int entity, rtx insn)
7918 enum attr_i387_cw mode;
7920 /* The mode UNINITIALIZED is used to store the control word after a
7921 function call or ASM pattern. The mode ANY specifies that the function
7922 has no requirements on the control word and makes no changes to the
7923 bits we are interested in. */
7925 if (CALL_P (insn)
7926 || (NONJUMP_INSN_P (insn)
7927 && (asm_noperands (PATTERN (insn)) >= 0
7928 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
7929 return I387_CW_UNINITIALIZED;
7931 if (recog_memoized (insn) < 0)
7932 return I387_CW_ANY;
7934 mode = get_attr_i387_cw (insn);
7936 switch (entity)
7938 case I387_TRUNC:
7939 if (mode == I387_CW_TRUNC)
7940 return mode;
7941 break;
7943 case I387_FLOOR:
7944 if (mode == I387_CW_FLOOR)
7945 return mode;
7946 break;
7948 case I387_CEIL:
7949 if (mode == I387_CW_CEIL)
7950 return mode;
7951 break;
7953 case I387_MASK_PM:
7954 if (mode == I387_CW_MASK_PM)
7955 return mode;
7956 break;
7958 default:
7959 gcc_unreachable ();
7962 return I387_CW_ANY;
7965 /* Output code to initialize control word copies used by trunc?f?i and
7966 rounding patterns. CURRENT_MODE is set to the current control word,
7967 while NEW_MODE is set to the new control word. */
7969 void
7970 emit_i387_cw_initialization (int mode)
7972 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
7973 rtx new_mode;
7975 int slot;
7977 rtx reg = gen_reg_rtx (HImode);
7979 emit_insn (gen_x86_fnstcw_1 (stored_mode));
7980 emit_move_insn (reg, stored_mode);
7982 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
7984 switch (mode)
7986 case I387_CW_TRUNC:
7987 /* round toward zero (truncate) */
7988 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7989 slot = SLOT_CW_TRUNC;
7990 break;
7992 case I387_CW_FLOOR:
7993 /* round down toward -oo */
7994 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7995 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7996 slot = SLOT_CW_FLOOR;
7997 break;
7999 case I387_CW_CEIL:
8000 /* round up toward +oo */
8001 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8002 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8003 slot = SLOT_CW_CEIL;
8004 break;
8006 case I387_CW_MASK_PM:
8007 /* mask precision exception for nearbyint() */
8008 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8009 slot = SLOT_CW_MASK_PM;
8010 break;
8012 default:
8013 gcc_unreachable ();
8016 else
8018 switch (mode)
8020 case I387_CW_TRUNC:
8021 /* round toward zero (truncate) */
8022 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8023 slot = SLOT_CW_TRUNC;
8024 break;
8026 case I387_CW_FLOOR:
8027 /* round down toward -oo */
8028 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8029 slot = SLOT_CW_FLOOR;
8030 break;
8032 case I387_CW_CEIL:
8033 /* round up toward +oo */
8034 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8035 slot = SLOT_CW_CEIL;
8036 break;
8038 case I387_CW_MASK_PM:
8039 /* mask precision exception for nearbyint() */
8040 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8041 slot = SLOT_CW_MASK_PM;
8042 break;
8044 default:
8045 gcc_unreachable ();
8049 gcc_assert (slot < MAX_386_STACK_LOCALS);
8051 new_mode = assign_386_stack_local (HImode, slot);
8052 emit_move_insn (new_mode, reg);
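/* Editorial example (illustrative sketch, not compiled): the and/or (or
   insv) instructions emitted above modify bits 10-11 of the x87 control
   word, which select the rounding mode (00 nearest, 01 down, 10 up,
   11 truncate); bit 5 masks the precision exception.  The same arithmetic
   on an ordinary 16-bit value looks like this.  */
#if 0
#include <stdint.h>

static uint16_t
x87_cw_sketch (uint16_t old_cw, int slot)
{
  switch (slot)
    {
    case 0:  return old_cw | 0x0c00;              /* SLOT_CW_TRUNC: toward zero */
    case 1:  return (old_cw & ~0x0c00) | 0x0400;  /* SLOT_CW_FLOOR: toward -inf */
    case 2:  return (old_cw & ~0x0c00) | 0x0800;  /* SLOT_CW_CEIL: toward +inf */
    default: return old_cw | 0x0020;              /* SLOT_CW_MASK_PM: mask PE */
    }
}
#endif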
8055 /* Output code for INSN to convert a float to a signed int. OPERANDS
8056 are the insn operands. The output may be [HSD]Imode and the input
8057 operand may be [SDX]Fmode. */
8059 const char *
8060 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8062 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8063 int dimode_p = GET_MODE (operands[0]) == DImode;
8064 int round_mode = get_attr_i387_cw (insn);
8066 /* Jump through a hoop or two for DImode, since the hardware has no
8067 non-popping instruction. We used to do this a different way, but
8068 that was somewhat fragile and broke with post-reload splitters. */
8069 if ((dimode_p || fisttp) && !stack_top_dies)
8070 output_asm_insn ("fld\t%y1", operands);
8072 gcc_assert (STACK_TOP_P (operands[1]));
8073 gcc_assert (GET_CODE (operands[0]) == MEM);
8075 if (fisttp)
8076 output_asm_insn ("fisttp%z0\t%0", operands);
8077 else
8079 if (round_mode != I387_CW_ANY)
8080 output_asm_insn ("fldcw\t%3", operands);
8081 if (stack_top_dies || dimode_p)
8082 output_asm_insn ("fistp%z0\t%0", operands);
8083 else
8084 output_asm_insn ("fist%z0\t%0", operands);
8085 if (round_mode != I387_CW_ANY)
8086 output_asm_insn ("fldcw\t%2", operands);
8089 return "";
8092 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8093 should be used. UNORDERED_P is true when fucom should be used. */
8095 const char *
8096 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8098 int stack_top_dies;
8099 rtx cmp_op0, cmp_op1;
8100 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8102 if (eflags_p)
8104 cmp_op0 = operands[0];
8105 cmp_op1 = operands[1];
8107 else
8109 cmp_op0 = operands[1];
8110 cmp_op1 = operands[2];
8113 if (is_sse)
8115 if (GET_MODE (operands[0]) == SFmode)
8116 if (unordered_p)
8117 return "ucomiss\t{%1, %0|%0, %1}";
8118 else
8119 return "comiss\t{%1, %0|%0, %1}";
8120 else
8121 if (unordered_p)
8122 return "ucomisd\t{%1, %0|%0, %1}";
8123 else
8124 return "comisd\t{%1, %0|%0, %1}";
8127 gcc_assert (STACK_TOP_P (cmp_op0));
8129 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8131 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8133 if (stack_top_dies)
8135 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8136 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
8138 else
8139 return "ftst\n\tfnstsw\t%0";
8142 if (STACK_REG_P (cmp_op1)
8143 && stack_top_dies
8144 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8145 && REGNO (cmp_op1) != FIRST_STACK_REG)
8147 /* If the top of the 387 stack dies, and the other operand
8148 is also a stack register that dies, then this must be an
8149 `fcompp' float compare. */
8151 if (eflags_p)
8153 /* There is no double popping fcomi variant. Fortunately,
8154 eflags is immune from the fstp's cc clobbering. */
8155 if (unordered_p)
8156 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8157 else
8158 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8159 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
8161 else
8163 if (unordered_p)
8164 return "fucompp\n\tfnstsw\t%0";
8165 else
8166 return "fcompp\n\tfnstsw\t%0";
8169 else
8171 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8173 static const char * const alt[16] =
8175 "fcom%z2\t%y2\n\tfnstsw\t%0",
8176 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8177 "fucom%z2\t%y2\n\tfnstsw\t%0",
8178 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8180 "ficom%z2\t%y2\n\tfnstsw\t%0",
8181 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8182 NULL,
8183 NULL,
8185 "fcomi\t{%y1, %0|%0, %y1}",
8186 "fcomip\t{%y1, %0|%0, %y1}",
8187 "fucomi\t{%y1, %0|%0, %y1}",
8188 "fucomip\t{%y1, %0|%0, %y1}",
8190 NULL,
8191 NULL,
8192 NULL,
8193 NULL
8196 int mask;
8197 const char *ret;
8199 mask = eflags_p << 3;
8200 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
8201 mask |= unordered_p << 1;
8202 mask |= stack_top_dies;
8204 gcc_assert (mask < 16);
8205 ret = alt[mask];
8206 gcc_assert (ret);
8208 return ret;
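/* Editorial example (illustrative sketch, not compiled): the alt[] lookup
   above packs four predicates into a 4-bit index, most significant bit
   first, exactly as the mask computation shows.  */
#if 0
static int
fp_compare_alt_index_sketch (int eflags_p, int int_operand_p,
                             int unordered_p, int stack_top_dies)
{
  return (eflags_p << 3) | (int_operand_p << 2)
         | (unordered_p << 1) | stack_top_dies;   /* 0 .. 15 */
}
#endif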
8212 void
8213 ix86_output_addr_vec_elt (FILE *file, int value)
8215 const char *directive = ASM_LONG;
8217 #ifdef ASM_QUAD
8218 if (TARGET_64BIT)
8219 directive = ASM_QUAD;
8220 #else
8221 gcc_assert (!TARGET_64BIT);
8222 #endif
8224 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
8227 void
8228 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
8230 if (TARGET_64BIT)
8231 fprintf (file, "%s%s%d-%s%d\n",
8232 ASM_LONG, LPREFIX, value, LPREFIX, rel);
8233 else if (HAVE_AS_GOTOFF_IN_DATA)
8234 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
8235 #if TARGET_MACHO
8236 else if (TARGET_MACHO)
8238 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
8239 machopic_output_function_base_name (file);
8240 fprintf(file, "\n");
8242 #endif
8243 else
8244 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
8245 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
8248 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
8249 for the target. */
8251 void
8252 ix86_expand_clear (rtx dest)
8254 rtx tmp;
8256 /* We play register width games, which are only valid after reload. */
8257 gcc_assert (reload_completed);
8259 /* Avoid HImode and its attendant prefix byte. */
8260 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
8261 dest = gen_rtx_REG (SImode, REGNO (dest));
8263 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
8265 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
8266 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
8268 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
8269 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
8272 emit_insn (tmp);
8275 /* X is an unchanging MEM. If it is a constant pool reference, return
8276 the constant pool rtx, else NULL. */
8279 maybe_get_pool_constant (rtx x)
8281 x = ix86_delegitimize_address (XEXP (x, 0));
8283 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
8284 return get_pool_constant (x);
8286 return NULL_RTX;
8289 void
8290 ix86_expand_move (enum machine_mode mode, rtx operands[])
8292 int strict = (reload_in_progress || reload_completed);
8293 rtx op0, op1;
8294 enum tls_model model;
8296 op0 = operands[0];
8297 op1 = operands[1];
8299 if (GET_CODE (op1) == SYMBOL_REF)
8301 model = SYMBOL_REF_TLS_MODEL (op1);
8302 if (model)
8304 op1 = legitimize_tls_address (op1, model, true);
8305 op1 = force_operand (op1, op0);
8306 if (op1 == op0)
8307 return;
8310 else if (GET_CODE (op1) == CONST
8311 && GET_CODE (XEXP (op1, 0)) == PLUS
8312 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
8314 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
8315 if (model)
8317 rtx addend = XEXP (XEXP (op1, 0), 1);
8318 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
8319 op1 = force_operand (op1, NULL);
8320 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
8321 op0, 1, OPTAB_DIRECT);
8322 if (op1 == op0)
8323 return;
8327 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
8329 #if TARGET_MACHO
8330 if (MACHOPIC_PURE)
8332 rtx temp = ((reload_in_progress
8333 || ((op0 && GET_CODE (op0) == REG)
8334 && mode == Pmode))
8335 ? op0 : gen_reg_rtx (Pmode));
8336 op1 = machopic_indirect_data_reference (op1, temp);
8337 op1 = machopic_legitimize_pic_address (op1, mode,
8338 temp == op1 ? 0 : temp);
8340 else if (MACHOPIC_INDIRECT)
8341 op1 = machopic_indirect_data_reference (op1, 0);
8342 if (op0 == op1)
8343 return;
8344 #else
8345 if (GET_CODE (op0) == MEM)
8346 op1 = force_reg (Pmode, op1);
8347 else
8348 op1 = legitimize_address (op1, op1, Pmode);
8349 #endif /* TARGET_MACHO */
8351 else
8353 if (GET_CODE (op0) == MEM
8354 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
8355 || !push_operand (op0, mode))
8356 && GET_CODE (op1) == MEM)
8357 op1 = force_reg (mode, op1);
8359 if (push_operand (op0, mode)
8360 && ! general_no_elim_operand (op1, mode))
8361 op1 = copy_to_mode_reg (mode, op1);
8363 /* Force large constants in 64-bit compilation into a register
8364 to get them CSEed. */
8365 if (TARGET_64BIT && mode == DImode
8366 && immediate_operand (op1, mode)
8367 && !x86_64_zext_immediate_operand (op1, VOIDmode)
8368 && !register_operand (op0, mode)
8369 && optimize && !reload_completed && !reload_in_progress)
8370 op1 = copy_to_mode_reg (mode, op1);
8372 if (FLOAT_MODE_P (mode))
8374 /* If we are loading a floating point constant to a register,
8375 force the value to memory now, since we'll get better code
8376 out of the back end. */
8378 if (strict)
8380 else if (GET_CODE (op1) == CONST_DOUBLE)
8382 op1 = validize_mem (force_const_mem (mode, op1));
8383 if (!register_operand (op0, mode))
8385 rtx temp = gen_reg_rtx (mode);
8386 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
8387 emit_move_insn (op0, temp);
8388 return;
8394 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8397 void
8398 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
8400 rtx op0 = operands[0], op1 = operands[1];
8402 /* Force constants other than zero into memory. We do not know how
8403 the instructions used to build constants modify the upper 64 bits
8404 of the register; once we have that information we may be able
8405 to handle some of them more efficiently. */
8406 if ((reload_in_progress | reload_completed) == 0
8407 && register_operand (op0, mode)
8408 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
8409 op1 = validize_mem (force_const_mem (mode, op1));
8411 /* Make operand1 a register if it isn't already. */
8412 if (!no_new_pseudos
8413 && !register_operand (op0, mode)
8414 && !register_operand (op1, mode))
8416 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
8417 return;
8420 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8423 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
8424 straight to ix86_expand_vector_move. */
8426 void
8427 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
8429 rtx op0, op1, m;
8431 op0 = operands[0];
8432 op1 = operands[1];
8434 if (MEM_P (op1))
8436 /* If we're optimizing for size, movups is the smallest. */
8437 if (optimize_size)
8439 op0 = gen_lowpart (V4SFmode, op0);
8440 op1 = gen_lowpart (V4SFmode, op1);
8441 emit_insn (gen_sse_movups (op0, op1));
8442 return;
8445 /* ??? If we have typed data, then it would appear that using
8446 movdqu is the only way to get unaligned data loaded with
8447 integer type. */
8448 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8450 op0 = gen_lowpart (V16QImode, op0);
8451 op1 = gen_lowpart (V16QImode, op1);
8452 emit_insn (gen_sse2_movdqu (op0, op1));
8453 return;
8456 if (TARGET_SSE2 && mode == V2DFmode)
8458 rtx zero;
8460 /* When SSE registers are split into halves, we can avoid
8461 writing to the top half twice. */
8462 if (TARGET_SSE_SPLIT_REGS)
8464 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8465 zero = op0;
8467 else
8469 /* ??? Not sure about the best option for the Intel chips.
8470 The following would seem to satisfy; the register is
8471 entirely cleared, breaking the dependency chain. We
8472 then store to the upper half, with a dependency depth
8473 of one. A rumor has it that Intel recommends two movsd
8474 followed by an unpacklpd, but this is unconfirmed. And
8475 given that the dependency depth of the unpacklpd would
8476 still be one, I'm not sure why this would be better. */
8477 zero = CONST0_RTX (V2DFmode);
8480 m = adjust_address (op1, DFmode, 0);
8481 emit_insn (gen_sse2_loadlpd (op0, zero, m));
8482 m = adjust_address (op1, DFmode, 8);
8483 emit_insn (gen_sse2_loadhpd (op0, op0, m));
8485 else
8487 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
8488 emit_move_insn (op0, CONST0_RTX (mode));
8489 else
8490 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8492 if (mode != V4SFmode)
8493 op0 = gen_lowpart (V4SFmode, op0);
8494 m = adjust_address (op1, V2SFmode, 0);
8495 emit_insn (gen_sse_loadlps (op0, op0, m));
8496 m = adjust_address (op1, V2SFmode, 8);
8497 emit_insn (gen_sse_loadhps (op0, op0, m));
8500 else if (MEM_P (op0))
8502 /* If we're optimizing for size, movups is the smallest. */
8503 if (optimize_size)
8505 op0 = gen_lowpart (V4SFmode, op0);
8506 op1 = gen_lowpart (V4SFmode, op1);
8507 emit_insn (gen_sse_movups (op0, op1));
8508 return;
8511 /* ??? Similar to above, only less clear because of quote
8512 typeless stores unquote. */
8513 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
8514 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8516 op0 = gen_lowpart (V16QImode, op0);
8517 op1 = gen_lowpart (V16QImode, op1);
8518 emit_insn (gen_sse2_movdqu (op0, op1));
8519 return;
8522 if (TARGET_SSE2 && mode == V2DFmode)
8524 m = adjust_address (op0, DFmode, 0);
8525 emit_insn (gen_sse2_storelpd (m, op1));
8526 m = adjust_address (op0, DFmode, 8);
8527 emit_insn (gen_sse2_storehpd (m, op1));
8529 else
8531 if (mode != V4SFmode)
8532 op1 = gen_lowpart (V4SFmode, op1);
8533 m = adjust_address (op0, V2SFmode, 0);
8534 emit_insn (gen_sse_storelps (m, op1));
8535 m = adjust_address (op0, V2SFmode, 8);
8536 emit_insn (gen_sse_storehps (m, op1));
8539 else
8540 gcc_unreachable ();
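/* Editorial example (illustrative sketch, not compiled, no SSE
   intrinsics): when movups/movdqu is not chosen above, an unaligned
   16-byte access is assembled from two 8-byte halves at offsets 0 and 8,
   which is what the loadlpd/loadhpd (and loadlps/loadhps) pairs do.  */
#if 0
#include <string.h>

static void
unaligned_v2df_load_sketch (double dst[2], const unsigned char *src)
{
  memcpy (&dst[0], src, 8);       /* low half, like sse2_loadlpd */
  memcpy (&dst[1], src + 8, 8);   /* high half, like sse2_loadhpd */
}
#endif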
8543 /* Expand a push in MODE. This is some mode for which we do not support
8544 proper push instructions, at least from the registers that we expect
8545 the value to live in. */
8547 void
8548 ix86_expand_push (enum machine_mode mode, rtx x)
8550 rtx tmp;
8552 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
8553 GEN_INT (-GET_MODE_SIZE (mode)),
8554 stack_pointer_rtx, 1, OPTAB_DIRECT);
8555 if (tmp != stack_pointer_rtx)
8556 emit_move_insn (stack_pointer_rtx, tmp);
8558 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
8559 emit_move_insn (tmp, x);
8562 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
8563 destination to use for the operation. If different from the true
8564 destination in operands[0], a copy operation will be required. */
8567 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
8568 rtx operands[])
8570 int matching_memory;
8571 rtx src1, src2, dst;
8573 dst = operands[0];
8574 src1 = operands[1];
8575 src2 = operands[2];
8577 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8578 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8579 && (rtx_equal_p (dst, src2)
8580 || immediate_operand (src1, mode)))
8582 rtx temp = src1;
8583 src1 = src2;
8584 src2 = temp;
8587 /* If the destination is memory, and we do not have matching source
8588 operands, do things in registers. */
8589 matching_memory = 0;
8590 if (GET_CODE (dst) == MEM)
8592 if (rtx_equal_p (dst, src1))
8593 matching_memory = 1;
8594 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8595 && rtx_equal_p (dst, src2))
8596 matching_memory = 2;
8597 else
8598 dst = gen_reg_rtx (mode);
8601 /* Both source operands cannot be in memory. */
8602 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8604 if (matching_memory != 2)
8605 src2 = force_reg (mode, src2);
8606 else
8607 src1 = force_reg (mode, src1);
8610 /* If the operation is not commutative, source 1 cannot be a constant
8611 or non-matching memory. */
8612 if ((CONSTANT_P (src1)
8613 || (!matching_memory && GET_CODE (src1) == MEM))
8614 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8615 src1 = force_reg (mode, src1);
8617 src1 = operands[1] = src1;
8618 src2 = operands[2] = src2;
8619 return dst;
8622 /* Similarly, but assume that the destination has already been
8623 set up properly. */
8625 void
8626 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
8627 enum machine_mode mode, rtx operands[])
8629 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
8630 gcc_assert (dst == operands[0]);
8633 /* Attempt to expand a binary operator. Make the expansion closer to the
8634 actual machine, than just general_operand, which will allow 3 separate
8635 memory references (one output, two inputs) in a single insn. */
8637 void
8638 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8639 rtx operands[])
8641 rtx src1, src2, dst, op, clob;
8643 dst = ix86_fixup_binary_operands (code, mode, operands);
8644 src1 = operands[1];
8645 src2 = operands[2];
8647 /* Emit the instruction. */
8649 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8650 if (reload_in_progress)
8652 /* Reload doesn't know about the flags register, and doesn't know that
8653 it doesn't want to clobber it. We can only do this with PLUS. */
8654 gcc_assert (code == PLUS);
8655 emit_insn (op);
8657 else
8659 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8660 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8663 /* Fix up the destination if needed. */
8664 if (dst != operands[0])
8665 emit_move_insn (operands[0], dst);
8668 /* Return TRUE or FALSE depending on whether the binary operator meets the
8669 appropriate constraints. */
8672 ix86_binary_operator_ok (enum rtx_code code,
8673 enum machine_mode mode ATTRIBUTE_UNUSED,
8674 rtx operands[3])
8676 /* Both source operands cannot be in memory. */
8677 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8678 return 0;
8679 /* If the operation is not commutative, source 1 cannot be a constant. */
8680 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8681 return 0;
8682 /* If the destination is memory, we must have a matching source operand. */
8683 if (GET_CODE (operands[0]) == MEM
8684 && ! (rtx_equal_p (operands[0], operands[1])
8685 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8686 && rtx_equal_p (operands[0], operands[2]))))
8687 return 0;
8688 /* If the operation is not commutative and source 1 is memory, we must
8689 have a matching destination. */
8690 if (GET_CODE (operands[1]) == MEM
8691 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
8692 && ! rtx_equal_p (operands[0], operands[1]))
8693 return 0;
8694 return 1;
8697 /* Attempt to expand a unary operator. Make the expansion closer to the
8698 actual machine, than just general_operand, which will allow 2 separate
8699 memory references (one output, one input) in a single insn. */
8701 void
8702 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8703 rtx operands[])
8705 int matching_memory;
8706 rtx src, dst, op, clob;
8708 dst = operands[0];
8709 src = operands[1];
8711 /* If the destination is memory, and we do not have matching source
8712 operands, do things in registers. */
8713 matching_memory = 0;
8714 if (MEM_P (dst))
8716 if (rtx_equal_p (dst, src))
8717 matching_memory = 1;
8718 else
8719 dst = gen_reg_rtx (mode);
8722 /* When source operand is memory, destination must match. */
8723 if (MEM_P (src) && !matching_memory)
8724 src = force_reg (mode, src);
8726 /* Emit the instruction. */
8728 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8729 if (reload_in_progress || code == NOT)
8731 /* Reload doesn't know about the flags register, and doesn't know that
8732 it doesn't want to clobber it. */
8733 gcc_assert (code == NOT);
8734 emit_insn (op);
8736 else
8738 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8739 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8742 /* Fix up the destination if needed. */
8743 if (dst != operands[0])
8744 emit_move_insn (operands[0], dst);
8747 /* Return TRUE or FALSE depending on whether the unary operator meets the
8748 appropriate constraints. */
8751 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8752 enum machine_mode mode ATTRIBUTE_UNUSED,
8753 rtx operands[2] ATTRIBUTE_UNUSED)
8755 /* If one of the operands is memory, source and destination must match. */
8756 if ((GET_CODE (operands[0]) == MEM
8757 || GET_CODE (operands[1]) == MEM)
8758 && ! rtx_equal_p (operands[0], operands[1]))
8759 return FALSE;
8760 return TRUE;
8763 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
8764 Create a mask for the sign bit in MODE for an SSE register. If VECT is
8765 true, then replicate the mask for all elements of the vector register.
8766 If INVERT is true, then create a mask excluding the sign bit. */
8769 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
8771 enum machine_mode vec_mode;
8772 HOST_WIDE_INT hi, lo;
8773 int shift = 63;
8774 rtvec v;
8775 rtx mask;
8777 /* Find the sign bit, sign extended to 2*HWI. */
8778 if (mode == SFmode)
8779 lo = 0x80000000, hi = lo < 0;
8780 else if (HOST_BITS_PER_WIDE_INT >= 64)
8781 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8782 else
8783 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8785 if (invert)
8786 lo = ~lo, hi = ~hi;
8788 /* Force this value into the low part of a fp vector constant. */
8789 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8790 mask = gen_lowpart (mode, mask);
8792 if (mode == SFmode)
8794 if (vect)
8795 v = gen_rtvec (4, mask, mask, mask, mask);
8796 else
8797 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8798 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8799 vec_mode = V4SFmode;
8801 else
8803 if (vect)
8804 v = gen_rtvec (2, mask, mask);
8805 else
8806 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8807 vec_mode = V2DFmode;
8810 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
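/* Editorial example (illustrative sketch, not compiled): the vector
   constant built above holds the IEEE sign bit of the element mode,
   0x80000000 for SFmode and 0x8000000000000000 for DFmode, or its
   complement when INVERT is true.  */
#if 0
#include <stdint.h>

static uint64_t
signbit_mask_sketch (int single_p, int invert)
{
  if (single_p)
    {
      uint32_t mask = 0x80000000u;
      return invert ? (uint32_t) ~mask : mask;   /* 0x7fffffff : 0x80000000 */
    }
  else
    {
      uint64_t mask = 0x8000000000000000u;
      return invert ? ~mask : mask;
    }
}
#endif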
8813 /* Generate code for floating point ABS or NEG. */
8815 void
8816 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
8817 rtx operands[])
8819 rtx mask, set, use, clob, dst, src;
8820 bool matching_memory;
8821 bool use_sse = false;
8822 bool vector_mode = VECTOR_MODE_P (mode);
8823 enum machine_mode elt_mode = mode;
8825 if (vector_mode)
8827 elt_mode = GET_MODE_INNER (mode);
8828 use_sse = true;
8830 else if (TARGET_SSE_MATH)
8831 use_sse = SSE_FLOAT_MODE_P (mode);
8833 /* NEG and ABS performed with SSE use bitwise mask operations.
8834 Create the appropriate mask now. */
8835 if (use_sse)
8836 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
8837 else
8839 /* When not using SSE, we don't use the mask, but prefer to keep the
8840 same general form of the insn pattern to reduce duplication when
8841 it comes time to split. */
8842 mask = const0_rtx;
8845 dst = operands[0];
8846 src = operands[1];
8848 /* If the destination is memory, and we don't have matching source
8849 operands, do things in registers. */
8850 matching_memory = false;
8851 if (MEM_P (dst))
8853 if (rtx_equal_p (dst, src))
8854 matching_memory = true;
8855 else
8856 dst = gen_reg_rtx (mode);
8858 if (MEM_P (src) && !matching_memory)
8859 src = force_reg (mode, src);
8861 if (vector_mode)
8863 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8864 set = gen_rtx_SET (VOIDmode, dst, set);
8865 emit_insn (set);
8867 else
8869 set = gen_rtx_fmt_e (code, mode, src);
8870 set = gen_rtx_SET (VOIDmode, dst, set);
8871 use = gen_rtx_USE (VOIDmode, mask);
8872 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8873 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8876 if (dst != operands[0])
8877 emit_move_insn (operands[0], dst);
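/* Editorial example (illustrative sketch, not compiled): with SSE the
   expander above reduces NEG to an XOR with the sign-bit mask and ABS to
   an AND with the inverted mask.  The same bit operations on a scalar
   float, done through memcpy to stay within defined behavior:  */
#if 0
#include <stdint.h>
#include <string.h>

static float
fp_absneg_sketch (float x, int is_abs)
{
  uint32_t bits;
  memcpy (&bits, &x, sizeof bits);
  if (is_abs)
    bits &= 0x7fffffffu;   /* ABS: clear the sign bit (AND with inverted mask) */
  else
    bits ^= 0x80000000u;   /* NEG: flip the sign bit (XOR with sign mask) */
  memcpy (&x, &bits, sizeof x);
  return x;
}
#endif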
8880 /* Expand a copysign operation. Special case operand 0 being a constant. */
8882 void
8883 ix86_expand_copysign (rtx operands[])
8885 enum machine_mode mode, vmode;
8886 rtx dest, op0, op1, mask, nmask;
8888 dest = operands[0];
8889 op0 = operands[1];
8890 op1 = operands[2];
8892 mode = GET_MODE (dest);
8893 vmode = mode == SFmode ? V4SFmode : V2DFmode;
8895 if (GET_CODE (op0) == CONST_DOUBLE)
8897 rtvec v;
8899 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
8900 op0 = simplify_unary_operation (ABS, mode, op0, mode);
8902 if (op0 == CONST0_RTX (mode))
8903 op0 = CONST0_RTX (vmode);
8904 else
8906 if (mode == SFmode)
8907 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
8908 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8909 else
8910 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
8911 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
8914 mask = ix86_build_signbit_mask (mode, 0, 0);
8916 if (mode == SFmode)
8917 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
8918 else
8919 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
8921 else
8923 nmask = ix86_build_signbit_mask (mode, 0, 1);
8924 mask = ix86_build_signbit_mask (mode, 0, 0);
8926 if (mode == SFmode)
8927 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
8928 else
8929 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
8933 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
8934 be a constant, and so has already been expanded into a vector constant. */
8936 void
8937 ix86_split_copysign_const (rtx operands[])
8939 enum machine_mode mode, vmode;
8940 rtx dest, op0, op1, mask, x;
8942 dest = operands[0];
8943 op0 = operands[1];
8944 op1 = operands[2];
8945 mask = operands[3];
8947 mode = GET_MODE (dest);
8948 vmode = GET_MODE (mask);
8950 dest = simplify_gen_subreg (vmode, dest, mode, 0);
8951 x = gen_rtx_AND (vmode, dest, mask);
8952 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8954 if (op0 != CONST0_RTX (vmode))
8956 x = gen_rtx_IOR (vmode, dest, op0);
8957 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8961 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
8962 so we have to do two masks. */
8964 void
8965 ix86_split_copysign_var (rtx operands[])
8967 enum machine_mode mode, vmode;
8968 rtx dest, scratch, op0, op1, mask, nmask, x;
8970 dest = operands[0];
8971 scratch = operands[1];
8972 op0 = operands[2];
8973 op1 = operands[3];
8974 nmask = operands[4];
8975 mask = operands[5];
8977 mode = GET_MODE (dest);
8978 vmode = GET_MODE (mask);
8980 if (rtx_equal_p (op0, op1))
8982 /* Shouldn't happen often (it's useless, obviously), but when it does
8983 we'd generate incorrect code if we continue below. */
8984 emit_move_insn (dest, op0);
8985 return;
8988 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
8990 gcc_assert (REGNO (op1) == REGNO (scratch));
8992 x = gen_rtx_AND (vmode, scratch, mask);
8993 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8995 dest = mask;
8996 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8997 x = gen_rtx_NOT (vmode, dest);
8998 x = gen_rtx_AND (vmode, x, op0);
8999 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9001 else
9003 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9005 x = gen_rtx_AND (vmode, scratch, mask);
9007 else /* alternative 2,4 */
9009 gcc_assert (REGNO (mask) == REGNO (scratch));
9010 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9011 x = gen_rtx_AND (vmode, scratch, op1);
9013 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9015 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9017 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9018 x = gen_rtx_AND (vmode, dest, nmask);
9020 else /* alternative 3,4 */
9022 gcc_assert (REGNO (nmask) == REGNO (dest));
9023 dest = nmask;
9024 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9025 x = gen_rtx_AND (vmode, dest, op0);
9027 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9030 x = gen_rtx_IOR (vmode, dest, scratch);
9031 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
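/* Editorial example (illustrative sketch, not compiled): the two-mask
   split above computes copysign as (op0 & nmask) | (op1 & mask), where
   mask selects the sign bit and nmask is its complement.  Scalar double
   equivalent:  */
#if 0
#include <stdint.h>
#include <string.h>

static double
copysign_sketch (double magnitude, double sign)
{
  const uint64_t mask = 0x8000000000000000u;   /* sign bit */
  uint64_t m, s;
  memcpy (&m, &magnitude, sizeof m);
  memcpy (&s, &sign, sizeof s);
  m = (m & ~mask) | (s & mask);   /* keep magnitude bits, take the sign bit */
  memcpy (&magnitude, &m, sizeof magnitude);
  return magnitude;
}
#endif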
9034 /* Return TRUE or FALSE depending on whether the first SET in INSN
9035 has source and destination with matching CC modes, and whether the
9036 CC mode is at least as constrained as REQ_MODE. */
9039 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9041 rtx set;
9042 enum machine_mode set_mode;
9044 set = PATTERN (insn);
9045 if (GET_CODE (set) == PARALLEL)
9046 set = XVECEXP (set, 0, 0);
9047 gcc_assert (GET_CODE (set) == SET);
9048 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9050 set_mode = GET_MODE (SET_DEST (set));
9051 switch (set_mode)
9053 case CCNOmode:
9054 if (req_mode != CCNOmode
9055 && (req_mode != CCmode
9056 || XEXP (SET_SRC (set), 1) != const0_rtx))
9057 return 0;
9058 break;
9059 case CCmode:
9060 if (req_mode == CCGCmode)
9061 return 0;
9062 /* FALLTHRU */
9063 case CCGCmode:
9064 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9065 return 0;
9066 /* FALLTHRU */
9067 case CCGOCmode:
9068 if (req_mode == CCZmode)
9069 return 0;
9070 /* FALLTHRU */
9071 case CCZmode:
9072 break;
9074 default:
9075 gcc_unreachable ();
9078 return (GET_MODE (SET_SRC (set)) == set_mode);
9081 /* Generate insn patterns to do an integer compare of OPERANDS. */
9083 static rtx
9084 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9086 enum machine_mode cmpmode;
9087 rtx tmp, flags;
9089 cmpmode = SELECT_CC_MODE (code, op0, op1);
9090 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9092 /* This is very simple, but making the interface the same as in the
9093 FP case makes the rest of the code easier. */
9094 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9095 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9097 /* Return the test that should be put into the flags user, i.e.
9098 the bcc, scc, or cmov instruction. */
9099 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9102 /* Figure out whether to use ordered or unordered fp comparisons.
9103 Return the appropriate mode to use. */
9105 enum machine_mode
9106 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9108 /* ??? In order to make all comparisons reversible, we do all comparisons
9109 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9110 between trapping and nontrapping forms of all comparisons, we can make inequality
9111 comparisons trapping again, since it results in better code when using
9112 FCOM based compares. */
9113 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9116 enum machine_mode
9117 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9119 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9120 return ix86_fp_compare_mode (code);
9121 switch (code)
9123 /* Only zero flag is needed. */
9124 case EQ: /* ZF=0 */
9125 case NE: /* ZF!=0 */
9126 return CCZmode;
9127 /* Codes needing carry flag. */
9128 case GEU: /* CF=0 */
9129 case GTU: /* CF=0 & ZF=0 */
9130 case LTU: /* CF=1 */
9131 case LEU: /* CF=1 | ZF=1 */
9132 return CCmode;
9133 /* Codes possibly doable only with sign flag when
9134 comparing against zero. */
9135 case GE: /* SF=OF or SF=0 */
9136 case LT: /* SF<>OF or SF=1 */
9137 if (op1 == const0_rtx)
9138 return CCGOCmode;
9139 else
9140 /* For other cases Carry flag is not required. */
9141 return CCGCmode;
9142 /* Codes doable only with the sign flag when comparing
9143 against zero, but we miss the jump instruction for it,
9144 so we need to use relational tests against the overflow
9145 flag, which thus needs to be zero. */
9146 case GT: /* ZF=0 & SF=OF */
9147 case LE: /* ZF=1 | SF<>OF */
9148 if (op1 == const0_rtx)
9149 return CCNOmode;
9150 else
9151 return CCGCmode;
9152 /* The strcmp pattern does (use flags), and combine may ask us for the
9153 proper mode. */
9154 case USE:
9155 return CCmode;
9156 default:
9157 gcc_unreachable ();
9161 /* Return the fixed registers used for condition codes. */
9163 static bool
9164 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9166 *p1 = FLAGS_REG;
9167 *p2 = FPSR_REG;
9168 return true;
9171 /* If two condition code modes are compatible, return a condition code
9172 mode which is compatible with both. Otherwise, return
9173 VOIDmode. */
9175 static enum machine_mode
9176 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9178 if (m1 == m2)
9179 return m1;
9181 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
9182 return VOIDmode;
9184 if ((m1 == CCGCmode && m2 == CCGOCmode)
9185 || (m1 == CCGOCmode && m2 == CCGCmode))
9186 return CCGCmode;
9188 switch (m1)
9190 default:
9191 gcc_unreachable ();
9193 case CCmode:
9194 case CCGCmode:
9195 case CCGOCmode:
9196 case CCNOmode:
9197 case CCZmode:
9198 switch (m2)
9200 default:
9201 return VOIDmode;
9203 case CCmode:
9204 case CCGCmode:
9205 case CCGOCmode:
9206 case CCNOmode:
9207 case CCZmode:
9208 return CCmode;
9211 case CCFPmode:
9212 case CCFPUmode:
9213 /* These are only compatible with themselves, which we already
9214 checked above. */
9215 return VOIDmode;
9219 /* Return true if we should use an FCOMI instruction for this fp comparison. */
9222 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
9224 enum rtx_code swapped_code = swap_condition (code);
9225 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
9226 || (ix86_fp_comparison_cost (swapped_code)
9227 == ix86_fp_comparison_fcomi_cost (swapped_code)));
9230 /* Swap, force into registers, or otherwise massage the two operands
9231 to an fp comparison. The operands are updated in place; the new
9232 comparison code is returned. */
9234 static enum rtx_code
9235 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
9237 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
9238 rtx op0 = *pop0, op1 = *pop1;
9239 enum machine_mode op_mode = GET_MODE (op0);
9240 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
9242 /* All of the unordered compare instructions only work on registers.
9243 The same is true of the fcomi compare instructions. The XFmode
9244 compare instructions require registers except when comparing
9245 against zero or when converting operand 1 from fixed point to
9246 floating point. */
9248 if (!is_sse
9249 && (fpcmp_mode == CCFPUmode
9250 || (op_mode == XFmode
9251 && ! (standard_80387_constant_p (op0) == 1
9252 || standard_80387_constant_p (op1) == 1)
9253 && GET_CODE (op1) != FLOAT)
9254 || ix86_use_fcomi_compare (code)))
9256 op0 = force_reg (op_mode, op0);
9257 op1 = force_reg (op_mode, op1);
9259 else
9261 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
9262 things around if they appear profitable, otherwise force op0
9263 into a register. */
9265 if (standard_80387_constant_p (op0) == 0
9266 || (GET_CODE (op0) == MEM
9267 && ! (standard_80387_constant_p (op1) == 0
9268 || GET_CODE (op1) == MEM)))
9270 rtx tmp;
9271 tmp = op0, op0 = op1, op1 = tmp;
9272 code = swap_condition (code);
9275 if (GET_CODE (op0) != REG)
9276 op0 = force_reg (op_mode, op0);
9278 if (CONSTANT_P (op1))
9280 int tmp = standard_80387_constant_p (op1);
9281 if (tmp == 0)
9282 op1 = validize_mem (force_const_mem (op_mode, op1));
9283 else if (tmp == 1)
9285 if (TARGET_CMOVE)
9286 op1 = force_reg (op_mode, op1);
9288 else
9289 op1 = force_reg (op_mode, op1);
9293 /* Try to rearrange the comparison to make it cheaper. */
9294 if (ix86_fp_comparison_cost (code)
9295 > ix86_fp_comparison_cost (swap_condition (code))
9296 && (GET_CODE (op1) == REG || !no_new_pseudos))
9298 rtx tmp;
9299 tmp = op0, op0 = op1, op1 = tmp;
9300 code = swap_condition (code);
9301 if (GET_CODE (op0) != REG)
9302 op0 = force_reg (op_mode, op0);
9305 *pop0 = op0;
9306 *pop1 = op1;
9307 return code;
9310 /* Convert the comparison codes we use to represent an FP comparison to the
9311 integer code that will result in a proper branch. Return UNKNOWN if no such code
9312 is available. */
9314 enum rtx_code
9315 ix86_fp_compare_code_to_integer (enum rtx_code code)
9317 switch (code)
9319 case GT:
9320 return GTU;
9321 case GE:
9322 return GEU;
9323 case ORDERED:
9324 case UNORDERED:
9325 return code;
9326 break;
9327 case UNEQ:
9328 return EQ;
9329 break;
9330 case UNLT:
9331 return LTU;
9332 break;
9333 case UNLE:
9334 return LEU;
9335 break;
9336 case LTGT:
9337 return NE;
9338 break;
9339 default:
9340 return UNKNOWN;
9344 /* Split comparison code CODE into comparisons we can do using branch
9345 instructions. BYPASS_CODE is the comparison code for the branch that will
9346 branch around FIRST_CODE and SECOND_CODE. If one of the branches
9347 is not required, its value is set to UNKNOWN.
9348 We never require more than two branches. */
9350 void
9351 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
9352 enum rtx_code *first_code,
9353 enum rtx_code *second_code)
9355 *first_code = code;
9356 *bypass_code = UNKNOWN;
9357 *second_code = UNKNOWN;
9359 /* The fcomi comparison sets flags as follows:
9361 cmp ZF PF CF
9362 > 0 0 0
9363 < 0 0 1
9364 = 1 0 0
9365 un 1 1 1 */
9367 switch (code)
9369 case GT: /* GTU - CF=0 & ZF=0 */
9370 case GE: /* GEU - CF=0 */
9371 case ORDERED: /* PF=0 */
9372 case UNORDERED: /* PF=1 */
9373 case UNEQ: /* EQ - ZF=1 */
9374 case UNLT: /* LTU - CF=1 */
9375 case UNLE: /* LEU - CF=1 | ZF=1 */
9376 case LTGT: /* EQ - ZF=0 */
9377 break;
9378 case LT: /* LTU - CF=1 - fails on unordered */
9379 *first_code = UNLT;
9380 *bypass_code = UNORDERED;
9381 break;
9382 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
9383 *first_code = UNLE;
9384 *bypass_code = UNORDERED;
9385 break;
9386 case EQ: /* EQ - ZF=1 - fails on unordered */
9387 *first_code = UNEQ;
9388 *bypass_code = UNORDERED;
9389 break;
9390 case NE: /* NE - ZF=0 - fails on unordered */
9391 *first_code = LTGT;
9392 *second_code = UNORDERED;
9393 break;
9394 case UNGE: /* GEU - CF=0 - fails on unordered */
9395 *first_code = GE;
9396 *second_code = UNORDERED;
9397 break;
9398 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
9399 *first_code = GT;
9400 *second_code = UNORDERED;
9401 break;
9402 default:
9403 gcc_unreachable ();
9405 if (!TARGET_IEEE_FP)
9407 *second_code = UNKNOWN;
9408 *bypass_code = UNKNOWN;
9412 /* Return the cost of a comparison done using fcom + arithmetic operations on AX.
9413 All following functions use the number of instructions as a cost metric.
9414 In the future this should be tweaked to compute bytes for optimize_size and
9415 take into account the performance of various instructions on various CPUs. */
9416 static int
9417 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
9419 if (!TARGET_IEEE_FP)
9420 return 4;
9421 /* The cost of code output by ix86_expand_fp_compare. */
9422 switch (code)
9424 case UNLE:
9425 case UNLT:
9426 case LTGT:
9427 case GT:
9428 case GE:
9429 case UNORDERED:
9430 case ORDERED:
9431 case UNEQ:
9432 return 4;
9433 break;
9434 case LT:
9435 case NE:
9436 case EQ:
9437 case UNGE:
9438 return 5;
9439 break;
9440 case LE:
9441 case UNGT:
9442 return 6;
9443 break;
9444 default:
9445 gcc_unreachable ();
9449 /* Return cost of comparison done using fcomi operation.
9450 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9451 static int
9452 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
9454 enum rtx_code bypass_code, first_code, second_code;
9455 /* Return arbitrarily high cost when instruction is not supported - this
9456 prevents gcc from using it. */
9457 if (!TARGET_CMOVE)
9458 return 1024;
9459 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9460 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
9463 /* Return cost of comparison done using sahf operation.
9464 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9465 static int
9466 ix86_fp_comparison_sahf_cost (enum rtx_code code)
9468 enum rtx_code bypass_code, first_code, second_code;
9469 /* Return arbitrarily high cost when instruction is not preferred - this
9470 keeps gcc from using it. */
9471 if (!TARGET_USE_SAHF && !optimize_size)
9472 return 1024;
9473 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9474 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
9477 /* Compute cost of the comparison done using any method.
9478 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9479 static int
9480 ix86_fp_comparison_cost (enum rtx_code code)
9482 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9483 int min;
9485 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9486 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9488 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9489 if (min > sahf_cost)
9490 min = sahf_cost;
9491 if (min > fcomi_cost)
9492 min = fcomi_cost;
9493 return min;
9496 /* Generate insn patterns to do a floating point compare of OPERANDS. */
9498 static rtx
9499 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9500 rtx *second_test, rtx *bypass_test)
9502 enum machine_mode fpcmp_mode, intcmp_mode;
9503 rtx tmp, tmp2;
9504 int cost = ix86_fp_comparison_cost (code);
9505 enum rtx_code bypass_code, first_code, second_code;
9507 fpcmp_mode = ix86_fp_compare_mode (code);
9508 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9510 if (second_test)
9511 *second_test = NULL_RTX;
9512 if (bypass_test)
9513 *bypass_test = NULL_RTX;
9515 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9517 /* Do fcomi/sahf based test when profitable. */
9518 if ((bypass_code == UNKNOWN || bypass_test)
9519 && (second_code == UNKNOWN || second_test)
9520 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9522 if (TARGET_CMOVE)
9524 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9525 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9526 tmp);
9527 emit_insn (tmp);
9529 else
9531 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9532 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9533 if (!scratch)
9534 scratch = gen_reg_rtx (HImode);
9535 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9536 emit_insn (gen_x86_sahf_1 (scratch));
9539 /* The FP codes work out to act like unsigned. */
9540 intcmp_mode = fpcmp_mode;
9541 code = first_code;
9542 if (bypass_code != UNKNOWN)
9543 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9544 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9545 const0_rtx);
9546 if (second_code != UNKNOWN)
9547 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9548 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9549 const0_rtx);
9551 else
9553 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9554 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9555 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9556 if (!scratch)
9557 scratch = gen_reg_rtx (HImode);
9558 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9560 /* In the unordered case, we have to check C2 for NaN's, which
9561 doesn't happen to work out to anything nice combination-wise.
9562 So do some bit twiddling on the value we've got in AH to come
9563 up with an appropriate set of condition codes. */
9565 intcmp_mode = CCNOmode;
9566 switch (code)
9568 case GT:
9569 case UNGT:
9570 if (code == GT || !TARGET_IEEE_FP)
9572 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9573 code = EQ;
9575 else
9577 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9578 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9579 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9580 intcmp_mode = CCmode;
9581 code = GEU;
9583 break;
9584 case LT:
9585 case UNLT:
9586 if (code == LT && TARGET_IEEE_FP)
9588 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9589 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9590 intcmp_mode = CCmode;
9591 code = EQ;
9593 else
9595 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9596 code = NE;
9598 break;
9599 case GE:
9600 case UNGE:
9601 if (code == GE || !TARGET_IEEE_FP)
9603 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9604 code = EQ;
9606 else
9608 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9609 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9610 GEN_INT (0x01)));
9611 code = NE;
9613 break;
9614 case LE:
9615 case UNLE:
9616 if (code == LE && TARGET_IEEE_FP)
9618 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9619 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9620 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9621 intcmp_mode = CCmode;
9622 code = LTU;
9624 else
9626 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9627 code = NE;
9629 break;
9630 case EQ:
9631 case UNEQ:
9632 if (code == EQ && TARGET_IEEE_FP)
9634 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9635 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9636 intcmp_mode = CCmode;
9637 code = EQ;
9639 else
9641 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9642 code = NE;
9643 break;
9645 break;
9646 case NE:
9647 case LTGT:
9648 if (code == NE && TARGET_IEEE_FP)
9650 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9651 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9652 GEN_INT (0x40)));
9653 code = NE;
9655 else
9657 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9658 code = EQ;
9660 break;
9662 case UNORDERED:
9663 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9664 code = NE;
9665 break;
9666 case ORDERED:
9667 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9668 code = EQ;
9669 break;
9671 default:
9672 gcc_unreachable ();
9676 /* Return the test that should be put into the flags user, i.e.
9677 the bcc, scc, or cmov instruction. */
9678 return gen_rtx_fmt_ee (code, VOIDmode,
9679 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9680 const0_rtx);
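/* Editorial example (illustrative sketch, not compiled): after fnstsw the
   x87 condition bits land in AH as C0 = 0x01, C2 = 0x04 and C3 = 0x40, so
   the constant 0x45 used repeatedly above tests all three at once.  For
   instance, fcom leaves C3=C2=C0=0 for "greater than", which is the
   non-IEEE GT case above (test with 0x45, then branch on EQ).  */
#if 0
static int
fcom_greater_sketch (unsigned char ah)
{
  return (ah & 0x45) == 0;   /* C3, C2 and C0 all clear => op0 > op1 */
}
#endif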
9684 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9686 rtx op0, op1, ret;
9687 op0 = ix86_compare_op0;
9688 op1 = ix86_compare_op1;
9690 if (second_test)
9691 *second_test = NULL_RTX;
9692 if (bypass_test)
9693 *bypass_test = NULL_RTX;
9695 if (ix86_compare_emitted)
9697 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9698 ix86_compare_emitted = NULL_RTX;
9700 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9701 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9702 second_test, bypass_test);
9703 else
9704 ret = ix86_expand_int_compare (code, op0, op1);
9706 return ret;
9709 /* Return true if the CODE will result in nontrivial jump sequence. */
9710 bool
9711 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9713 enum rtx_code bypass_code, first_code, second_code;
9714 if (!TARGET_CMOVE)
9715 return true;
9716 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9717 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9720 void
9721 ix86_expand_branch (enum rtx_code code, rtx label)
9723 rtx tmp;
9725 switch (GET_MODE (ix86_compare_op0))
9727 case QImode:
9728 case HImode:
9729 case SImode:
9730 simple:
9731 tmp = ix86_expand_compare (code, NULL, NULL);
9732 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9733 gen_rtx_LABEL_REF (VOIDmode, label),
9734 pc_rtx);
9735 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9736 return;
9738 case SFmode:
9739 case DFmode:
9740 case XFmode:
9742 rtvec vec;
9743 int use_fcomi;
9744 enum rtx_code bypass_code, first_code, second_code;
9746 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9747 &ix86_compare_op1);
9749 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9751 /* Check whether we will use the natural sequence with one jump. If
9752 so, we can expand the jump early. Otherwise delay expansion by
9753 creating a compound insn so as not to confuse the optimizers. */
9754 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9755 && TARGET_CMOVE)
9757 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9758 gen_rtx_LABEL_REF (VOIDmode, label),
9759 pc_rtx, NULL_RTX, NULL_RTX);
9761 else
9763 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9764 ix86_compare_op0, ix86_compare_op1);
9765 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9766 gen_rtx_LABEL_REF (VOIDmode, label),
9767 pc_rtx);
9768 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9770 use_fcomi = ix86_use_fcomi_compare (code);
9771 vec = rtvec_alloc (3 + !use_fcomi);
9772 RTVEC_ELT (vec, 0) = tmp;
9773 RTVEC_ELT (vec, 1)
9774 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9775 RTVEC_ELT (vec, 2)
9776 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9777 if (! use_fcomi)
9778 RTVEC_ELT (vec, 3)
9779 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9781 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9783 return;
9786 case DImode:
9787 if (TARGET_64BIT)
9788 goto simple;
9789 case TImode:
9790 /* Expand a DImode (or, on 64-bit, TImode) branch into multiple compare+branch. */
9792 rtx lo[2], hi[2], label2;
9793 enum rtx_code code1, code2, code3;
9794 enum machine_mode submode;
9796 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9798 tmp = ix86_compare_op0;
9799 ix86_compare_op0 = ix86_compare_op1;
9800 ix86_compare_op1 = tmp;
9801 code = swap_condition (code);
9803 if (GET_MODE (ix86_compare_op0) == DImode)
9805 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9806 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9807 submode = SImode;
9809 else
9811 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
9812 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
9813 submode = DImode;
9816 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9817 avoid two branches. This costs one extra insn, so disable when
9818 optimizing for size. */
9820 if ((code == EQ || code == NE)
9821 && (!optimize_size
9822 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9824 rtx xor0, xor1;
9826 xor1 = hi[0];
9827 if (hi[1] != const0_rtx)
9828 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
9829 NULL_RTX, 0, OPTAB_WIDEN);
9831 xor0 = lo[0];
9832 if (lo[1] != const0_rtx)
9833 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
9834 NULL_RTX, 0, OPTAB_WIDEN);
9836 tmp = expand_binop (submode, ior_optab, xor1, xor0,
9837 NULL_RTX, 0, OPTAB_WIDEN);
9839 ix86_compare_op0 = tmp;
9840 ix86_compare_op1 = const0_rtx;
9841 ix86_expand_branch (code, label);
9842 return;
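  /* For instance, a DImode equality test expands to roughly
         xorl  hi1, hi0
         xorl  lo1, lo0
         orl   hi0, lo0
     followed by a single jz/jnz, instead of two compare+branch pairs
     (a sketch only; the actual register choice will differ).  */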
9845 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
9846 op1 is a constant and the low word is zero, then we can just
9847 examine the high word. */
9849 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9850 switch (code)
9852 case LT: case LTU: case GE: case GEU:
9853 ix86_compare_op0 = hi[0];
9854 ix86_compare_op1 = hi[1];
9855 ix86_expand_branch (code, label);
9856 return;
9857 default:
9858 break;
9861 /* Otherwise, we need two or three jumps. */
9863 label2 = gen_label_rtx ();
9865 code1 = code;
9866 code2 = swap_condition (code);
9867 code3 = unsigned_condition (code);
9869 switch (code)
9871 case LT: case GT: case LTU: case GTU:
9872 break;
9874 case LE: code1 = LT; code2 = GT; break;
9875 case GE: code1 = GT; code2 = LT; break;
9876 case LEU: code1 = LTU; code2 = GTU; break;
9877 case GEU: code1 = GTU; code2 = LTU; break;
9879 case EQ: code1 = UNKNOWN; code2 = NE; break;
9880 case NE: code2 = UNKNOWN; break;
9882 default:
9883 gcc_unreachable ();
9887 * a < b =>
9888 * if (hi(a) < hi(b)) goto true;
9889 * if (hi(a) > hi(b)) goto false;
9890 * if (lo(a) < lo(b)) goto true;
9891 * false:
9894 ix86_compare_op0 = hi[0];
9895 ix86_compare_op1 = hi[1];
9897 if (code1 != UNKNOWN)
9898 ix86_expand_branch (code1, label);
9899 if (code2 != UNKNOWN)
9900 ix86_expand_branch (code2, label2);
9902 ix86_compare_op0 = lo[0];
9903 ix86_compare_op1 = lo[1];
9904 ix86_expand_branch (code3, label);
9906 if (code2 != UNKNOWN)
9907 emit_label (label2);
9908 return;
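  /* Note that CODE3 comes from unsigned_condition: once the high words
     compare equal, the low words must be compared as unsigned values
     regardless of the signedness of the original comparison.  */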
9911 default:
9912 gcc_unreachable ();
9916 /* Split branch based on floating point condition. */
9917 void
9918 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9919 rtx target1, rtx target2, rtx tmp, rtx pushed)
9921 rtx second, bypass;
9922 rtx label = NULL_RTX;
9923 rtx condition;
9924 int bypass_probability = -1, second_probability = -1, probability = -1;
9925 rtx i;
9927 if (target2 != pc_rtx)
9929 rtx tmp = target2;
9930 code = reverse_condition_maybe_unordered (code);
9931 target2 = target1;
9932 target1 = tmp;
9935 condition = ix86_expand_fp_compare (code, op1, op2,
9936 tmp, &second, &bypass);
9938 /* Remove pushed operand from stack. */
9939 if (pushed)
9940 ix86_free_from_memory (GET_MODE (pushed));
9942 if (split_branch_probability >= 0)
9944 /* Distribute the probabilities across the jumps.
9945 Assume that BYPASS and SECOND always test
9946 for UNORDERED. */
9947 probability = split_branch_probability;
9949 /* A value of 1 is low enough that the probability does not need
9950 to be updated. Later we may run some experiments and see
9951 whether unordered values are more frequent in practice. */
9952 if (bypass)
9953 bypass_probability = 1;
9954 if (second)
9955 second_probability = 1;
9957 if (bypass != NULL_RTX)
9959 label = gen_label_rtx ();
9960 i = emit_jump_insn (gen_rtx_SET
9961 (VOIDmode, pc_rtx,
9962 gen_rtx_IF_THEN_ELSE (VOIDmode,
9963 bypass,
9964 gen_rtx_LABEL_REF (VOIDmode,
9965 label),
9966 pc_rtx)));
9967 if (bypass_probability >= 0)
9968 REG_NOTES (i)
9969 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9970 GEN_INT (bypass_probability),
9971 REG_NOTES (i));
9973 i = emit_jump_insn (gen_rtx_SET
9974 (VOIDmode, pc_rtx,
9975 gen_rtx_IF_THEN_ELSE (VOIDmode,
9976 condition, target1, target2)));
9977 if (probability >= 0)
9978 REG_NOTES (i)
9979 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9980 GEN_INT (probability),
9981 REG_NOTES (i));
9982 if (second != NULL_RTX)
9984 i = emit_jump_insn (gen_rtx_SET
9985 (VOIDmode, pc_rtx,
9986 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9987 target2)));
9988 if (second_probability >= 0)
9989 REG_NOTES (i)
9990 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9991 GEN_INT (second_probability),
9992 REG_NOTES (i));
9994 if (label != NULL_RTX)
9995 emit_label (label);
9999 ix86_expand_setcc (enum rtx_code code, rtx dest)
10001 rtx ret, tmp, tmpreg, equiv;
10002 rtx second_test, bypass_test;
10004 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10005 return 0; /* FAIL */
10007 gcc_assert (GET_MODE (dest) == QImode);
10009 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10010 PUT_MODE (ret, QImode);
10012 tmp = dest;
10013 tmpreg = dest;
10015 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10016 if (bypass_test || second_test)
10018 rtx test = second_test;
10019 int bypass = 0;
10020 rtx tmp2 = gen_reg_rtx (QImode);
10021 if (bypass_test)
10023 gcc_assert (!second_test);
10024 test = bypass_test;
10025 bypass = 1;
10026 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10028 PUT_MODE (test, QImode);
10029 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10031 if (bypass)
10032 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10033 else
10034 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10037 /* Attach a REG_EQUAL note describing the comparison result. */
10038 if (ix86_compare_op0 && ix86_compare_op1)
10040 equiv = simplify_gen_relational (code, QImode,
10041 GET_MODE (ix86_compare_op0),
10042 ix86_compare_op0, ix86_compare_op1);
10043 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10046 return 1; /* DONE */
10049 /* Expand a comparison that sets or clears the carry flag. Return true when
10050 successful, and set *POP to the comparison operation. */
10051 static bool
10052 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10054 enum machine_mode mode =
10055 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10057 /* Do not handle DImode compares that go through the special path. Also we
10058 can't deal with FP compares yet, though this is possible to add. */
10059 if (mode == (TARGET_64BIT ? TImode : DImode))
10060 return false;
10061 if (FLOAT_MODE_P (mode))
10063 rtx second_test = NULL, bypass_test = NULL;
10064 rtx compare_op, compare_seq;
10066 /* Shortcut: the following common codes never translate into carry-flag compares. */
10067 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10068 || code == ORDERED || code == UNORDERED)
10069 return false;
10071 /* These comparisons require the zero flag; swap the operands so they won't need it. */
10072 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10073 && !TARGET_IEEE_FP)
10075 rtx tmp = op0;
10076 op0 = op1;
10077 op1 = tmp;
10078 code = swap_condition (code);
10081 /* Try to expand the comparison and verify that we end up with a carry-flag
10082 based comparison. This fails to be true only when we decide to expand the
10083 comparison using arithmetic, which is not a common scenario. */
10084 start_sequence ();
10085 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10086 &second_test, &bypass_test);
10087 compare_seq = get_insns ();
10088 end_sequence ();
10090 if (second_test || bypass_test)
10091 return false;
10092 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10093 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10094 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10095 else
10096 code = GET_CODE (compare_op);
10097 if (code != LTU && code != GEU)
10098 return false;
10099 emit_insn (compare_seq);
10100 *pop = compare_op;
10101 return true;
10103 if (!INTEGRAL_MODE_P (mode))
10104 return false;
10105 switch (code)
10107 case LTU:
10108 case GEU:
10109 break;
10111 /* Convert a==0 into (unsigned)a<1. */
10112 case EQ:
10113 case NE:
10114 if (op1 != const0_rtx)
10115 return false;
10116 op1 = const1_rtx;
10117 code = (code == EQ ? LTU : GEU);
10118 break;
10120 /* Convert a>b into b<a or a>=b+1. */
10121 case GTU:
10122 case LEU:
10123 if (GET_CODE (op1) == CONST_INT)
10125 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10126 /* Bail out on overflow. We still can swap operands but that
10127 would force loading of the constant into register. */
10128 if (op1 == const0_rtx
10129 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10130 return false;
10131 code = (code == GTU ? GEU : LTU);
10133 else
10135 rtx tmp = op1;
10136 op1 = op0;
10137 op0 = tmp;
10138 code = (code == GTU ? LTU : GEU);
10140 break;
10142 /* Convert a>=0 into (unsigned)a<0x80000000. */
10143 case LT:
10144 case GE:
10145 if (mode == DImode || op1 != const0_rtx)
10146 return false;
10147 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10148 code = (code == LT ? GEU : LTU);
10149 break;
10150 case LE:
10151 case GT:
10152 if (mode == DImode || op1 != constm1_rtx)
10153 return false;
10154 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10155 code = (code == LE ? GEU : LTU);
10156 break;
10158 default:
10159 return false;
10161 /* Swapping the operands may cause a constant to appear as the first operand. */
10162 if (!nonimmediate_operand (op0, VOIDmode))
10164 if (no_new_pseudos)
10165 return false;
10166 op0 = force_reg (mode, op0);
10168 ix86_compare_op0 = op0;
10169 ix86_compare_op1 = op1;
10170 *pop = ix86_expand_compare (code, NULL, NULL);
10171 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
10172 return true;
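  /* Illustrative summary of the integer conversions above, all of which
     end up testing only the carry flag after a cmp:
         a == 0    ->  (unsigned) a < 1
         a >  b    ->  (unsigned) a >= b + 1     (constant b)
         a >= 0    ->  (unsigned) a < 0x80000000
     plus the symmetric forms obtained by swapping the operands.  */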
10176 ix86_expand_int_movcc (rtx operands[])
10178 enum rtx_code code = GET_CODE (operands[1]), compare_code;
10179 rtx compare_seq, compare_op;
10180 rtx second_test, bypass_test;
10181 enum machine_mode mode = GET_MODE (operands[0]);
10182 bool sign_bit_compare_p = false;
10184 start_sequence ();
10185 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10186 compare_seq = get_insns ();
10187 end_sequence ();
10189 compare_code = GET_CODE (compare_op);
10191 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
10192 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
10193 sign_bit_compare_p = true;
10195 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
10196 HImode insns, we'd be swallowed in word prefix ops. */
10198 if ((mode != HImode || TARGET_FAST_PREFIX)
10199 && (mode != (TARGET_64BIT ? TImode : DImode))
10200 && GET_CODE (operands[2]) == CONST_INT
10201 && GET_CODE (operands[3]) == CONST_INT)
10203 rtx out = operands[0];
10204 HOST_WIDE_INT ct = INTVAL (operands[2]);
10205 HOST_WIDE_INT cf = INTVAL (operands[3]);
10206 HOST_WIDE_INT diff;
10208 diff = ct - cf;
10209 /* Sign bit compares are better done using shifts than by using
10210 sbb. */
10211 if (sign_bit_compare_p
10212 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10213 ix86_compare_op1, &compare_op))
10215 /* Detect overlap between destination and compare sources. */
10216 rtx tmp = out;
10218 if (!sign_bit_compare_p)
10220 bool fpcmp = false;
10222 compare_code = GET_CODE (compare_op);
10224 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10225 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10227 fpcmp = true;
10228 compare_code = ix86_fp_compare_code_to_integer (compare_code);
10231 /* To simplify rest of code, restrict to the GEU case. */
10232 if (compare_code == LTU)
10234 HOST_WIDE_INT tmp = ct;
10235 ct = cf;
10236 cf = tmp;
10237 compare_code = reverse_condition (compare_code);
10238 code = reverse_condition (code);
10240 else
10242 if (fpcmp)
10243 PUT_CODE (compare_op,
10244 reverse_condition_maybe_unordered
10245 (GET_CODE (compare_op)));
10246 else
10247 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10249 diff = ct - cf;
10251 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
10252 || reg_overlap_mentioned_p (out, ix86_compare_op1))
10253 tmp = gen_reg_rtx (mode);
10255 if (mode == DImode)
10256 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
10257 else
10258 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
10260 else
10262 if (code == GT || code == GE)
10263 code = reverse_condition (code);
10264 else
10266 HOST_WIDE_INT tmp = ct;
10267 ct = cf;
10268 cf = tmp;
10269 diff = ct - cf;
10271 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
10272 ix86_compare_op1, VOIDmode, 0, -1);
10275 if (diff == 1)
10278 * cmpl op0,op1
10279 * sbbl dest,dest
10280 * [addl dest, ct]
10282 * Size 5 - 8.
10284 if (ct)
10285 tmp = expand_simple_binop (mode, PLUS,
10286 tmp, GEN_INT (ct),
10287 copy_rtx (tmp), 1, OPTAB_DIRECT);
10289 else if (cf == -1)
10292 * cmpl op0,op1
10293 * sbbl dest,dest
10294 * orl $ct, dest
10296 * Size 8.
10298 tmp = expand_simple_binop (mode, IOR,
10299 tmp, GEN_INT (ct),
10300 copy_rtx (tmp), 1, OPTAB_DIRECT);
10302 else if (diff == -1 && ct)
10305 * cmpl op0,op1
10306 * sbbl dest,dest
10307 * notl dest
10308 * [addl dest, cf]
10310 * Size 8 - 11.
10312 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10313 if (cf)
10314 tmp = expand_simple_binop (mode, PLUS,
10315 copy_rtx (tmp), GEN_INT (cf),
10316 copy_rtx (tmp), 1, OPTAB_DIRECT);
10318 else
10321 * cmpl op0,op1
10322 * sbbl dest,dest
10323 * [notl dest]
10324 * andl cf - ct, dest
10325 * [addl dest, ct]
10327 * Size 8 - 11.
10330 if (cf == 0)
10332 cf = ct;
10333 ct = 0;
10334 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10337 tmp = expand_simple_binop (mode, AND,
10338 copy_rtx (tmp),
10339 gen_int_mode (cf - ct, mode),
10340 copy_rtx (tmp), 1, OPTAB_DIRECT);
10341 if (ct)
10342 tmp = expand_simple_binop (mode, PLUS,
10343 copy_rtx (tmp), GEN_INT (ct),
10344 copy_rtx (tmp), 1, OPTAB_DIRECT);
10347 if (!rtx_equal_p (tmp, out))
10348 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
10350 return 1; /* DONE */
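  /* The sequences above rely on the fact that, once the comparison has
     left its result in the carry flag, "sbb %reg, %reg" materializes
     either 0 or -1 (all ones); the following add/or/not/and then turn
     that mask into ct or cf without any branch.  */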
10353 if (diff < 0)
10355 HOST_WIDE_INT tmp;
10356 tmp = ct, ct = cf, cf = tmp;
10357 diff = -diff;
10358 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10360 /* We may be reversing an unordered compare to a normal compare, which
10361 is not valid in general (we may convert a non-trapping condition
10362 to a trapping one); however, on i386 we currently emit all
10363 comparisons unordered. */
10364 compare_code = reverse_condition_maybe_unordered (compare_code);
10365 code = reverse_condition_maybe_unordered (code);
10367 else
10369 compare_code = reverse_condition (compare_code);
10370 code = reverse_condition (code);
10374 compare_code = UNKNOWN;
10375 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
10376 && GET_CODE (ix86_compare_op1) == CONST_INT)
10378 if (ix86_compare_op1 == const0_rtx
10379 && (code == LT || code == GE))
10380 compare_code = code;
10381 else if (ix86_compare_op1 == constm1_rtx)
10383 if (code == LE)
10384 compare_code = LT;
10385 else if (code == GT)
10386 compare_code = GE;
10390 /* Optimize dest = (op0 < 0) ? -1 : cf. */
10391 if (compare_code != UNKNOWN
10392 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
10393 && (cf == -1 || ct == -1))
10395 /* If lea code below could be used, only optimize
10396 if it results in a 2 insn sequence. */
10398 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
10399 || diff == 3 || diff == 5 || diff == 9)
10400 || (compare_code == LT && ct == -1)
10401 || (compare_code == GE && cf == -1))
10404 * notl op1 (if necessary)
10405 * sarl $31, op1
10406 * orl cf, op1
10408 if (ct != -1)
10410 cf = ct;
10411 ct = -1;
10412 code = reverse_condition (code);
10415 out = emit_store_flag (out, code, ix86_compare_op0,
10416 ix86_compare_op1, VOIDmode, 0, -1);
10418 out = expand_simple_binop (mode, IOR,
10419 out, GEN_INT (cf),
10420 out, 1, OPTAB_DIRECT);
10421 if (out != operands[0])
10422 emit_move_insn (operands[0], out);
10424 return 1; /* DONE */
10429 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
10430 || diff == 3 || diff == 5 || diff == 9)
10431 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
10432 && (mode != DImode
10433 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
10436 * xorl dest,dest
10437 * cmpl op1,op2
10438 * setcc dest
10439 * lea cf(dest*(ct-cf)),dest
10441 * Size 14.
10443 * This also catches the degenerate setcc-only case.
10446 rtx tmp;
10447 int nops;
10449 out = emit_store_flag (out, code, ix86_compare_op0,
10450 ix86_compare_op1, VOIDmode, 0, 1);
10452 nops = 0;
10453 /* On x86_64 the lea instruction operates on Pmode, so we need
10454 to get the arithmetic done in the proper mode to match. */
10455 if (diff == 1)
10456 tmp = copy_rtx (out);
10457 else
10459 rtx out1;
10460 out1 = copy_rtx (out);
10461 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
10462 nops++;
10463 if (diff & 1)
10465 tmp = gen_rtx_PLUS (mode, tmp, out1);
10466 nops++;
10469 if (cf != 0)
10471 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
10472 nops++;
10474 if (!rtx_equal_p (tmp, out))
10476 if (nops == 1)
10477 out = force_operand (tmp, copy_rtx (out));
10478 else
10479 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
10481 if (!rtx_equal_p (out, operands[0]))
10482 emit_move_insn (operands[0], copy_rtx (out));
10484 return 1; /* DONE */
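  /* The lea form works because lea can compute base + index*{1,2,4,8}
     + displacement in a single instruction, which is why only diff
     values of 1, 2, 3, 4, 5, 8 and 9 (a scale, or a scale plus one
     extra copy of dest) are accepted above.  */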
10488 * General case: Jumpful:
10489 * xorl dest,dest cmpl op1, op2
10490 * cmpl op1, op2 movl ct, dest
10491 * setcc dest jcc 1f
10492 * decl dest movl cf, dest
10493 * andl (cf-ct),dest 1:
10494 * addl ct,dest
10496 * Size 20. Size 14.
10498 * This is reasonably steep, but branch mispredict costs are
10499 * high on modern cpus, so consider failing only if optimizing
10500 * for space.
10503 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10504 && BRANCH_COST >= 2)
10506 if (cf == 0)
10508 cf = ct;
10509 ct = 0;
10510 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10511 /* We may be reversing an unordered compare to a normal compare,
10512 which is not valid in general (we may convert a non-trapping
10513 condition to a trapping one); however, on i386 we currently
10514 emit all comparisons unordered. */
10515 code = reverse_condition_maybe_unordered (code);
10516 else
10518 code = reverse_condition (code);
10519 if (compare_code != UNKNOWN)
10520 compare_code = reverse_condition (compare_code);
10524 if (compare_code != UNKNOWN)
10526 /* notl op1 (if needed)
10527 sarl $31, op1
10528 andl (cf-ct), op1
10529 addl ct, op1
10531 For x < 0 (resp. x <= -1) there will be no notl,
10532 so if possible swap the constants to get rid of the
10533 complement.
10534 True/false will be -1/0 while code below (store flag
10535 followed by decrement) is 0/-1, so the constants need
10536 to be exchanged once more. */
10538 if (compare_code == GE || !cf)
10540 code = reverse_condition (code);
10541 compare_code = LT;
10543 else
10545 HOST_WIDE_INT tmp = cf;
10546 cf = ct;
10547 ct = tmp;
10550 out = emit_store_flag (out, code, ix86_compare_op0,
10551 ix86_compare_op1, VOIDmode, 0, -1);
10553 else
10555 out = emit_store_flag (out, code, ix86_compare_op0,
10556 ix86_compare_op1, VOIDmode, 0, 1);
10558 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10559 copy_rtx (out), 1, OPTAB_DIRECT);
10562 out = expand_simple_binop (mode, AND, copy_rtx (out),
10563 gen_int_mode (cf - ct, mode),
10564 copy_rtx (out), 1, OPTAB_DIRECT);
10565 if (ct)
10566 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10567 copy_rtx (out), 1, OPTAB_DIRECT);
10568 if (!rtx_equal_p (out, operands[0]))
10569 emit_move_insn (operands[0], copy_rtx (out));
10571 return 1; /* DONE */
10575 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10577 /* Try a few things more with specific constants and a variable. */
10579 optab op;
10580 rtx var, orig_out, out, tmp;
10582 if (BRANCH_COST <= 2)
10583 return 0; /* FAIL */
10585 /* If one of the two operands is an interesting constant, load a
10586 constant with the above and mask it in with a logical operation. */
10588 if (GET_CODE (operands[2]) == CONST_INT)
10590 var = operands[3];
10591 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10592 operands[3] = constm1_rtx, op = and_optab;
10593 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10594 operands[3] = const0_rtx, op = ior_optab;
10595 else
10596 return 0; /* FAIL */
10598 else if (GET_CODE (operands[3]) == CONST_INT)
10600 var = operands[2];
10601 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10602 operands[2] = constm1_rtx, op = and_optab;
10603 else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
10604 operands[2] = const0_rtx, op = ior_optab;
10605 else
10606 return 0; /* FAIL */
10608 else
10609 return 0; /* FAIL */
10611 orig_out = operands[0];
10612 tmp = gen_reg_rtx (mode);
10613 operands[0] = tmp;
10615 /* Recurse to get the constant loaded. */
10616 if (ix86_expand_int_movcc (operands) == 0)
10617 return 0; /* FAIL */
10619 /* Mask in the interesting variable. */
10620 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10621 OPTAB_WIDEN);
10622 if (!rtx_equal_p (out, orig_out))
10623 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10625 return 1; /* DONE */
10629 * For comparison with above,
10631 * movl cf,dest
10632 * movl ct,tmp
10633 * cmpl op1,op2
10634 * cmovcc tmp,dest
10636 * Size 15.
10639 if (! nonimmediate_operand (operands[2], mode))
10640 operands[2] = force_reg (mode, operands[2]);
10641 if (! nonimmediate_operand (operands[3], mode))
10642 operands[3] = force_reg (mode, operands[3]);
10644 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10646 rtx tmp = gen_reg_rtx (mode);
10647 emit_move_insn (tmp, operands[3]);
10648 operands[3] = tmp;
10650 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10652 rtx tmp = gen_reg_rtx (mode);
10653 emit_move_insn (tmp, operands[2]);
10654 operands[2] = tmp;
10657 if (! register_operand (operands[2], VOIDmode)
10658 && (mode == QImode
10659 || ! register_operand (operands[3], VOIDmode)))
10660 operands[2] = force_reg (mode, operands[2]);
10662 if (mode == QImode
10663 && ! register_operand (operands[3], VOIDmode))
10664 operands[3] = force_reg (mode, operands[3]);
10666 emit_insn (compare_seq);
10667 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10668 gen_rtx_IF_THEN_ELSE (mode,
10669 compare_op, operands[2],
10670 operands[3])));
10671 if (bypass_test)
10672 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10673 gen_rtx_IF_THEN_ELSE (mode,
10674 bypass_test,
10675 copy_rtx (operands[3]),
10676 copy_rtx (operands[0]))));
10677 if (second_test)
10678 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10679 gen_rtx_IF_THEN_ELSE (mode,
10680 second_test,
10681 copy_rtx (operands[2]),
10682 copy_rtx (operands[0]))));
10684 return 1; /* DONE */
10687 /* Swap, force into registers, or otherwise massage the two operands
10688 to an sse comparison with a mask result. Thus we differ a bit from
10689 ix86_prepare_fp_compare_args which expects to produce a flags result.
10691 The DEST operand exists to help determine whether to commute commutative
10692 operators. The POP0/POP1 operands are updated in place. The new
10693 comparison code is returned, or UNKNOWN if not implementable. */
10695 static enum rtx_code
10696 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10697 rtx *pop0, rtx *pop1)
10699 rtx tmp;
10701 switch (code)
10703 case LTGT:
10704 case UNEQ:
10705 /* We have no LTGT as an operator. We could implement it with
10706 NE & ORDERED, but this requires an extra temporary. It's
10707 not clear that it's worth it. */
10708 return UNKNOWN;
10710 case LT:
10711 case LE:
10712 case UNGT:
10713 case UNGE:
10714 /* These are supported directly. */
10715 break;
10717 case EQ:
10718 case NE:
10719 case UNORDERED:
10720 case ORDERED:
10721 /* For commutative operators, try to canonicalize the destination
10722 operand to be first in the comparison - this helps reload to
10723 avoid extra moves. */
10724 if (!dest || !rtx_equal_p (dest, *pop1))
10725 break;
10726 /* FALLTHRU */
10728 case GE:
10729 case GT:
10730 case UNLE:
10731 case UNLT:
10732 /* These are not supported directly. Swap the comparison operands
10733 to transform into something that is supported. */
10734 tmp = *pop0;
10735 *pop0 = *pop1;
10736 *pop1 = tmp;
10737 code = swap_condition (code);
10738 break;
10740 default:
10741 gcc_unreachable ();
10744 return code;
10747 /* Detect conditional moves that exactly match min/max operational
10748 semantics. Note that this is IEEE safe, as long as we don't
10749 interchange the operands.
10751 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10752 and TRUE if the operation is successful and instructions are emitted. */
10754 static bool
10755 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10756 rtx cmp_op1, rtx if_true, rtx if_false)
10758 enum machine_mode mode;
10759 bool is_min;
10760 rtx tmp;
10762 if (code == LT)
10764 else if (code == UNGE)
10766 tmp = if_true;
10767 if_true = if_false;
10768 if_false = tmp;
10770 else
10771 return false;
10773 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10774 is_min = true;
10775 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10776 is_min = false;
10777 else
10778 return false;
10780 mode = GET_MODE (dest);
10782 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10783 but MODE may be a vector mode and thus not appropriate. */
10784 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10786 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10787 rtvec v;
10789 if_true = force_reg (mode, if_true);
10790 v = gen_rtvec (2, if_true, if_false);
10791 tmp = gen_rtx_UNSPEC (mode, v, u);
10793 else
10795 code = is_min ? SMIN : SMAX;
10796 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10799 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
10800 return true;
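  /* The UNSPEC form used when NaNs or signed zeros must be honoured
     exists to keep the operand order fixed: SSE minss/minsd return the
     second operand for unordered inputs, so the operation is not
     commutative in the IEEE sense and must not be rewritten as a plain
     SMIN/SMAX.  */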
10803 /* Expand an sse vector comparison. Return the register with the result. */
10805 static rtx
10806 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10807 rtx op_true, rtx op_false)
10809 enum machine_mode mode = GET_MODE (dest);
10810 rtx x;
10812 cmp_op0 = force_reg (mode, cmp_op0);
10813 if (!nonimmediate_operand (cmp_op1, mode))
10814 cmp_op1 = force_reg (mode, cmp_op1);
10816 if (optimize
10817 || reg_overlap_mentioned_p (dest, op_true)
10818 || reg_overlap_mentioned_p (dest, op_false))
10819 dest = gen_reg_rtx (mode);
10821 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10822 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10824 return dest;
10827 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10828 operations. This is used for both scalar and vector conditional moves. */
10830 static void
10831 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10833 enum machine_mode mode = GET_MODE (dest);
10834 rtx t2, t3, x;
10836 if (op_false == CONST0_RTX (mode))
10838 op_true = force_reg (mode, op_true);
10839 x = gen_rtx_AND (mode, cmp, op_true);
10840 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10842 else if (op_true == CONST0_RTX (mode))
10844 op_false = force_reg (mode, op_false);
10845 x = gen_rtx_NOT (mode, cmp);
10846 x = gen_rtx_AND (mode, x, op_false);
10847 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10849 else
10851 op_true = force_reg (mode, op_true);
10852 op_false = force_reg (mode, op_false);
10854 t2 = gen_reg_rtx (mode);
10855 if (optimize)
10856 t3 = gen_reg_rtx (mode);
10857 else
10858 t3 = dest;
10860 x = gen_rtx_AND (mode, op_true, cmp);
10861 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10863 x = gen_rtx_NOT (mode, cmp);
10864 x = gen_rtx_AND (mode, x, op_false);
10865 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
10867 x = gen_rtx_IOR (mode, t3, t2);
10868 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
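  /* In the general case this computes the classic mask blend
         dest = (cmp & op_true) | (~cmp & op_false)
     where CMP is an all-ones/all-zeros per-element comparison result.  */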
10872 /* Expand a floating-point conditional move. Return true if successful. */
10875 ix86_expand_fp_movcc (rtx operands[])
10877 enum machine_mode mode = GET_MODE (operands[0]);
10878 enum rtx_code code = GET_CODE (operands[1]);
10879 rtx tmp, compare_op, second_test, bypass_test;
10881 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
10883 enum machine_mode cmode;
10885 /* Since we've no cmove for sse registers, don't force bad register
10886 allocation just to gain access to it. Deny movcc when the
10887 comparison mode doesn't match the move mode. */
10888 cmode = GET_MODE (ix86_compare_op0);
10889 if (cmode == VOIDmode)
10890 cmode = GET_MODE (ix86_compare_op1);
10891 if (cmode != mode)
10892 return 0;
10894 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10895 &ix86_compare_op0,
10896 &ix86_compare_op1);
10897 if (code == UNKNOWN)
10898 return 0;
10900 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
10901 ix86_compare_op1, operands[2],
10902 operands[3]))
10903 return 1;
10905 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
10906 ix86_compare_op1, operands[2], operands[3]);
10907 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
10908 return 1;
10911 /* The floating point conditional move instructions don't directly
10912 support conditions resulting from a signed integer comparison. */
10914 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10919 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10921 gcc_assert (!second_test && !bypass_test);
10922 tmp = gen_reg_rtx (QImode);
10923 ix86_expand_setcc (code, tmp);
10924 code = NE;
10925 ix86_compare_op0 = tmp;
10926 ix86_compare_op1 = const0_rtx;
10927 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10929 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10931 tmp = gen_reg_rtx (mode);
10932 emit_move_insn (tmp, operands[3]);
10933 operands[3] = tmp;
10935 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10937 tmp = gen_reg_rtx (mode);
10938 emit_move_insn (tmp, operands[2]);
10939 operands[2] = tmp;
10942 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10943 gen_rtx_IF_THEN_ELSE (mode, compare_op,
10944 operands[2], operands[3])));
10945 if (bypass_test)
10946 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10947 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
10948 operands[3], operands[0])));
10949 if (second_test)
10950 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10951 gen_rtx_IF_THEN_ELSE (mode, second_test,
10952 operands[2], operands[0])));
10954 return 1;
10957 /* Expand a floating-point vector conditional move; a vcond operation
10958 rather than a movcc operation. */
10960 bool
10961 ix86_expand_fp_vcond (rtx operands[])
10963 enum rtx_code code = GET_CODE (operands[3]);
10964 rtx cmp;
10966 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10967 &operands[4], &operands[5]);
10968 if (code == UNKNOWN)
10969 return false;
10971 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
10972 operands[5], operands[1], operands[2]))
10973 return true;
10975 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10976 operands[1], operands[2]);
10977 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10978 return true;
10981 /* Expand a signed integral vector conditional move. */
10983 bool
10984 ix86_expand_int_vcond (rtx operands[])
10986 enum machine_mode mode = GET_MODE (operands[0]);
10987 enum rtx_code code = GET_CODE (operands[3]);
10988 bool negate = false;
10989 rtx x, cop0, cop1;
10991 cop0 = operands[4];
10992 cop1 = operands[5];
10994 /* Canonicalize the comparison to EQ, GT, GTU. */
10995 switch (code)
10997 case EQ:
10998 case GT:
10999 case GTU:
11000 break;
11002 case NE:
11003 case LE:
11004 case LEU:
11005 code = reverse_condition (code);
11006 negate = true;
11007 break;
11009 case GE:
11010 case GEU:
11011 code = reverse_condition (code);
11012 negate = true;
11013 /* FALLTHRU */
11015 case LT:
11016 case LTU:
11017 code = swap_condition (code);
11018 x = cop0, cop0 = cop1, cop1 = x;
11019 break;
11021 default:
11022 gcc_unreachable ();
11025 /* Unsigned parallel compare is not supported by the hardware. Play some
11026 tricks to turn this into a signed comparison against 0. */
11027 if (code == GTU)
11029 switch (mode)
11031 case V4SImode:
11033 rtx t1, t2, mask;
11035 /* Perform a parallel modulo subtraction. */
11036 t1 = gen_reg_rtx (mode);
11037 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11039 /* Extract the original sign bit of op0. */
11040 mask = GEN_INT (-0x80000000);
11041 mask = gen_rtx_CONST_VECTOR (mode,
11042 gen_rtvec (4, mask, mask, mask, mask));
11043 mask = force_reg (mode, mask);
11044 t2 = gen_reg_rtx (mode);
11045 emit_insn (gen_andv4si3 (t2, cop0, mask));
11047 /* XOR it back into the result of the subtraction. This results
11048 in the sign bit set iff we saw unsigned underflow. */
11049 x = gen_reg_rtx (mode);
11050 emit_insn (gen_xorv4si3 (x, t1, t2));
11052 code = GT;
11054 break;
11056 case V16QImode:
11057 case V8HImode:
11058 /* Perform a parallel unsigned saturating subtraction. */
11059 x = gen_reg_rtx (mode);
11060 emit_insn (gen_rtx_SET (VOIDmode, x,
11061 gen_rtx_US_MINUS (mode, cop0, cop1)));
11063 code = EQ;
11064 negate = !negate;
11065 break;
11067 default:
11068 gcc_unreachable ();
11071 cop0 = x;
11072 cop1 = CONST0_RTX (mode);
11075 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11076 operands[1+negate], operands[2-negate]);
11078 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11079 operands[2-negate]);
11080 return true;
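  /* The V16QI/V8HI trick above relies on unsigned saturating
     subtraction: a - b saturates to zero exactly when a <= b
     element-wise, so GTU reduces to a "result != 0" test, i.e. an EQ
     comparison against zero with NEGATE flipped.  */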
11083 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
11084 true if we should do zero extension, else sign extension. HIGH_P is
11085 true if we want the N/2 high elements, else the low elements. */
11087 void
11088 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
11090 enum machine_mode imode = GET_MODE (operands[1]);
11091 rtx (*unpack)(rtx, rtx, rtx);
11092 rtx se, dest;
11094 switch (imode)
11096 case V16QImode:
11097 if (high_p)
11098 unpack = gen_vec_interleave_highv16qi;
11099 else
11100 unpack = gen_vec_interleave_lowv16qi;
11101 break;
11102 case V8HImode:
11103 if (high_p)
11104 unpack = gen_vec_interleave_highv8hi;
11105 else
11106 unpack = gen_vec_interleave_lowv8hi;
11107 break;
11108 case V4SImode:
11109 if (high_p)
11110 unpack = gen_vec_interleave_highv4si;
11111 else
11112 unpack = gen_vec_interleave_lowv4si;
11113 break;
11114 default:
11115 gcc_unreachable ();
11118 dest = gen_lowpart (imode, operands[0]);
11120 if (unsigned_p)
11121 se = force_reg (imode, CONST0_RTX (imode));
11122 else
11123 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
11124 operands[1], pc_rtx, pc_rtx);
11126 emit_insn (unpack (dest, operands[1], se));
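  /* For sign extension, SE is built by the element-wise comparison
     0 > operands[1], which yields an all-ones lane for every negative
     element; interleaving the source with that mask produces the
     sign-extended halves.  */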
11129 /* Expand conditional increment or decrement using adc/sbb instructions.
11130 The default case using setcc followed by a conditional move can be
11131 done by generic code. */
11133 ix86_expand_int_addcc (rtx operands[])
11135 enum rtx_code code = GET_CODE (operands[1]);
11136 rtx compare_op;
11137 rtx val = const0_rtx;
11138 bool fpcmp = false;
11139 enum machine_mode mode = GET_MODE (operands[0]);
11141 if (operands[3] != const1_rtx
11142 && operands[3] != constm1_rtx)
11143 return 0;
11144 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11145 ix86_compare_op1, &compare_op))
11146 return 0;
11147 code = GET_CODE (compare_op);
11149 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11150 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11152 fpcmp = true;
11153 code = ix86_fp_compare_code_to_integer (code);
11156 if (code != LTU)
11158 val = constm1_rtx;
11159 if (fpcmp)
11160 PUT_CODE (compare_op,
11161 reverse_condition_maybe_unordered
11162 (GET_CODE (compare_op)));
11163 else
11164 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11166 PUT_MODE (compare_op, mode);
11168 /* Construct either adc or sbb insn. */
11169 if ((code == LTU) == (operands[3] == constm1_rtx))
11171 switch (GET_MODE (operands[0]))
11173 case QImode:
11174 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
11175 break;
11176 case HImode:
11177 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
11178 break;
11179 case SImode:
11180 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
11181 break;
11182 case DImode:
11183 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11184 break;
11185 default:
11186 gcc_unreachable ();
11189 else
11191 switch (GET_MODE (operands[0]))
11193 case QImode:
11194 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
11195 break;
11196 case HImode:
11197 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
11198 break;
11199 case SImode:
11200 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
11201 break;
11202 case DImode:
11203 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11204 break;
11205 default:
11206 gcc_unreachable ();
11209 return 1; /* DONE */
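  /* In effect, with VAL being 0 or -1 and the carry flag holding the
     comparison result, the adc/sbb forms above compute operands[2],
     operands[2] + 1 or operands[2] - 1 in a single instruction.  */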
11213 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
11214 works for floating point parameters and non-offsettable memories.
11215 For pushes, it returns just stack offsets; the values will be saved
11216 in the right order. At most three parts are generated. */
11218 static int
11219 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
11221 int size;
11223 if (!TARGET_64BIT)
11224 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
11225 else
11226 size = (GET_MODE_SIZE (mode) + 4) / 8;
11228 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
11229 gcc_assert (size >= 2 && size <= 3);
11231 /* Optimize constant pool references into immediates. This is used by fp
11232 moves, which force all constants to memory to allow combining. */
11233 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
11235 rtx tmp = maybe_get_pool_constant (operand);
11236 if (tmp)
11237 operand = tmp;
11240 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
11243 /* The only non-offsettable memories we handle are pushes. */
11243 int ok = push_operand (operand, VOIDmode);
11245 gcc_assert (ok);
11247 operand = copy_rtx (operand);
11248 PUT_MODE (operand, Pmode);
11249 parts[0] = parts[1] = parts[2] = operand;
11250 return size;
11253 if (GET_CODE (operand) == CONST_VECTOR)
11255 enum machine_mode imode = int_mode_for_mode (mode);
11256 /* Caution: if we looked through a constant pool memory above,
11257 the operand may actually have a different mode now. That's
11258 ok, since we want to pun this all the way back to an integer. */
11259 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
11260 gcc_assert (operand != NULL);
11261 mode = imode;
11264 if (!TARGET_64BIT)
11266 if (mode == DImode)
11267 split_di (&operand, 1, &parts[0], &parts[1]);
11268 else
11270 if (REG_P (operand))
11272 gcc_assert (reload_completed);
11273 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
11274 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
11275 if (size == 3)
11276 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
11278 else if (offsettable_memref_p (operand))
11280 operand = adjust_address (operand, SImode, 0);
11281 parts[0] = operand;
11282 parts[1] = adjust_address (operand, SImode, 4);
11283 if (size == 3)
11284 parts[2] = adjust_address (operand, SImode, 8);
11286 else if (GET_CODE (operand) == CONST_DOUBLE)
11288 REAL_VALUE_TYPE r;
11289 long l[4];
11291 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11292 switch (mode)
11294 case XFmode:
11295 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
11296 parts[2] = gen_int_mode (l[2], SImode);
11297 break;
11298 case DFmode:
11299 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
11300 break;
11301 default:
11302 gcc_unreachable ();
11304 parts[1] = gen_int_mode (l[1], SImode);
11305 parts[0] = gen_int_mode (l[0], SImode);
11307 else
11308 gcc_unreachable ();
11311 else
11313 if (mode == TImode)
11314 split_ti (&operand, 1, &parts[0], &parts[1]);
11315 if (mode == XFmode || mode == TFmode)
11317 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
11318 if (REG_P (operand))
11320 gcc_assert (reload_completed);
11321 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
11322 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
11324 else if (offsettable_memref_p (operand))
11326 operand = adjust_address (operand, DImode, 0);
11327 parts[0] = operand;
11328 parts[1] = adjust_address (operand, upper_mode, 8);
11330 else if (GET_CODE (operand) == CONST_DOUBLE)
11332 REAL_VALUE_TYPE r;
11333 long l[4];
11335 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11336 real_to_target (l, &r, mode);
11338 /* Do not use shift by 32 to avoid warning on 32bit systems. */
11339 if (HOST_BITS_PER_WIDE_INT >= 64)
11340 parts[0]
11341 = gen_int_mode
11342 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
11343 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
11344 DImode);
11345 else
11346 parts[0] = immed_double_const (l[0], l[1], DImode);
11348 if (upper_mode == SImode)
11349 parts[1] = gen_int_mode (l[2], SImode);
11350 else if (HOST_BITS_PER_WIDE_INT >= 64)
11351 parts[1]
11352 = gen_int_mode
11353 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
11354 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
11355 DImode);
11356 else
11357 parts[1] = immed_double_const (l[2], l[3], DImode);
11359 else
11360 gcc_unreachable ();
11364 return size;
11367 /* Emit insns to perform a move or push of DI, DF, and XF values.
11368 All required insns are emitted here.
11369 Operands 2-4 contain the input values
11370 in the correct order; operands 5-7 contain the output values. */
11372 void
11373 ix86_split_long_move (rtx operands[])
11375 rtx part[2][3];
11376 int nparts;
11377 int push = 0;
11378 int collisions = 0;
11379 enum machine_mode mode = GET_MODE (operands[0]);
11381 /* The DFmode expanders may ask us to move double.
11382 For 64bit target this is single move. By hiding the fact
11383 here we simplify i386.md splitters. */
11384 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
11386 /* Optimize constant pool references into immediates. This is used by
11387 fp moves, which force all constants to memory to allow combining. */
11389 if (GET_CODE (operands[1]) == MEM
11390 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11391 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
11392 operands[1] = get_pool_constant (XEXP (operands[1], 0));
11393 if (push_operand (operands[0], VOIDmode))
11395 operands[0] = copy_rtx (operands[0]);
11396 PUT_MODE (operands[0], Pmode);
11398 else
11399 operands[0] = gen_lowpart (DImode, operands[0]);
11400 operands[1] = gen_lowpart (DImode, operands[1]);
11401 emit_move_insn (operands[0], operands[1]);
11402 return;
11405 /* The only non-offsettable memory we handle is push. */
11406 if (push_operand (operands[0], VOIDmode))
11407 push = 1;
11408 else
11409 gcc_assert (GET_CODE (operands[0]) != MEM
11410 || offsettable_memref_p (operands[0]));
11412 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
11413 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
11415 /* When emitting a push, watch out for source operands on the stack. */
11416 if (push && GET_CODE (operands[1]) == MEM
11417 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
11419 if (nparts == 3)
11420 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
11421 XEXP (part[1][2], 0));
11422 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
11423 XEXP (part[1][1], 0));
11426 /* We need to do the copy in the right order in case an address register
11427 of the source overlaps the destination. */
11428 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
11430 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
11431 collisions++;
11432 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11433 collisions++;
11434 if (nparts == 3
11435 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
11436 collisions++;
11438 /* Collision in the middle part can be handled by reordering. */
11439 if (collisions == 1 && nparts == 3
11440 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11442 rtx tmp;
11443 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
11444 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
11447 /* If there are more collisions, we can't handle it by reordering.
11448 Do an lea to the last part and use only one colliding move. */
11449 else if (collisions > 1)
11451 rtx base;
11453 collisions = 1;
11455 base = part[0][nparts - 1];
11457 /* Handle the case when the last part isn't valid for lea.
11458 Happens in 64-bit mode storing the 12-byte XFmode. */
11459 if (GET_MODE (base) != Pmode)
11460 base = gen_rtx_REG (Pmode, REGNO (base));
11462 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
11463 part[1][0] = replace_equiv_address (part[1][0], base);
11464 part[1][1] = replace_equiv_address (part[1][1],
11465 plus_constant (base, UNITS_PER_WORD));
11466 if (nparts == 3)
11467 part[1][2] = replace_equiv_address (part[1][2],
11468 plus_constant (base, 8));
11472 if (push)
11474 if (!TARGET_64BIT)
11476 if (nparts == 3)
11478 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
11479 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
11480 emit_move_insn (part[0][2], part[1][2]);
11483 else
11485 /* In 64-bit mode we don't have a 32-bit push available. If this is a
11486 register, that is OK - we will just use the larger counterpart. We also
11487 retype memory - this comes from an attempt to avoid a REX prefix when
11488 moving the second half of a TFmode value. */
11489 if (GET_MODE (part[1][1]) == SImode)
11491 switch (GET_CODE (part[1][1]))
11493 case MEM:
11494 part[1][1] = adjust_address (part[1][1], DImode, 0);
11495 break;
11497 case REG:
11498 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
11499 break;
11501 default:
11502 gcc_unreachable ();
11505 if (GET_MODE (part[1][0]) == SImode)
11506 part[1][0] = part[1][1];
11509 emit_move_insn (part[0][1], part[1][1]);
11510 emit_move_insn (part[0][0], part[1][0]);
11511 return;
11514 /* Choose correct order to not overwrite the source before it is copied. */
11515 if ((REG_P (part[0][0])
11516 && REG_P (part[1][1])
11517 && (REGNO (part[0][0]) == REGNO (part[1][1])
11518 || (nparts == 3
11519 && REGNO (part[0][0]) == REGNO (part[1][2]))))
11520 || (collisions > 0
11521 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
11523 if (nparts == 3)
11525 operands[2] = part[0][2];
11526 operands[3] = part[0][1];
11527 operands[4] = part[0][0];
11528 operands[5] = part[1][2];
11529 operands[6] = part[1][1];
11530 operands[7] = part[1][0];
11532 else
11534 operands[2] = part[0][1];
11535 operands[3] = part[0][0];
11536 operands[5] = part[1][1];
11537 operands[6] = part[1][0];
11540 else
11542 if (nparts == 3)
11544 operands[2] = part[0][0];
11545 operands[3] = part[0][1];
11546 operands[4] = part[0][2];
11547 operands[5] = part[1][0];
11548 operands[6] = part[1][1];
11549 operands[7] = part[1][2];
11551 else
11553 operands[2] = part[0][0];
11554 operands[3] = part[0][1];
11555 operands[5] = part[1][0];
11556 operands[6] = part[1][1];
11560 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11561 if (optimize_size)
11563 if (GET_CODE (operands[5]) == CONST_INT
11564 && operands[5] != const0_rtx
11565 && REG_P (operands[2]))
11567 if (GET_CODE (operands[6]) == CONST_INT
11568 && INTVAL (operands[6]) == INTVAL (operands[5]))
11569 operands[6] = operands[2];
11571 if (nparts == 3
11572 && GET_CODE (operands[7]) == CONST_INT
11573 && INTVAL (operands[7]) == INTVAL (operands[5]))
11574 operands[7] = operands[2];
11577 if (nparts == 3
11578 && GET_CODE (operands[6]) == CONST_INT
11579 && operands[6] != const0_rtx
11580 && REG_P (operands[3])
11581 && GET_CODE (operands[7]) == CONST_INT
11582 && INTVAL (operands[7]) == INTVAL (operands[6]))
11583 operands[7] = operands[3];
11586 emit_move_insn (operands[2], operands[5]);
11587 emit_move_insn (operands[3], operands[6]);
11588 if (nparts == 3)
11589 emit_move_insn (operands[4], operands[7]);
11591 return;
11594 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
11595 left shift by a constant, either using a single shift or
11596 a sequence of add instructions. */
11598 static void
11599 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
11601 if (count == 1)
11603 emit_insn ((mode == DImode
11604 ? gen_addsi3
11605 : gen_adddi3) (operand, operand, operand));
11607 else if (!optimize_size
11608 && count * ix86_cost->add <= ix86_cost->shift_const)
11610 int i;
11611 for (i=0; i<count; i++)
11613 emit_insn ((mode == DImode
11614 ? gen_addsi3
11615 : gen_adddi3) (operand, operand, operand));
11618 else
11619 emit_insn ((mode == DImode
11620 ? gen_ashlsi3
11621 : gen_ashldi3) (operand, operand, GEN_INT (count)));
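  /* Note that MODE above is the double-word mode being split (DImode or
     TImode) while OPERAND is a single half-width part, so the seemingly
     inverted gen_addsi3/gen_adddi3 selection is intentional.  */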
11624 void
11625 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
11627 rtx low[2], high[2];
11628 int count;
11629 const int single_width = mode == DImode ? 32 : 64;
11631 if (GET_CODE (operands[2]) == CONST_INT)
11633 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11634 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11636 if (count >= single_width)
11638 emit_move_insn (high[0], low[1]);
11639 emit_move_insn (low[0], const0_rtx);
11641 if (count > single_width)
11642 ix86_expand_ashl_const (high[0], count - single_width, mode);
11644 else
11646 if (!rtx_equal_p (operands[0], operands[1]))
11647 emit_move_insn (operands[0], operands[1]);
11648 emit_insn ((mode == DImode
11649 ? gen_x86_shld_1
11650 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
11651 ix86_expand_ashl_const (low[0], count, mode);
11653 return;
11656 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11658 if (operands[1] == const1_rtx)
11660 /* Assuming we've chosen QImode-capable registers, 1 << N
11661 can be done with two 32/64-bit shifts, no branches, no cmoves. */
11662 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11664 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11666 ix86_expand_clear (low[0]);
11667 ix86_expand_clear (high[0]);
11668 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
11670 d = gen_lowpart (QImode, low[0]);
11671 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11672 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11673 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11675 d = gen_lowpart (QImode, high[0]);
11676 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11677 s = gen_rtx_NE (QImode, flags, const0_rtx);
11678 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11681 /* Otherwise, we can get the same results by manually performing
11682 a bit extract operation on bit 5/6, and then performing the two
11683 shifts. The two methods of getting 0/1 into low/high are exactly
11684 the same size. Avoiding the shift in the bit extract case helps
11685 pentium4 a bit; no one else seems to care much either way. */
11686 else
11688 rtx x;
11690 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11691 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
11692 else
11693 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
11694 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11696 emit_insn ((mode == DImode
11697 ? gen_lshrsi3
11698 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
11699 emit_insn ((mode == DImode
11700 ? gen_andsi3
11701 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
11702 emit_move_insn (low[0], high[0]);
11703 emit_insn ((mode == DImode
11704 ? gen_xorsi3
11705 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
11708 emit_insn ((mode == DImode
11709 ? gen_ashlsi3
11710 : gen_ashldi3) (low[0], low[0], operands[2]));
11711 emit_insn ((mode == DImode
11712 ? gen_ashlsi3
11713 : gen_ashldi3) (high[0], high[0], operands[2]));
11714 return;
11717 if (operands[1] == constm1_rtx)
11719 /* For -1 << N, we can avoid the shld instruction, because we
11720 know that we're shifting 0...31/63 ones into a -1. */
11721 emit_move_insn (low[0], constm1_rtx);
11722 if (optimize_size)
11723 emit_move_insn (high[0], low[0]);
11724 else
11725 emit_move_insn (high[0], constm1_rtx);
11727 else
11729 if (!rtx_equal_p (operands[0], operands[1]))
11730 emit_move_insn (operands[0], operands[1]);
11732 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11733 emit_insn ((mode == DImode
11734 ? gen_x86_shld_1
11735 : gen_x86_64_shld) (high[0], low[0], operands[2]));
11738 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
11740 if (TARGET_CMOVE && scratch)
11742 ix86_expand_clear (scratch);
11743 emit_insn ((mode == DImode
11744 ? gen_x86_shift_adj_1
11745 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
11747 else
11748 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
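  /* The shift_adj patterns fix up the case where the runtime count is
     single_width or more: the hardware double-word shifts mask the count,
     so for large counts the low half must still be moved into the high
     half (with a cmov when available, otherwise a short branch).  */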
11751 void
11752 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
11754 rtx low[2], high[2];
11755 int count;
11756 const int single_width = mode == DImode ? 32 : 64;
11758 if (GET_CODE (operands[2]) == CONST_INT)
11760 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11761 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11763 if (count == single_width * 2 - 1)
11765 emit_move_insn (high[0], high[1]);
11766 emit_insn ((mode == DImode
11767 ? gen_ashrsi3
11768 : gen_ashrdi3) (high[0], high[0],
11769 GEN_INT (single_width - 1)));
11770 emit_move_insn (low[0], high[0]);
11773 else if (count >= single_width)
11775 emit_move_insn (low[0], high[1]);
11776 emit_move_insn (high[0], low[0]);
11777 emit_insn ((mode == DImode
11778 ? gen_ashrsi3
11779 : gen_ashrdi3) (high[0], high[0],
11780 GEN_INT (single_width - 1)));
11781 if (count > single_width)
11782 emit_insn ((mode == DImode
11783 ? gen_ashrsi3
11784 : gen_ashrdi3) (low[0], low[0],
11785 GEN_INT (count - single_width)));
11787 else
11789 if (!rtx_equal_p (operands[0], operands[1]))
11790 emit_move_insn (operands[0], operands[1]);
11791 emit_insn ((mode == DImode
11792 ? gen_x86_shrd_1
11793 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11794 emit_insn ((mode == DImode
11795 ? gen_ashrsi3
11796 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
11799 else
11801 if (!rtx_equal_p (operands[0], operands[1]))
11802 emit_move_insn (operands[0], operands[1]);
11804 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11806 emit_insn ((mode == DImode
11807 ? gen_x86_shrd_1
11808 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11809 emit_insn ((mode == DImode
11810 ? gen_ashrsi3
11811 : gen_ashrdi3) (high[0], high[0], operands[2]));
11813 if (TARGET_CMOVE && scratch)
11815 emit_move_insn (scratch, high[0]);
11816 emit_insn ((mode == DImode
11817 ? gen_ashrsi3
11818 : gen_ashrdi3) (scratch, scratch,
11819 GEN_INT (single_width - 1)));
11820 emit_insn ((mode == DImode
11821 ? gen_x86_shift_adj_1
11822 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11823 scratch));
11825 else
11826 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
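/* The arithmetic right shift counterpart, sketched in C under the same
   32-bit-halves assumption; the count >= 32 case shows why the adjustment
   above refills the high half with sign bits (the sar by single_width - 1).
   Right shifts of negative ints are assumed to be arithmetic here.  */
static void
split_ashr_sketch (int *high, unsigned int *low, unsigned int count)
{
  count &= 63;
  if (count >= 32)
    {
      *low = (unsigned int) (*high >> (count - 32));
      *high >>= 31;                    /* all copies of the sign bit */
    }
  else if (count > 0)
    {
      *low = (*low >> count) | ((unsigned int) *high << (32 - count));
      *high >>= count;
    }
}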
11830 void
11831 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
11833 rtx low[2], high[2];
11834 int count;
11835 const int single_width = mode == DImode ? 32 : 64;
11837 if (GET_CODE (operands[2]) == CONST_INT)
11839 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11840 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11842 if (count >= single_width)
11844 emit_move_insn (low[0], high[1]);
11845 ix86_expand_clear (high[0]);
11847 if (count > single_width)
11848 emit_insn ((mode == DImode
11849 ? gen_lshrsi3
11850 : gen_lshrdi3) (low[0], low[0],
11851 GEN_INT (count - single_width)));
11853 else
11855 if (!rtx_equal_p (operands[0], operands[1]))
11856 emit_move_insn (operands[0], operands[1]);
11857 emit_insn ((mode == DImode
11858 ? gen_x86_shrd_1
11859 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11860 emit_insn ((mode == DImode
11861 ? gen_lshrsi3
11862 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
11865 else
11867 if (!rtx_equal_p (operands[0], operands[1]))
11868 emit_move_insn (operands[0], operands[1]);
11870 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11872 emit_insn ((mode == DImode
11873 ? gen_x86_shrd_1
11874 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11875 emit_insn ((mode == DImode
11876 ? gen_lshrsi3
11877 : gen_lshrdi3) (high[0], high[0], operands[2]));
11879 /* Heh. By reversing the arguments, we can reuse this pattern. */
11880 if (TARGET_CMOVE && scratch)
11882 ix86_expand_clear (scratch);
11883 emit_insn ((mode == DImode
11884 ? gen_x86_shift_adj_1
11885 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11886 scratch));
11888 else
11889 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
11893 /* Helper function for the string operations below. Test whether VARIABLE
11894 is aligned to VALUE bytes. If true, jump to the label. */
11895 static rtx
11896 ix86_expand_aligntest (rtx variable, int value)
11898 rtx label = gen_label_rtx ();
11899 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11900 if (GET_MODE (variable) == DImode)
11901 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11902 else
11903 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11904 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
11905 1, label);
11906 return label;
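/* In C terms, the RTL emitted above amounts to the following test, with P
   standing in for the pointer-sized VARIABLE; a true result means the jump
   to the returned label is taken and the caller's fix-up code is skipped.
   Illustrative only.  */
static int
aligntest_sketch (unsigned long p, unsigned long value)
{
  return (p & value) == 0;
}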
11909 /* Decrease COUNTREG by VALUE. */
11910 static void
11911 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
11913 if (GET_MODE (countreg) == DImode)
11914 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
11915 else
11916 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
11919 /* Zero extend possibly SImode EXP to Pmode register. */
11921 ix86_zero_extend_to_Pmode (rtx exp)
11923 rtx r;
11924 if (GET_MODE (exp) == VOIDmode)
11925 return force_reg (Pmode, exp);
11926 if (GET_MODE (exp) == Pmode)
11927 return copy_to_mode_reg (Pmode, exp);
11928 r = gen_reg_rtx (Pmode);
11929 emit_insn (gen_zero_extendsidi2 (r, exp));
11930 return r;
11933 /* Expand string move (memcpy) operation. Use i386 string operations when
11934 profitable. expand_clrmem contains similar code. */
11936 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
11938 rtx srcreg, destreg, countreg, srcexp, destexp;
11939 enum machine_mode counter_mode;
11940 HOST_WIDE_INT align = 0;
11941 unsigned HOST_WIDE_INT count = 0;
11943 if (GET_CODE (align_exp) == CONST_INT)
11944 align = INTVAL (align_exp);
11946 /* Can't use any of this if the user has appropriated esi or edi. */
11947 if (global_regs[4] || global_regs[5])
11948 return 0;
11950 /* This simple hack avoids all inlining code and simplifies code below. */
11951 if (!TARGET_ALIGN_STRINGOPS)
11952 align = 64;
11954 if (GET_CODE (count_exp) == CONST_INT)
11956 count = INTVAL (count_exp);
11957 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11958 return 0;
11961 /* Figure out the proper mode for the counter. For 32 bits it is always SImode;
11962 for 64 bits use SImode when possible, otherwise DImode.
11963 Set count to the number of bytes copied when known at compile time. */
11964 if (!TARGET_64BIT
11965 || GET_MODE (count_exp) == SImode
11966 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11967 counter_mode = SImode;
11968 else
11969 counter_mode = DImode;
11971 gcc_assert (counter_mode == SImode || counter_mode == DImode);
11973 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11974 if (destreg != XEXP (dst, 0))
11975 dst = replace_equiv_address_nv (dst, destreg);
11976 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11977 if (srcreg != XEXP (src, 0))
11978 src = replace_equiv_address_nv (src, srcreg);
11980 /* When optimizing for size, emit a simple rep; movsb instruction for
11981 counts not divisible by 4, except when the (movsl;)*(movsw;)?(movsb;)?
11982 sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
11983 The size of the (movsl;)*(movsw;)?(movsb;)? sequence is
11984 count / 4 + (count & 3), while the other sequence is either 4 or 7 bytes,
11985 but we don't know whether the upper 24 (resp. 56) bits of %ecx will be
11986 known to be zero. The rep; movsb sequence causes higher
11987 register pressure though, so take that into account. */
11989 if ((!optimize || optimize_size)
11990 && (count == 0
11991 || ((count & 0x03)
11992 && (!optimize_size
11993 || count > 5 * 4
11994 || (count & 3) + count / 4 > 6))))
11996 emit_insn (gen_cld ());
11997 countreg = ix86_zero_extend_to_Pmode (count_exp);
11998 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11999 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
12000 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
12001 destexp, srcexp));
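/* Worked example of the size heuristic above when optimizing for size
   (illustrative numbers only):
     count = 7:  (7 & 3) + 7/4 = 3 + 1 = 4, which is not > 6 and 7 <= 5*4,
                 so this rep; movsb branch is skipped in favor of the short
                 movsl/movsw/movsb run handled below.
     count = 19: (19 & 3) + 19/4 = 3 + 4 = 7 > 6, so rep; movsb wins.  */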
12004 /* For constant aligned (or small unaligned) copies use rep movsl
12005 followed by code copying the rest. For PentiumPro ensure 8-byte
12006 alignment to allow rep movsl acceleration. */
12008 else if (count != 0
12009 && (align >= 8
12010 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12011 || optimize_size || count < (unsigned int) 64))
12013 unsigned HOST_WIDE_INT offset = 0;
12014 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12015 rtx srcmem, dstmem;
12017 emit_insn (gen_cld ());
12018 if (count & ~(size - 1))
12020 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
12022 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
12024 while (offset < (count & ~(size - 1)))
12026 srcmem = adjust_automodify_address_nv (src, movs_mode,
12027 srcreg, offset);
12028 dstmem = adjust_automodify_address_nv (dst, movs_mode,
12029 destreg, offset);
12030 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12031 offset += size;
12034 else
12036 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
12037 & (TARGET_64BIT ? -1 : 0x3fffffff));
12038 countreg = copy_to_mode_reg (counter_mode, countreg);
12039 countreg = ix86_zero_extend_to_Pmode (countreg);
12041 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12042 GEN_INT (size == 4 ? 2 : 3));
12043 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
12044 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12046 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
12047 countreg, destexp, srcexp));
12048 offset = count & ~(size - 1);
12051 if (size == 8 && (count & 0x04))
12053 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
12054 offset);
12055 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
12056 offset);
12057 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12058 offset += 4;
12060 if (count & 0x02)
12062 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
12063 offset);
12064 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
12065 offset);
12066 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12067 offset += 2;
12069 if (count & 0x01)
12071 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
12072 offset);
12073 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
12074 offset);
12075 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12078 /* The generic code based on the glibc implementation:
12079 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
12080 allowing accelerated copying there)
12081 - copy the data using rep movsl
12082 - copy the rest. */
12083 else
12085 rtx countreg2;
12086 rtx label = NULL;
12087 rtx srcmem, dstmem;
12088 int desired_alignment = (TARGET_PENTIUMPRO
12089 && (count == 0 || count >= (unsigned int) 260)
12090 ? 8 : UNITS_PER_WORD);
12091 /* Get rid of MEM_OFFSETs, they won't be accurate. */
12092 dst = change_address (dst, BLKmode, destreg);
12093 src = change_address (src, BLKmode, srcreg);
12095 /* In case we don't know anything about the alignment, default to the
12096 library version, since it is usually equally fast and results in
12097 shorter code.
12099 Also emit a call when we know that the count is large and the call
12100 overhead will not be important. */
12101 if (!TARGET_INLINE_ALL_STRINGOPS
12102 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12103 return 0;
12105 if (TARGET_SINGLE_STRINGOP)
12106 emit_insn (gen_cld ());
12108 countreg2 = gen_reg_rtx (Pmode);
12109 countreg = copy_to_mode_reg (counter_mode, count_exp);
12111 /* We don't use loops to align the destination or to copy parts smaller
12112 than 4 bytes, because gcc is able to optimize such code better (in
12113 case the destination or the count really is aligned, gcc is often
12114 able to predict the branches) and it is also friendlier to
12115 hardware branch prediction.
12117 Using loops is beneficial for the generic case, because we can
12118 handle small counts with the loops. Many CPUs (such as Athlon)
12119 have large REP prefix setup costs.
12121 This is quite costly. Maybe we can revisit this decision later or
12122 add some customizability to this code. */
12124 if (count == 0 && align < desired_alignment)
12126 label = gen_label_rtx ();
12127 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12128 LEU, 0, counter_mode, 1, label);
12130 if (align <= 1)
12132 rtx label = ix86_expand_aligntest (destreg, 1);
12133 srcmem = change_address (src, QImode, srcreg);
12134 dstmem = change_address (dst, QImode, destreg);
12135 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12136 ix86_adjust_counter (countreg, 1);
12137 emit_label (label);
12138 LABEL_NUSES (label) = 1;
12140 if (align <= 2)
12142 rtx label = ix86_expand_aligntest (destreg, 2);
12143 srcmem = change_address (src, HImode, srcreg);
12144 dstmem = change_address (dst, HImode, destreg);
12145 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12146 ix86_adjust_counter (countreg, 2);
12147 emit_label (label);
12148 LABEL_NUSES (label) = 1;
12150 if (align <= 4 && desired_alignment > 4)
12152 rtx label = ix86_expand_aligntest (destreg, 4);
12153 srcmem = change_address (src, SImode, srcreg);
12154 dstmem = change_address (dst, SImode, destreg);
12155 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12156 ix86_adjust_counter (countreg, 4);
12157 emit_label (label);
12158 LABEL_NUSES (label) = 1;
12161 if (label && desired_alignment > 4 && !TARGET_64BIT)
12163 emit_label (label);
12164 LABEL_NUSES (label) = 1;
12165 label = NULL_RTX;
12167 if (!TARGET_SINGLE_STRINGOP)
12168 emit_insn (gen_cld ());
12169 if (TARGET_64BIT)
12171 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12172 GEN_INT (3)));
12173 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12175 else
12177 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12178 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12180 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
12181 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12182 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
12183 countreg2, destexp, srcexp));
12185 if (label)
12187 emit_label (label);
12188 LABEL_NUSES (label) = 1;
12190 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12192 srcmem = change_address (src, SImode, srcreg);
12193 dstmem = change_address (dst, SImode, destreg);
12194 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12196 if ((align <= 4 || count == 0) && TARGET_64BIT)
12198 rtx label = ix86_expand_aligntest (countreg, 4);
12199 srcmem = change_address (src, SImode, srcreg);
12200 dstmem = change_address (dst, SImode, destreg);
12201 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12202 emit_label (label);
12203 LABEL_NUSES (label) = 1;
12205 if (align > 2 && count != 0 && (count & 2))
12207 srcmem = change_address (src, HImode, srcreg);
12208 dstmem = change_address (dst, HImode, destreg);
12209 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12211 if (align <= 2 || count == 0)
12213 rtx label = ix86_expand_aligntest (countreg, 2);
12214 srcmem = change_address (src, HImode, srcreg);
12215 dstmem = change_address (dst, HImode, destreg);
12216 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12217 emit_label (label);
12218 LABEL_NUSES (label) = 1;
12220 if (align > 1 && count != 0 && (count & 1))
12222 srcmem = change_address (src, QImode, srcreg);
12223 dstmem = change_address (dst, QImode, destreg);
12224 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12226 if (align <= 1 || count == 0)
12228 rtx label = ix86_expand_aligntest (countreg, 1);
12229 srcmem = change_address (src, QImode, srcreg);
12230 dstmem = change_address (dst, QImode, destreg);
12231 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12232 emit_label (label);
12233 LABEL_NUSES (label) = 1;
12237 return 1;
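/* A compact C model of the generic, glibc-style strategy used above:
   align the destination, bulk-copy 4-byte words (standing in for
   rep; movsl), then mop up the tail.  Names are hypothetical and the
   prologue/epilogue branch structure of the real expansion is omitted.  */
static void
movmem_sketch (unsigned char *dst, const unsigned char *src, unsigned long n)
{
  while (n && ((unsigned long) dst & 3))
    {
      *dst++ = *src++;                  /* bytes until dst is 4-byte aligned */
      n--;
    }
  while (n >= 4)
    {
      __builtin_memcpy (dst, src, 4);   /* one 32-bit move, i.e. movsl */
      dst += 4;
      src += 4;
      n -= 4;
    }
  while (n--)
    *dst++ = *src++;                    /* remaining 0..3 bytes */
}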
12240 /* Expand string clear operation (bzero). Use i386 string operations when
12241 profitable. expand_movmem contains similar code. */
12243 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
12245 rtx destreg, zeroreg, countreg, destexp;
12246 enum machine_mode counter_mode;
12247 HOST_WIDE_INT align = 0;
12248 unsigned HOST_WIDE_INT count = 0;
12250 if (GET_CODE (align_exp) == CONST_INT)
12251 align = INTVAL (align_exp);
12253 /* Can't use any of this if the user has appropriated esi. */
12254 if (global_regs[4])
12255 return 0;
12257 /* This simple hack avoids all inlining code and simplifies code below. */
12258 if (!TARGET_ALIGN_STRINGOPS)
12259 align = 32;
12261 if (GET_CODE (count_exp) == CONST_INT)
12263 count = INTVAL (count_exp);
12264 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
12265 return 0;
12267 /* Figure out the proper mode for the counter. For 32 bits it is always SImode;
12268 for 64 bits use SImode when possible, otherwise DImode.
12269 Set count to the number of bytes copied when known at compile time. */
12270 if (!TARGET_64BIT
12271 || GET_MODE (count_exp) == SImode
12272 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
12273 counter_mode = SImode;
12274 else
12275 counter_mode = DImode;
12277 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
12278 if (destreg != XEXP (dst, 0))
12279 dst = replace_equiv_address_nv (dst, destreg);
12282 /* When optimizing for size, emit a simple rep; stosb instruction for
12283 counts not divisible by 4. The movl $N, %ecx; rep; stosb
12284 sequence is 7 bytes long, so if optimizing for size and the count is
12285 small enough that some stosl, stosw and stosb instructions without
12286 rep are shorter, fall back into the next if. */
12288 if ((!optimize || optimize_size)
12289 && (count == 0
12290 || ((count & 0x03)
12291 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
12293 emit_insn (gen_cld ());
12295 countreg = ix86_zero_extend_to_Pmode (count_exp);
12296 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
12297 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
12298 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
12300 else if (count != 0
12301 && (align >= 8
12302 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12303 || optimize_size || count < (unsigned int) 64))
12305 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12306 unsigned HOST_WIDE_INT offset = 0;
12308 emit_insn (gen_cld ());
12310 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
12311 if (count & ~(size - 1))
12313 unsigned HOST_WIDE_INT repcount;
12314 unsigned int max_nonrep;
12316 repcount = count >> (size == 4 ? 2 : 3);
12317 if (!TARGET_64BIT)
12318 repcount &= 0x3fffffff;
12320 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
12321 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
12322 bytes. In both cases the latter seems to be faster for small
12323 values of N. */
12324 max_nonrep = size == 4 ? 7 : 4;
12325 if (!optimize_size)
12326 switch (ix86_tune)
12328 case PROCESSOR_PENTIUM4:
12329 case PROCESSOR_NOCONA:
12330 max_nonrep = 3;
12331 break;
12332 default:
12333 break;
12336 if (repcount <= max_nonrep)
12337 while (repcount-- > 0)
12339 rtx mem = adjust_automodify_address_nv (dst,
12340 GET_MODE (zeroreg),
12341 destreg, offset);
12342 emit_insn (gen_strset (destreg, mem, zeroreg));
12343 offset += size;
12345 else
12347 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
12348 countreg = ix86_zero_extend_to_Pmode (countreg);
12349 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12350 GEN_INT (size == 4 ? 2 : 3));
12351 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12352 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
12353 destexp));
12354 offset = count & ~(size - 1);
12357 if (size == 8 && (count & 0x04))
12359 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
12360 offset);
12361 emit_insn (gen_strset (destreg, mem,
12362 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12363 offset += 4;
12365 if (count & 0x02)
12367 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
12368 offset);
12369 emit_insn (gen_strset (destreg, mem,
12370 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12371 offset += 2;
12373 if (count & 0x01)
12375 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
12376 offset);
12377 emit_insn (gen_strset (destreg, mem,
12378 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12381 else
12383 rtx countreg2;
12384 rtx label = NULL;
12385 /* Compute desired alignment of the string operation. */
12386 int desired_alignment = (TARGET_PENTIUMPRO
12387 && (count == 0 || count >= (unsigned int) 260)
12388 ? 8 : UNITS_PER_WORD);
12390 /* In case we don't know anything about the alignment, default to the
12391 library version, since it is usually equally fast and results in
12392 shorter code.
12394 Also emit a call when we know that the count is large and the call
12395 overhead will not be important. */
12396 if (!TARGET_INLINE_ALL_STRINGOPS
12397 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12398 return 0;
12400 if (TARGET_SINGLE_STRINGOP)
12401 emit_insn (gen_cld ());
12403 countreg2 = gen_reg_rtx (Pmode);
12404 countreg = copy_to_mode_reg (counter_mode, count_exp);
12405 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
12406 /* Get rid of MEM_OFFSET, it won't be accurate. */
12407 dst = change_address (dst, BLKmode, destreg);
12409 if (count == 0 && align < desired_alignment)
12411 label = gen_label_rtx ();
12412 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12413 LEU, 0, counter_mode, 1, label);
12415 if (align <= 1)
12417 rtx label = ix86_expand_aligntest (destreg, 1);
12418 emit_insn (gen_strset (destreg, dst,
12419 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12420 ix86_adjust_counter (countreg, 1);
12421 emit_label (label);
12422 LABEL_NUSES (label) = 1;
12424 if (align <= 2)
12426 rtx label = ix86_expand_aligntest (destreg, 2);
12427 emit_insn (gen_strset (destreg, dst,
12428 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12429 ix86_adjust_counter (countreg, 2);
12430 emit_label (label);
12431 LABEL_NUSES (label) = 1;
12433 if (align <= 4 && desired_alignment > 4)
12435 rtx label = ix86_expand_aligntest (destreg, 4);
12436 emit_insn (gen_strset (destreg, dst,
12437 (TARGET_64BIT
12438 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
12439 : zeroreg)));
12440 ix86_adjust_counter (countreg, 4);
12441 emit_label (label);
12442 LABEL_NUSES (label) = 1;
12445 if (label && desired_alignment > 4 && !TARGET_64BIT)
12447 emit_label (label);
12448 LABEL_NUSES (label) = 1;
12449 label = NULL_RTX;
12452 if (!TARGET_SINGLE_STRINGOP)
12453 emit_insn (gen_cld ());
12454 if (TARGET_64BIT)
12456 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12457 GEN_INT (3)));
12458 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12460 else
12462 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12463 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12465 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12466 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
12468 if (label)
12470 emit_label (label);
12471 LABEL_NUSES (label) = 1;
12474 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12475 emit_insn (gen_strset (destreg, dst,
12476 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12477 if (TARGET_64BIT && (align <= 4 || count == 0))
12479 rtx label = ix86_expand_aligntest (countreg, 4);
12480 emit_insn (gen_strset (destreg, dst,
12481 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12482 emit_label (label);
12483 LABEL_NUSES (label) = 1;
12485 if (align > 2 && count != 0 && (count & 2))
12486 emit_insn (gen_strset (destreg, dst,
12487 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12488 if (align <= 2 || count == 0)
12490 rtx label = ix86_expand_aligntest (countreg, 2);
12491 emit_insn (gen_strset (destreg, dst,
12492 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12493 emit_label (label);
12494 LABEL_NUSES (label) = 1;
12496 if (align > 1 && count != 0 && (count & 1))
12497 emit_insn (gen_strset (destreg, dst,
12498 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12499 if (align <= 1 || count == 0)
12501 rtx label = ix86_expand_aligntest (countreg, 1);
12502 emit_insn (gen_strset (destreg, dst,
12503 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12504 emit_label (label);
12505 LABEL_NUSES (label) = 1;
12508 return 1;
12511 /* Expand strlen. */
12513 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
12515 rtx addr, scratch1, scratch2, scratch3, scratch4;
12517 /* The generic case of the strlen expander is long. Avoid
12518 expanding it unless TARGET_INLINE_ALL_STRINGOPS. */
12520 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12521 && !TARGET_INLINE_ALL_STRINGOPS
12522 && !optimize_size
12523 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
12524 return 0;
12526 addr = force_reg (Pmode, XEXP (src, 0));
12527 scratch1 = gen_reg_rtx (Pmode);
12529 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12530 && !optimize_size)
12532 /* Well, it seems that some optimizer does not combine a call like
12533 foo(strlen(bar), strlen(bar));
12534 when the move and the subtraction are done here. It does calculate
12535 the length just once when these instructions are done inside
12536 output_strlen_unroll(). But since &bar[strlen(bar)] is
12537 often used and this uses one fewer register for the lifetime of
12538 output_strlen_unroll(), this is better. */
12540 emit_move_insn (out, addr);
12542 ix86_expand_strlensi_unroll_1 (out, src, align);
12544 /* strlensi_unroll_1 returns the address of the zero at the end of
12545 the string, like memchr(), so compute the length by subtracting
12546 the start address. */
12547 if (TARGET_64BIT)
12548 emit_insn (gen_subdi3 (out, out, addr));
12549 else
12550 emit_insn (gen_subsi3 (out, out, addr));
12552 else
12554 rtx unspec;
12555 scratch2 = gen_reg_rtx (Pmode);
12556 scratch3 = gen_reg_rtx (Pmode);
12557 scratch4 = force_reg (Pmode, constm1_rtx);
12559 emit_move_insn (scratch3, addr);
12560 eoschar = force_reg (QImode, eoschar);
12562 emit_insn (gen_cld ());
12563 src = replace_equiv_address_nv (src, scratch3);
12565 /* If .md starts supporting :P, this can be done in .md. */
12566 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
12567 scratch4), UNSPEC_SCAS);
12568 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
12569 if (TARGET_64BIT)
12571 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
12572 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
12574 else
12576 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
12577 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
12580 return 1;
12583 /* Expand the appropriate insns for doing strlen if not just doing
12584 repnz; scasb
12586 out = result, initialized with the start address
12587 align_rtx = alignment of the address.
12588 scratch = scratch register, initialized with the start address when
12589 not aligned, otherwise undefined
12591 This is just the body. It needs the initializations mentioned above and
12592 some address computing at the end. These things are done in i386.md. */
12594 static void
12595 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
12597 int align;
12598 rtx tmp;
12599 rtx align_2_label = NULL_RTX;
12600 rtx align_3_label = NULL_RTX;
12601 rtx align_4_label = gen_label_rtx ();
12602 rtx end_0_label = gen_label_rtx ();
12603 rtx mem;
12604 rtx tmpreg = gen_reg_rtx (SImode);
12605 rtx scratch = gen_reg_rtx (SImode);
12606 rtx cmp;
12608 align = 0;
12609 if (GET_CODE (align_rtx) == CONST_INT)
12610 align = INTVAL (align_rtx);
12612 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12614 /* Is there a known alignment and is it less than 4? */
12615 if (align < 4)
12617 rtx scratch1 = gen_reg_rtx (Pmode);
12618 emit_move_insn (scratch1, out);
12619 /* Is there a known alignment and is it not 2? */
12620 if (align != 2)
12622 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
12623 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
12625 /* Leave just the 3 lower bits. */
12626 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12627 NULL_RTX, 0, OPTAB_WIDEN);
12629 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12630 Pmode, 1, align_4_label);
12631 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12632 Pmode, 1, align_2_label);
12633 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12634 Pmode, 1, align_3_label);
12636 else
12638 /* Since the alignment is 2, we have to check 2 or 0 bytes;
12639 check whether it is aligned to a 4-byte boundary. */
12641 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12642 NULL_RTX, 0, OPTAB_WIDEN);
12644 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12645 Pmode, 1, align_4_label);
12648 mem = change_address (src, QImode, out);
12650 /* Now compare the bytes. */
12652 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
12653 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12654 QImode, 1, end_0_label);
12656 /* Increment the address. */
12657 if (TARGET_64BIT)
12658 emit_insn (gen_adddi3 (out, out, const1_rtx));
12659 else
12660 emit_insn (gen_addsi3 (out, out, const1_rtx));
12662 /* Not needed with an alignment of 2 */
12663 if (align != 2)
12665 emit_label (align_2_label);
12667 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12668 end_0_label);
12670 if (TARGET_64BIT)
12671 emit_insn (gen_adddi3 (out, out, const1_rtx));
12672 else
12673 emit_insn (gen_addsi3 (out, out, const1_rtx));
12675 emit_label (align_3_label);
12678 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12679 end_0_label);
12681 if (TARGET_64BIT)
12682 emit_insn (gen_adddi3 (out, out, const1_rtx));
12683 else
12684 emit_insn (gen_addsi3 (out, out, const1_rtx));
12687 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
12688 align this loop; doing so only makes the program larger and does not
12689 speed it up. */
12690 emit_label (align_4_label);
12692 mem = change_address (src, SImode, out);
12693 emit_move_insn (scratch, mem);
12694 if (TARGET_64BIT)
12695 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12696 else
12697 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
12699 /* This formula yields a nonzero result iff one of the bytes is zero.
12700 This saves three branches inside the loop and many cycles. */
12702 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12703 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12704 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12705 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12706 gen_int_mode (0x80808080, SImode)));
12707 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12708 align_4_label);
12710 if (TARGET_CMOVE)
12712 rtx reg = gen_reg_rtx (SImode);
12713 rtx reg2 = gen_reg_rtx (Pmode);
12714 emit_move_insn (reg, tmpreg);
12715 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12717 /* If zero is not in the first two bytes, move two bytes forward. */
12718 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12719 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12720 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12721 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12722 gen_rtx_IF_THEN_ELSE (SImode, tmp,
12723 reg,
12724 tmpreg)));
12725 /* Emit lea manually to avoid clobbering of flags. */
12726 emit_insn (gen_rtx_SET (SImode, reg2,
12727 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12729 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12730 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12731 emit_insn (gen_rtx_SET (VOIDmode, out,
12732 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12733 reg2,
12734 out)));
12737 else
12739 rtx end_2_label = gen_label_rtx ();
12740 /* Is zero in the first two bytes? */
12742 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12743 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12744 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12745 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12746 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12747 pc_rtx);
12748 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12749 JUMP_LABEL (tmp) = end_2_label;
12751 /* Not in the first two. Move two bytes forward. */
12752 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12753 if (TARGET_64BIT)
12754 emit_insn (gen_adddi3 (out, out, const2_rtx));
12755 else
12756 emit_insn (gen_addsi3 (out, out, const2_rtx));
12758 emit_label (end_2_label);
12762 /* Avoid branch in fixing the byte. */
12763 tmpreg = gen_lowpart (QImode, tmpreg);
12764 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
12765 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
12766 if (TARGET_64BIT)
12767 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12768 else
12769 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
12771 emit_label (end_0_label);
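/* A C sketch of the unrolled strlen expanded above: check bytes until the
   pointer is 4-byte aligned, then test whole words with the
   (x - 0x01010101) & ~x & 0x80808080 trick from the comment, and finally
   locate the zero byte within the last word.  The real expansion replaces
   that last step with the branch-free setcc/sbb sequence; this version is
   illustrative only.  */
static unsigned long
strlen_unroll_sketch (const char *s)
{
  const char *p = s;

  while (((unsigned long) p & 3) != 0)
    {
      if (*p == 0)
        return p - s;
      p++;
    }
  for (;;)
    {
      unsigned int x;
      __builtin_memcpy (&x, p, 4);      /* one aligned 32-bit load */
      if (((x - 0x01010101u) & ~x & 0x80808080u) != 0)
        break;                          /* some byte in X is zero */
      p += 4;
    }
  while (*p != 0)
    p++;
  return p - s;
}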
12774 void
12775 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12776 rtx callarg2 ATTRIBUTE_UNUSED,
12777 rtx pop, int sibcall)
12779 rtx use = NULL, call;
12781 if (pop == const0_rtx)
12782 pop = NULL;
12783 gcc_assert (!TARGET_64BIT || !pop);
12785 #if TARGET_MACHO
12786 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12787 fnaddr = machopic_indirect_call_target (fnaddr);
12788 #else
12789 /* Static functions and indirect calls don't need the pic register. */
12790 if (! TARGET_64BIT && flag_pic
12791 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12792 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12793 use_reg (&use, pic_offset_table_rtx);
12795 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12797 rtx al = gen_rtx_REG (QImode, 0);
12798 emit_move_insn (al, callarg2);
12799 use_reg (&use, al);
12801 #endif /* TARGET_MACHO */
12803 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12805 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12806 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12808 if (sibcall && TARGET_64BIT
12809 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12811 rtx addr;
12812 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12813 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12814 emit_move_insn (fnaddr, addr);
12815 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12818 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12819 if (retval)
12820 call = gen_rtx_SET (VOIDmode, retval, call);
12821 if (pop)
12823 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12824 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12825 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
12828 call = emit_call_insn (call);
12829 if (use)
12830 CALL_INSN_FUNCTION_USAGE (call) = use;
12834 /* Clear stack slot assignments remembered from previous functions.
12835 This is called from INIT_EXPANDERS once before RTL is emitted for each
12836 function. */
12838 static struct machine_function *
12839 ix86_init_machine_status (void)
12841 struct machine_function *f;
12843 f = ggc_alloc_cleared (sizeof (struct machine_function));
12844 f->use_fast_prologue_epilogue_nregs = -1;
12846 return f;
12849 /* Return a MEM corresponding to a stack slot with mode MODE.
12850 Allocate a new slot if necessary.
12852 The RTL for a function can have several slots available: N is
12853 which slot to use. */
12856 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12858 struct stack_local_entry *s;
12860 gcc_assert (n < MAX_386_STACK_LOCALS);
12862 for (s = ix86_stack_locals; s; s = s->next)
12863 if (s->mode == mode && s->n == n)
12864 return s->rtl;
12866 s = (struct stack_local_entry *)
12867 ggc_alloc (sizeof (struct stack_local_entry));
12868 s->n = n;
12869 s->mode = mode;
12870 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12872 s->next = ix86_stack_locals;
12873 ix86_stack_locals = s;
12874 return s->rtl;
12877 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12879 static GTY(()) rtx ix86_tls_symbol;
12881 ix86_tls_get_addr (void)
12884 if (!ix86_tls_symbol)
12886 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12887 (TARGET_GNU_TLS && !TARGET_64BIT)
12888 ? "___tls_get_addr"
12889 : "__tls_get_addr");
12892 return ix86_tls_symbol;
12895 /* Calculate the length of the memory address in the instruction
12896 encoding. Does not include the one-byte modrm, opcode, or prefix. */
12899 memory_address_length (rtx addr)
12901 struct ix86_address parts;
12902 rtx base, index, disp;
12903 int len;
12904 int ok;
12906 if (GET_CODE (addr) == PRE_DEC
12907 || GET_CODE (addr) == POST_INC
12908 || GET_CODE (addr) == PRE_MODIFY
12909 || GET_CODE (addr) == POST_MODIFY)
12910 return 0;
12912 ok = ix86_decompose_address (addr, &parts);
12913 gcc_assert (ok);
12915 if (parts.base && GET_CODE (parts.base) == SUBREG)
12916 parts.base = SUBREG_REG (parts.base);
12917 if (parts.index && GET_CODE (parts.index) == SUBREG)
12918 parts.index = SUBREG_REG (parts.index);
12920 base = parts.base;
12921 index = parts.index;
12922 disp = parts.disp;
12923 len = 0;
12925 /* Rule of thumb:
12926 - esp as the base always wants an index,
12927 - ebp as the base always wants a displacement. */
12929 /* Register Indirect. */
12930 if (base && !index && !disp)
12932 /* esp (for its index) and ebp (for its displacement) need
12933 the two-byte modrm form. */
12934 if (addr == stack_pointer_rtx
12935 || addr == arg_pointer_rtx
12936 || addr == frame_pointer_rtx
12937 || addr == hard_frame_pointer_rtx)
12938 len = 1;
12941 /* Direct Addressing. */
12942 else if (disp && !base && !index)
12943 len = 4;
12945 else
12947 /* Find the length of the displacement constant. */
12948 if (disp)
12950 if (GET_CODE (disp) == CONST_INT
12951 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
12952 && base)
12953 len = 1;
12954 else
12955 len = 4;
12957 /* ebp always wants a displacement. */
12958 else if (base == hard_frame_pointer_rtx)
12959 len = 1;
12961 /* An index requires the two-byte modrm form.... */
12962 if (index
12963 /* ...like esp, which always wants an index. */
12964 || base == stack_pointer_rtx
12965 || base == arg_pointer_rtx
12966 || base == frame_pointer_rtx)
12967 len += 1;
12970 return len;
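/* Illustrative lengths for a few addressing modes under the rules above
   (bytes beyond the opcode and the one-byte modrm):
     (%eax)           -> 0   plain register indirect
     (%esp)           -> 1   SIB byte needed
     (%ebp)           -> 1   needs a disp8 of zero
     8(%eax)          -> 1   disp8
     0x1000(%eax)     -> 4   disp32
     4(%eax,%ebx,2)   -> 2   disp8 plus SIB byte
     0x1234           -> 4   direct (absolute) addressing  */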
12973 /* Compute default value for "length_immediate" attribute. When SHORTFORM
12974 is set, expect that the insn has an 8-bit immediate alternative. */
12976 ix86_attr_length_immediate_default (rtx insn, int shortform)
12978 int len = 0;
12979 int i;
12980 extract_insn_cached (insn);
12981 for (i = recog_data.n_operands - 1; i >= 0; --i)
12982 if (CONSTANT_P (recog_data.operand[i]))
12984 gcc_assert (!len);
12985 if (shortform
12986 && GET_CODE (recog_data.operand[i]) == CONST_INT
12987 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
12988 len = 1;
12989 else
12991 switch (get_attr_mode (insn))
12993 case MODE_QI:
12994 len+=1;
12995 break;
12996 case MODE_HI:
12997 len+=2;
12998 break;
12999 case MODE_SI:
13000 len+=4;
13001 break;
13002 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
13003 case MODE_DI:
13004 len+=4;
13005 break;
13006 default:
13007 fatal_insn ("unknown insn mode", insn);
13011 return len;
13013 /* Compute default value for "length_address" attribute. */
13015 ix86_attr_length_address_default (rtx insn)
13017 int i;
13019 if (get_attr_type (insn) == TYPE_LEA)
13021 rtx set = PATTERN (insn);
13023 if (GET_CODE (set) == PARALLEL)
13024 set = XVECEXP (set, 0, 0);
13026 gcc_assert (GET_CODE (set) == SET);
13028 return memory_address_length (SET_SRC (set));
13031 extract_insn_cached (insn);
13032 for (i = recog_data.n_operands - 1; i >= 0; --i)
13033 if (GET_CODE (recog_data.operand[i]) == MEM)
13035 return memory_address_length (XEXP (recog_data.operand[i], 0));
13036 break;
13038 return 0;
13041 /* Return the maximum number of instructions a cpu can issue. */
13043 static int
13044 ix86_issue_rate (void)
13046 switch (ix86_tune)
13048 case PROCESSOR_PENTIUM:
13049 case PROCESSOR_K6:
13050 return 2;
13052 case PROCESSOR_PENTIUMPRO:
13053 case PROCESSOR_PENTIUM4:
13054 case PROCESSOR_ATHLON:
13055 case PROCESSOR_K8:
13056 case PROCESSOR_NOCONA:
13057 return 3;
13059 default:
13060 return 1;
13064 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
13065 by DEP_INSN and nothing else set by DEP_INSN. */
13067 static int
13068 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13070 rtx set, set2;
13072 /* Simplify the test for uninteresting insns. */
13073 if (insn_type != TYPE_SETCC
13074 && insn_type != TYPE_ICMOV
13075 && insn_type != TYPE_FCMOV
13076 && insn_type != TYPE_IBR)
13077 return 0;
13079 if ((set = single_set (dep_insn)) != 0)
13081 set = SET_DEST (set);
13082 set2 = NULL_RTX;
13084 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
13085 && XVECLEN (PATTERN (dep_insn), 0) == 2
13086 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
13087 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
13089 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
13090 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
13092 else
13093 return 0;
13095 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
13096 return 0;
13098 /* This test is true if the dependent insn reads the flags but
13099 not any other potentially set register. */
13100 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
13101 return 0;
13103 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
13104 return 0;
13106 return 1;
13109 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
13110 address with operands set by DEP_INSN. */
13112 static int
13113 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13115 rtx addr;
13117 if (insn_type == TYPE_LEA
13118 && TARGET_PENTIUM)
13120 addr = PATTERN (insn);
13122 if (GET_CODE (addr) == PARALLEL)
13123 addr = XVECEXP (addr, 0, 0);
13125 gcc_assert (GET_CODE (addr) == SET);
13127 addr = SET_SRC (addr);
13129 else
13131 int i;
13132 extract_insn_cached (insn);
13133 for (i = recog_data.n_operands - 1; i >= 0; --i)
13134 if (GET_CODE (recog_data.operand[i]) == MEM)
13136 addr = XEXP (recog_data.operand[i], 0);
13137 goto found;
13139 return 0;
13140 found:;
13143 return modified_in_p (addr, dep_insn);
13146 static int
13147 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
13149 enum attr_type insn_type, dep_insn_type;
13150 enum attr_memory memory;
13151 rtx set, set2;
13152 int dep_insn_code_number;
13154 /* Anti and output dependencies have zero cost on all CPUs. */
13155 if (REG_NOTE_KIND (link) != 0)
13156 return 0;
13158 dep_insn_code_number = recog_memoized (dep_insn);
13160 /* If we can't recognize the insns, we can't really do anything. */
13161 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
13162 return cost;
13164 insn_type = get_attr_type (insn);
13165 dep_insn_type = get_attr_type (dep_insn);
13167 switch (ix86_tune)
13169 case PROCESSOR_PENTIUM:
13170 /* Address Generation Interlock adds a cycle of latency. */
13171 if (ix86_agi_dependant (insn, dep_insn, insn_type))
13172 cost += 1;
13174 /* ??? Compares pair with jump/setcc. */
13175 if (ix86_flags_dependant (insn, dep_insn, insn_type))
13176 cost = 0;
13178 /* Floating point stores require value to be ready one cycle earlier. */
13179 if (insn_type == TYPE_FMOV
13180 && get_attr_memory (insn) == MEMORY_STORE
13181 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13182 cost += 1;
13183 break;
13185 case PROCESSOR_PENTIUMPRO:
13186 memory = get_attr_memory (insn);
13188 /* INT->FP conversion is expensive. */
13189 if (get_attr_fp_int_src (dep_insn))
13190 cost += 5;
13192 /* There is one cycle extra latency between an FP op and a store. */
13193 if (insn_type == TYPE_FMOV
13194 && (set = single_set (dep_insn)) != NULL_RTX
13195 && (set2 = single_set (insn)) != NULL_RTX
13196 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
13197 && GET_CODE (SET_DEST (set2)) == MEM)
13198 cost += 1;
13200 /* Show the ability of the reorder buffer to hide the latency of a load by
13201 executing it in parallel with the previous instruction when the
13202 previous instruction is not needed to compute the address. */
13203 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13204 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13206 /* Claim moves to take one cycle, as the core can issue one load
13207 at a time and the next load can start a cycle later. */
13208 if (dep_insn_type == TYPE_IMOV
13209 || dep_insn_type == TYPE_FMOV)
13210 cost = 1;
13211 else if (cost > 1)
13212 cost--;
13214 break;
13216 case PROCESSOR_K6:
13217 memory = get_attr_memory (insn);
13219 /* The esp dependency is resolved before the instruction is really
13220 finished. */
13221 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
13222 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
13223 return 1;
13225 /* INT->FP conversion is expensive. */
13226 if (get_attr_fp_int_src (dep_insn))
13227 cost += 5;
13229 /* Show the ability of the reorder buffer to hide the latency of a load by
13230 executing it in parallel with the previous instruction when the
13231 previous instruction is not needed to compute the address. */
13232 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13233 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13235 /* Claim moves to take one cycle, as the core can issue one load
13236 at a time and the next load can start a cycle later. */
13237 if (dep_insn_type == TYPE_IMOV
13238 || dep_insn_type == TYPE_FMOV)
13239 cost = 1;
13240 else if (cost > 2)
13241 cost -= 2;
13242 else
13243 cost = 1;
13245 break;
13247 case PROCESSOR_ATHLON:
13248 case PROCESSOR_K8:
13249 memory = get_attr_memory (insn);
13251 /* Show the ability of the reorder buffer to hide the latency of a load by
13252 executing it in parallel with the previous instruction when the
13253 previous instruction is not needed to compute the address. */
13254 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13255 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13257 enum attr_unit unit = get_attr_unit (insn);
13258 int loadcost = 3;
13260 /* Because of the difference between the length of integer and
13261 floating unit pipeline preparation stages, the memory operands
13262 for floating point are cheaper.
13264 ??? For Athlon the difference is most probably 2. */
13265 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
13266 loadcost = 3;
13267 else
13268 loadcost = TARGET_ATHLON ? 2 : 0;
13270 if (cost >= loadcost)
13271 cost -= loadcost;
13272 else
13273 cost = 0;
13276 default:
13277 break;
13280 return cost;
13283 /* How many alternative schedules to try. This should be as wide as the
13284 scheduling freedom in the DFA, but no wider. Making this value too
13285 large results in extra work for the scheduler. */
13287 static int
13288 ia32_multipass_dfa_lookahead (void)
13290 if (ix86_tune == PROCESSOR_PENTIUM)
13291 return 2;
13293 if (ix86_tune == PROCESSOR_PENTIUMPRO
13294 || ix86_tune == PROCESSOR_K6)
13295 return 1;
13297 else
13298 return 0;
13302 /* Compute the alignment given to a constant that is being placed in memory.
13303 EXP is the constant and ALIGN is the alignment that the object would
13304 ordinarily have.
13305 The value of this function is used instead of that alignment to align
13306 the object. */
13309 ix86_constant_alignment (tree exp, int align)
13311 if (TREE_CODE (exp) == REAL_CST)
13313 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
13314 return 64;
13315 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
13316 return 128;
13318 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
13319 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
13320 return BITS_PER_WORD;
13322 return align;
13325 /* Compute the alignment for a static variable.
13326 TYPE is the data type, and ALIGN is the alignment that
13327 the object would ordinarily have. The value of this function is used
13328 instead of that alignment to align the object. */
13331 ix86_data_alignment (tree type, int align)
13333 if (AGGREGATE_TYPE_P (type)
13334 && TYPE_SIZE (type)
13335 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13336 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
13337 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
13338 return 256;
13340 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
13341 to a 16-byte boundary. */
13342 if (TARGET_64BIT)
13344 if (AGGREGATE_TYPE_P (type)
13345 && TYPE_SIZE (type)
13346 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13347 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
13348 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13349 return 128;
13352 if (TREE_CODE (type) == ARRAY_TYPE)
13354 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13355 return 64;
13356 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13357 return 128;
13359 else if (TREE_CODE (type) == COMPLEX_TYPE)
13362 if (TYPE_MODE (type) == DCmode && align < 64)
13363 return 64;
13364 if (TYPE_MODE (type) == XCmode && align < 128)
13365 return 128;
13367 else if ((TREE_CODE (type) == RECORD_TYPE
13368 || TREE_CODE (type) == UNION_TYPE
13369 || TREE_CODE (type) == QUAL_UNION_TYPE)
13370 && TYPE_FIELDS (type))
13372 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13373 return 64;
13374 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13375 return 128;
13377 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13378 || TREE_CODE (type) == INTEGER_TYPE)
13380 if (TYPE_MODE (type) == DFmode && align < 64)
13381 return 64;
13382 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13383 return 128;
13386 return align;
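/* Illustrative effect of the rules above, assuming the incoming ALIGN is
   smaller than the value returned:
     double x;        -> raised to 64 bits (DFmode scalar)
     char buf[300];   -> raised to 256 bits (aggregate of 256 bits or more)
     double d[2];     -> 64 bits from the DFmode element, and 128 bits on
                         x86-64, where 16-byte-or-larger arrays get 16-byte
                         alignment.  */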
13389 /* Compute the alignment for a local variable.
13390 TYPE is the data type, and ALIGN is the alignment that
13391 the object would ordinarily have. The value of this macro is used
13392 instead of that alignment to align the object. */
13395 ix86_local_alignment (tree type, int align)
13397 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
13398 to a 16-byte boundary. */
13399 if (TARGET_64BIT)
13401 if (AGGREGATE_TYPE_P (type)
13402 && TYPE_SIZE (type)
13403 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13404 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
13405 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13406 return 128;
13408 if (TREE_CODE (type) == ARRAY_TYPE)
13410 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13411 return 64;
13412 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13413 return 128;
13415 else if (TREE_CODE (type) == COMPLEX_TYPE)
13417 if (TYPE_MODE (type) == DCmode && align < 64)
13418 return 64;
13419 if (TYPE_MODE (type) == XCmode && align < 128)
13420 return 128;
13422 else if ((TREE_CODE (type) == RECORD_TYPE
13423 || TREE_CODE (type) == UNION_TYPE
13424 || TREE_CODE (type) == QUAL_UNION_TYPE)
13425 && TYPE_FIELDS (type))
13427 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13428 return 64;
13429 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13430 return 128;
13432 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13433 || TREE_CODE (type) == INTEGER_TYPE)
13436 if (TYPE_MODE (type) == DFmode && align < 64)
13437 return 64;
13438 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13439 return 128;
13441 return align;
13444 /* Emit RTL insns to initialize the variable parts of a trampoline.
13445 FNADDR is an RTX for the address of the function's pure code.
13446 CXT is an RTX for the static chain value for the function. */
13447 void
13448 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
13450 if (!TARGET_64BIT)
13452 /* Compute offset from the end of the jmp to the target function. */
13453 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
13454 plus_constant (tramp, 10),
13455 NULL_RTX, 1, OPTAB_DIRECT);
13456 emit_move_insn (gen_rtx_MEM (QImode, tramp),
13457 gen_int_mode (0xb9, QImode));
13458 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
13459 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
13460 gen_int_mode (0xe9, QImode));
13461 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
13463 else
13465 int offset = 0;
13466 /* Try to load the address using the shorter movl instead of movabs.
13467 We may want to support movq for kernel mode, but the kernel does not use
13468 trampolines at the moment. */
13469 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
13471 fnaddr = copy_to_mode_reg (DImode, fnaddr);
13472 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13473 gen_int_mode (0xbb41, HImode));
13474 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
13475 gen_lowpart (SImode, fnaddr));
13476 offset += 6;
13478 else
13480 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13481 gen_int_mode (0xbb49, HImode));
13482 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13483 fnaddr);
13484 offset += 10;
13486 /* Load static chain using movabs to r10. */
13487 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13488 gen_int_mode (0xba49, HImode));
13489 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13490 cxt);
13491 offset += 10;
13492 /* Jump to r11. */
13493 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13494 gen_int_mode (0xff49, HImode));
13495 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
13496 gen_int_mode (0xe3, QImode));
13497 offset += 3;
13498 gcc_assert (offset <= TRAMPOLINE_SIZE);
13501 #ifdef ENABLE_EXECUTE_STACK
13502 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
13503 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
13504 #endif
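/* For reference, the 32-bit trampoline written above assembles to
   (byte values in hex, operand widths in brackets; illustrative only):
     b9 <cxt:4>      movl  $<static chain>, %ecx
     e9 <disp:4>     jmp   <function>        (disp relative to tramp + 10)
   and the 64-bit variant to either
     41 bb <fn:4>    movl  $<function>, %r11d   (zero-extendable address)
   or
     49 bb <fn:8>    movabs $<function>, %r11
   followed by
     49 ba <cxt:8>   movabs $<static chain>, %r10
     49 ff e3        jmpq  *%r11  */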
13507 /* Codes for all the SSE/MMX builtins. */
13508 enum ix86_builtins
13510 IX86_BUILTIN_ADDPS,
13511 IX86_BUILTIN_ADDSS,
13512 IX86_BUILTIN_DIVPS,
13513 IX86_BUILTIN_DIVSS,
13514 IX86_BUILTIN_MULPS,
13515 IX86_BUILTIN_MULSS,
13516 IX86_BUILTIN_SUBPS,
13517 IX86_BUILTIN_SUBSS,
13519 IX86_BUILTIN_CMPEQPS,
13520 IX86_BUILTIN_CMPLTPS,
13521 IX86_BUILTIN_CMPLEPS,
13522 IX86_BUILTIN_CMPGTPS,
13523 IX86_BUILTIN_CMPGEPS,
13524 IX86_BUILTIN_CMPNEQPS,
13525 IX86_BUILTIN_CMPNLTPS,
13526 IX86_BUILTIN_CMPNLEPS,
13527 IX86_BUILTIN_CMPNGTPS,
13528 IX86_BUILTIN_CMPNGEPS,
13529 IX86_BUILTIN_CMPORDPS,
13530 IX86_BUILTIN_CMPUNORDPS,
13531 IX86_BUILTIN_CMPEQSS,
13532 IX86_BUILTIN_CMPLTSS,
13533 IX86_BUILTIN_CMPLESS,
13534 IX86_BUILTIN_CMPNEQSS,
13535 IX86_BUILTIN_CMPNLTSS,
13536 IX86_BUILTIN_CMPNLESS,
13537 IX86_BUILTIN_CMPNGTSS,
13538 IX86_BUILTIN_CMPNGESS,
13539 IX86_BUILTIN_CMPORDSS,
13540 IX86_BUILTIN_CMPUNORDSS,
13542 IX86_BUILTIN_COMIEQSS,
13543 IX86_BUILTIN_COMILTSS,
13544 IX86_BUILTIN_COMILESS,
13545 IX86_BUILTIN_COMIGTSS,
13546 IX86_BUILTIN_COMIGESS,
13547 IX86_BUILTIN_COMINEQSS,
13548 IX86_BUILTIN_UCOMIEQSS,
13549 IX86_BUILTIN_UCOMILTSS,
13550 IX86_BUILTIN_UCOMILESS,
13551 IX86_BUILTIN_UCOMIGTSS,
13552 IX86_BUILTIN_UCOMIGESS,
13553 IX86_BUILTIN_UCOMINEQSS,
13555 IX86_BUILTIN_CVTPI2PS,
13556 IX86_BUILTIN_CVTPS2PI,
13557 IX86_BUILTIN_CVTSI2SS,
13558 IX86_BUILTIN_CVTSI642SS,
13559 IX86_BUILTIN_CVTSS2SI,
13560 IX86_BUILTIN_CVTSS2SI64,
13561 IX86_BUILTIN_CVTTPS2PI,
13562 IX86_BUILTIN_CVTTSS2SI,
13563 IX86_BUILTIN_CVTTSS2SI64,
13565 IX86_BUILTIN_MAXPS,
13566 IX86_BUILTIN_MAXSS,
13567 IX86_BUILTIN_MINPS,
13568 IX86_BUILTIN_MINSS,
13570 IX86_BUILTIN_LOADUPS,
13571 IX86_BUILTIN_STOREUPS,
13572 IX86_BUILTIN_MOVSS,
13574 IX86_BUILTIN_MOVHLPS,
13575 IX86_BUILTIN_MOVLHPS,
13576 IX86_BUILTIN_LOADHPS,
13577 IX86_BUILTIN_LOADLPS,
13578 IX86_BUILTIN_STOREHPS,
13579 IX86_BUILTIN_STORELPS,
13581 IX86_BUILTIN_MASKMOVQ,
13582 IX86_BUILTIN_MOVMSKPS,
13583 IX86_BUILTIN_PMOVMSKB,
13585 IX86_BUILTIN_MOVNTPS,
13586 IX86_BUILTIN_MOVNTQ,
13588 IX86_BUILTIN_LOADDQU,
13589 IX86_BUILTIN_STOREDQU,
13591 IX86_BUILTIN_PACKSSWB,
13592 IX86_BUILTIN_PACKSSDW,
13593 IX86_BUILTIN_PACKUSWB,
13595 IX86_BUILTIN_PADDB,
13596 IX86_BUILTIN_PADDW,
13597 IX86_BUILTIN_PADDD,
13598 IX86_BUILTIN_PADDQ,
13599 IX86_BUILTIN_PADDSB,
13600 IX86_BUILTIN_PADDSW,
13601 IX86_BUILTIN_PADDUSB,
13602 IX86_BUILTIN_PADDUSW,
13603 IX86_BUILTIN_PSUBB,
13604 IX86_BUILTIN_PSUBW,
13605 IX86_BUILTIN_PSUBD,
13606 IX86_BUILTIN_PSUBQ,
13607 IX86_BUILTIN_PSUBSB,
13608 IX86_BUILTIN_PSUBSW,
13609 IX86_BUILTIN_PSUBUSB,
13610 IX86_BUILTIN_PSUBUSW,
13612 IX86_BUILTIN_PAND,
13613 IX86_BUILTIN_PANDN,
13614 IX86_BUILTIN_POR,
13615 IX86_BUILTIN_PXOR,
13617 IX86_BUILTIN_PAVGB,
13618 IX86_BUILTIN_PAVGW,
13620 IX86_BUILTIN_PCMPEQB,
13621 IX86_BUILTIN_PCMPEQW,
13622 IX86_BUILTIN_PCMPEQD,
13623 IX86_BUILTIN_PCMPGTB,
13624 IX86_BUILTIN_PCMPGTW,
13625 IX86_BUILTIN_PCMPGTD,
13627 IX86_BUILTIN_PMADDWD,
13629 IX86_BUILTIN_PMAXSW,
13630 IX86_BUILTIN_PMAXUB,
13631 IX86_BUILTIN_PMINSW,
13632 IX86_BUILTIN_PMINUB,
13634 IX86_BUILTIN_PMULHUW,
13635 IX86_BUILTIN_PMULHW,
13636 IX86_BUILTIN_PMULLW,
13638 IX86_BUILTIN_PSADBW,
13639 IX86_BUILTIN_PSHUFW,
13641 IX86_BUILTIN_PSLLW,
13642 IX86_BUILTIN_PSLLD,
13643 IX86_BUILTIN_PSLLQ,
13644 IX86_BUILTIN_PSRAW,
13645 IX86_BUILTIN_PSRAD,
13646 IX86_BUILTIN_PSRLW,
13647 IX86_BUILTIN_PSRLD,
13648 IX86_BUILTIN_PSRLQ,
13649 IX86_BUILTIN_PSLLWI,
13650 IX86_BUILTIN_PSLLDI,
13651 IX86_BUILTIN_PSLLQI,
13652 IX86_BUILTIN_PSRAWI,
13653 IX86_BUILTIN_PSRADI,
13654 IX86_BUILTIN_PSRLWI,
13655 IX86_BUILTIN_PSRLDI,
13656 IX86_BUILTIN_PSRLQI,
13658 IX86_BUILTIN_PUNPCKHBW,
13659 IX86_BUILTIN_PUNPCKHWD,
13660 IX86_BUILTIN_PUNPCKHDQ,
13661 IX86_BUILTIN_PUNPCKLBW,
13662 IX86_BUILTIN_PUNPCKLWD,
13663 IX86_BUILTIN_PUNPCKLDQ,
13665 IX86_BUILTIN_SHUFPS,
13667 IX86_BUILTIN_RCPPS,
13668 IX86_BUILTIN_RCPSS,
13669 IX86_BUILTIN_RSQRTPS,
13670 IX86_BUILTIN_RSQRTSS,
13671 IX86_BUILTIN_SQRTPS,
13672 IX86_BUILTIN_SQRTSS,
13674 IX86_BUILTIN_UNPCKHPS,
13675 IX86_BUILTIN_UNPCKLPS,
13677 IX86_BUILTIN_ANDPS,
13678 IX86_BUILTIN_ANDNPS,
13679 IX86_BUILTIN_ORPS,
13680 IX86_BUILTIN_XORPS,
13682 IX86_BUILTIN_EMMS,
13683 IX86_BUILTIN_LDMXCSR,
13684 IX86_BUILTIN_STMXCSR,
13685 IX86_BUILTIN_SFENCE,
13687 /* 3DNow! Original */
13688 IX86_BUILTIN_FEMMS,
13689 IX86_BUILTIN_PAVGUSB,
13690 IX86_BUILTIN_PF2ID,
13691 IX86_BUILTIN_PFACC,
13692 IX86_BUILTIN_PFADD,
13693 IX86_BUILTIN_PFCMPEQ,
13694 IX86_BUILTIN_PFCMPGE,
13695 IX86_BUILTIN_PFCMPGT,
13696 IX86_BUILTIN_PFMAX,
13697 IX86_BUILTIN_PFMIN,
13698 IX86_BUILTIN_PFMUL,
13699 IX86_BUILTIN_PFRCP,
13700 IX86_BUILTIN_PFRCPIT1,
13701 IX86_BUILTIN_PFRCPIT2,
13702 IX86_BUILTIN_PFRSQIT1,
13703 IX86_BUILTIN_PFRSQRT,
13704 IX86_BUILTIN_PFSUB,
13705 IX86_BUILTIN_PFSUBR,
13706 IX86_BUILTIN_PI2FD,
13707 IX86_BUILTIN_PMULHRW,
13709 /* 3DNow! Athlon Extensions */
13710 IX86_BUILTIN_PF2IW,
13711 IX86_BUILTIN_PFNACC,
13712 IX86_BUILTIN_PFPNACC,
13713 IX86_BUILTIN_PI2FW,
13714 IX86_BUILTIN_PSWAPDSI,
13715 IX86_BUILTIN_PSWAPDSF,
13717 /* SSE2 */
13718 IX86_BUILTIN_ADDPD,
13719 IX86_BUILTIN_ADDSD,
13720 IX86_BUILTIN_DIVPD,
13721 IX86_BUILTIN_DIVSD,
13722 IX86_BUILTIN_MULPD,
13723 IX86_BUILTIN_MULSD,
13724 IX86_BUILTIN_SUBPD,
13725 IX86_BUILTIN_SUBSD,
13727 IX86_BUILTIN_CMPEQPD,
13728 IX86_BUILTIN_CMPLTPD,
13729 IX86_BUILTIN_CMPLEPD,
13730 IX86_BUILTIN_CMPGTPD,
13731 IX86_BUILTIN_CMPGEPD,
13732 IX86_BUILTIN_CMPNEQPD,
13733 IX86_BUILTIN_CMPNLTPD,
13734 IX86_BUILTIN_CMPNLEPD,
13735 IX86_BUILTIN_CMPNGTPD,
13736 IX86_BUILTIN_CMPNGEPD,
13737 IX86_BUILTIN_CMPORDPD,
13738 IX86_BUILTIN_CMPUNORDPD,
13739 IX86_BUILTIN_CMPNEPD,
13740 IX86_BUILTIN_CMPEQSD,
13741 IX86_BUILTIN_CMPLTSD,
13742 IX86_BUILTIN_CMPLESD,
13743 IX86_BUILTIN_CMPNEQSD,
13744 IX86_BUILTIN_CMPNLTSD,
13745 IX86_BUILTIN_CMPNLESD,
13746 IX86_BUILTIN_CMPORDSD,
13747 IX86_BUILTIN_CMPUNORDSD,
13748 IX86_BUILTIN_CMPNESD,
13750 IX86_BUILTIN_COMIEQSD,
13751 IX86_BUILTIN_COMILTSD,
13752 IX86_BUILTIN_COMILESD,
13753 IX86_BUILTIN_COMIGTSD,
13754 IX86_BUILTIN_COMIGESD,
13755 IX86_BUILTIN_COMINEQSD,
13756 IX86_BUILTIN_UCOMIEQSD,
13757 IX86_BUILTIN_UCOMILTSD,
13758 IX86_BUILTIN_UCOMILESD,
13759 IX86_BUILTIN_UCOMIGTSD,
13760 IX86_BUILTIN_UCOMIGESD,
13761 IX86_BUILTIN_UCOMINEQSD,
13763 IX86_BUILTIN_MAXPD,
13764 IX86_BUILTIN_MAXSD,
13765 IX86_BUILTIN_MINPD,
13766 IX86_BUILTIN_MINSD,
13768 IX86_BUILTIN_ANDPD,
13769 IX86_BUILTIN_ANDNPD,
13770 IX86_BUILTIN_ORPD,
13771 IX86_BUILTIN_XORPD,
13773 IX86_BUILTIN_SQRTPD,
13774 IX86_BUILTIN_SQRTSD,
13776 IX86_BUILTIN_UNPCKHPD,
13777 IX86_BUILTIN_UNPCKLPD,
13779 IX86_BUILTIN_SHUFPD,
13781 IX86_BUILTIN_LOADUPD,
13782 IX86_BUILTIN_STOREUPD,
13783 IX86_BUILTIN_MOVSD,
13785 IX86_BUILTIN_LOADHPD,
13786 IX86_BUILTIN_LOADLPD,
13788 IX86_BUILTIN_CVTDQ2PD,
13789 IX86_BUILTIN_CVTDQ2PS,
13791 IX86_BUILTIN_CVTPD2DQ,
13792 IX86_BUILTIN_CVTPD2PI,
13793 IX86_BUILTIN_CVTPD2PS,
13794 IX86_BUILTIN_CVTTPD2DQ,
13795 IX86_BUILTIN_CVTTPD2PI,
13797 IX86_BUILTIN_CVTPI2PD,
13798 IX86_BUILTIN_CVTSI2SD,
13799 IX86_BUILTIN_CVTSI642SD,
13801 IX86_BUILTIN_CVTSD2SI,
13802 IX86_BUILTIN_CVTSD2SI64,
13803 IX86_BUILTIN_CVTSD2SS,
13804 IX86_BUILTIN_CVTSS2SD,
13805 IX86_BUILTIN_CVTTSD2SI,
13806 IX86_BUILTIN_CVTTSD2SI64,
13808 IX86_BUILTIN_CVTPS2DQ,
13809 IX86_BUILTIN_CVTPS2PD,
13810 IX86_BUILTIN_CVTTPS2DQ,
13812 IX86_BUILTIN_MOVNTI,
13813 IX86_BUILTIN_MOVNTPD,
13814 IX86_BUILTIN_MOVNTDQ,
13816 /* SSE2 MMX */
13817 IX86_BUILTIN_MASKMOVDQU,
13818 IX86_BUILTIN_MOVMSKPD,
13819 IX86_BUILTIN_PMOVMSKB128,
13821 IX86_BUILTIN_PACKSSWB128,
13822 IX86_BUILTIN_PACKSSDW128,
13823 IX86_BUILTIN_PACKUSWB128,
13825 IX86_BUILTIN_PADDB128,
13826 IX86_BUILTIN_PADDW128,
13827 IX86_BUILTIN_PADDD128,
13828 IX86_BUILTIN_PADDQ128,
13829 IX86_BUILTIN_PADDSB128,
13830 IX86_BUILTIN_PADDSW128,
13831 IX86_BUILTIN_PADDUSB128,
13832 IX86_BUILTIN_PADDUSW128,
13833 IX86_BUILTIN_PSUBB128,
13834 IX86_BUILTIN_PSUBW128,
13835 IX86_BUILTIN_PSUBD128,
13836 IX86_BUILTIN_PSUBQ128,
13837 IX86_BUILTIN_PSUBSB128,
13838 IX86_BUILTIN_PSUBSW128,
13839 IX86_BUILTIN_PSUBUSB128,
13840 IX86_BUILTIN_PSUBUSW128,
13842 IX86_BUILTIN_PAND128,
13843 IX86_BUILTIN_PANDN128,
13844 IX86_BUILTIN_POR128,
13845 IX86_BUILTIN_PXOR128,
13847 IX86_BUILTIN_PAVGB128,
13848 IX86_BUILTIN_PAVGW128,
13850 IX86_BUILTIN_PCMPEQB128,
13851 IX86_BUILTIN_PCMPEQW128,
13852 IX86_BUILTIN_PCMPEQD128,
13853 IX86_BUILTIN_PCMPGTB128,
13854 IX86_BUILTIN_PCMPGTW128,
13855 IX86_BUILTIN_PCMPGTD128,
13857 IX86_BUILTIN_PMADDWD128,
13859 IX86_BUILTIN_PMAXSW128,
13860 IX86_BUILTIN_PMAXUB128,
13861 IX86_BUILTIN_PMINSW128,
13862 IX86_BUILTIN_PMINUB128,
13864 IX86_BUILTIN_PMULUDQ,
13865 IX86_BUILTIN_PMULUDQ128,
13866 IX86_BUILTIN_PMULHUW128,
13867 IX86_BUILTIN_PMULHW128,
13868 IX86_BUILTIN_PMULLW128,
13870 IX86_BUILTIN_PSADBW128,
13871 IX86_BUILTIN_PSHUFHW,
13872 IX86_BUILTIN_PSHUFLW,
13873 IX86_BUILTIN_PSHUFD,
13875 IX86_BUILTIN_PSLLW128,
13876 IX86_BUILTIN_PSLLD128,
13877 IX86_BUILTIN_PSLLQ128,
13878 IX86_BUILTIN_PSRAW128,
13879 IX86_BUILTIN_PSRAD128,
13880 IX86_BUILTIN_PSRLW128,
13881 IX86_BUILTIN_PSRLD128,
13882 IX86_BUILTIN_PSRLQ128,
13883 IX86_BUILTIN_PSLLDQI128,
13884 IX86_BUILTIN_PSLLWI128,
13885 IX86_BUILTIN_PSLLDI128,
13886 IX86_BUILTIN_PSLLQI128,
13887 IX86_BUILTIN_PSRAWI128,
13888 IX86_BUILTIN_PSRADI128,
13889 IX86_BUILTIN_PSRLDQI128,
13890 IX86_BUILTIN_PSRLWI128,
13891 IX86_BUILTIN_PSRLDI128,
13892 IX86_BUILTIN_PSRLQI128,
13894 IX86_BUILTIN_PUNPCKHBW128,
13895 IX86_BUILTIN_PUNPCKHWD128,
13896 IX86_BUILTIN_PUNPCKHDQ128,
13897 IX86_BUILTIN_PUNPCKHQDQ128,
13898 IX86_BUILTIN_PUNPCKLBW128,
13899 IX86_BUILTIN_PUNPCKLWD128,
13900 IX86_BUILTIN_PUNPCKLDQ128,
13901 IX86_BUILTIN_PUNPCKLQDQ128,
13903 IX86_BUILTIN_CLFLUSH,
13904 IX86_BUILTIN_MFENCE,
13905 IX86_BUILTIN_LFENCE,
13907 /* Prescott New Instructions. */
13908 IX86_BUILTIN_ADDSUBPS,
13909 IX86_BUILTIN_HADDPS,
13910 IX86_BUILTIN_HSUBPS,
13911 IX86_BUILTIN_MOVSHDUP,
13912 IX86_BUILTIN_MOVSLDUP,
13913 IX86_BUILTIN_ADDSUBPD,
13914 IX86_BUILTIN_HADDPD,
13915 IX86_BUILTIN_HSUBPD,
13916 IX86_BUILTIN_LDDQU,
13918 IX86_BUILTIN_MONITOR,
13919 IX86_BUILTIN_MWAIT,
13921 IX86_BUILTIN_VEC_INIT_V2SI,
13922 IX86_BUILTIN_VEC_INIT_V4HI,
13923 IX86_BUILTIN_VEC_INIT_V8QI,
13924 IX86_BUILTIN_VEC_EXT_V2DF,
13925 IX86_BUILTIN_VEC_EXT_V2DI,
13926 IX86_BUILTIN_VEC_EXT_V4SF,
13927 IX86_BUILTIN_VEC_EXT_V4SI,
13928 IX86_BUILTIN_VEC_EXT_V8HI,
13929 IX86_BUILTIN_VEC_EXT_V2SI,
13930 IX86_BUILTIN_VEC_EXT_V4HI,
13931 IX86_BUILTIN_VEC_SET_V8HI,
13932 IX86_BUILTIN_VEC_SET_V4HI,
13934 IX86_BUILTIN_MAX
13935 };
13937 #define def_builtin(MASK, NAME, TYPE, CODE) \
13938 do { \
13939 if ((MASK) & target_flags \
13940 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
13941 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
13942 NULL, NULL_TREE); \
13943 } while (0)
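/* For illustration only (a sketch of how the macro is used, mirroring the
   real registrations later in this file): a line such as

     def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd",
                  v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);

   reaches lang_hooks.builtin_function only when -msse2 is in target_flags,
   and a builtin whose mask includes MASK_64BIT is additionally skipped
   unless TARGET_64BIT holds.  */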
13945 /* Bits for builtin_description.flag. */
13947 /* Set when we don't support the comparison natively, and should
13948 swap the comparison operands in order to support it. */
13949 #define BUILTIN_DESC_SWAP_OPERANDS 1
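/* Example of the swap, as used by the bdesc_2arg entries below: SSE has no
   native "greater than" packed compare, so __builtin_ia32_cmpgtps is
   described with comparison LT plus this flag, and the expander emits

     __builtin_ia32_cmpgtps (a, b)   as   cmpltps (b, a)

   which yields the same mask.  */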
13951 struct builtin_description
13952 {
13953 const unsigned int mask;
13954 const enum insn_code icode;
13955 const char *const name;
13956 const enum ix86_builtins code;
13957 const enum rtx_code comparison;
13958 const unsigned int flag;
13959 };
13961 static const struct builtin_description bdesc_comi[] =
13962 {
13963 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
13964 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
13965 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
13966 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
13967 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
13968 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
13969 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
13970 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
13971 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
13972 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
13973 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
13974 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
13975 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
13976 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
13977 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
13978 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
13979 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
13980 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
13981 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
13982 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
13983 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
13984 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
13985 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
13986 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
13987 };
13989 static const struct builtin_description bdesc_2arg[] =
13990 {
13991 /* SSE */
13992 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
13993 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
13994 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
13995 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
13996 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
13997 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
13998 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
13999 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
14001 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
14002 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
14003 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
14004 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
14005 BUILTIN_DESC_SWAP_OPERANDS },
14006 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
14007 BUILTIN_DESC_SWAP_OPERANDS },
14008 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
14009 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
14010 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
14011 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
14012 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
14013 BUILTIN_DESC_SWAP_OPERANDS },
14014 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
14015 BUILTIN_DESC_SWAP_OPERANDS },
14016 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
14017 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
14018 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
14019 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
14020 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
14021 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
14022 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
14023 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
14024 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
14025 BUILTIN_DESC_SWAP_OPERANDS },
14026 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
14027 BUILTIN_DESC_SWAP_OPERANDS },
14028 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
14030 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
14031 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
14032 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
14033 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
14035 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
14036 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
14037 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
14038 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
14040 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
14041 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
14042 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
14043 { MASK_SSE, CODE_FOR_vec_interleave_highv4sf, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
14044 { MASK_SSE, CODE_FOR_vec_interleave_lowv4sf, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
14046 /* MMX */
14047 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
14048 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
14049 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
14050 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
14051 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
14052 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
14053 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
14054 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
14056 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
14057 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
14058 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
14059 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
14060 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
14061 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
14062 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
14063 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
14065 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
14066 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
14067 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
14069 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
14070 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
14071 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
14072 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
14074 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
14075 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
14077 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
14078 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
14079 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
14080 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
14081 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
14082 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
14084 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
14085 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
14086 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
14087 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
14089 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
14090 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
14091 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
14092 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
14093 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
14094 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
14096 /* Special. */
14097 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
14098 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
14099 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
14101 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
14102 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
14103 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
14105 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
14106 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
14107 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
14108 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
14109 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
14110 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
14112 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
14113 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
14114 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
14115 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
14116 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
14117 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
14119 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
14120 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
14121 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
14122 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
14124 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
14125 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
14127 /* SSE2 */
14128 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
14129 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
14130 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
14131 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
14132 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
14133 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
14134 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
14135 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
14137 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
14138 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
14139 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
14140 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
14141 BUILTIN_DESC_SWAP_OPERANDS },
14142 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
14143 BUILTIN_DESC_SWAP_OPERANDS },
14144 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
14145 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
14146 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
14147 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
14148 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
14149 BUILTIN_DESC_SWAP_OPERANDS },
14150 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
14151 BUILTIN_DESC_SWAP_OPERANDS },
14152 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
14153 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
14154 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
14155 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
14156 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
14157 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
14158 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
14159 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
14160 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
14162 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
14163 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
14164 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
14165 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
14167 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
14168 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
14169 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
14170 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
14172 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
14173 { MASK_SSE2, CODE_FOR_vec_interleave_highv2df, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
14174 { MASK_SSE2, CODE_FOR_vec_interleave_lowv2df, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
14176 /* SSE2 MMX */
14177 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
14178 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
14179 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
14180 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
14181 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
14182 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
14183 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
14184 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
14186 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
14187 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
14188 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
14189 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
14190 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
14191 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
14192 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
14193 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
14195 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
14196 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
14198 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
14199 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
14200 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
14201 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
14203 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
14204 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
14206 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
14207 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
14208 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
14209 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
14210 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
14211 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
14213 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
14214 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
14215 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
14216 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
14218 { MASK_SSE2, CODE_FOR_vec_interleave_highv16qi, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
14219 { MASK_SSE2, CODE_FOR_vec_interleave_highv8hi, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
14220 { MASK_SSE2, CODE_FOR_vec_interleave_highv4si, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
14221 { MASK_SSE2, CODE_FOR_vec_interleave_highv2di, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
14222 { MASK_SSE2, CODE_FOR_vec_interleave_lowv16qi, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
14223 { MASK_SSE2, CODE_FOR_vec_interleave_lowv8hi, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
14224 { MASK_SSE2, CODE_FOR_vec_interleave_lowv4si, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
14225 { MASK_SSE2, CODE_FOR_vec_interleave_lowv2di, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
14227 { MASK_SSE2, CODE_FOR_vec_pack_ssat_v8hi, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
14228 { MASK_SSE2, CODE_FOR_vec_pack_ssat_v4si, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
14229 { MASK_SSE2, CODE_FOR_vec_pack_usat_v8hi, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
14231 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
14232 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
14234 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
14235 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
14237 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
14238 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
14239 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
14241 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
14242 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
14243 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
14245 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
14246 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
14248 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
14250 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
14251 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
14252 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
14253 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
14255 /* SSE3 MMX */
14256 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
14257 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
14258 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
14259 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
14260 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
14261 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
14262 };
14264 static const struct builtin_description bdesc_1arg[] =
14265 {
14266 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
14267 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
14269 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
14270 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
14271 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
14273 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
14274 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
14275 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
14276 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
14277 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
14278 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
14280 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
14281 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
14283 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
14285 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
14286 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
14288 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
14289 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
14290 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
14291 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
14292 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
14294 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
14296 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
14297 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
14298 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
14299 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
14301 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
14302 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
14303 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
14305 /* SSE3 */
14306 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
14307 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
14308 };
14310 static void
14311 ix86_init_builtins (void)
14312 {
14313 if (TARGET_MMX)
14314 ix86_init_mmx_sse_builtins ();
14315 }
14317 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
14318 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
14319 builtins. */
14320 static void
14321 ix86_init_mmx_sse_builtins (void)
14322 {
14323 const struct builtin_description * d;
14324 size_t i;
14326 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
14327 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
14328 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
14329 tree V2DI_type_node
14330 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
14331 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
14332 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
14333 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
14334 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
14335 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
14336 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
14338 tree pchar_type_node = build_pointer_type (char_type_node);
14339 tree pcchar_type_node = build_pointer_type (
14340 build_type_variant (char_type_node, 1, 0));
14341 tree pfloat_type_node = build_pointer_type (float_type_node);
14342 tree pcfloat_type_node = build_pointer_type (
14343 build_type_variant (float_type_node, 1, 0));
14344 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
14345 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
14346 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
14348 /* Comparisons. */
14349 tree int_ftype_v4sf_v4sf
14350 = build_function_type_list (integer_type_node,
14351 V4SF_type_node, V4SF_type_node, NULL_TREE);
14352 tree v4si_ftype_v4sf_v4sf
14353 = build_function_type_list (V4SI_type_node,
14354 V4SF_type_node, V4SF_type_node, NULL_TREE);
14355 /* MMX/SSE/integer conversions. */
14356 tree int_ftype_v4sf
14357 = build_function_type_list (integer_type_node,
14358 V4SF_type_node, NULL_TREE);
14359 tree int64_ftype_v4sf
14360 = build_function_type_list (long_long_integer_type_node,
14361 V4SF_type_node, NULL_TREE);
14362 tree int_ftype_v8qi
14363 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
14364 tree v4sf_ftype_v4sf_int
14365 = build_function_type_list (V4SF_type_node,
14366 V4SF_type_node, integer_type_node, NULL_TREE);
14367 tree v4sf_ftype_v4sf_int64
14368 = build_function_type_list (V4SF_type_node,
14369 V4SF_type_node, long_long_integer_type_node,
14370 NULL_TREE);
14371 tree v4sf_ftype_v4sf_v2si
14372 = build_function_type_list (V4SF_type_node,
14373 V4SF_type_node, V2SI_type_node, NULL_TREE);
14375 /* Miscellaneous. */
14376 tree v8qi_ftype_v4hi_v4hi
14377 = build_function_type_list (V8QI_type_node,
14378 V4HI_type_node, V4HI_type_node, NULL_TREE);
14379 tree v4hi_ftype_v2si_v2si
14380 = build_function_type_list (V4HI_type_node,
14381 V2SI_type_node, V2SI_type_node, NULL_TREE);
14382 tree v4sf_ftype_v4sf_v4sf_int
14383 = build_function_type_list (V4SF_type_node,
14384 V4SF_type_node, V4SF_type_node,
14385 integer_type_node, NULL_TREE);
14386 tree v2si_ftype_v4hi_v4hi
14387 = build_function_type_list (V2SI_type_node,
14388 V4HI_type_node, V4HI_type_node, NULL_TREE);
14389 tree v4hi_ftype_v4hi_int
14390 = build_function_type_list (V4HI_type_node,
14391 V4HI_type_node, integer_type_node, NULL_TREE);
14392 tree v4hi_ftype_v4hi_di
14393 = build_function_type_list (V4HI_type_node,
14394 V4HI_type_node, long_long_unsigned_type_node,
14395 NULL_TREE);
14396 tree v2si_ftype_v2si_di
14397 = build_function_type_list (V2SI_type_node,
14398 V2SI_type_node, long_long_unsigned_type_node,
14399 NULL_TREE);
14400 tree void_ftype_void
14401 = build_function_type (void_type_node, void_list_node);
14402 tree void_ftype_unsigned
14403 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
14404 tree void_ftype_unsigned_unsigned
14405 = build_function_type_list (void_type_node, unsigned_type_node,
14406 unsigned_type_node, NULL_TREE);
14407 tree void_ftype_pcvoid_unsigned_unsigned
14408 = build_function_type_list (void_type_node, const_ptr_type_node,
14409 unsigned_type_node, unsigned_type_node,
14410 NULL_TREE);
14411 tree unsigned_ftype_void
14412 = build_function_type (unsigned_type_node, void_list_node);
14413 tree v2si_ftype_v4sf
14414 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
14415 /* Loads/stores. */
14416 tree void_ftype_v8qi_v8qi_pchar
14417 = build_function_type_list (void_type_node,
14418 V8QI_type_node, V8QI_type_node,
14419 pchar_type_node, NULL_TREE);
14420 tree v4sf_ftype_pcfloat
14421 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
14422 /* @@@ the type is bogus */
14423 tree v4sf_ftype_v4sf_pv2si
14424 = build_function_type_list (V4SF_type_node,
14425 V4SF_type_node, pv2si_type_node, NULL_TREE);
14426 tree void_ftype_pv2si_v4sf
14427 = build_function_type_list (void_type_node,
14428 pv2si_type_node, V4SF_type_node, NULL_TREE);
14429 tree void_ftype_pfloat_v4sf
14430 = build_function_type_list (void_type_node,
14431 pfloat_type_node, V4SF_type_node, NULL_TREE);
14432 tree void_ftype_pdi_di
14433 = build_function_type_list (void_type_node,
14434 pdi_type_node, long_long_unsigned_type_node,
14435 NULL_TREE);
14436 tree void_ftype_pv2di_v2di
14437 = build_function_type_list (void_type_node,
14438 pv2di_type_node, V2DI_type_node, NULL_TREE);
14439 /* Normal vector unops. */
14440 tree v4sf_ftype_v4sf
14441 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14443 /* Normal vector binops. */
14444 tree v4sf_ftype_v4sf_v4sf
14445 = build_function_type_list (V4SF_type_node,
14446 V4SF_type_node, V4SF_type_node, NULL_TREE);
14447 tree v8qi_ftype_v8qi_v8qi
14448 = build_function_type_list (V8QI_type_node,
14449 V8QI_type_node, V8QI_type_node, NULL_TREE);
14450 tree v4hi_ftype_v4hi_v4hi
14451 = build_function_type_list (V4HI_type_node,
14452 V4HI_type_node, V4HI_type_node, NULL_TREE);
14453 tree v2si_ftype_v2si_v2si
14454 = build_function_type_list (V2SI_type_node,
14455 V2SI_type_node, V2SI_type_node, NULL_TREE);
14456 tree di_ftype_di_di
14457 = build_function_type_list (long_long_unsigned_type_node,
14458 long_long_unsigned_type_node,
14459 long_long_unsigned_type_node, NULL_TREE);
14461 tree v2si_ftype_v2sf
14462 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
14463 tree v2sf_ftype_v2si
14464 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
14465 tree v2si_ftype_v2si
14466 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
14467 tree v2sf_ftype_v2sf
14468 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
14469 tree v2sf_ftype_v2sf_v2sf
14470 = build_function_type_list (V2SF_type_node,
14471 V2SF_type_node, V2SF_type_node, NULL_TREE);
14472 tree v2si_ftype_v2sf_v2sf
14473 = build_function_type_list (V2SI_type_node,
14474 V2SF_type_node, V2SF_type_node, NULL_TREE);
14475 tree pint_type_node = build_pointer_type (integer_type_node);
14476 tree pdouble_type_node = build_pointer_type (double_type_node);
14477 tree pcdouble_type_node = build_pointer_type (
14478 build_type_variant (double_type_node, 1, 0));
14479 tree int_ftype_v2df_v2df
14480 = build_function_type_list (integer_type_node,
14481 V2DF_type_node, V2DF_type_node, NULL_TREE);
14483 tree void_ftype_pcvoid
14484 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
14485 tree v4sf_ftype_v4si
14486 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
14487 tree v4si_ftype_v4sf
14488 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
14489 tree v2df_ftype_v4si
14490 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
14491 tree v4si_ftype_v2df
14492 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
14493 tree v2si_ftype_v2df
14494 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
14495 tree v4sf_ftype_v2df
14496 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
14497 tree v2df_ftype_v2si
14498 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
14499 tree v2df_ftype_v4sf
14500 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
14501 tree int_ftype_v2df
14502 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
14503 tree int64_ftype_v2df
14504 = build_function_type_list (long_long_integer_type_node,
14505 V2DF_type_node, NULL_TREE);
14506 tree v2df_ftype_v2df_int
14507 = build_function_type_list (V2DF_type_node,
14508 V2DF_type_node, integer_type_node, NULL_TREE);
14509 tree v2df_ftype_v2df_int64
14510 = build_function_type_list (V2DF_type_node,
14511 V2DF_type_node, long_long_integer_type_node,
14512 NULL_TREE);
14513 tree v4sf_ftype_v4sf_v2df
14514 = build_function_type_list (V4SF_type_node,
14515 V4SF_type_node, V2DF_type_node, NULL_TREE);
14516 tree v2df_ftype_v2df_v4sf
14517 = build_function_type_list (V2DF_type_node,
14518 V2DF_type_node, V4SF_type_node, NULL_TREE);
14519 tree v2df_ftype_v2df_v2df_int
14520 = build_function_type_list (V2DF_type_node,
14521 V2DF_type_node, V2DF_type_node,
14522 integer_type_node,
14523 NULL_TREE);
14524 tree v2df_ftype_v2df_pcdouble
14525 = build_function_type_list (V2DF_type_node,
14526 V2DF_type_node, pcdouble_type_node, NULL_TREE);
14527 tree void_ftype_pdouble_v2df
14528 = build_function_type_list (void_type_node,
14529 pdouble_type_node, V2DF_type_node, NULL_TREE);
14530 tree void_ftype_pint_int
14531 = build_function_type_list (void_type_node,
14532 pint_type_node, integer_type_node, NULL_TREE);
14533 tree void_ftype_v16qi_v16qi_pchar
14534 = build_function_type_list (void_type_node,
14535 V16QI_type_node, V16QI_type_node,
14536 pchar_type_node, NULL_TREE);
14537 tree v2df_ftype_pcdouble
14538 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
14539 tree v2df_ftype_v2df_v2df
14540 = build_function_type_list (V2DF_type_node,
14541 V2DF_type_node, V2DF_type_node, NULL_TREE);
14542 tree v16qi_ftype_v16qi_v16qi
14543 = build_function_type_list (V16QI_type_node,
14544 V16QI_type_node, V16QI_type_node, NULL_TREE);
14545 tree v8hi_ftype_v8hi_v8hi
14546 = build_function_type_list (V8HI_type_node,
14547 V8HI_type_node, V8HI_type_node, NULL_TREE);
14548 tree v4si_ftype_v4si_v4si
14549 = build_function_type_list (V4SI_type_node,
14550 V4SI_type_node, V4SI_type_node, NULL_TREE);
14551 tree v2di_ftype_v2di_v2di
14552 = build_function_type_list (V2DI_type_node,
14553 V2DI_type_node, V2DI_type_node, NULL_TREE);
14554 tree v2di_ftype_v2df_v2df
14555 = build_function_type_list (V2DI_type_node,
14556 V2DF_type_node, V2DF_type_node, NULL_TREE);
14557 tree v2df_ftype_v2df
14558 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14559 tree v2di_ftype_v2di_int
14560 = build_function_type_list (V2DI_type_node,
14561 V2DI_type_node, integer_type_node, NULL_TREE);
14562 tree v4si_ftype_v4si_int
14563 = build_function_type_list (V4SI_type_node,
14564 V4SI_type_node, integer_type_node, NULL_TREE);
14565 tree v8hi_ftype_v8hi_int
14566 = build_function_type_list (V8HI_type_node,
14567 V8HI_type_node, integer_type_node, NULL_TREE);
14568 tree v8hi_ftype_v8hi_v2di
14569 = build_function_type_list (V8HI_type_node,
14570 V8HI_type_node, V2DI_type_node, NULL_TREE);
14571 tree v4si_ftype_v4si_v2di
14572 = build_function_type_list (V4SI_type_node,
14573 V4SI_type_node, V2DI_type_node, NULL_TREE);
14574 tree v4si_ftype_v8hi_v8hi
14575 = build_function_type_list (V4SI_type_node,
14576 V8HI_type_node, V8HI_type_node, NULL_TREE);
14577 tree di_ftype_v8qi_v8qi
14578 = build_function_type_list (long_long_unsigned_type_node,
14579 V8QI_type_node, V8QI_type_node, NULL_TREE);
14580 tree di_ftype_v2si_v2si
14581 = build_function_type_list (long_long_unsigned_type_node,
14582 V2SI_type_node, V2SI_type_node, NULL_TREE);
14583 tree v2di_ftype_v16qi_v16qi
14584 = build_function_type_list (V2DI_type_node,
14585 V16QI_type_node, V16QI_type_node, NULL_TREE);
14586 tree v2di_ftype_v4si_v4si
14587 = build_function_type_list (V2DI_type_node,
14588 V4SI_type_node, V4SI_type_node, NULL_TREE);
14589 tree int_ftype_v16qi
14590 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
14591 tree v16qi_ftype_pcchar
14592 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
14593 tree void_ftype_pchar_v16qi
14594 = build_function_type_list (void_type_node,
14595 pchar_type_node, V16QI_type_node, NULL_TREE);
14597 tree float80_type;
14598 tree float128_type;
14599 tree ftype;
14601 /* The __float80 type. */
14602 if (TYPE_MODE (long_double_type_node) == XFmode)
14603 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
14604 "__float80");
14605 else
14606 {
14607 /* The __float80 type. */
14608 float80_type = make_node (REAL_TYPE);
14609 TYPE_PRECISION (float80_type) = 80;
14610 layout_type (float80_type);
14611 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
14612 }
14614 float128_type = make_node (REAL_TYPE);
14615 TYPE_PRECISION (float128_type) = 128;
14616 layout_type (float128_type);
14617 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
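/* A minimal usage sketch, assuming a target where the registrations above
   take effect: user code can then declare

     __float80  ext;     80-bit extended-precision value
     __float128 quad;    128-bit value

   because register_builtin_type makes these type names visible to the
   front ends.  */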
14619 /* Add all builtins that are more or less simple operations on two
14620 operands. */
14621 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14622 {
14623 /* Use one of the operands; the target can have a different mode for
14624 mask-generating compares. */
14625 enum machine_mode mode;
14626 tree type;
14628 if (d->name == 0)
14629 continue;
14630 mode = insn_data[d->icode].operand[1].mode;
14632 switch (mode)
14633 {
14634 case V16QImode:
14635 type = v16qi_ftype_v16qi_v16qi;
14636 break;
14637 case V8HImode:
14638 type = v8hi_ftype_v8hi_v8hi;
14639 break;
14640 case V4SImode:
14641 type = v4si_ftype_v4si_v4si;
14642 break;
14643 case V2DImode:
14644 type = v2di_ftype_v2di_v2di;
14645 break;
14646 case V2DFmode:
14647 type = v2df_ftype_v2df_v2df;
14648 break;
14649 case V4SFmode:
14650 type = v4sf_ftype_v4sf_v4sf;
14651 break;
14652 case V8QImode:
14653 type = v8qi_ftype_v8qi_v8qi;
14654 break;
14655 case V4HImode:
14656 type = v4hi_ftype_v4hi_v4hi;
14657 break;
14658 case V2SImode:
14659 type = v2si_ftype_v2si_v2si;
14660 break;
14661 case DImode:
14662 type = di_ftype_di_di;
14663 break;
14665 default:
14666 gcc_unreachable ();
14667 }
14669 /* Override for comparisons. */
14670 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14671 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
14672 type = v4si_ftype_v4sf_v4sf;
14674 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
14675 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14676 type = v2di_ftype_v2df_v2df;
14678 def_builtin (d->mask, d->name, type, d->code);
14679 }
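/* Tracing one entry through the loop above, as a sketch: for the
   __builtin_ia32_addps entry, operand 1 of CODE_FOR_addv4sf3 has mode
   V4SFmode, so the builtin is given type v4sf_ftype_v4sf_v4sf; for the
   mask-generating compares such as __builtin_ia32_cmpltps, the override
   just above switches this to v4si_ftype_v4sf_v4sf, since those patterns
   produce an all-ones/all-zeros mask rather than float values.  */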
14681 /* Add the remaining MMX insns with somewhat more complicated types. */
14682 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
14683 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
14684 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
14685 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
14687 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
14688 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
14689 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
14691 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
14692 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
14694 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
14695 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
14697 /* comi/ucomi insns. */
14698 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14699 if (d->mask == MASK_SSE2)
14700 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
14701 else
14702 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
14704 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
14705 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
14706 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
14708 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
14709 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
14710 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
14711 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
14712 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
14713 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
14714 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
14715 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
14716 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
14717 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
14718 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
14720 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
14722 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
14723 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
14725 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
14726 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
14727 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
14728 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
14730 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
14731 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
14732 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
14733 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
14735 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
14737 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
14739 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
14740 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
14741 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
14742 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
14743 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
14744 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
14746 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
14748 /* Original 3DNow! */
14749 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
14750 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
14751 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
14752 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
14753 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
14754 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
14755 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
14756 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
14757 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
14758 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
14759 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
14760 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
14761 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
14762 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
14763 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
14764 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
14765 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
14766 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
14767 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
14768 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
14770 /* 3DNow! extension as used in the Athlon CPU. */
14771 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
14772 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
14773 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
14774 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
14775 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
14776 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
14778 /* SSE2 */
14779 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
14781 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
14782 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
14784 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
14785 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
14787 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
14788 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
14789 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
14790 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
14791 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
14793 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
14794 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
14795 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
14796 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
14798 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
14799 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
14801 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
14803 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
14804 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
14806 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
14807 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
14808 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
14809 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
14810 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
14812 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
14814 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
14815 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
14816 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
14817 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
14819 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
14820 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
14821 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
14823 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
14824 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
14825 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
14826 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
14828 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
14829 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
14830 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
14832 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
14833 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
14835 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
14836 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
14838 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
14839 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
14840 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
14842 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
14843 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
14844 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
14846 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
14847 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
14849 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
14850 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
14851 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
14852 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
14854 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
14855 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
14856 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
14857 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
14859 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
14860 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
14862 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
14864 /* Prescott New Instructions. */
14865 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
14866 void_ftype_pcvoid_unsigned_unsigned,
14867 IX86_BUILTIN_MONITOR);
14868 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
14869 void_ftype_unsigned_unsigned,
14870 IX86_BUILTIN_MWAIT);
14871 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
14872 v4sf_ftype_v4sf,
14873 IX86_BUILTIN_MOVSHDUP);
14874 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
14875 v4sf_ftype_v4sf,
14876 IX86_BUILTIN_MOVSLDUP);
14877 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
14878 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
14880 /* Access to the vec_init patterns. */
14881 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
14882 integer_type_node, NULL_TREE);
14883 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
14884 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
14886 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
14887 short_integer_type_node,
14888 short_integer_type_node,
14889 short_integer_type_node, NULL_TREE);
14890 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
14891 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
14893 ftype = build_function_type_list (V8QI_type_node, char_type_node,
14894 char_type_node, char_type_node,
14895 char_type_node, char_type_node,
14896 char_type_node, char_type_node,
14897 char_type_node, NULL_TREE);
14898 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
14899 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
14901 /* Access to the vec_extract patterns. */
14902 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14903 integer_type_node, NULL_TREE);
14904 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
14905 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
14907 ftype = build_function_type_list (long_long_integer_type_node,
14908 V2DI_type_node, integer_type_node,
14909 NULL_TREE);
14910 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
14911 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
14913 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14914 integer_type_node, NULL_TREE);
14915 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
14916 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
14918 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14919 integer_type_node, NULL_TREE);
14920 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
14921 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
14923 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14924 integer_type_node, NULL_TREE);
14925 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
14926 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
14928 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
14929 integer_type_node, NULL_TREE);
14930 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
14931 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
14933 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
14934 integer_type_node, NULL_TREE);
14935 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
14936 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
14938 /* Access to the vec_set patterns. */
14939 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14940 intHI_type_node,
14941 integer_type_node, NULL_TREE);
14942 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
14943 ftype, IX86_BUILTIN_VEC_SET_V8HI);
14945 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
14946 intHI_type_node,
14947 integer_type_node, NULL_TREE);
14948 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
14949 ftype, IX86_BUILTIN_VEC_SET_V4HI);
14952 /* Errors in the source file can cause expand_expr to return const0_rtx
14953 where we expect a vector. To avoid crashing, use one of the vector
14954 clear instructions. */
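/* For example, if expand_expr handed back const0_rtx for an argument
   that should have been V4SFmode, the call

     op0 = safe_vector_operand (op0, V4SFmode);

   replaces the (const_int 0) with CONST0_RTX (V4SFmode), so the insn
   predicates used below still see an operand of the expected vector
   mode.  (Illustrative reading of the helper; modes vary per builtin.)  */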
14955 static rtx
14956 safe_vector_operand (rtx x, enum machine_mode mode)
14958 if (x == const0_rtx)
14959 x = CONST0_RTX (mode);
14960 return x;
14963 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
14965 static rtx
14966 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
14968 rtx pat, xops[3];
14969 tree arg0 = TREE_VALUE (arglist);
14970 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14971 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14972 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14973 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14974 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14975 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
14977 if (VECTOR_MODE_P (mode0))
14978 op0 = safe_vector_operand (op0, mode0);
14979 if (VECTOR_MODE_P (mode1))
14980 op1 = safe_vector_operand (op1, mode1);
14982 if (optimize || !target
14983 || GET_MODE (target) != tmode
14984 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14985 target = gen_reg_rtx (tmode);
14987 if (GET_MODE (op1) == SImode && mode1 == TImode)
14989 rtx x = gen_reg_rtx (V4SImode);
14990 emit_insn (gen_sse2_loadd (x, op1));
14991 op1 = gen_lowpart (TImode, x);
14994 /* The insn must want input operands in the same modes as the
14995 result. */
14996 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
14997 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
14999 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
15000 op0 = copy_to_mode_reg (mode0, op0);
15001 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
15002 op1 = copy_to_mode_reg (mode1, op1);
15004 /* ??? Using ix86_fixup_binary_operands is problematic when
15005 we've got mismatched modes. Fake it. */
15007 xops[0] = target;
15008 xops[1] = op0;
15009 xops[2] = op1;
15011 if (tmode == mode0 && tmode == mode1)
15013 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
15014 op0 = xops[1];
15015 op1 = xops[2];
15017 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
15019 op0 = force_reg (mode0, op0);
15020 op1 = force_reg (mode1, op1);
15021 target = gen_reg_rtx (tmode);
15024 pat = GEN_FCN (icode) (target, op0, op1);
15025 if (! pat)
15026 return 0;
15027 emit_insn (pat);
15028 return target;
15031 /* Subroutine of ix86_expand_builtin to take care of stores. */
15033 static rtx
15034 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
15036 rtx pat;
15037 tree arg0 = TREE_VALUE (arglist);
15038 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15039 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15040 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15041 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
15042 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
15044 if (VECTOR_MODE_P (mode1))
15045 op1 = safe_vector_operand (op1, mode1);
15047 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15048 op1 = copy_to_mode_reg (mode1, op1);
15050 pat = GEN_FCN (icode) (op0, op1);
15051 if (pat)
15052 emit_insn (pat);
15053 return 0;
15056 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
15058 static rtx
15059 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
15060 rtx target, int do_load)
15062 rtx pat;
15063 tree arg0 = TREE_VALUE (arglist);
15064 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15065 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15066 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15068 if (optimize || !target
15069 || GET_MODE (target) != tmode
15070 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15071 target = gen_reg_rtx (tmode);
15072 if (do_load)
15073 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15074 else
15076 if (VECTOR_MODE_P (mode0))
15077 op0 = safe_vector_operand (op0, mode0);
15079 if ((optimize && !register_operand (op0, mode0))
15080 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15081 op0 = copy_to_mode_reg (mode0, op0);
15084 pat = GEN_FCN (icode) (target, op0);
15085 if (! pat)
15086 return 0;
15087 emit_insn (pat);
15088 return target;
15091 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
15092 sqrtss, rsqrtss, rcpss. */
15094 static rtx
15095 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
15097 rtx pat;
15098 tree arg0 = TREE_VALUE (arglist);
15099 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15100 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15101 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15103 if (optimize || !target
15104 || GET_MODE (target) != tmode
15105 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15106 target = gen_reg_rtx (tmode);
15108 if (VECTOR_MODE_P (mode0))
15109 op0 = safe_vector_operand (op0, mode0);
15111 if ((optimize && !register_operand (op0, mode0))
15112 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15113 op0 = copy_to_mode_reg (mode0, op0);
15115 op1 = op0;
15116 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
15117 op1 = copy_to_mode_reg (mode0, op1);
15119 pat = GEN_FCN (icode) (target, op0, op1);
15120 if (! pat)
15121 return 0;
15122 emit_insn (pat);
15123 return target;
15126 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
15128 static rtx
15129 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
15130 rtx target)
15132 rtx pat;
15133 tree arg0 = TREE_VALUE (arglist);
15134 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15135 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15136 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15137 rtx op2;
15138 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
15139 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
15140 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
15141 enum rtx_code comparison = d->comparison;
15143 if (VECTOR_MODE_P (mode0))
15144 op0 = safe_vector_operand (op0, mode0);
15145 if (VECTOR_MODE_P (mode1))
15146 op1 = safe_vector_operand (op1, mode1);
15148 /* Swap operands if we have a comparison that isn't available in
15149 hardware. */
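/* As a sketch of what this covers: the cmpgt-style builtins are listed
   in bdesc_2arg with BUILTIN_DESC_SWAP_OPERANDS and an LT comparison
   code, so a "greater than" compare is emitted as the available
   "less than" instruction with its operands exchanged.  */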
15150 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15152 rtx tmp = gen_reg_rtx (mode1);
15153 emit_move_insn (tmp, op1);
15154 op1 = op0;
15155 op0 = tmp;
15158 if (optimize || !target
15159 || GET_MODE (target) != tmode
15160 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
15161 target = gen_reg_rtx (tmode);
15163 if ((optimize && !register_operand (op0, mode0))
15164 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
15165 op0 = copy_to_mode_reg (mode0, op0);
15166 if ((optimize && !register_operand (op1, mode1))
15167 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
15168 op1 = copy_to_mode_reg (mode1, op1);
15170 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15171 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
15172 if (! pat)
15173 return 0;
15174 emit_insn (pat);
15175 return target;
15178 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
15180 static rtx
15181 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
15182 rtx target)
15184 rtx pat;
15185 tree arg0 = TREE_VALUE (arglist);
15186 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15187 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15188 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15189 rtx op2;
15190 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
15191 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
15192 enum rtx_code comparison = d->comparison;
15194 if (VECTOR_MODE_P (mode0))
15195 op0 = safe_vector_operand (op0, mode0);
15196 if (VECTOR_MODE_P (mode1))
15197 op1 = safe_vector_operand (op1, mode1);
15199 /* Swap operands if we have a comparison that isn't available in
15200 hardware. */
15201 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15203 rtx tmp = op1;
15204 op1 = op0;
15205 op0 = tmp;
15208 target = gen_reg_rtx (SImode);
15209 emit_move_insn (target, const0_rtx);
15210 target = gen_rtx_SUBREG (QImode, target, 0);
15212 if ((optimize && !register_operand (op0, mode0))
15213 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15214 op0 = copy_to_mode_reg (mode0, op0);
15215 if ((optimize && !register_operand (op1, mode1))
15216 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15217 op1 = copy_to_mode_reg (mode1, op1);
15219 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15220 pat = GEN_FCN (d->icode) (op0, op1);
15221 if (! pat)
15222 return 0;
15223 emit_insn (pat);
15224 emit_insn (gen_rtx_SET (VOIDmode,
15225 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
15226 gen_rtx_fmt_ee (comparison, QImode,
15227 SET_DEST (pat),
15228 const0_rtx)));
15230 return SUBREG_REG (target);
15233 /* Return the integer constant in ARG. Constrain it to be in the range
15234 of the subparts of VEC_TYPE; issue an error if not. */
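/* E.g. for a V4SFmode vector TYPE_VECTOR_SUBPARTS is 4, so only the
   selectors 0..3 are accepted; a call such as
     __builtin_ia32_vec_ext_v4sf (x, 7)
   (deliberately out of range) triggers the error below and quietly
   falls back to element 0.  */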
15236 static int
15237 get_element_number (tree vec_type, tree arg)
15239 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15241 if (!host_integerp (arg, 1)
15242 || (elt = tree_low_cst (arg, 1), elt > max))
15244 error ("selector must be an integer constant in the range 0..%wi", max);
15245 return 0;
15248 return elt;
15251 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15252 ix86_expand_vector_init. We DO have language-level syntax for this, in
15253 the form of (type){ init-list }. Except that since we can't place emms
15254 instructions from inside the compiler, we can't allow the use of MMX
15255 registers unless the user explicitly asks for it. So we do *not* define
15256 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
15257 we have builtins invoked by mmintrin.h that give us license to emit
15258 these sorts of instructions. */
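/* A sketch of the intended use: mmintrin.h of this vintage wraps the
   builtin roughly as

     extern __inline __m64
     _mm_set_pi32 (int __i1, int __i0)
     {
       return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
     }

   so any MMX register use is something the user asked for by including
   the header, not something the compiler invented on its own.  */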
15260 static rtx
15261 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
15263 enum machine_mode tmode = TYPE_MODE (type);
15264 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
15265 int i, n_elt = GET_MODE_NUNITS (tmode);
15266 rtvec v = rtvec_alloc (n_elt);
15268 gcc_assert (VECTOR_MODE_P (tmode));
15270 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
15272 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15273 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15276 gcc_assert (arglist == NULL);
15278 if (!target || !register_operand (target, tmode))
15279 target = gen_reg_rtx (tmode);
15281 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
15282 return target;
15285 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15286 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
15287 had a language-level syntax for referencing vector elements. */
15289 static rtx
15290 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
15292 enum machine_mode tmode, mode0;
15293 tree arg0, arg1;
15294 int elt;
15295 rtx op0;
15297 arg0 = TREE_VALUE (arglist);
15298 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15300 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15301 elt = get_element_number (TREE_TYPE (arg0), arg1);
15303 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15304 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15305 gcc_assert (VECTOR_MODE_P (mode0));
15307 op0 = force_reg (mode0, op0);
15309 if (optimize || !target || !register_operand (target, tmode))
15310 target = gen_reg_rtx (tmode);
15312 ix86_expand_vector_extract (true, target, op0, elt);
15314 return target;
15317 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15318 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
15319 a language-level syntax for referencing vector elements. */
15321 static rtx
15322 ix86_expand_vec_set_builtin (tree arglist)
15324 enum machine_mode tmode, mode1;
15325 tree arg0, arg1, arg2;
15326 int elt;
15327 rtx op0, op1;
15329 arg0 = TREE_VALUE (arglist);
15330 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15331 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15333 tmode = TYPE_MODE (TREE_TYPE (arg0));
15334 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15335 gcc_assert (VECTOR_MODE_P (tmode));
15337 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
15338 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
15339 elt = get_element_number (TREE_TYPE (arg0), arg2);
15341 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15342 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15344 op0 = force_reg (tmode, op0);
15345 op1 = force_reg (mode1, op1);
15347 ix86_expand_vector_set (true, op0, op1, elt);
15349 return op0;
15352 /* Expand an expression EXP that calls a built-in function,
15353 with result going to TARGET if that's convenient
15354 (and in mode MODE if that's convenient).
15355 SUBTARGET may be used as the target for computing one of EXP's operands.
15356 IGNORE is nonzero if the value is to be ignored. */
15358 static rtx
15359 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15360 enum machine_mode mode ATTRIBUTE_UNUSED,
15361 int ignore ATTRIBUTE_UNUSED)
15363 const struct builtin_description *d;
15364 size_t i;
15365 enum insn_code icode;
15366 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
15367 tree arglist = TREE_OPERAND (exp, 1);
15368 tree arg0, arg1, arg2;
15369 rtx op0, op1, op2, pat;
15370 enum machine_mode tmode, mode0, mode1, mode2;
15371 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15373 switch (fcode)
15375 case IX86_BUILTIN_EMMS:
15376 emit_insn (gen_mmx_emms ());
15377 return 0;
15379 case IX86_BUILTIN_SFENCE:
15380 emit_insn (gen_sse_sfence ());
15381 return 0;
15383 case IX86_BUILTIN_MASKMOVQ:
15384 case IX86_BUILTIN_MASKMOVDQU:
15385 icode = (fcode == IX86_BUILTIN_MASKMOVQ
15386 ? CODE_FOR_mmx_maskmovq
15387 : CODE_FOR_sse2_maskmovdqu);
15388 /* Note the arg order is different from the operand order. */
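/* That is, the builtin is called as (data, mask, pointer) while the
   insn pattern wants (mem destination, data, mask), so op0 below is
   built from the last builtin argument.  */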
15389 arg1 = TREE_VALUE (arglist);
15390 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
15391 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15392 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15393 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15394 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15395 mode0 = insn_data[icode].operand[0].mode;
15396 mode1 = insn_data[icode].operand[1].mode;
15397 mode2 = insn_data[icode].operand[2].mode;
15399 op0 = force_reg (Pmode, op0);
15400 op0 = gen_rtx_MEM (mode1, op0);
15402 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15403 op0 = copy_to_mode_reg (mode0, op0);
15404 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15405 op1 = copy_to_mode_reg (mode1, op1);
15406 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
15407 op2 = copy_to_mode_reg (mode2, op2);
15408 pat = GEN_FCN (icode) (op0, op1, op2);
15409 if (! pat)
15410 return 0;
15411 emit_insn (pat);
15412 return 0;
15414 case IX86_BUILTIN_SQRTSS:
15415 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
15416 case IX86_BUILTIN_RSQRTSS:
15417 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
15418 case IX86_BUILTIN_RCPSS:
15419 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
15421 case IX86_BUILTIN_LOADUPS:
15422 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
15424 case IX86_BUILTIN_STOREUPS:
15425 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
15427 case IX86_BUILTIN_LOADHPS:
15428 case IX86_BUILTIN_LOADLPS:
15429 case IX86_BUILTIN_LOADHPD:
15430 case IX86_BUILTIN_LOADLPD:
15431 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
15432 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
15433 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
15434 : CODE_FOR_sse2_loadlpd);
15435 arg0 = TREE_VALUE (arglist);
15436 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15437 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15438 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15439 tmode = insn_data[icode].operand[0].mode;
15440 mode0 = insn_data[icode].operand[1].mode;
15441 mode1 = insn_data[icode].operand[2].mode;
15443 op0 = force_reg (mode0, op0);
15444 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
15445 if (optimize || target == 0
15446 || GET_MODE (target) != tmode
15447 || !register_operand (target, tmode))
15448 target = gen_reg_rtx (tmode);
15449 pat = GEN_FCN (icode) (target, op0, op1);
15450 if (! pat)
15451 return 0;
15452 emit_insn (pat);
15453 return target;
15455 case IX86_BUILTIN_STOREHPS:
15456 case IX86_BUILTIN_STORELPS:
15457 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
15458 : CODE_FOR_sse_storelps);
15459 arg0 = TREE_VALUE (arglist);
15460 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15461 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15462 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15463 mode0 = insn_data[icode].operand[0].mode;
15464 mode1 = insn_data[icode].operand[1].mode;
15466 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15467 op1 = force_reg (mode1, op1);
15469 pat = GEN_FCN (icode) (op0, op1);
15470 if (! pat)
15471 return 0;
15472 emit_insn (pat);
15473 return const0_rtx;
15475 case IX86_BUILTIN_MOVNTPS:
15476 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
15477 case IX86_BUILTIN_MOVNTQ:
15478 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
15480 case IX86_BUILTIN_LDMXCSR:
15481 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15482 target = assign_386_stack_local (SImode, SLOT_TEMP);
15483 emit_move_insn (target, op0);
15484 emit_insn (gen_sse_ldmxcsr (target));
15485 return 0;
15487 case IX86_BUILTIN_STMXCSR:
15488 target = assign_386_stack_local (SImode, SLOT_TEMP);
15489 emit_insn (gen_sse_stmxcsr (target));
15490 return copy_to_mode_reg (SImode, target);
15492 case IX86_BUILTIN_SHUFPS:
15493 case IX86_BUILTIN_SHUFPD:
15494 icode = (fcode == IX86_BUILTIN_SHUFPS
15495 ? CODE_FOR_sse_shufps
15496 : CODE_FOR_sse2_shufpd);
15497 arg0 = TREE_VALUE (arglist);
15498 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15499 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15500 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15501 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15502 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15503 tmode = insn_data[icode].operand[0].mode;
15504 mode0 = insn_data[icode].operand[1].mode;
15505 mode1 = insn_data[icode].operand[2].mode;
15506 mode2 = insn_data[icode].operand[3].mode;
15508 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15509 op0 = copy_to_mode_reg (mode0, op0);
15510 if ((optimize && !register_operand (op1, mode1))
15511 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
15512 op1 = copy_to_mode_reg (mode1, op1);
15513 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15515 /* @@@ better error message */
15516 error ("mask must be an immediate");
15517 return gen_reg_rtx (tmode);
15519 if (optimize || target == 0
15520 || GET_MODE (target) != tmode
15521 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15522 target = gen_reg_rtx (tmode);
15523 pat = GEN_FCN (icode) (target, op0, op1, op2);
15524 if (! pat)
15525 return 0;
15526 emit_insn (pat);
15527 return target;
15529 case IX86_BUILTIN_PSHUFW:
15530 case IX86_BUILTIN_PSHUFD:
15531 case IX86_BUILTIN_PSHUFHW:
15532 case IX86_BUILTIN_PSHUFLW:
15533 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
15534 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
15535 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
15536 : CODE_FOR_mmx_pshufw);
15537 arg0 = TREE_VALUE (arglist);
15538 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15539 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15540 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15541 tmode = insn_data[icode].operand[0].mode;
15542 mode1 = insn_data[icode].operand[1].mode;
15543 mode2 = insn_data[icode].operand[2].mode;
15545 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15546 op0 = copy_to_mode_reg (mode1, op0);
15547 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15549 /* @@@ better error message */
15550 error ("mask must be an immediate");
15551 return const0_rtx;
15553 if (target == 0
15554 || GET_MODE (target) != tmode
15555 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15556 target = gen_reg_rtx (tmode);
15557 pat = GEN_FCN (icode) (target, op0, op1);
15558 if (! pat)
15559 return 0;
15560 emit_insn (pat);
15561 return target;
15563 case IX86_BUILTIN_PSLLDQI128:
15564 case IX86_BUILTIN_PSRLDQI128:
15565 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
15566 : CODE_FOR_sse2_lshrti3);
15567 arg0 = TREE_VALUE (arglist);
15568 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15569 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15570 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15571 tmode = insn_data[icode].operand[0].mode;
15572 mode1 = insn_data[icode].operand[1].mode;
15573 mode2 = insn_data[icode].operand[2].mode;
15575 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15577 op0 = copy_to_reg (op0);
15578 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
15580 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15582 error ("shift must be an immediate");
15583 return const0_rtx;
15585 target = gen_reg_rtx (V2DImode);
15586 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
15587 if (! pat)
15588 return 0;
15589 emit_insn (pat);
15590 return target;
15592 case IX86_BUILTIN_FEMMS:
15593 emit_insn (gen_mmx_femms ());
15594 return NULL_RTX;
15596 case IX86_BUILTIN_PAVGUSB:
15597 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15599 case IX86_BUILTIN_PF2ID:
15600 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15602 case IX86_BUILTIN_PFACC:
15603 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15605 case IX86_BUILTIN_PFADD:
15606 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15608 case IX86_BUILTIN_PFCMPEQ:
15609 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15611 case IX86_BUILTIN_PFCMPGE:
15612 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15614 case IX86_BUILTIN_PFCMPGT:
15615 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15617 case IX86_BUILTIN_PFMAX:
15618 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15620 case IX86_BUILTIN_PFMIN:
15621 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15623 case IX86_BUILTIN_PFMUL:
15624 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15626 case IX86_BUILTIN_PFRCP:
15627 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15629 case IX86_BUILTIN_PFRCPIT1:
15630 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15632 case IX86_BUILTIN_PFRCPIT2:
15633 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15635 case IX86_BUILTIN_PFRSQIT1:
15636 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15638 case IX86_BUILTIN_PFRSQRT:
15639 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15641 case IX86_BUILTIN_PFSUB:
15642 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15644 case IX86_BUILTIN_PFSUBR:
15645 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15647 case IX86_BUILTIN_PI2FD:
15648 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15650 case IX86_BUILTIN_PMULHRW:
15651 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15653 case IX86_BUILTIN_PF2IW:
15654 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15656 case IX86_BUILTIN_PFNACC:
15657 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15659 case IX86_BUILTIN_PFPNACC:
15660 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15662 case IX86_BUILTIN_PI2FW:
15663 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15665 case IX86_BUILTIN_PSWAPDSI:
15666 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15668 case IX86_BUILTIN_PSWAPDSF:
15669 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15671 case IX86_BUILTIN_SQRTSD:
15672 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15673 case IX86_BUILTIN_LOADUPD:
15674 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15675 case IX86_BUILTIN_STOREUPD:
15676 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15678 case IX86_BUILTIN_MFENCE:
15679 emit_insn (gen_sse2_mfence ());
15680 return 0;
15681 case IX86_BUILTIN_LFENCE:
15682 emit_insn (gen_sse2_lfence ());
15683 return 0;
15685 case IX86_BUILTIN_CLFLUSH:
15686 arg0 = TREE_VALUE (arglist);
15687 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15688 icode = CODE_FOR_sse2_clflush;
15689 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15690 op0 = copy_to_mode_reg (Pmode, op0);
15692 emit_insn (gen_sse2_clflush (op0));
15693 return 0;
15695 case IX86_BUILTIN_MOVNTPD:
15696 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15697 case IX86_BUILTIN_MOVNTDQ:
15698 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15699 case IX86_BUILTIN_MOVNTI:
15700 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15702 case IX86_BUILTIN_LOADDQU:
15703 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15704 case IX86_BUILTIN_STOREDQU:
15705 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15707 case IX86_BUILTIN_MONITOR:
15708 arg0 = TREE_VALUE (arglist);
15709 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15710 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15711 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15712 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15713 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15714 if (!REG_P (op0))
15715 op0 = copy_to_mode_reg (SImode, op0);
15716 if (!REG_P (op1))
15717 op1 = copy_to_mode_reg (SImode, op1);
15718 if (!REG_P (op2))
15719 op2 = copy_to_mode_reg (SImode, op2);
15720 emit_insn (gen_sse3_monitor (op0, op1, op2));
15721 return 0;
15723 case IX86_BUILTIN_MWAIT:
15724 arg0 = TREE_VALUE (arglist);
15725 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15726 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15727 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15728 if (!REG_P (op0))
15729 op0 = copy_to_mode_reg (SImode, op0);
15730 if (!REG_P (op1))
15731 op1 = copy_to_mode_reg (SImode, op1);
15732 emit_insn (gen_sse3_mwait (op0, op1));
15733 return 0;
15735 case IX86_BUILTIN_LDDQU:
15736 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15737 target, 1);
15739 case IX86_BUILTIN_VEC_INIT_V2SI:
15740 case IX86_BUILTIN_VEC_INIT_V4HI:
15741 case IX86_BUILTIN_VEC_INIT_V8QI:
15742 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15744 case IX86_BUILTIN_VEC_EXT_V2DF:
15745 case IX86_BUILTIN_VEC_EXT_V2DI:
15746 case IX86_BUILTIN_VEC_EXT_V4SF:
15747 case IX86_BUILTIN_VEC_EXT_V4SI:
15748 case IX86_BUILTIN_VEC_EXT_V8HI:
15749 case IX86_BUILTIN_VEC_EXT_V2SI:
15750 case IX86_BUILTIN_VEC_EXT_V4HI:
15751 return ix86_expand_vec_ext_builtin (arglist, target);
15753 case IX86_BUILTIN_VEC_SET_V8HI:
15754 case IX86_BUILTIN_VEC_SET_V4HI:
15755 return ix86_expand_vec_set_builtin (arglist);
15757 default:
15758 break;
15761 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15762 if (d->code == fcode)
15764 /* Compares are treated specially. */
15765 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15766 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15767 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15768 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15769 return ix86_expand_sse_compare (d, arglist, target);
15771 return ix86_expand_binop_builtin (d->icode, arglist, target);
15774 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15775 if (d->code == fcode)
15776 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15778 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15779 if (d->code == fcode)
15780 return ix86_expand_sse_comi (d, arglist, target);
15782 gcc_unreachable ();
15785 /* Store OPERAND to the memory after reload is completed. This means
15786 that we can't easily use assign_stack_local. */
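/* A sketch of the cases handled below: with a red zone the operand is
   simply stored at sp - RED_ZONE_SIZE; on 64-bit without a red zone it
   is pushed (HImode/SImode widened to DImode first); on 32-bit it is
   pushed as one or two SImode pushes.  The returned MEM addresses the
   slot just written.  */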
15788 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15790 rtx result;
15792 gcc_assert (reload_completed);
15793 if (TARGET_RED_ZONE)
15795 result = gen_rtx_MEM (mode,
15796 gen_rtx_PLUS (Pmode,
15797 stack_pointer_rtx,
15798 GEN_INT (-RED_ZONE_SIZE)));
15799 emit_move_insn (result, operand);
15801 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15803 switch (mode)
15805 case HImode:
15806 case SImode:
15807 operand = gen_lowpart (DImode, operand);
15808 /* FALLTHRU */
15809 case DImode:
15810 emit_insn (
15811 gen_rtx_SET (VOIDmode,
15812 gen_rtx_MEM (DImode,
15813 gen_rtx_PRE_DEC (DImode,
15814 stack_pointer_rtx)),
15815 operand));
15816 break;
15817 default:
15818 gcc_unreachable ();
15820 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15822 else
15824 switch (mode)
15826 case DImode:
15828 rtx operands[2];
15829 split_di (&operand, 1, operands, operands + 1);
15830 emit_insn (
15831 gen_rtx_SET (VOIDmode,
15832 gen_rtx_MEM (SImode,
15833 gen_rtx_PRE_DEC (Pmode,
15834 stack_pointer_rtx)),
15835 operands[1]));
15836 emit_insn (
15837 gen_rtx_SET (VOIDmode,
15838 gen_rtx_MEM (SImode,
15839 gen_rtx_PRE_DEC (Pmode,
15840 stack_pointer_rtx)),
15841 operands[0]));
15843 break;
15844 case HImode:
15845 /* Store HImodes as SImodes. */
15846 operand = gen_lowpart (SImode, operand);
15847 /* FALLTHRU */
15848 case SImode:
15849 emit_insn (
15850 gen_rtx_SET (VOIDmode,
15851 gen_rtx_MEM (GET_MODE (operand),
15852 gen_rtx_PRE_DEC (SImode,
15853 stack_pointer_rtx)),
15854 operand));
15855 break;
15856 default:
15857 gcc_unreachable ();
15859 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15861 return result;
15864 /* Free operand from the memory. */
15865 void
15866 ix86_free_from_memory (enum machine_mode mode)
15868 if (!TARGET_RED_ZONE)
15870 int size;
15872 if (mode == DImode || TARGET_64BIT)
15873 size = 8;
15874 else
15875 size = 4;
15876 /* Use LEA to deallocate stack space. In peephole2 it will be converted
15877 to a pop or add instruction if registers are available. */
15878 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15879 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
15880 GEN_INT (size))));
15884 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
15885 QImode must go into class Q_REGS.
15886 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
15887 movdf to do mem-to-mem moves through integer regs. */
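/* A concrete reading of the checks below: with 387 math, an fp constant
   that standard_80387_constant_p recognizes (1.0, pi, and friends;
   zero was handled above) keeps a float class, merely narrowed away
   from the SSE-capable variants; any other fp constant yields NO_REGS
   and is reloaded from the constant pool instead of a register.  */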
15888 enum reg_class
15889 ix86_preferred_reload_class (rtx x, enum reg_class class)
15891 /* We're only allowed to return a subclass of CLASS. Many of the
15892 following checks fail for NO_REGS, so eliminate that early. */
15893 if (class == NO_REGS)
15894 return NO_REGS;
15896 /* All classes can load zeros. */
15897 if (x == CONST0_RTX (GET_MODE (x)))
15898 return class;
15900 /* Floating-point constants need more complex checks. */
15901 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
15903 /* General regs can load everything. */
15904 if (reg_class_subset_p (class, GENERAL_REGS))
15905 return class;
15907 /* Floats can load 0 and 1 plus some others. Note that we eliminated
15908 zero above. We only want to wind up preferring 80387 registers if
15909 we plan on doing computation with them. */
15910 if (TARGET_80387
15911 && (TARGET_MIX_SSE_I387
15912 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
15913 && standard_80387_constant_p (x))
15915 /* Limit class to non-sse. */
15916 if (class == FLOAT_SSE_REGS)
15917 return FLOAT_REGS;
15918 if (class == FP_TOP_SSE_REGS)
15919 return FP_TOP_REG;
15920 if (class == FP_SECOND_SSE_REGS)
15921 return FP_SECOND_REG;
15922 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
15923 return class;
15926 return NO_REGS;
15928 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
15929 return NO_REGS;
15930 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
15931 return NO_REGS;
15933 /* Generally when we see PLUS here, it's the function invariant
15934 (plus soft-fp const_int), which can only be computed into general
15935 regs. */
15936 if (GET_CODE (x) == PLUS)
15937 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
15939 /* QImode constants are easy to load, but non-constant QImode data
15940 must go into Q_REGS. */
15941 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
15943 if (reg_class_subset_p (class, Q_REGS))
15944 return class;
15945 if (reg_class_subset_p (Q_REGS, class))
15946 return Q_REGS;
15947 return NO_REGS;
15950 return class;
15953 /* If we are copying between general and FP registers, we need a memory
15954 location. The same is true for SSE and MMX registers.
15956 The macro can't work reliably when one of the CLASSES is a class containing
15957 registers from multiple units (SSE, MMX, integer). We avoid this by never
15958 combining those units in a single alternative in the machine description.
15959 Ensure that this constraint holds to avoid unexpected surprises.
15961 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
15962 enforce these sanity checks. */
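/* A concrete reading of the rules below: an SImode copy between
   SSE_REGS and GENERAL_REGS needs no secondary memory on an SSE2
   target that allows inter-unit moves (a plain movd), while a DFmode
   copy between the same classes always goes through a stack slot,
   because the move patterns do not offer that alternative.  */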
15965 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
15966 enum machine_mode mode, int strict)
15968 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
15969 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
15970 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
15971 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
15972 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
15973 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
15975 gcc_assert (!strict);
15976 return true;
15979 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
15980 return true;
15982 /* ??? This is a lie. We do have moves between mmx/general, and for
15983 mmx/sse2. But by saying we need secondary memory we discourage the
15984 register allocator from using the mmx registers unless needed. */
15985 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
15986 return true;
15988 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15990 /* SSE1 doesn't have any direct moves from other classes. */
15991 if (!TARGET_SSE2)
15992 return true;
15994 /* If the target says that inter-unit moves are more expensive
15995 than moving through memory, then don't generate them. */
15996 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
15997 return true;
15999 /* Between SSE and general, we have moves no larger than word size. */
16000 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
16001 return true;
16003 /* ??? For the cost of one register reformat penalty, we could use
16004 the same instructions to move SFmode and DFmode data, but the
16005 relevant move patterns don't support those alternatives. */
16006 if (mode == SFmode || mode == DFmode)
16007 return true;
16010 return false;
16013 /* Return true if the registers in CLASS cannot represent the change from
16014 modes FROM to TO. */
16016 bool
16017 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
16018 enum reg_class class)
16020 if (from == to)
16021 return false;
16023 /* x87 registers can't do subreg at all, as all values are reformatted
16024 to extended precision. */
16025 if (MAYBE_FLOAT_CLASS_P (class))
16026 return true;
16028 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
16030 /* Vector registers do not support QI or HImode loads. If we don't
16031 disallow a change to these modes, reload will assume it's ok to
16032 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
16033 the vec_dupv4hi pattern. */
16034 if (GET_MODE_SIZE (from) < 4)
16035 return true;
16037 /* Vector registers do not support subreg with nonzero offsets, which
16038 are otherwise valid for integer registers. Since we can't see
16039 whether we have a nonzero offset from here, prohibit all
16040 nonparadoxical subregs changing size. */
16041 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
16042 return true;
16045 return false;
16048 /* Return the cost of moving data from a register in class CLASS1 to
16049 one in class CLASS2.
16051 It is not required that the cost always equal 2 when FROM is the same as TO;
16052 on some machines it is expensive to move between registers if they are not
16053 general registers. */
16056 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
16057 enum reg_class class2)
16059 /* In case we require secondary memory, compute the cost of the store followed
16060 by the load. In order to avoid bad register allocation choices, we need
16061 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
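/* E.g. a 32-bit DImode copy from GENERAL_REGS to MMX_REGS works out
   roughly as
     1 + MAX (int load, int store) + MAX (mmx load, mmx store) + 20,
   the final 20 being the penalty below for two 32-bit stores feeding a
   single 64-bit load (cost-table names used loosely).  */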
16063 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
16065 int cost = 1;
16067 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
16068 MEMORY_MOVE_COST (mode, class1, 1));
16069 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
16070 MEMORY_MOVE_COST (mode, class2, 1));
16072 /* When copying from a general purpose register we may emit multiple
16073 stores followed by a single load, causing a memory size mismatch stall.
16074 Count this as an arbitrarily high cost of 20.
16075 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
16076 cost += 20;
16078 /* In the case of FP/MMX moves, the registers actually overlap, and we
16079 have to switch modes in order to treat them differently. */
16080 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
16081 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
16082 cost += 20;
16084 return cost;
16087 /* Moves between SSE/MMX and integer unit are expensive. */
16088 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
16089 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
16090 return ix86_cost->mmxsse_to_integer;
16091 if (MAYBE_FLOAT_CLASS_P (class1))
16092 return ix86_cost->fp_move;
16093 if (MAYBE_SSE_CLASS_P (class1))
16094 return ix86_cost->sse_move;
16095 if (MAYBE_MMX_CLASS_P (class1))
16096 return ix86_cost->mmx_move;
16097 return 2;
16100 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
16102 bool
16103 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
16105 /* The flags register can hold only CCmode values, and nothing else can hold them. */
16106 if (CC_REGNO_P (regno))
16107 return GET_MODE_CLASS (mode) == MODE_CC;
16108 if (GET_MODE_CLASS (mode) == MODE_CC
16109 || GET_MODE_CLASS (mode) == MODE_RANDOM
16110 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
16111 return 0;
16112 if (FP_REGNO_P (regno))
16113 return VALID_FP_MODE_P (mode);
16114 if (SSE_REGNO_P (regno))
16116 /* We implement the move patterns for all vector modes into and
16117 out of SSE registers, even when no operation instructions
16118 are available. */
16119 return (VALID_SSE_REG_MODE (mode)
16120 || VALID_SSE2_REG_MODE (mode)
16121 || VALID_MMX_REG_MODE (mode)
16122 || VALID_MMX_REG_MODE_3DNOW (mode));
16124 if (MMX_REGNO_P (regno))
16126 /* We implement the move patterns for 3DNOW modes even in MMX mode,
16127 so if the register is available at all, then we can move data of
16128 the given mode into or out of it. */
16129 return (VALID_MMX_REG_MODE (mode)
16130 || VALID_MMX_REG_MODE_3DNOW (mode));
16133 if (mode == QImode)
16135 /* Take care for QImode values - they can be in non-QI regs,
16136 but then they do cause partial register stalls. */
16137 if (regno < 4 || TARGET_64BIT)
16138 return 1;
16139 if (!TARGET_PARTIAL_REG_STALL)
16140 return 1;
16141 return reload_in_progress || reload_completed;
16143 /* We handle both integer and floats in the general purpose registers. */
16144 else if (VALID_INT_MODE_P (mode))
16145 return 1;
16146 else if (VALID_FP_MODE_P (mode))
16147 return 1;
16148 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
16149 on to use that value in smaller contexts, this can easily force a
16150 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
16151 supporting DImode, allow it. */
16152 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
16153 return 1;
16155 return 0;
16158 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
16159 tieable integer mode. */
16161 static bool
16162 ix86_tieable_integer_mode_p (enum machine_mode mode)
16164 switch (mode)
16166 case HImode:
16167 case SImode:
16168 return true;
16170 case QImode:
16171 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
16173 case DImode:
16174 return TARGET_64BIT;
16176 default:
16177 return false;
16181 /* Return true if MODE1 is accessible in a register that can hold MODE2
16182 without copying. That is, all register classes that can hold MODE2
16183 can also hold MODE1. */
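/* Concretely: SImode and HImode always tie; SFmode ties with DFmode
   because anything that can hold a DFmode value (fp stack, general or
   SSE regs) can hold an SFmode one; and V4SFmode ties with V2DImode
   because both are acceptable only to the SSE registers.  */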
16185 bool
16186 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
16188 if (mode1 == mode2)
16189 return true;
16191 if (ix86_tieable_integer_mode_p (mode1)
16192 && ix86_tieable_integer_mode_p (mode2))
16193 return true;
16195 /* MODE2 being XFmode implies fp stack or general regs, which means we
16196 can tie any smaller floating point modes to it. Note that we do not
16197 tie this with TFmode. */
16198 if (mode2 == XFmode)
16199 return mode1 == SFmode || mode1 == DFmode;
16201 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
16202 that we can tie it with SFmode. */
16203 if (mode2 == DFmode)
16204 return mode1 == SFmode;
16206 /* If MODE2 is only appropriate for an SSE register, then tie with
16207 any other mode acceptable to SSE registers. */
16208 if (GET_MODE_SIZE (mode2) >= 8
16209 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
16210 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
16212 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
16213 with any other mode acceptable to MMX registers. */
16214 if (GET_MODE_SIZE (mode2) == 8
16215 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
16216 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
16218 return false;
16221 /* Return the cost of moving data of mode M between a
16222 register and memory. A value of 2 is the default; this cost is
16223 relative to those in `REGISTER_MOVE_COST'.
16225 If moving between registers and memory is more expensive than
16226 between two registers, you should define this macro to express the
16227 relative cost.
16229 Also model the increased cost of moving QImode registers in
16230 non-Q_REGS classes.  */
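/* For instance, storing a QImode value from a register outside Q_REGS
   is charged int_store[0] + 4 below, against plain int_store[0] from a
   Q class; that difference is the extra cost referred to above.  */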
16233 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
16235 if (FLOAT_CLASS_P (class))
16237 int index;
16238 switch (mode)
16240 case SFmode:
16241 index = 0;
16242 break;
16243 case DFmode:
16244 index = 1;
16245 break;
16246 case XFmode:
16247 index = 2;
16248 break;
16249 default:
16250 return 100;
16252 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
16254 if (SSE_CLASS_P (class))
16256 int index;
16257 switch (GET_MODE_SIZE (mode))
16259 case 4:
16260 index = 0;
16261 break;
16262 case 8:
16263 index = 1;
16264 break;
16265 case 16:
16266 index = 2;
16267 break;
16268 default:
16269 return 100;
16271 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
16273 if (MMX_CLASS_P (class))
16275 int index;
16276 switch (GET_MODE_SIZE (mode))
16278 case 4:
16279 index = 0;
16280 break;
16281 case 8:
16282 index = 1;
16283 break;
16284 default:
16285 return 100;
16287 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
16289 switch (GET_MODE_SIZE (mode))
16291 case 1:
16292 if (in)
16293 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
16294 : ix86_cost->movzbl_load);
16295 else
16296 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
16297 : ix86_cost->int_store[0] + 4);
16298 break;
16299 case 2:
16300 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
16301 default:
16302 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
16303 if (mode == TFmode)
16304 mode = XFmode;
16305 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
16306 * (((int) GET_MODE_SIZE (mode)
16307 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
16311 /* Compute a (partial) cost for rtx X. Return true if the complete
16312 cost has been computed, and false if subexpressions should be
16313 scanned. In either case, *TOTAL contains the cost result. */
16315 static bool
16316 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
16318 enum machine_mode mode = GET_MODE (x);
16320 switch (code)
16322 case CONST_INT:
16323 case CONST:
16324 case LABEL_REF:
16325 case SYMBOL_REF:
16326 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
16327 *total = 3;
16328 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
16329 *total = 2;
16330 else if (flag_pic && SYMBOLIC_CONST (x)
16331 && (!TARGET_64BIT
16332 || (GET_CODE (x) != LABEL_REF
16333 && (GET_CODE (x) != SYMBOL_REF
16334 || !SYMBOL_REF_LOCAL_P (x)))))
16335 *total = 1;
16336 else
16337 *total = 0;
16338 return true;
16340 case CONST_DOUBLE:
16341 if (mode == VOIDmode)
16342 *total = 0;
16343 else
16344 switch (standard_80387_constant_p (x))
16346 case 1: /* 0.0 */
16347 *total = 1;
16348 break;
16349 default: /* Other constants */
16350 *total = 2;
16351 break;
16352 case 0:
16353 case -1:
16354 /* Start with (MEM (SYMBOL_REF)), since that's where
16355 it'll probably end up. Add a penalty for size. */
16356 *total = (COSTS_N_INSNS (1)
16357 + (flag_pic != 0 && !TARGET_64BIT)
16358 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
16359 break;
16361 return true;
16363 case ZERO_EXTEND:
16364 /* Zero extension is often completely free on x86_64, so make
16365 it as cheap as possible. */
16366 if (TARGET_64BIT && mode == DImode
16367 && GET_MODE (XEXP (x, 0)) == SImode)
16368 *total = 1;
16369 else if (TARGET_ZERO_EXTEND_WITH_AND)
16370 *total = COSTS_N_INSNS (ix86_cost->add);
16371 else
16372 *total = COSTS_N_INSNS (ix86_cost->movzx);
16373 return false;
16375 case SIGN_EXTEND:
16376 *total = COSTS_N_INSNS (ix86_cost->movsx);
16377 return false;
16379 case ASHIFT:
16380 if (GET_CODE (XEXP (x, 1)) == CONST_INT
16381 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
16383 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16384 if (value == 1)
16386 *total = COSTS_N_INSNS (ix86_cost->add);
16387 return false;
16389 if ((value == 2 || value == 3)
16390 && ix86_cost->lea <= ix86_cost->shift_const)
16392 *total = COSTS_N_INSNS (ix86_cost->lea);
16393 return false;
16396 /* FALLTHRU */
16398 case ROTATE:
16399 case ASHIFTRT:
16400 case LSHIFTRT:
16401 case ROTATERT:
16402 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
16404 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16406 if (INTVAL (XEXP (x, 1)) > 32)
16407 *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
16408 else
16409 *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
16411 else
16413 if (GET_CODE (XEXP (x, 1)) == AND)
16414 *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
16415 else
16416 *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
16419 else
16421 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16422 *total = COSTS_N_INSNS (ix86_cost->shift_const);
16423 else
16424 *total = COSTS_N_INSNS (ix86_cost->shift_var);
16426 return false;
16428 case MULT:
16429 if (FLOAT_MODE_P (mode))
16431 *total = COSTS_N_INSNS (ix86_cost->fmul);
16432 return false;
16434 else
16436 rtx op0 = XEXP (x, 0);
16437 rtx op1 = XEXP (x, 1);
16438 int nbits;
16439 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16441 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16442 for (nbits = 0; value != 0; value &= value - 1)
16443 nbits++;
16445 else
16446 /* This is arbitrary. */
16447 nbits = 7;
16449 /* Compute costs correctly for widening multiplication. */
16450 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
16451 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
16452 == GET_MODE_SIZE (mode))
16454 int is_mulwiden = 0;
16455 enum machine_mode inner_mode = GET_MODE (op0);
16457 if (GET_CODE (op0) == GET_CODE (op1))
16458 is_mulwiden = 1, op1 = XEXP (op1, 0);
16459 else if (GET_CODE (op1) == CONST_INT)
16461 if (GET_CODE (op0) == SIGN_EXTEND)
16462 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
16463 == INTVAL (op1);
16464 else
16465 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
16468 if (is_mulwiden)
16469 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
16472 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
16473 + nbits * ix86_cost->mult_bit)
16474 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
16476 return true;
16479 case DIV:
16480 case UDIV:
16481 case MOD:
16482 case UMOD:
16483 if (FLOAT_MODE_P (mode))
16484 *total = COSTS_N_INSNS (ix86_cost->fdiv);
16485 else
16486 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
16487 return false;
16489 case PLUS:
16490 if (FLOAT_MODE_P (mode))
16491 *total = COSTS_N_INSNS (ix86_cost->fadd);
16492 else if (GET_MODE_CLASS (mode) == MODE_INT
16493 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
16495 if (GET_CODE (XEXP (x, 0)) == PLUS
16496 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
16497 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
16498 && CONSTANT_P (XEXP (x, 1)))
16500 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
16501 if (val == 2 || val == 4 || val == 8)
16503 *total = COSTS_N_INSNS (ix86_cost->lea);
16504 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16505 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
16506 outer_code);
16507 *total += rtx_cost (XEXP (x, 1), outer_code);
16508 return true;
16511 else if (GET_CODE (XEXP (x, 0)) == MULT
16512 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
16514 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
16515 if (val == 2 || val == 4 || val == 8)
16517 *total = COSTS_N_INSNS (ix86_cost->lea);
16518 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16519 *total += rtx_cost (XEXP (x, 1), outer_code);
16520 return true;
16523 else if (GET_CODE (XEXP (x, 0)) == PLUS)
16525 *total = COSTS_N_INSNS (ix86_cost->lea);
16526 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16527 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16528 *total += rtx_cost (XEXP (x, 1), outer_code);
16529 return true;
16532 /* FALLTHRU */
16534 case MINUS:
16535 if (FLOAT_MODE_P (mode))
16537 *total = COSTS_N_INSNS (ix86_cost->fadd);
16538 return false;
16540 /* FALLTHRU */
16542 case AND:
16543 case IOR:
16544 case XOR:
16545 if (!TARGET_64BIT && mode == DImode)
16547 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
16548 + (rtx_cost (XEXP (x, 0), outer_code)
16549 << (GET_MODE (XEXP (x, 0)) != DImode))
16550 + (rtx_cost (XEXP (x, 1), outer_code)
16551 << (GET_MODE (XEXP (x, 1)) != DImode)));
16552 return true;
16554 /* FALLTHRU */
16556 case NEG:
16557 if (FLOAT_MODE_P (mode))
16559 *total = COSTS_N_INSNS (ix86_cost->fchs);
16560 return false;
16562 /* FALLTHRU */
16564 case NOT:
16565 if (!TARGET_64BIT && mode == DImode)
16566 *total = COSTS_N_INSNS (ix86_cost->add * 2);
16567 else
16568 *total = COSTS_N_INSNS (ix86_cost->add);
16569 return false;
16571 case COMPARE:
16572 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
16573 && XEXP (XEXP (x, 0), 1) == const1_rtx
16574 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
16575 && XEXP (x, 1) == const0_rtx)
16577 /* This kind of construct is implemented using test[bwl].
16578 Treat it as if we had an AND. */
16579 *total = (COSTS_N_INSNS (ix86_cost->add)
16580 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
16581 + rtx_cost (const1_rtx, outer_code));
16582 return true;
16584 return false;
16586 case FLOAT_EXTEND:
16587 if (!TARGET_SSE_MATH
16588 || mode == XFmode
16589 || (mode == DFmode && !TARGET_SSE2))
16590 *total = 0;
16591 return false;
16593 case ABS:
16594 if (FLOAT_MODE_P (mode))
16595 *total = COSTS_N_INSNS (ix86_cost->fabs);
16596 return false;
16598 case SQRT:
16599 if (FLOAT_MODE_P (mode))
16600 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
16601 return false;
16603 case UNSPEC:
16604 if (XINT (x, 1) == UNSPEC_TP)
16605 *total = 0;
16606 return false;
16608 default:
16609 return false;
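/* Illustrative sketch (an addition for exposition, not original code):
   middle-end passes reach this hook indirectly through rtx_cost (), e.g.

       rtx shift = gen_rtx_ASHIFT (SImode, reg, const1_rtx);
       int cost = rtx_cost (shift, SET);

   With the ASHIFT case above, a shift by one is priced as
   COSTS_N_INSNS (ix86_cost->add), i.e. no more expensive than an addition,
   which is one reason the optimizers happily rewrite "x * 2" as "x + x".
   The variable names and the SET outer code are assumptions made for the
   example.  */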
16613 #if TARGET_MACHO
16615 static int current_machopic_label_num;
16617 /* Given a symbol name and its associated stub, write out the
16618 definition of the stub. */
16620 void
16621 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16623 unsigned int length;
16624 char *binder_name, *symbol_name, lazy_ptr_name[32];
16625 int label = ++current_machopic_label_num;
16627 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16628 symb = (*targetm.strip_name_encoding) (symb);
16630 length = strlen (stub);
16631 binder_name = alloca (length + 32);
16632 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16634 length = strlen (symb);
16635 symbol_name = alloca (length + 32);
16636 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16638 sprintf (lazy_ptr_name, "L%d$lz", label);
16640 if (MACHOPIC_PURE)
16641 switch_to_section (machopic_picsymbol_stub_section);
16642 else
16643 switch_to_section (machopic_symbol_stub_section);
16645 fprintf (file, "%s:\n", stub);
16646 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16648 if (MACHOPIC_PURE)
16650 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16651 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16652 fprintf (file, "\tjmp %%edx\n");
16654 else
16655 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16657 fprintf (file, "%s:\n", binder_name);
16659 if (MACHOPIC_PURE)
16661 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16662 fprintf (file, "\tpushl %%eax\n");
16664 else
16665 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
16667 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16669 switch_to_section (machopic_lazy_symbol_ptr_section);
16670 fprintf (file, "%s:\n", lazy_ptr_name);
16671 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16672 fprintf (file, "\t.long %s\n", binder_name);
16674 #endif /* TARGET_MACHO */
16676 /* Order the registers for register allocator. */
16678 void
16679 x86_order_regs_for_local_alloc (void)
16681 int pos = 0;
16682 int i;
16684 /* First allocate the local general purpose registers. */
16685 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16686 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16687 reg_alloc_order [pos++] = i;
16689 /* Global general purpose registers. */
16690 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16691 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16692 reg_alloc_order [pos++] = i;
16694 /* x87 registers come first in case we are doing FP math
16695 using them. */
16696 if (!TARGET_SSE_MATH)
16697 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16698 reg_alloc_order [pos++] = i;
16700 /* SSE registers. */
16701 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16702 reg_alloc_order [pos++] = i;
16703 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16704 reg_alloc_order [pos++] = i;
16706 /* x87 registers. */
16707 if (TARGET_SSE_MATH)
16708 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16709 reg_alloc_order [pos++] = i;
16711 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16712 reg_alloc_order [pos++] = i;
16714 /* Initialize the rest of the array, as we do not allocate some registers
16715 at all. */
16716 while (pos < FIRST_PSEUDO_REGISTER)
16717 reg_alloc_order [pos++] = 0;
16720 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16721 struct attribute_spec.handler. */
16722 static tree
16723 ix86_handle_struct_attribute (tree *node, tree name,
16724 tree args ATTRIBUTE_UNUSED,
16725 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16727 tree *type = NULL;
16728 if (DECL_P (*node))
16730 if (TREE_CODE (*node) == TYPE_DECL)
16731 type = &TREE_TYPE (*node);
16733 else
16734 type = node;
16736 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16737 || TREE_CODE (*type) == UNION_TYPE)))
16739 warning (OPT_Wattributes, "%qs attribute ignored",
16740 IDENTIFIER_POINTER (name));
16741 *no_add_attrs = true;
16744 else if ((is_attribute_p ("ms_struct", name)
16745 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16746 || ((is_attribute_p ("gcc_struct", name)
16747 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16749 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16750 IDENTIFIER_POINTER (name));
16751 *no_add_attrs = true;
16754 return NULL_TREE;
16757 static bool
16758 ix86_ms_bitfield_layout_p (tree record_type)
16760 return (TARGET_MS_BITFIELD_LAYOUT &&
16761 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
16762 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
16765 /* Returns an expression indicating where the this parameter is
16766 located on entry to the FUNCTION. */
16768 static rtx
16769 x86_this_parameter (tree function)
16771 tree type = TREE_TYPE (function);
16773 if (TARGET_64BIT)
16775 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16776 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16779 if (ix86_function_regparm (type, function) > 0)
16781 tree parm;
16783 parm = TYPE_ARG_TYPES (type);
16784 /* Figure out whether or not the function has a variable number of
16785 arguments. */
16786 for (; parm; parm = TREE_CHAIN (parm))
16787 if (TREE_VALUE (parm) == void_type_node)
16788 break;
16789 /* If not, the this parameter is in the first argument. */
16790 if (parm)
16792 int regno = 0;
16793 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16794 regno = 2;
16795 return gen_rtx_REG (SImode, regno);
16799 if (aggregate_value_p (TREE_TYPE (type), type))
16800 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16801 else
16802 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
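/* Illustrative note (added commentary): for an ordinary 32-bit method with
   no regparm attribute, the code above reports `this' as the stack slot
   MEM (sp + 4), or MEM (sp + 8) when the return value is an aggregate
   passed through a hidden pointer; under the fastcall convention it is
   instead reported as register %ecx (regno 2), and on 64-bit targets as
   %rdi or %rsi depending on the same hidden-pointer test.  */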
16805 /* Determine whether x86_output_mi_thunk can succeed. */
16807 static bool
16808 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16809 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16810 HOST_WIDE_INT vcall_offset, tree function)
16812 /* 64-bit can handle anything. */
16813 if (TARGET_64BIT)
16814 return true;
16816 /* For 32-bit, everything's fine if we have one free register. */
16817 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16818 return true;
16820 /* Need a free register for vcall_offset. */
16821 if (vcall_offset)
16822 return false;
16824 /* Need a free register for GOT references. */
16825 if (flag_pic && !(*targetm.binds_local_p) (function))
16826 return false;
16828 /* Otherwise ok. */
16829 return true;
16832 /* Output the assembler code for a thunk function. THUNK_DECL is the
16833 declaration for the thunk function itself, FUNCTION is the decl for
16834 the target function. DELTA is an immediate constant offset to be
16835 added to THIS. If VCALL_OFFSET is nonzero, the word at
16836 *(*this + vcall_offset) should be added to THIS. */
16838 static void
16839 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16840 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16841 HOST_WIDE_INT vcall_offset, tree function)
16843 rtx xops[3];
16844 rtx this = x86_this_parameter (function);
16845 rtx this_reg, tmp;
16847 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16848 pull it in now and let DELTA benefit. */
16849 if (REG_P (this))
16850 this_reg = this;
16851 else if (vcall_offset)
16853 /* Put the this parameter into %eax. */
16854 xops[0] = this;
16855 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16856 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16858 else
16859 this_reg = NULL_RTX;
16861 /* Adjust the this parameter by a fixed constant. */
16862 if (delta)
16864 xops[0] = GEN_INT (delta);
16865 xops[1] = this_reg ? this_reg : this;
16866 if (TARGET_64BIT)
16868 if (!x86_64_general_operand (xops[0], DImode))
16870 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16871 xops[1] = tmp;
16872 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16873 xops[0] = tmp;
16874 xops[1] = this;
16876 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16878 else
16879 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16882 /* Adjust the this parameter by a value stored in the vtable. */
16883 if (vcall_offset)
16885 if (TARGET_64BIT)
16886 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16887 else
16889 int tmp_regno = 2 /* ECX */;
16890 if (lookup_attribute ("fastcall",
16891 TYPE_ATTRIBUTES (TREE_TYPE (function))))
16892 tmp_regno = 0 /* EAX */;
16893 tmp = gen_rtx_REG (SImode, tmp_regno);
16896 xops[0] = gen_rtx_MEM (Pmode, this_reg);
16897 xops[1] = tmp;
16898 if (TARGET_64BIT)
16899 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16900 else
16901 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16903 /* Adjust the this parameter. */
16904 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
16905 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
16907 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
16908 xops[0] = GEN_INT (vcall_offset);
16909 xops[1] = tmp2;
16910 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16911 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
16913 xops[1] = this_reg;
16914 if (TARGET_64BIT)
16915 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16916 else
16917 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16920 /* If necessary, drop THIS back to its stack slot. */
16921 if (this_reg && this_reg != this)
16923 xops[0] = this_reg;
16924 xops[1] = this;
16925 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16928 xops[0] = XEXP (DECL_RTL (function), 0);
16929 if (TARGET_64BIT)
16931 if (!flag_pic || (*targetm.binds_local_p) (function))
16932 output_asm_insn ("jmp\t%P0", xops);
16933 else
16935 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
16936 tmp = gen_rtx_CONST (Pmode, tmp);
16937 tmp = gen_rtx_MEM (QImode, tmp);
16938 xops[0] = tmp;
16939 output_asm_insn ("jmp\t%A0", xops);
16942 else
16944 if (!flag_pic || (*targetm.binds_local_p) (function))
16945 output_asm_insn ("jmp\t%P0", xops);
16946 else
16947 #if TARGET_MACHO
16948 if (TARGET_MACHO)
16950 rtx sym_ref = XEXP (DECL_RTL (function), 0);
16951 tmp = (gen_rtx_SYMBOL_REF
16952 (Pmode,
16953 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
16954 tmp = gen_rtx_MEM (QImode, tmp);
16955 xops[0] = tmp;
16956 output_asm_insn ("jmp\t%0", xops);
16958 else
16959 #endif /* TARGET_MACHO */
16961 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
16962 output_set_got (tmp);
16964 xops[1] = tmp;
16965 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
16966 output_asm_insn ("jmp\t{*}%1", xops);
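/* Illustrative sketch (not from the original sources): for a simple 32-bit
   non-PIC thunk with DELTA == 4, no vcall offset, and `this' passed on the
   stack, the code above emits roughly

       addl  $4, 4(%esp)
       jmp   some_method

   i.e. it adjusts the incoming `this' in place and tail-jumps to the real
   method; the symbol name and stack offset are assumptions made for the
   example.  */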
16971 static void
16972 x86_file_start (void)
16974 default_file_start ();
16975 if (X86_FILE_START_VERSION_DIRECTIVE)
16976 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
16977 if (X86_FILE_START_FLTUSED)
16978 fputs ("\t.global\t__fltused\n", asm_out_file);
16979 if (ix86_asm_dialect == ASM_INTEL)
16980 fputs ("\t.intel_syntax\n", asm_out_file);
16983 int
16984 x86_field_alignment (tree field, int computed)
16986 enum machine_mode mode;
16987 tree type = TREE_TYPE (field);
16989 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
16990 return computed;
16991 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
16992 ? get_inner_array_type (type) : type);
16993 if (mode == DFmode || mode == DCmode
16994 || GET_MODE_CLASS (mode) == MODE_INT
16995 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
16996 return MIN (32, computed);
16997 return computed;
17000 /* Output assembler code to FILE to increment profiler label # LABELNO
17001 for profiling a function entry. */
17002 void
17003 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
17005 if (TARGET_64BIT)
17006 if (flag_pic)
17008 #ifndef NO_PROFILE_COUNTERS
17009 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
17010 #endif
17011 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
17013 else
17015 #ifndef NO_PROFILE_COUNTERS
17016 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
17017 #endif
17018 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
17020 else if (flag_pic)
17022 #ifndef NO_PROFILE_COUNTERS
17023 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
17024 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
17025 #endif
17026 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
17028 else
17030 #ifndef NO_PROFILE_COUNTERS
17031 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
17032 PROFILE_COUNT_REGISTER);
17033 #endif
17034 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
17038 /* We don't have exact information about the insn sizes, but we may assume
17039 quite safely that we are informed about all 1 byte insns and memory
17040 address sizes. This is enough to eliminate unnecessary padding in
17041 99% of cases. */
17043 static int
17044 min_insn_size (rtx insn)
17046 int l = 0;
17048 if (!INSN_P (insn) || !active_insn_p (insn))
17049 return 0;
17051 /* Discard alignments we've emitted and jump instructions. */
17052 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
17053 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
17054 return 0;
17055 if (GET_CODE (insn) == JUMP_INSN
17056 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
17057 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
17058 return 0;
17060 /* Important case - calls are always 5 bytes.
17061 It is common to have many calls in a row. */
17062 if (GET_CODE (insn) == CALL_INSN
17063 && symbolic_reference_mentioned_p (PATTERN (insn))
17064 && !SIBLING_CALL_P (insn))
17065 return 5;
17066 if (get_attr_length (insn) <= 1)
17067 return 1;
17069 /* For normal instructions we may rely on the sizes of addresses
17070 and the presence of a symbol to require 4 bytes of encoding.
17071 This is not the case for jumps, where references are PC relative. */
17072 if (GET_CODE (insn) != JUMP_INSN)
17074 l = get_attr_length_address (insn);
17075 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
17076 l = 4;
17078 if (l)
17079 return 1+l;
17080 else
17081 return 2;
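/* Worked example (an editorial addition): a "movl foo@GOT(%ebx), %eax"
   style instruction mentions a symbol, so its address length is raised to
   at least 4 and the function returns 1 + 4 = 5 bytes, while a plain
   register-to-register "movl %eax, %edx" has no address bytes and is
   reported as 2.  These are deliberately rough lower bounds, which is all
   the padding heuristic below requires.  */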
17084 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in 16 byte
17085 window. */
17087 static void
17088 ix86_avoid_jump_misspredicts (void)
17090 rtx insn, start = get_insns ();
17091 int nbytes = 0, njumps = 0;
17092 int isjump = 0;
17094 /* Look for all minimal intervals of instructions containing 4 jumps.
17095 The intervals are bounded by START and INSN. NBYTES is the total
17096 size of instructions in the interval including INSN and not including
17097 START. When NBYTES is smaller than 16 bytes, it is possible
17098 that the end of START and INSN end up in the same 16byte page.
17100 The smallest offset in the page INSN can start is the case where START
17101 ends on the offset 0. Offset of INSN is then NBYTES - sizeof (INSN).
17102 We add p2align to 16byte window with maxskip 17 - NBYTES + sizeof (INSN).
17104 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
17107 nbytes += min_insn_size (insn);
17108 if (dump_file)
17109 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
17110 INSN_UID (insn), min_insn_size (insn));
17111 if ((GET_CODE (insn) == JUMP_INSN
17112 && GET_CODE (PATTERN (insn)) != ADDR_VEC
17113 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
17114 || GET_CODE (insn) == CALL_INSN)
17115 njumps++;
17116 else
17117 continue;
17119 while (njumps > 3)
17121 start = NEXT_INSN (start);
17122 if ((GET_CODE (start) == JUMP_INSN
17123 && GET_CODE (PATTERN (start)) != ADDR_VEC
17124 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
17125 || GET_CODE (start) == CALL_INSN)
17126 njumps--, isjump = 1;
17127 else
17128 isjump = 0;
17129 nbytes -= min_insn_size (start);
17131 gcc_assert (njumps >= 0);
17132 if (dump_file)
17133 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
17134 INSN_UID (start), INSN_UID (insn), nbytes);
17136 if (njumps == 3 && isjump && nbytes < 16)
17138 int padsize = 15 - nbytes + min_insn_size (insn);
17140 if (dump_file)
17141 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
17142 INSN_UID (insn), padsize);
17143 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
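/* Illustrative note (added for exposition): when a fourth jump or call
   would otherwise land in the same 16-byte window as the previous three,
   the loop above emits an alignment of 15 - NBYTES + min_insn_size (INSN)
   bytes in front of it; e.g. with NBYTES == 12 that is 3 bytes plus INSN's
   own size estimate, enough to push INSN into the next window.  */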
17148 /* AMD Athlon works faster
17149 when RET is not the destination of a conditional jump or directly preceded
17150 by another jump instruction. We avoid the penalty by inserting a NOP just
17151 before the RET instruction in such cases. */
17152 static void
17153 ix86_pad_returns (void)
17155 edge e;
17156 edge_iterator ei;
17158 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
17160 basic_block bb = e->src;
17161 rtx ret = BB_END (bb);
17162 rtx prev;
17163 bool replace = false;
17165 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
17166 || !maybe_hot_bb_p (bb))
17167 continue;
17168 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
17169 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
17170 break;
17171 if (prev && GET_CODE (prev) == CODE_LABEL)
17173 edge e;
17174 edge_iterator ei;
17176 FOR_EACH_EDGE (e, ei, bb->preds)
17177 if (EDGE_FREQUENCY (e) && e->src->index >= 0
17178 && !(e->flags & EDGE_FALLTHRU))
17179 replace = true;
17181 if (!replace)
17183 prev = prev_active_insn (ret);
17184 if (prev
17185 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
17186 || GET_CODE (prev) == CALL_INSN))
17187 replace = true;
17188 /* Empty functions get branch mispredict even when the jump destination
17189 is not visible to us. */
17190 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
17191 replace = true;
17193 if (replace)
17195 emit_insn_before (gen_return_internal_long (), ret);
17196 delete_insn (ret);
17201 /* Implement machine specific optimizations. We implement padding of returns
17202 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
17203 static void
17204 ix86_reorg (void)
17206 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
17207 ix86_pad_returns ();
17208 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
17209 ix86_avoid_jump_misspredicts ();
17212 /* Return nonzero when a QImode register that must be represented via a REX
17213 prefix is used. */
17214 bool
17215 x86_extended_QIreg_mentioned_p (rtx insn)
17217 int i;
17218 extract_insn_cached (insn);
17219 for (i = 0; i < recog_data.n_operands; i++)
17220 if (REG_P (recog_data.operand[i])
17221 && REGNO (recog_data.operand[i]) >= 4)
17222 return true;
17223 return false;
17226 /* Return nonzero when P points to a register encoded via a REX prefix.
17227 Called via for_each_rtx. */
17228 static int
17229 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
17231 unsigned int regno;
17232 if (!REG_P (*p))
17233 return 0;
17234 regno = REGNO (*p);
17235 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
17238 /* Return true when INSN mentions a register that must be encoded using a REX
17239 prefix. */
17240 bool
17241 x86_extended_reg_mentioned_p (rtx insn)
17243 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
17246 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
17247 optabs would emit if we didn't have TFmode patterns. */
17249 void
17250 x86_emit_floatuns (rtx operands[2])
17252 rtx neglab, donelab, i0, i1, f0, in, out;
17253 enum machine_mode mode, inmode;
17255 inmode = GET_MODE (operands[1]);
17256 gcc_assert (inmode == SImode || inmode == DImode);
17258 out = operands[0];
17259 in = force_reg (inmode, operands[1]);
17260 mode = GET_MODE (out);
17261 neglab = gen_label_rtx ();
17262 donelab = gen_label_rtx ();
17263 i1 = gen_reg_rtx (Pmode);
17264 f0 = gen_reg_rtx (mode);
17266 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
17268 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
17269 emit_jump_insn (gen_jump (donelab));
17270 emit_barrier ();
17272 emit_label (neglab);
17274 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17275 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17276 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
17277 expand_float (f0, i0, 0);
17278 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
17280 emit_label (donelab);
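/* Added commentary (not original text): the slow path handles inputs with
   the sign bit set by halving first: i0 = (u >> 1) | (u & 1) is converted
   as a nonnegative signed value and then doubled.  ORing the discarded low
   bit back in acts as a sticky bit for rounding, so e.g. the 64-bit input
   0xFFFFFFFFFFFFFFFF still converts to 18446744073709551616.0, the same
   double a direct unsigned conversion would produce.  */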
17283 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17284 with all elements equal to VAR. Return true if successful. */
17286 static bool
17287 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
17288 rtx target, rtx val)
17290 enum machine_mode smode, wsmode, wvmode;
17291 rtx x;
17293 switch (mode)
17295 case V2SImode:
17296 case V2SFmode:
17297 if (!mmx_ok && !TARGET_SSE)
17298 return false;
17299 /* FALLTHRU */
17301 case V2DFmode:
17302 case V2DImode:
17303 case V4SFmode:
17304 case V4SImode:
17305 val = force_reg (GET_MODE_INNER (mode), val);
17306 x = gen_rtx_VEC_DUPLICATE (mode, val);
17307 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17308 return true;
17310 case V4HImode:
17311 if (!mmx_ok)
17312 return false;
17313 if (TARGET_SSE || TARGET_3DNOW_A)
17315 val = gen_lowpart (SImode, val);
17316 x = gen_rtx_TRUNCATE (HImode, val);
17317 x = gen_rtx_VEC_DUPLICATE (mode, x);
17318 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17319 return true;
17321 else
17323 smode = HImode;
17324 wsmode = SImode;
17325 wvmode = V2SImode;
17326 goto widen;
17329 case V8QImode:
17330 if (!mmx_ok)
17331 return false;
17332 smode = QImode;
17333 wsmode = HImode;
17334 wvmode = V4HImode;
17335 goto widen;
17336 case V8HImode:
17337 smode = HImode;
17338 wsmode = SImode;
17339 wvmode = V4SImode;
17340 goto widen;
17341 case V16QImode:
17342 smode = QImode;
17343 wsmode = HImode;
17344 wvmode = V8HImode;
17345 goto widen;
17346 widen:
17347 /* Replicate the value once into the next wider mode and recurse. */
17348 val = convert_modes (wsmode, smode, val, true);
17349 x = expand_simple_binop (wsmode, ASHIFT, val,
17350 GEN_INT (GET_MODE_BITSIZE (smode)),
17351 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17352 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
17354 x = gen_reg_rtx (wvmode);
17355 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
17356 gcc_unreachable ();
17357 emit_move_insn (target, gen_lowpart (mode, x));
17358 return true;
17360 default:
17361 return false;
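/* Illustrative note (an editorial addition): broadcasting the byte 0xAB
   across V16QImode follows the "widen" path: 0xAB is first replicated into
   the HImode value 0xABAB (val | (val << 8)), the routine recurses for
   V8HImode, which widens again to the SImode value 0xABABABAB and uses the
   direct V4SImode vec_duplicate; the result is finally reinterpreted as a
   vector of sixteen identical bytes.  */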
17365 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17366 whose low element is VAR, and other elements are zero. Return true
17367 if successful. */
17369 static bool
17370 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
17371 rtx target, rtx var)
17373 enum machine_mode vsimode;
17374 rtx x;
17376 switch (mode)
17378 case V2SFmode:
17379 case V2SImode:
17380 if (!mmx_ok && !TARGET_SSE)
17381 return false;
17382 /* FALLTHRU */
17384 case V2DFmode:
17385 case V2DImode:
17386 var = force_reg (GET_MODE_INNER (mode), var);
17387 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
17388 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17389 return true;
17391 case V4SFmode:
17392 case V4SImode:
17393 var = force_reg (GET_MODE_INNER (mode), var);
17394 x = gen_rtx_VEC_DUPLICATE (mode, var);
17395 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
17396 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17397 return true;
17399 case V8HImode:
17400 case V16QImode:
17401 vsimode = V4SImode;
17402 goto widen;
17403 case V4HImode:
17404 case V8QImode:
17405 if (!mmx_ok)
17406 return false;
17407 vsimode = V2SImode;
17408 goto widen;
17409 widen:
17410 /* Zero extend the variable element to SImode and recurse. */
17411 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
17413 x = gen_reg_rtx (vsimode);
17414 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
17415 gcc_unreachable ();
17417 emit_move_insn (target, gen_lowpart (mode, x));
17418 return true;
17420 default:
17421 return false;
17425 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17426 consisting of the values in VALS. It is known that all elements
17427 except ONE_VAR are constants. Return true if successful. */
17429 static bool
17430 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
17431 rtx target, rtx vals, int one_var)
17433 rtx var = XVECEXP (vals, 0, one_var);
17434 enum machine_mode wmode;
17435 rtx const_vec, x;
17437 const_vec = copy_rtx (vals);
17438 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
17439 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
17441 switch (mode)
17443 case V2DFmode:
17444 case V2DImode:
17445 case V2SFmode:
17446 case V2SImode:
17447 /* For the two element vectors, it's just as easy to use
17448 the general case. */
17449 return false;
17451 case V4SFmode:
17452 case V4SImode:
17453 case V8HImode:
17454 case V4HImode:
17455 break;
17457 case V16QImode:
17458 wmode = V8HImode;
17459 goto widen;
17460 case V8QImode:
17461 wmode = V4HImode;
17462 goto widen;
17463 widen:
17464 /* There's no way to set one QImode entry easily. Combine
17465 the variable value with its adjacent constant value, and
17466 promote to an HImode set. */
17467 x = XVECEXP (vals, 0, one_var ^ 1);
17468 if (one_var & 1)
17470 var = convert_modes (HImode, QImode, var, true);
17471 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
17472 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17473 x = GEN_INT (INTVAL (x) & 0xff);
17475 else
17477 var = convert_modes (HImode, QImode, var, true);
17478 x = gen_int_mode (INTVAL (x) << 8, HImode);
17480 if (x != const0_rtx)
17481 var = expand_simple_binop (HImode, IOR, var, x, var,
17482 1, OPTAB_LIB_WIDEN);
17484 x = gen_reg_rtx (wmode);
17485 emit_move_insn (x, gen_lowpart (wmode, const_vec));
17486 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
17488 emit_move_insn (target, gen_lowpart (mode, x));
17489 return true;
17491 default:
17492 return false;
17495 emit_move_insn (target, const_vec);
17496 ix86_expand_vector_set (mmx_ok, target, var, one_var);
17497 return true;
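/* Worked example (added, not original): to build a V16QImode vector whose
   only variable element sits at index 5, the code above pairs it with the
   constant at index 4: the variable byte is shifted left by 8 (index 5 is
   the high half of HImode word 2), the constant's low byte is ORed in, the
   remaining constants are loaded from the pool as V8HImode, and an
   ordinary vector-set stores the combined half-word at position 5 >> 1.  */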
17500 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
17501 all values variable, and none identical. */
17503 static void
17504 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
17505 rtx target, rtx vals)
17507 enum machine_mode half_mode = GET_MODE_INNER (mode);
17508 rtx op0 = NULL, op1 = NULL;
17509 bool use_vec_concat = false;
17511 switch (mode)
17513 case V2SFmode:
17514 case V2SImode:
17515 if (!mmx_ok && !TARGET_SSE)
17516 break;
17517 /* FALLTHRU */
17519 case V2DFmode:
17520 case V2DImode:
17521 /* For the two element vectors, we always implement VEC_CONCAT. */
17522 op0 = XVECEXP (vals, 0, 0);
17523 op1 = XVECEXP (vals, 0, 1);
17524 use_vec_concat = true;
17525 break;
17527 case V4SFmode:
17528 half_mode = V2SFmode;
17529 goto half;
17530 case V4SImode:
17531 half_mode = V2SImode;
17532 goto half;
17533 half:
17535 rtvec v;
17537 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
17538 Recurse to load the two halves. */
17540 op0 = gen_reg_rtx (half_mode);
17541 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
17542 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
17544 op1 = gen_reg_rtx (half_mode);
17545 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
17546 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
17548 use_vec_concat = true;
17550 break;
17552 case V8HImode:
17553 case V16QImode:
17554 case V4HImode:
17555 case V8QImode:
17556 break;
17558 default:
17559 gcc_unreachable ();
17562 if (use_vec_concat)
17564 if (!register_operand (op0, half_mode))
17565 op0 = force_reg (half_mode, op0);
17566 if (!register_operand (op1, half_mode))
17567 op1 = force_reg (half_mode, op1);
17569 emit_insn (gen_rtx_SET (VOIDmode, target,
17570 gen_rtx_VEC_CONCAT (mode, op0, op1)));
17572 else
17574 int i, j, n_elts, n_words, n_elt_per_word;
17575 enum machine_mode inner_mode;
17576 rtx words[4], shift;
17578 inner_mode = GET_MODE_INNER (mode);
17579 n_elts = GET_MODE_NUNITS (mode);
17580 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
17581 n_elt_per_word = n_elts / n_words;
17582 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
17584 for (i = 0; i < n_words; ++i)
17586 rtx word = NULL_RTX;
17588 for (j = 0; j < n_elt_per_word; ++j)
17590 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
17591 elt = convert_modes (word_mode, inner_mode, elt, true);
17593 if (j == 0)
17594 word = elt;
17595 else
17597 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
17598 word, 1, OPTAB_LIB_WIDEN);
17599 word = expand_simple_binop (word_mode, IOR, word, elt,
17600 word, 1, OPTAB_LIB_WIDEN);
17604 words[i] = word;
17607 if (n_words == 1)
17608 emit_move_insn (target, gen_lowpart (mode, words[0]));
17609 else if (n_words == 2)
17611 rtx tmp = gen_reg_rtx (mode);
17612 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
17613 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
17614 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
17615 emit_move_insn (target, tmp);
17617 else if (n_words == 4)
17619 rtx tmp = gen_reg_rtx (V4SImode);
17620 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
17621 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
17622 emit_move_insn (target, gen_lowpart (mode, tmp));
17624 else
17625 gcc_unreachable ();
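/* Illustrative note (editorial): building a V8HImode vector from eight
   variable elements on a 32-bit target takes the word-packing branch:
   n_words is 4, so each pair of HImode elements becomes one SImode word
   (the higher-indexed element shifted left by 16, the lower one ORed in),
   and the four words are then assembled recursively as a V4SImode vector
   and reinterpreted as the requested mode.  */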
17629 /* Initialize vector TARGET via VALS. Suppress the use of MMX
17630 instructions unless MMX_OK is true. */
17632 void
17633 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
17635 enum machine_mode mode = GET_MODE (target);
17636 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17637 int n_elts = GET_MODE_NUNITS (mode);
17638 int n_var = 0, one_var = -1;
17639 bool all_same = true, all_const_zero = true;
17640 int i;
17641 rtx x;
17643 for (i = 0; i < n_elts; ++i)
17645 x = XVECEXP (vals, 0, i);
17646 if (!CONSTANT_P (x))
17647 n_var++, one_var = i;
17648 else if (x != CONST0_RTX (inner_mode))
17649 all_const_zero = false;
17650 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17651 all_same = false;
17654 /* Constants are best loaded from the constant pool. */
17655 if (n_var == 0)
17657 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
17658 return;
17661 /* If all values are identical, broadcast the value. */
17662 if (all_same
17663 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
17664 XVECEXP (vals, 0, 0)))
17665 return;
17667 /* Values where only one field is non-constant are best loaded from
17668 the pool and overwritten via move later. */
17669 if (n_var == 1)
17671 if (all_const_zero && one_var == 0
17672 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
17673 XVECEXP (vals, 0, 0)))
17674 return;
17676 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
17677 return;
17680 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
17683 void
17684 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
17686 enum machine_mode mode = GET_MODE (target);
17687 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17688 bool use_vec_merge = false;
17689 rtx tmp;
17691 switch (mode)
17693 case V2SFmode:
17694 case V2SImode:
17695 if (mmx_ok)
17697 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
17698 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
17699 if (elt == 0)
17700 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
17701 else
17702 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
17703 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17704 return;
17706 break;
17708 case V2DFmode:
17709 case V2DImode:
17711 rtx op0, op1;
17713 /* For the two element vectors, we implement a VEC_CONCAT with
17714 the extraction of the other element. */
17716 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
17717 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
17719 if (elt == 0)
17720 op0 = val, op1 = tmp;
17721 else
17722 op0 = tmp, op1 = val;
17724 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
17725 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17727 return;
17729 case V4SFmode:
17730 switch (elt)
17732 case 0:
17733 use_vec_merge = true;
17734 break;
17736 case 1:
17737 /* tmp = target = A B C D */
17738 tmp = copy_to_reg (target);
17739 /* target = A A B B */
17740 emit_insn (gen_vec_interleave_lowv4sf (target, target, target));
17741 /* target = X A B B */
17742 ix86_expand_vector_set (false, target, val, 0);
17743 /* target = A X C D */
17744 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17745 GEN_INT (1), GEN_INT (0),
17746 GEN_INT (2+4), GEN_INT (3+4)));
17747 return;
17749 case 2:
17750 /* tmp = target = A B C D */
17751 tmp = copy_to_reg (target);
17752 /* tmp = X B C D */
17753 ix86_expand_vector_set (false, tmp, val, 0);
17754 /* target = A B X D */
17755 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17756 GEN_INT (0), GEN_INT (1),
17757 GEN_INT (0+4), GEN_INT (3+4)));
17758 return;
17760 case 3:
17761 /* tmp = target = A B C D */
17762 tmp = copy_to_reg (target);
17763 /* tmp = X B C D */
17764 ix86_expand_vector_set (false, tmp, val, 0);
17765 /* target = A B C X */
17766 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17767 GEN_INT (0), GEN_INT (1),
17768 GEN_INT (2+4), GEN_INT (0+4)));
17769 return;
17771 default:
17772 gcc_unreachable ();
17774 break;
17776 case V4SImode:
17777 /* Element 0 handled by vec_merge below. */
17778 if (elt == 0)
17780 use_vec_merge = true;
17781 break;
17784 if (TARGET_SSE2)
17786 /* With SSE2, use integer shuffles to swap element 0 and ELT,
17787 store into element 0, then shuffle them back. */
17789 rtx order[4];
17791 order[0] = GEN_INT (elt);
17792 order[1] = const1_rtx;
17793 order[2] = const2_rtx;
17794 order[3] = GEN_INT (3);
17795 order[elt] = const0_rtx;
17797 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17798 order[1], order[2], order[3]));
17800 ix86_expand_vector_set (false, target, val, 0);
17802 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17803 order[1], order[2], order[3]));
17805 else
17807 /* For SSE1, we have to reuse the V4SF code. */
17808 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
17809 gen_lowpart (SFmode, val), elt);
17811 return;
17813 case V8HImode:
17814 use_vec_merge = TARGET_SSE2;
17815 break;
17816 case V4HImode:
17817 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17818 break;
17820 case V16QImode:
17821 case V8QImode:
17822 default:
17823 break;
17826 if (use_vec_merge)
17828 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
17829 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
17830 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17832 else
17834 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17836 emit_move_insn (mem, target);
17838 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17839 emit_move_insn (tmp, val);
17841 emit_move_insn (target, mem);
17845 void
17846 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
17848 enum machine_mode mode = GET_MODE (vec);
17849 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17850 bool use_vec_extr = false;
17851 rtx tmp;
17853 switch (mode)
17855 case V2SImode:
17856 case V2SFmode:
17857 if (!mmx_ok)
17858 break;
17859 /* FALLTHRU */
17861 case V2DFmode:
17862 case V2DImode:
17863 use_vec_extr = true;
17864 break;
17866 case V4SFmode:
17867 switch (elt)
17869 case 0:
17870 tmp = vec;
17871 break;
17873 case 1:
17874 case 3:
17875 tmp = gen_reg_rtx (mode);
17876 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
17877 GEN_INT (elt), GEN_INT (elt),
17878 GEN_INT (elt+4), GEN_INT (elt+4)));
17879 break;
17881 case 2:
17882 tmp = gen_reg_rtx (mode);
17883 emit_insn (gen_vec_interleave_highv4sf (tmp, vec, vec));
17884 break;
17886 default:
17887 gcc_unreachable ();
17889 vec = tmp;
17890 use_vec_extr = true;
17891 elt = 0;
17892 break;
17894 case V4SImode:
17895 if (TARGET_SSE2)
17897 switch (elt)
17899 case 0:
17900 tmp = vec;
17901 break;
17903 case 1:
17904 case 3:
17905 tmp = gen_reg_rtx (mode);
17906 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
17907 GEN_INT (elt), GEN_INT (elt),
17908 GEN_INT (elt), GEN_INT (elt)));
17909 break;
17911 case 2:
17912 tmp = gen_reg_rtx (mode);
17913 emit_insn (gen_vec_interleave_highv2di (tmp, vec, vec));
17914 break;
17916 default:
17917 gcc_unreachable ();
17919 vec = tmp;
17920 use_vec_extr = true;
17921 elt = 0;
17923 else
17925 /* For SSE1, we have to reuse the V4SF code. */
17926 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
17927 gen_lowpart (V4SFmode, vec), elt);
17928 return;
17930 break;
17932 case V8HImode:
17933 use_vec_extr = TARGET_SSE2;
17934 break;
17935 case V4HImode:
17936 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17937 break;
17939 case V16QImode:
17940 case V8QImode:
17941 /* ??? Could extract the appropriate HImode element and shift. */
17942 default:
17943 break;
17946 if (use_vec_extr)
17948 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
17949 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
17951 /* Let the rtl optimizers know about the zero extension performed. */
17952 if (inner_mode == HImode)
17954 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
17955 target = gen_lowpart (SImode, target);
17958 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17960 else
17962 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17964 emit_move_insn (mem, vec);
17966 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17967 emit_move_insn (target, tmp);
17971 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
17972 pattern to reduce; DEST is the destination; IN is the input vector. */
17974 void
17975 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
17977 rtx tmp1, tmp2, tmp3;
17979 tmp1 = gen_reg_rtx (V4SFmode);
17980 tmp2 = gen_reg_rtx (V4SFmode);
17981 tmp3 = gen_reg_rtx (V4SFmode);
17983 emit_insn (gen_sse_movhlps (tmp1, in, in));
17984 emit_insn (fn (tmp2, tmp1, in));
17986 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
17987 GEN_INT (1), GEN_INT (1),
17988 GEN_INT (1+4), GEN_INT (1+4)));
17989 emit_insn (fn (dest, tmp2, tmp3));
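/* Worked example (added commentary): reducing {a, b, c, d} with an add
   pattern, movhlps makes tmp1 = {c, d, c, d}; the first FN gives
   tmp2 = {a+c, b+d, ...}; the shufps broadcasts element 1 so that
   tmp3 = {b+d, b+d, b+d, b+d}; and the final FN leaves a+b+c+d in element
   0 of DEST, with the upper elements holding don't-care partial sums.  */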
17992 /* Implements target hook vector_mode_supported_p. */
17993 static bool
17994 ix86_vector_mode_supported_p (enum machine_mode mode)
17996 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
17997 return true;
17998 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
17999 return true;
18000 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
18001 return true;
18002 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
18003 return true;
18004 return false;
18007 /* Worker function for TARGET_MD_ASM_CLOBBERS.
18009 We do this in the new i386 backend to maintain source compatibility
18010 with the old cc0-based compiler. */
18012 static tree
18013 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
18014 tree inputs ATTRIBUTE_UNUSED,
18015 tree clobbers)
18017 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
18018 clobbers);
18019 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
18020 clobbers);
18021 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
18022 clobbers);
18023 return clobbers;
18026 /* Return true if this goes in large data/bss. */
18028 static bool
18029 ix86_in_large_data_p (tree exp)
18031 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
18032 return false;
18034 /* Functions are never large data. */
18035 if (TREE_CODE (exp) == FUNCTION_DECL)
18036 return false;
18038 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
18040 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
18041 if (strcmp (section, ".ldata") == 0
18042 || strcmp (section, ".lbss") == 0)
18043 return true;
18044 return false;
18046 else
18048 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
18050 /* If this is an incomplete type with size 0, then we can't put it
18051 in data because it might be too big when completed. */
18052 if (!size || size > ix86_section_threshold)
18053 return true;
18056 return false;
18058 static void
18059 ix86_encode_section_info (tree decl, rtx rtl, int first)
18061 default_encode_section_info (decl, rtl, first);
18063 if (TREE_CODE (decl) == VAR_DECL
18064 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
18065 && ix86_in_large_data_p (decl))
18066 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
18069 /* Worker function for REVERSE_CONDITION. */
18071 enum rtx_code
18072 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
18074 return (mode != CCFPmode && mode != CCFPUmode
18075 ? reverse_condition (code)
18076 : reverse_condition_maybe_unordered (code));
18079 /* Output code to perform an x87 FP register move, from OPERANDS[1]
18080 to OPERANDS[0]. */
18082 const char *
18083 output_387_reg_move (rtx insn, rtx *operands)
18085 if (REG_P (operands[1])
18086 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
18088 if (REGNO (operands[0]) == FIRST_STACK_REG
18089 && TARGET_USE_FFREEP)
18090 return "ffreep\t%y0";
18091 return "fstp\t%y0";
18093 if (STACK_TOP_P (operands[0]))
18094 return "fld%z1\t%y1";
18095 return "fst\t%y0";
18098 /* Output code to perform a conditional jump to LABEL, if C2 flag in
18099 FP status register is set. */
18101 void
18102 ix86_emit_fp_unordered_jump (rtx label)
18104 rtx reg = gen_reg_rtx (HImode);
18105 rtx temp;
18107 emit_insn (gen_x86_fnstsw_1 (reg));
18109 if (TARGET_USE_SAHF)
18111 emit_insn (gen_x86_sahf_1 (reg));
18113 temp = gen_rtx_REG (CCmode, FLAGS_REG);
18114 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
18116 else
18118 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
18120 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18121 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
18124 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
18125 gen_rtx_LABEL_REF (VOIDmode, label),
18126 pc_rtx);
18127 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
18128 emit_jump_insn (temp);
18131 /* Output code to perform a log1p XFmode calculation. */
18133 void ix86_emit_i387_log1p (rtx op0, rtx op1)
18135 rtx label1 = gen_label_rtx ();
18136 rtx label2 = gen_label_rtx ();
18138 rtx tmp = gen_reg_rtx (XFmode);
18139 rtx tmp2 = gen_reg_rtx (XFmode);
18141 emit_insn (gen_absxf2 (tmp, op1));
18142 emit_insn (gen_cmpxf (tmp,
18143 CONST_DOUBLE_FROM_REAL_VALUE (
18144 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
18145 XFmode)));
18146 emit_jump_insn (gen_bge (label1));
18148 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18149 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
18150 emit_jump (label2);
18152 emit_label (label1);
18153 emit_move_insn (tmp, CONST1_RTX (XFmode));
18154 emit_insn (gen_addxf3 (tmp, op1, tmp));
18155 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18156 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
18158 emit_label (label2);
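/* Added note (not original text): both branches compute log1p through the
   x87 base-2 logarithm, using ln (1 + x) = ln (2) * log2 (1 + x) with
   ln (2) loaded by fldln2.  For |x| below 1 - sqrt (2) / 2 (about
   0.2928932, the constant tested above) fyl2xp1 is used because it is
   accurate for small arguments and only defined on that range; otherwise
   1 + x is formed explicitly and fyl2x is used.  */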
18161 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
18163 static void
18164 i386_solaris_elf_named_section (const char *name, unsigned int flags,
18165 tree decl)
18167 /* With Binutils 2.15, the "@unwind" marker must be specified on
18168 every occurrence of the ".eh_frame" section, not just the first
18169 one. */
18170 if (TARGET_64BIT
18171 && strcmp (name, ".eh_frame") == 0)
18173 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
18174 flags & SECTION_WRITE ? "aw" : "a");
18175 return;
18177 default_elf_asm_named_section (name, flags, decl);
18180 /* Return the mangling of TYPE if it is an extended fundamental type. */
18182 static const char *
18183 ix86_mangle_fundamental_type (tree type)
18185 switch (TYPE_MODE (type))
18187 case TFmode:
18188 /* __float128 is "g". */
18189 return "g";
18190 case XFmode:
18191 /* "long double" or __float80 is "e". */
18192 return "e";
18193 default:
18194 return NULL;
18198 /* For 32-bit code we can save PIC register setup by using
18199 __stack_chk_fail_local hidden function instead of calling
18200 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
18201 register, so it is better to call __stack_chk_fail directly. */
18203 static tree
18204 ix86_stack_protect_fail (void)
18206 return TARGET_64BIT
18207 ? default_external_stack_protect_fail ()
18208 : default_hidden_stack_protect_fail ();
18211 /* Select a format to encode pointers in exception handling data. CODE
18212 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
18213 true if the symbol may be affected by dynamic relocations.
18215 ??? All x86 object file formats are capable of representing this.
18216 After all, the relocation needed is the same as for the call insn.
18217 Whether or not a particular assembler allows us to enter such, I
18218 guess we'll have to see. */
18219 int
18220 asm_preferred_eh_data_format (int code, int global)
18222 if (flag_pic)
18224 int type = DW_EH_PE_sdata8;
18225 if (!TARGET_64BIT
18226 || ix86_cmodel == CM_SMALL_PIC
18227 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
18228 type = DW_EH_PE_sdata4;
18229 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
18231 if (ix86_cmodel == CM_SMALL
18232 || (ix86_cmodel == CM_MEDIUM && code))
18233 return DW_EH_PE_udata4;
18234 return DW_EH_PE_absptr;
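/* Illustrative note (an addition): with -fpic on a 32-bit target the
   function above returns DW_EH_PE_pcrel | DW_EH_PE_sdata4, adding
   DW_EH_PE_indirect for symbols that may be preempted at run time, while
   non-PIC small-model code gets DW_EH_PE_udata4 and everything else an
   absolute pointer.  */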
18237 #include "gt-i386.h"