1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
52 #ifndef CHECK_STACK_LIMIT
53 #define CHECK_STACK_LIMIT (-1)
54 #endif
56 /* Return index of given mode in mult and division cost tables. */
57 #define MODE_INDEX(mode) \
58 ((mode) == QImode ? 0 \
59 : (mode) == HImode ? 1 \
60 : (mode) == SImode ? 2 \
61 : (mode) == DImode ? 3 \
62 : 4)
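/* Illustration (not part of the original code, names assumed from how the
   cost tables below are laid out): the macro lets the 5-entry arrays such as
   "cost of starting a multiply" and "cost of a divide/mod" be indexed by
   operand mode, roughly as

     total = ix86_cost->divide[MODE_INDEX (mode)];

   with index 4 acting as the catch-all slot for the remaining modes.  */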
64 /* Processor costs (relative to an add) */
65 static const
66 struct processor_costs size_cost = { /* costs for tuning for size */
67 2, /* cost of an add instruction */
68 3, /* cost of a lea instruction */
69 2, /* variable shift costs */
70 3, /* constant shift costs */
71 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
72 0, /* cost of multiply per each bit set */
73 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
74 3, /* cost of movsx */
75 3, /* cost of movzx */
76 0, /* "large" insn */
77 2, /* MOVE_RATIO */
78 2, /* cost for loading QImode using movzbl */
79 {2, 2, 2}, /* cost of loading integer registers
80 in QImode, HImode and SImode.
81 Relative to reg-reg move (2). */
82 {2, 2, 2}, /* cost of storing integer registers */
83 2, /* cost of reg,reg fld/fst */
84 {2, 2, 2}, /* cost of loading fp registers
85 in SFmode, DFmode and XFmode */
86 {2, 2, 2}, /* cost of storing fp registers */
87 3, /* cost of moving MMX register */
88 {3, 3}, /* cost of loading MMX registers
89 in SImode and DImode */
90 {3, 3}, /* cost of storing MMX registers
91 in SImode and DImode */
92 3, /* cost of moving SSE register */
93 {3, 3, 3}, /* cost of loading SSE registers
94 in SImode, DImode and TImode */
95 {3, 3, 3}, /* cost of storing SSE registers
96 in SImode, DImode and TImode */
97 3, /* MMX or SSE register to integer */
98 0, /* size of prefetch block */
99 0, /* number of parallel prefetches */
100 1, /* Branch cost */
101 2, /* cost of FADD and FSUB insns. */
102 2, /* cost of FMUL instruction. */
103 2, /* cost of FDIV instruction. */
104 2, /* cost of FABS instruction. */
105 2, /* cost of FCHS instruction. */
106 2, /* cost of FSQRT instruction. */
109 /* Processor costs (relative to an add) */
110 static const
111 struct processor_costs i386_cost = { /* 386 specific costs */
112 1, /* cost of an add instruction */
113 1, /* cost of a lea instruction */
114 3, /* variable shift costs */
115 2, /* constant shift costs */
116 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
117 1, /* cost of multiply per each bit set */
118 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
119 3, /* cost of movsx */
120 2, /* cost of movzx */
121 15, /* "large" insn */
122 3, /* MOVE_RATIO */
123 4, /* cost for loading QImode using movzbl */
124 {2, 4, 2}, /* cost of loading integer registers
125 in QImode, HImode and SImode.
126 Relative to reg-reg move (2). */
127 {2, 4, 2}, /* cost of storing integer registers */
128 2, /* cost of reg,reg fld/fst */
129 {8, 8, 8}, /* cost of loading fp registers
130 in SFmode, DFmode and XFmode */
131 {8, 8, 8}, /* cost of storing fp registers */
132 2, /* cost of moving MMX register */
133 {4, 8}, /* cost of loading MMX registers
134 in SImode and DImode */
135 {4, 8}, /* cost of storing MMX registers
136 in SImode and DImode */
137 2, /* cost of moving SSE register */
138 {4, 8, 16}, /* cost of loading SSE registers
139 in SImode, DImode and TImode */
140 {4, 8, 16}, /* cost of storing SSE registers
141 in SImode, DImode and TImode */
142 3, /* MMX or SSE register to integer */
143 0, /* size of prefetch block */
144 0, /* number of parallel prefetches */
145 1, /* Branch cost */
146 23, /* cost of FADD and FSUB insns. */
147 27, /* cost of FMUL instruction. */
148 88, /* cost of FDIV instruction. */
149 22, /* cost of FABS instruction. */
150 24, /* cost of FCHS instruction. */
151 122, /* cost of FSQRT instruction. */
154 static const
155 struct processor_costs i486_cost = { /* 486 specific costs */
156 1, /* cost of an add instruction */
157 1, /* cost of a lea instruction */
158 3, /* variable shift costs */
159 2, /* constant shift costs */
160 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
161 1, /* cost of multiply per each bit set */
162 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
163 3, /* cost of movsx */
164 2, /* cost of movzx */
165 15, /* "large" insn */
166 3, /* MOVE_RATIO */
167 4, /* cost for loading QImode using movzbl */
168 {2, 4, 2}, /* cost of loading integer registers
169 in QImode, HImode and SImode.
170 Relative to reg-reg move (2). */
171 {2, 4, 2}, /* cost of storing integer registers */
172 2, /* cost of reg,reg fld/fst */
173 {8, 8, 8}, /* cost of loading fp registers
174 in SFmode, DFmode and XFmode */
175 {8, 8, 8}, /* cost of storing fp registers */
176 2, /* cost of moving MMX register */
177 {4, 8}, /* cost of loading MMX registers
178 in SImode and DImode */
179 {4, 8}, /* cost of storing MMX registers
180 in SImode and DImode */
181 2, /* cost of moving SSE register */
182 {4, 8, 16}, /* cost of loading SSE registers
183 in SImode, DImode and TImode */
184 {4, 8, 16}, /* cost of storing SSE registers
185 in SImode, DImode and TImode */
186 3, /* MMX or SSE register to integer */
187 0, /* size of prefetch block */
188 0, /* number of parallel prefetches */
189 1, /* Branch cost */
190 8, /* cost of FADD and FSUB insns. */
191 16, /* cost of FMUL instruction. */
192 73, /* cost of FDIV instruction. */
193 3, /* cost of FABS instruction. */
194 3, /* cost of FCHS instruction. */
195 83, /* cost of FSQRT instruction. */
198 static const
199 struct processor_costs pentium_cost = {
200 1, /* cost of an add instruction */
201 1, /* cost of a lea instruction */
202 4, /* variable shift costs */
203 1, /* constant shift costs */
204 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
205 0, /* cost of multiply per each bit set */
206 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
207 3, /* cost of movsx */
208 2, /* cost of movzx */
209 8, /* "large" insn */
210 6, /* MOVE_RATIO */
211 6, /* cost for loading QImode using movzbl */
212 {2, 4, 2}, /* cost of loading integer registers
213 in QImode, HImode and SImode.
214 Relative to reg-reg move (2). */
215 {2, 4, 2}, /* cost of storing integer registers */
216 2, /* cost of reg,reg fld/fst */
217 {2, 2, 6}, /* cost of loading fp registers
218 in SFmode, DFmode and XFmode */
219 {4, 4, 6}, /* cost of storing fp registers */
220 8, /* cost of moving MMX register */
221 {8, 8}, /* cost of loading MMX registers
222 in SImode and DImode */
223 {8, 8}, /* cost of storing MMX registers
224 in SImode and DImode */
225 2, /* cost of moving SSE register */
226 {4, 8, 16}, /* cost of loading SSE registers
227 in SImode, DImode and TImode */
228 {4, 8, 16}, /* cost of storing SSE registers
229 in SImode, DImode and TImode */
230 3, /* MMX or SSE register to integer */
231 0, /* size of prefetch block */
232 0, /* number of parallel prefetches */
233 2, /* Branch cost */
234 3, /* cost of FADD and FSUB insns. */
235 3, /* cost of FMUL instruction. */
236 39, /* cost of FDIV instruction. */
237 1, /* cost of FABS instruction. */
238 1, /* cost of FCHS instruction. */
239 70, /* cost of FSQRT instruction. */
242 static const
243 struct processor_costs pentiumpro_cost = {
244 1, /* cost of an add instruction */
245 1, /* cost of a lea instruction */
246 1, /* variable shift costs */
247 1, /* constant shift costs */
248 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
249 0, /* cost of multiply per each bit set */
250 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
251 1, /* cost of movsx */
252 1, /* cost of movzx */
253 8, /* "large" insn */
254 6, /* MOVE_RATIO */
255 2, /* cost for loading QImode using movzbl */
256 {4, 4, 4}, /* cost of loading integer registers
257 in QImode, HImode and SImode.
258 Relative to reg-reg move (2). */
259 {2, 2, 2}, /* cost of storing integer registers */
260 2, /* cost of reg,reg fld/fst */
261 {2, 2, 6}, /* cost of loading fp registers
262 in SFmode, DFmode and XFmode */
263 {4, 4, 6}, /* cost of storing fp registers */
264 2, /* cost of moving MMX register */
265 {2, 2}, /* cost of loading MMX registers
266 in SImode and DImode */
267 {2, 2}, /* cost of storing MMX registers
268 in SImode and DImode */
269 2, /* cost of moving SSE register */
270 {2, 2, 8}, /* cost of loading SSE registers
271 in SImode, DImode and TImode */
272 {2, 2, 8}, /* cost of storing SSE registers
273 in SImode, DImode and TImode */
274 3, /* MMX or SSE register to integer */
275 32, /* size of prefetch block */
276 6, /* number of parallel prefetches */
277 2, /* Branch cost */
278 3, /* cost of FADD and FSUB insns. */
279 5, /* cost of FMUL instruction. */
280 56, /* cost of FDIV instruction. */
281 2, /* cost of FABS instruction. */
282 2, /* cost of FCHS instruction. */
283 56, /* cost of FSQRT instruction. */
286 static const
287 struct processor_costs k6_cost = {
288 1, /* cost of an add instruction */
289 2, /* cost of a lea instruction */
290 1, /* variable shift costs */
291 1, /* constant shift costs */
292 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
293 0, /* cost of multiply per each bit set */
294 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
295 2, /* cost of movsx */
296 2, /* cost of movzx */
297 8, /* "large" insn */
298 4, /* MOVE_RATIO */
299 3, /* cost for loading QImode using movzbl */
300 {4, 5, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 3, 2}, /* cost of storing integer registers */
304 4, /* cost of reg,reg fld/fst */
305 {6, 6, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
307 {4, 4, 4}, /* cost of storing fp registers */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 6, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 1, /* number of parallel prefetches */
321 1, /* Branch cost */
322 2, /* cost of FADD and FSUB insns. */
323 2, /* cost of FMUL instruction. */
324 56, /* cost of FDIV instruction. */
325 2, /* cost of FABS instruction. */
326 2, /* cost of FCHS instruction. */
327 56, /* cost of FSQRT instruction. */
330 static const
331 struct processor_costs athlon_cost = {
332 1, /* cost of an add instruction */
333 2, /* cost of a lea instruction */
334 1, /* variable shift costs */
335 1, /* constant shift costs */
336 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
337 0, /* cost of multiply per each bit set */
338 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
339 1, /* cost of movsx */
340 1, /* cost of movzx */
341 8, /* "large" insn */
342 9, /* MOVE_RATIO */
343 4, /* cost for loading QImode using movzbl */
344 {3, 4, 3}, /* cost of loading integer registers
345 in QImode, HImode and SImode.
346 Relative to reg-reg move (2). */
347 {3, 4, 3}, /* cost of storing integer registers */
348 4, /* cost of reg,reg fld/fst */
349 {4, 4, 12}, /* cost of loading fp registers
350 in SFmode, DFmode and XFmode */
351 {6, 6, 8}, /* cost of storing fp registers */
352 2, /* cost of moving MMX register */
353 {4, 4}, /* cost of loading MMX registers
354 in SImode and DImode */
355 {4, 4}, /* cost of storing MMX registers
356 in SImode and DImode */
357 2, /* cost of moving SSE register */
358 {4, 4, 6}, /* cost of loading SSE registers
359 in SImode, DImode and TImode */
360 {4, 4, 5}, /* cost of storing SSE registers
361 in SImode, DImode and TImode */
362 5, /* MMX or SSE register to integer */
363 64, /* size of prefetch block */
364 6, /* number of parallel prefetches */
365 5, /* Branch cost */
366 4, /* cost of FADD and FSUB insns. */
367 4, /* cost of FMUL instruction. */
368 24, /* cost of FDIV instruction. */
369 2, /* cost of FABS instruction. */
370 2, /* cost of FCHS instruction. */
371 35, /* cost of FSQRT instruction. */
374 static const
375 struct processor_costs k8_cost = {
376 1, /* cost of an add instruction */
377 2, /* cost of a lea instruction */
378 1, /* variable shift costs */
379 1, /* constant shift costs */
380 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
381 0, /* cost of multiply per each bit set */
382 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
383 1, /* cost of movsx */
384 1, /* cost of movzx */
385 8, /* "large" insn */
386 9, /* MOVE_RATIO */
387 4, /* cost for loading QImode using movzbl */
388 {3, 4, 3}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {3, 4, 3}, /* cost of storing integer registers */
392 4, /* cost of reg,reg fld/fst */
393 {4, 4, 12}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {6, 6, 8}, /* cost of storing fp registers */
396 2, /* cost of moving MMX register */
397 {3, 3}, /* cost of loading MMX registers
398 in SImode and DImode */
399 {4, 4}, /* cost of storing MMX registers
400 in SImode and DImode */
401 2, /* cost of moving SSE register */
402 {4, 3, 6}, /* cost of loading SSE registers
403 in SImode, DImode and TImode */
404 {4, 4, 5}, /* cost of storing SSE registers
405 in SImode, DImode and TImode */
406 5, /* MMX or SSE register to integer */
407 64, /* size of prefetch block */
408 6, /* number of parallel prefetches */
409 5, /* Branch cost */
410 4, /* cost of FADD and FSUB insns. */
411 4, /* cost of FMUL instruction. */
412 19, /* cost of FDIV instruction. */
413 2, /* cost of FABS instruction. */
414 2, /* cost of FCHS instruction. */
415 35, /* cost of FSQRT instruction. */
418 static const
419 struct processor_costs pentium4_cost = {
420 1, /* cost of an add instruction */
421 3, /* cost of a lea instruction */
422 4, /* variable shift costs */
423 4, /* constant shift costs */
424 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
425 0, /* cost of multiply per each bit set */
426 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
427 1, /* cost of movsx */
428 1, /* cost of movzx */
429 16, /* "large" insn */
430 6, /* MOVE_RATIO */
431 2, /* cost for loading QImode using movzbl */
432 {4, 5, 4}, /* cost of loading integer registers
433 in QImode, HImode and SImode.
434 Relative to reg-reg move (2). */
435 {2, 3, 2}, /* cost of storing integer registers */
436 2, /* cost of reg,reg fld/fst */
437 {2, 2, 6}, /* cost of loading fp registers
438 in SFmode, DFmode and XFmode */
439 {4, 4, 6}, /* cost of storing fp registers */
440 2, /* cost of moving MMX register */
441 {2, 2}, /* cost of loading MMX registers
442 in SImode and DImode */
443 {2, 2}, /* cost of storing MMX registers
444 in SImode and DImode */
445 12, /* cost of moving SSE register */
446 {12, 12, 12}, /* cost of loading SSE registers
447 in SImode, DImode and TImode */
448 {2, 2, 8}, /* cost of storing SSE registers
449 in SImode, DImode and TImode */
450 10, /* MMX or SSE register to integer */
451 64, /* size of prefetch block */
452 6, /* number of parallel prefetches */
453 2, /* Branch cost */
454 5, /* cost of FADD and FSUB insns. */
455 7, /* cost of FMUL instruction. */
456 43, /* cost of FDIV instruction. */
457 2, /* cost of FABS instruction. */
458 2, /* cost of FCHS instruction. */
459 43, /* cost of FSQRT instruction. */
462 static const
463 struct processor_costs nocona_cost = {
464 1, /* cost of an add instruction */
465 1, /* cost of a lea instruction */
466 1, /* variable shift costs */
467 1, /* constant shift costs */
468 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
469 0, /* cost of multiply per each bit set */
470 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
471 1, /* cost of movsx */
472 1, /* cost of movzx */
473 16, /* "large" insn */
474 9, /* MOVE_RATIO */
475 4, /* cost for loading QImode using movzbl */
476 {4, 4, 4}, /* cost of loading integer registers
477 in QImode, HImode and SImode.
478 Relative to reg-reg move (2). */
479 {4, 4, 4}, /* cost of storing integer registers */
480 3, /* cost of reg,reg fld/fst */
481 {12, 12, 12}, /* cost of loading fp registers
482 in SFmode, DFmode and XFmode */
483 {4, 4, 4}, /* cost of storing fp registers */
484 6, /* cost of moving MMX register */
485 {12, 12}, /* cost of loading MMX registers
486 in SImode and DImode */
487 {12, 12}, /* cost of storing MMX registers
488 in SImode and DImode */
489 6, /* cost of moving SSE register */
490 {12, 12, 12}, /* cost of loading SSE registers
491 in SImode, DImode and TImode */
492 {12, 12, 12}, /* cost of storing SSE registers
493 in SImode, DImode and TImode */
494 8, /* MMX or SSE register to integer */
495 128, /* size of prefetch block */
496 8, /* number of parallel prefetches */
497 1, /* Branch cost */
498 6, /* cost of FADD and FSUB insns. */
499 8, /* cost of FMUL instruction. */
500 40, /* cost of FDIV instruction. */
501 3, /* cost of FABS instruction. */
502 3, /* cost of FCHS instruction. */
503 44, /* cost of FSQRT instruction. */
506 const struct processor_costs *ix86_cost = &pentium_cost;
508 /* Processor feature/optimization bitmasks. */
509 #define m_386 (1<<PROCESSOR_I386)
510 #define m_486 (1<<PROCESSOR_I486)
511 #define m_PENT (1<<PROCESSOR_PENTIUM)
512 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
513 #define m_K6 (1<<PROCESSOR_K6)
514 #define m_ATHLON (1<<PROCESSOR_ATHLON)
515 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
516 #define m_K8 (1<<PROCESSOR_K8)
517 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
518 #define m_NOCONA (1<<PROCESSOR_NOCONA)
520 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
521 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
522 const int x86_zero_extend_with_and = m_486 | m_PENT;
523 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
524 const int x86_double_with_add = ~m_386;
525 const int x86_use_bit_test = m_386;
526 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
527 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
528 const int x86_fisttp = m_NOCONA;
529 const int x86_3dnow_a = m_ATHLON_K8;
530 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
531 /* Branch hints were put in P4 based on simulation results. But
532 after P4 shipped, no performance benefit was observed with
533 branch hints; they also increase code size. As a result,
534 icc never generates branch hints. */
535 const int x86_branch_hints = 0;
536 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
537 const int x86_partial_reg_stall = m_PPRO;
538 const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
539 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
540 const int x86_use_mov0 = m_K6;
541 const int x86_use_cltd = ~(m_PENT | m_K6);
542 const int x86_read_modify_write = ~m_PENT;
543 const int x86_read_modify = ~(m_PENT | m_PPRO);
544 const int x86_split_long_moves = m_PPRO;
545 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
546 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
547 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
548 const int x86_qimode_math = ~(0);
549 const int x86_promote_qi_regs = 0;
550 const int x86_himode_math = ~(m_PPRO);
551 const int x86_promote_hi_regs = m_PPRO;
552 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
553 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
554 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
555 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
556 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
557 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
558 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
559 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
560 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
561 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
562 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
563 const int x86_shift1 = ~m_486;
564 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
565 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
566 /* Set for machines where the type and dependencies are resolved on SSE
567 register parts instead of whole registers, so we may maintain just the
568 lower part of scalar values in the proper format, leaving the upper part
569 undefined. */
570 const int x86_sse_split_regs = m_ATHLON_K8;
571 const int x86_sse_typeless_stores = m_ATHLON_K8;
572 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
573 const int x86_use_ffreep = m_ATHLON_K8;
574 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
576 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
577 integer data in xmm registers, which results in pretty abysmal code. */
578 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
580 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
581 /* Some CPU cores are not able to predict more than 4 branch instructions in
582 a 16-byte window. */
583 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
584 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
585 const int x86_use_bt = m_ATHLON_K8;
586 /* Compare and exchange was added for 80486. */
587 const int x86_cmpxchg = ~m_386;
588 /* Exchange and add was added for 80486. */
589 const int x86_xadd = ~m_386;
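/* Illustrative sketch (an assumption, inferred from the use of TUNEMASK later
   in this file, not verified against i386.h): the masks above are tested
   against the active tuning target roughly as

     #define TUNEMASK (1 << ix86_tune)
     #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)

   so each "const int x86_*" variable is simply a per-processor bit vector.  */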
591 /* If the average insn count for a single function invocation is
592 lower than this constant, emit fast (but longer) prologue and
593 epilogue code. */
594 #define FAST_PROLOGUE_INSN_COUNT 20
596 /* Names for 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
597 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
598 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
599 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
601 /* Array of the smallest class containing reg number REGNO, indexed by
602 REGNO. Used by REGNO_REG_CLASS in i386.h. */
604 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
606 /* ax, dx, cx, bx */
607 AREG, DREG, CREG, BREG,
608 /* si, di, bp, sp */
609 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
610 /* FP registers */
611 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
612 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
613 /* arg pointer */
614 NON_Q_REGS,
615 /* flags, fpsr, dirflag, frame */
616 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
617 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
618 SSE_REGS, SSE_REGS,
619 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
620 MMX_REGS, MMX_REGS,
621 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
622 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
623 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
624 SSE_REGS, SSE_REGS,
627 /* The "default" register map used in 32bit mode. */
629 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
631 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
632 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
633 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
634 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
635 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
636 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
637 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
640 static int const x86_64_int_parameter_registers[6] =
642 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
643 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
646 static int const x86_64_int_return_registers[4] =
648 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
651 /* The "default" register map used in 64bit mode. */
652 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
654 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
655 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
656 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
657 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
658 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
659 8, 9, 10, 11, 12, 13, 14, 15, /* extended integer registers */
660 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
663 /* Define the register numbers to be used in Dwarf debugging information.
664 The SVR4 reference port C compiler uses the following register numbers
665 in its Dwarf output code:
666 0 for %eax (gcc regno = 0)
667 1 for %ecx (gcc regno = 2)
668 2 for %edx (gcc regno = 1)
669 3 for %ebx (gcc regno = 3)
670 4 for %esp (gcc regno = 7)
671 5 for %ebp (gcc regno = 6)
672 6 for %esi (gcc regno = 4)
673 7 for %edi (gcc regno = 5)
674 The following three DWARF register numbers are never generated by
675 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
676 believes these numbers have these meanings.
677 8 for %eip (no gcc equivalent)
678 9 for %eflags (gcc regno = 17)
679 10 for %trapno (no gcc equivalent)
680 It is not at all clear how we should number the FP stack registers
681 for the x86 architecture. If the version of SDB on x86/svr4 were
682 a bit less brain dead with respect to floating-point then we would
683 have a precedent to follow with respect to DWARF register numbers
684 for x86 FP registers, but the SDB on x86/svr4 is so completely
685 broken with respect to FP registers that it is hardly worth thinking
686 of it as something to strive for compatibility with.
687 The version of x86/svr4 SDB I have at the moment does (partially)
688 seem to believe that DWARF register number 11 is associated with
689 the x86 register %st(0), but that's about all. Higher DWARF
690 register numbers don't seem to be associated with anything in
691 particular, and even for DWARF regno 11, SDB only seems to under-
692 stand that it should say that a variable lives in %st(0) (when
693 asked via an `=' command) if we said it was in DWARF regno 11,
694 but SDB still prints garbage when asked for the value of the
695 variable in question (via a `/' command).
696 (Also note that the labels SDB prints for various FP stack regs
697 when doing an `x' command are all wrong.)
698 Note that these problems generally don't affect the native SVR4
699 C compiler because it doesn't allow the use of -O with -g and
700 because when it is *not* optimizing, it allocates a memory
701 location for each floating-point variable, and the memory
702 location is what gets described in the DWARF AT_location
703 attribute for the variable in question.
704 Regardless of the severe mental illness of the x86/svr4 SDB, we
705 do something sensible here and we use the following DWARF
706 register numbers. Note that these are all stack-top-relative
707 numbers.
708 11 for %st(0) (gcc regno = 8)
709 12 for %st(1) (gcc regno = 9)
710 13 for %st(2) (gcc regno = 10)
711 14 for %st(3) (gcc regno = 11)
712 15 for %st(4) (gcc regno = 12)
713 16 for %st(5) (gcc regno = 13)
714 17 for %st(6) (gcc regno = 14)
715 18 for %st(7) (gcc regno = 15)
717 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
719 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
720 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
721 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
722 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
723 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
724 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
725 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
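/* Example of reading this map: gcc register number 4 (%esi) is emitted as
   DWARF register 6, i.e. svr4_dbx_register_map[4] == 6, matching the
   numbering laid out in the comment above.  */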
728 /* Test and compare insns in i386.md store the information needed to
729 generate branch and scc insns here. */
731 rtx ix86_compare_op0 = NULL_RTX;
732 rtx ix86_compare_op1 = NULL_RTX;
733 rtx ix86_compare_emitted = NULL_RTX;
735 /* Size of the register save area. */
736 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
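/* Worked example (assuming the usual 64-bit values REGPARM_MAX == 6,
   SSE_REGPARM_MAX == 8 and UNITS_PER_WORD == 8): the register save area is
   6 * 8 + 8 * 16 = 176 bytes, i.e. six general-purpose argument registers
   followed by eight 16-byte SSE slots, matching the psABI va_list layout.  */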
738 /* Define the structure for the machine field in struct function. */
740 struct stack_local_entry GTY(())
742 unsigned short mode;
743 unsigned short n;
744 rtx rtl;
745 struct stack_local_entry *next;
748 /* Structure describing stack frame layout.
749 Stack grows downward:
751 [arguments]
752 <- ARG_POINTER
753 saved pc
755 saved frame pointer if frame_pointer_needed
756 <- HARD_FRAME_POINTER
757 [saved regs]
759 [padding1] \
761 [va_arg registers] |
762 > to_allocate <- FRAME_POINTER
763 [frame] |
765 [padding2] /
767 struct ix86_frame
769 int nregs;
770 int padding1;
771 int va_arg_size;
772 HOST_WIDE_INT frame;
773 int padding2;
774 int outgoing_arguments_size;
775 int red_zone_size;
777 HOST_WIDE_INT to_allocate;
778 /* The offsets relative to ARG_POINTER. */
779 HOST_WIDE_INT frame_pointer_offset;
780 HOST_WIDE_INT hard_frame_pointer_offset;
781 HOST_WIDE_INT stack_pointer_offset;
783 /* When save_regs_using_mov is set, emit prologue using
784 move instead of push instructions. */
785 bool save_regs_using_mov;
788 /* Code model option. */
789 enum cmodel ix86_cmodel;
790 /* Asm dialect. */
791 enum asm_dialect ix86_asm_dialect = ASM_ATT;
792 /* TLS dialect. */
793 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
795 /* Which unit we are generating floating point math for. */
796 enum fpmath_unit ix86_fpmath;
798 /* Which cpu are we scheduling for. */
799 enum processor_type ix86_tune;
800 /* Which instruction set architecture to use. */
801 enum processor_type ix86_arch;
803 /* True if the SSE prefetch instruction is not a NOP. */
804 int x86_prefetch_sse;
806 /* ix86_regparm_string as a number */
807 static int ix86_regparm;
809 /* Preferred alignment for stack boundary in bits. */
810 unsigned int ix86_preferred_stack_boundary;
812 /* Values 1-5: see jump.c */
813 int ix86_branch_cost;
815 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
816 char internal_label_prefix[16];
817 int internal_label_prefix_len;
819 static bool ix86_handle_option (size_t, const char *, int);
820 static void output_pic_addr_const (FILE *, rtx, int);
821 static void put_condition_code (enum rtx_code, enum machine_mode,
822 int, int, FILE *);
823 static const char *get_some_local_dynamic_name (void);
824 static int get_some_local_dynamic_name_1 (rtx *, void *);
825 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
826 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
827 rtx *);
828 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
829 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
830 enum machine_mode);
831 static rtx get_thread_pointer (int);
832 static rtx legitimize_tls_address (rtx, enum tls_model, int);
833 static void get_pc_thunk_name (char [32], unsigned int);
834 static rtx gen_push (rtx);
835 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
836 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
837 static struct machine_function * ix86_init_machine_status (void);
838 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
839 static int ix86_nsaved_regs (void);
840 static void ix86_emit_save_regs (void);
841 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
842 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
843 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
844 static HOST_WIDE_INT ix86_GOT_alias_set (void);
845 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
846 static rtx ix86_expand_aligntest (rtx, int);
847 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
848 static int ix86_issue_rate (void);
849 static int ix86_adjust_cost (rtx, rtx, rtx, int);
850 static int ia32_multipass_dfa_lookahead (void);
851 static void ix86_init_mmx_sse_builtins (void);
852 static rtx x86_this_parameter (tree);
853 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
854 HOST_WIDE_INT, tree);
855 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
856 static void x86_file_start (void);
857 static void ix86_reorg (void);
858 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
859 static tree ix86_build_builtin_va_list (void);
860 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
861 tree, int *, int);
862 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
863 static bool ix86_vector_mode_supported_p (enum machine_mode);
865 static int ix86_address_cost (rtx);
866 static bool ix86_cannot_force_const_mem (rtx);
867 static rtx ix86_delegitimize_address (rtx);
869 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
871 struct builtin_description;
872 static rtx ix86_expand_sse_comi (const struct builtin_description *,
873 tree, rtx);
874 static rtx ix86_expand_sse_compare (const struct builtin_description *,
875 tree, rtx);
876 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
877 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
878 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
879 static rtx ix86_expand_store_builtin (enum insn_code, tree);
880 static rtx safe_vector_operand (rtx, enum machine_mode);
881 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
882 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
883 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
884 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
885 static int ix86_fp_comparison_cost (enum rtx_code code);
886 static unsigned int ix86_select_alt_pic_regnum (void);
887 static int ix86_save_reg (unsigned int, int);
888 static void ix86_compute_frame_layout (struct ix86_frame *);
889 static int ix86_comp_type_attributes (tree, tree);
890 static int ix86_function_regparm (tree, tree);
891 const struct attribute_spec ix86_attribute_table[];
892 static bool ix86_function_ok_for_sibcall (tree, tree);
893 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
894 static int ix86_value_regno (enum machine_mode, tree);
895 static bool contains_128bit_aligned_vector_p (tree);
896 static rtx ix86_struct_value_rtx (tree, int);
897 static bool ix86_ms_bitfield_layout_p (tree);
898 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
899 static int extended_reg_mentioned_1 (rtx *, void *);
900 static bool ix86_rtx_costs (rtx, int, int, int *);
901 static int min_insn_size (rtx);
902 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
903 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
904 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
905 tree, bool);
906 static void ix86_init_builtins (void);
907 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
908 static const char *ix86_mangle_fundamental_type (tree);
910 /* This function is only used on Solaris. */
911 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
912 ATTRIBUTE_UNUSED;
914 /* Register class used for passing a given 64-bit part of the argument.
915 These represent classes as documented by the psABI, with the exception
916 of the SSESF and SSEDF classes, which are basically the SSE class; GCC
917 just uses SFmode or DFmode moves instead of DImode to avoid reformatting
919 penalties. Similarly, we play games with INTEGERSI_CLASS to use cheaper
920 SImode moves whenever possible (the upper half does contain padding). */
922 enum x86_64_reg_class
924 X86_64_NO_CLASS,
925 X86_64_INTEGER_CLASS,
926 X86_64_INTEGERSI_CLASS,
927 X86_64_SSE_CLASS,
928 X86_64_SSESF_CLASS,
929 X86_64_SSEDF_CLASS,
930 X86_64_SSEUP_CLASS,
931 X86_64_X87_CLASS,
932 X86_64_X87UP_CLASS,
933 X86_64_COMPLEX_X87_CLASS,
934 X86_64_MEMORY_CLASS
936 static const char * const x86_64_reg_class_name[] = {
937 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
938 "sseup", "x87", "x87up", "cplx87", "no"
941 #define MAX_CLASSES 4
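/* Classification example (illustrative, not taken from the original file):
   under the scheme above an argument such as

     struct { double d; long l; };

   occupies two eightbytes; the first would be classified X86_64_SSEDF_CLASS
   (an SSE class narrowed to DFmode moves) and the second
   X86_64_INTEGER_CLASS, so the struct is passed in one SSE register and one
   general-purpose register.  */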
943 /* Table of constants used by fldpi, fldln2, etc.... */
944 static REAL_VALUE_TYPE ext_80387_constants_table [5];
945 static bool ext_80387_constants_init = 0;
946 static void init_ext_80387_constants (void);
948 /* Initialize the GCC target structure. */
949 #undef TARGET_ATTRIBUTE_TABLE
950 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
951 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
952 # undef TARGET_MERGE_DECL_ATTRIBUTES
953 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
954 #endif
956 #undef TARGET_COMP_TYPE_ATTRIBUTES
957 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
959 #undef TARGET_INIT_BUILTINS
960 #define TARGET_INIT_BUILTINS ix86_init_builtins
961 #undef TARGET_EXPAND_BUILTIN
962 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
964 #undef TARGET_ASM_FUNCTION_EPILOGUE
965 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
967 #undef TARGET_ASM_OPEN_PAREN
968 #define TARGET_ASM_OPEN_PAREN ""
969 #undef TARGET_ASM_CLOSE_PAREN
970 #define TARGET_ASM_CLOSE_PAREN ""
972 #undef TARGET_ASM_ALIGNED_HI_OP
973 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
974 #undef TARGET_ASM_ALIGNED_SI_OP
975 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
976 #ifdef ASM_QUAD
977 #undef TARGET_ASM_ALIGNED_DI_OP
978 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
979 #endif
981 #undef TARGET_ASM_UNALIGNED_HI_OP
982 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
983 #undef TARGET_ASM_UNALIGNED_SI_OP
984 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
985 #undef TARGET_ASM_UNALIGNED_DI_OP
986 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
988 #undef TARGET_SCHED_ADJUST_COST
989 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
990 #undef TARGET_SCHED_ISSUE_RATE
991 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
992 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
993 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
994 ia32_multipass_dfa_lookahead
996 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
997 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
999 #ifdef HAVE_AS_TLS
1000 #undef TARGET_HAVE_TLS
1001 #define TARGET_HAVE_TLS true
1002 #endif
1003 #undef TARGET_CANNOT_FORCE_CONST_MEM
1004 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1006 #undef TARGET_DELEGITIMIZE_ADDRESS
1007 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1009 #undef TARGET_MS_BITFIELD_LAYOUT_P
1010 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1012 #if TARGET_MACHO
1013 #undef TARGET_BINDS_LOCAL_P
1014 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1015 #endif
1017 #undef TARGET_ASM_OUTPUT_MI_THUNK
1018 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1019 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1020 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1022 #undef TARGET_ASM_FILE_START
1023 #define TARGET_ASM_FILE_START x86_file_start
1025 #undef TARGET_DEFAULT_TARGET_FLAGS
1026 #define TARGET_DEFAULT_TARGET_FLAGS \
1027 (TARGET_DEFAULT \
1028 | TARGET_64BIT_DEFAULT \
1029 | TARGET_SUBTARGET_DEFAULT \
1030 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1032 #undef TARGET_HANDLE_OPTION
1033 #define TARGET_HANDLE_OPTION ix86_handle_option
1035 #undef TARGET_RTX_COSTS
1036 #define TARGET_RTX_COSTS ix86_rtx_costs
1037 #undef TARGET_ADDRESS_COST
1038 #define TARGET_ADDRESS_COST ix86_address_cost
1040 #undef TARGET_FIXED_CONDITION_CODE_REGS
1041 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1042 #undef TARGET_CC_MODES_COMPATIBLE
1043 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1045 #undef TARGET_MACHINE_DEPENDENT_REORG
1046 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1048 #undef TARGET_BUILD_BUILTIN_VA_LIST
1049 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1051 #undef TARGET_MD_ASM_CLOBBERS
1052 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1054 #undef TARGET_PROMOTE_PROTOTYPES
1055 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1056 #undef TARGET_STRUCT_VALUE_RTX
1057 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1058 #undef TARGET_SETUP_INCOMING_VARARGS
1059 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1060 #undef TARGET_MUST_PASS_IN_STACK
1061 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1062 #undef TARGET_PASS_BY_REFERENCE
1063 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1065 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1066 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1068 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1069 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1071 #ifdef HAVE_AS_TLS
1072 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1073 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1074 #endif
1076 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1077 #undef TARGET_INSERT_ATTRIBUTES
1078 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1079 #endif
1081 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1082 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1084 struct gcc_target targetm = TARGET_INITIALIZER;
1087 /* The svr4 ABI for the i386 says that records and unions are returned
1088 in memory. */
1089 #ifndef DEFAULT_PCC_STRUCT_RETURN
1090 #define DEFAULT_PCC_STRUCT_RETURN 1
1091 #endif
1093 /* Implement TARGET_HANDLE_OPTION. */
1095 static bool
1096 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1098 switch (code)
1100 case OPT_m3dnow:
1101 if (!value)
1103 target_flags &= ~MASK_3DNOW_A;
1104 target_flags_explicit |= MASK_3DNOW_A;
1106 return true;
1108 case OPT_mmmx:
1109 if (!value)
1111 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1112 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1114 return true;
1116 case OPT_msse:
1117 if (!value)
1119 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1120 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1122 return true;
1124 case OPT_msse2:
1125 if (!value)
1127 target_flags &= ~MASK_SSE3;
1128 target_flags_explicit |= MASK_SSE3;
1130 return true;
1132 default:
1133 return true;
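/* Net effect (illustration): a command line such as "-msse2 -mno-sse" leaves
   SSE, SSE2 and SSE3 all disabled, because handling OPT_msse with value == 0
   clears the higher SSE masks here and records them in target_flags_explicit
   so that later defaulting in override_options does not re-enable them.  */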
1137 /* Sometimes certain combinations of command options do not make
1138 sense on a particular target machine. You can define a macro
1139 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1140 defined, is executed once just after all the command options have
1141 been parsed.
1143 Don't use this macro to turn on various extra optimizations for
1144 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1146 void
1147 override_options (void)
1149 int i;
1150 int ix86_tune_defaulted = 0;
1152 /* Comes from final.c -- no real reason to change it. */
1153 #define MAX_CODE_ALIGN 16
1155 static struct ptt
1157 const struct processor_costs *cost; /* Processor costs */
1158 const int target_enable; /* Target flags to enable. */
1159 const int target_disable; /* Target flags to disable. */
1160 const int align_loop; /* Default alignments. */
1161 const int align_loop_max_skip;
1162 const int align_jump;
1163 const int align_jump_max_skip;
1164 const int align_func;
1166 const processor_target_table[PROCESSOR_max] =
1168 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1169 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1170 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1171 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1172 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1173 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1174 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1175 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1176 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
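/* Reading one row (illustration): the k6 entry {&k6_cost, 0, 0, 32, 7, 32, 7, 32}
   selects the k6 cost table, enables/disables no extra target flags, and asks
   for loops and jumps to be aligned to 32 bytes (skipping at most 7 bytes of
   padding) and functions to 32 bytes, per the field comments above.  */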
1179 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1180 static struct pta
1182 const char *const name; /* processor name or nickname. */
1183 const enum processor_type processor;
1184 const enum pta_flags
1186 PTA_SSE = 1,
1187 PTA_SSE2 = 2,
1188 PTA_SSE3 = 4,
1189 PTA_MMX = 8,
1190 PTA_PREFETCH_SSE = 16,
1191 PTA_3DNOW = 32,
1192 PTA_3DNOW_A = 64,
1193 PTA_64BIT = 128
1194 } flags;
1196 const processor_alias_table[] =
1198 {"i386", PROCESSOR_I386, 0},
1199 {"i486", PROCESSOR_I486, 0},
1200 {"i586", PROCESSOR_PENTIUM, 0},
1201 {"pentium", PROCESSOR_PENTIUM, 0},
1202 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1203 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1204 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1205 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1206 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1207 {"i686", PROCESSOR_PENTIUMPRO, 0},
1208 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1209 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1210 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1211 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1212 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1213 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1214 | PTA_MMX | PTA_PREFETCH_SSE},
1215 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1216 | PTA_MMX | PTA_PREFETCH_SSE},
1217 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1218 | PTA_MMX | PTA_PREFETCH_SSE},
1219 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1220 | PTA_MMX | PTA_PREFETCH_SSE},
1221 {"k6", PROCESSOR_K6, PTA_MMX},
1222 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1223 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1224 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1225 | PTA_3DNOW_A},
1226 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1227 | PTA_3DNOW | PTA_3DNOW_A},
1228 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1229 | PTA_3DNOW_A | PTA_SSE},
1230 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1231 | PTA_3DNOW_A | PTA_SSE},
1232 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1233 | PTA_3DNOW_A | PTA_SSE},
1234 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1235 | PTA_SSE | PTA_SSE2 },
1236 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1237 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1238 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1239 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1240 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1241 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1242 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1243 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1246 int const pta_size = ARRAY_SIZE (processor_alias_table);
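/* Reading one entry (illustration): with -march=pentium3 the loop below picks
   PROCESSOR_PENTIUMPRO and, because the entry carries PTA_MMX | PTA_SSE |
   PTA_PREFETCH_SSE, turns on MASK_MMX and MASK_SSE (unless the user set them
   explicitly) and marks the SSE prefetch instruction as usable.  */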
1248 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1249 SUBTARGET_OVERRIDE_OPTIONS;
1250 #endif
1252 /* Set the default values for switches whose default depends on TARGET_64BIT
1253 in case they weren't overwritten by command line options. */
1254 if (TARGET_64BIT)
1256 if (flag_omit_frame_pointer == 2)
1257 flag_omit_frame_pointer = 1;
1258 if (flag_asynchronous_unwind_tables == 2)
1259 flag_asynchronous_unwind_tables = 1;
1260 if (flag_pcc_struct_return == 2)
1261 flag_pcc_struct_return = 0;
1263 else
1265 if (flag_omit_frame_pointer == 2)
1266 flag_omit_frame_pointer = 0;
1267 if (flag_asynchronous_unwind_tables == 2)
1268 flag_asynchronous_unwind_tables = 0;
1269 if (flag_pcc_struct_return == 2)
1270 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1273 if (!ix86_tune_string && ix86_arch_string)
1274 ix86_tune_string = ix86_arch_string;
1275 if (!ix86_tune_string)
1277 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1278 ix86_tune_defaulted = 1;
1280 if (!ix86_arch_string)
1281 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1283 if (ix86_cmodel_string != 0)
1285 if (!strcmp (ix86_cmodel_string, "small"))
1286 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1287 else if (flag_pic)
1288 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1289 else if (!strcmp (ix86_cmodel_string, "32"))
1290 ix86_cmodel = CM_32;
1291 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1292 ix86_cmodel = CM_KERNEL;
1293 else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
1294 ix86_cmodel = CM_MEDIUM;
1295 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1296 ix86_cmodel = CM_LARGE;
1297 else
1298 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1300 else
1302 ix86_cmodel = CM_32;
1303 if (TARGET_64BIT)
1304 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1306 if (ix86_asm_string != 0)
1308 if (!strcmp (ix86_asm_string, "intel"))
1309 ix86_asm_dialect = ASM_INTEL;
1310 else if (!strcmp (ix86_asm_string, "att"))
1311 ix86_asm_dialect = ASM_ATT;
1312 else
1313 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1315 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1316 error ("code model %qs not supported in the %s bit mode",
1317 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1318 if (ix86_cmodel == CM_LARGE)
1319 sorry ("code model %<large%> not supported yet");
1320 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1321 sorry ("%i-bit mode not compiled in",
1322 (target_flags & MASK_64BIT) ? 64 : 32);
1324 for (i = 0; i < pta_size; i++)
1325 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1327 ix86_arch = processor_alias_table[i].processor;
1328 /* Default cpu tuning to the architecture. */
1329 ix86_tune = ix86_arch;
1330 if (processor_alias_table[i].flags & PTA_MMX
1331 && !(target_flags_explicit & MASK_MMX))
1332 target_flags |= MASK_MMX;
1333 if (processor_alias_table[i].flags & PTA_3DNOW
1334 && !(target_flags_explicit & MASK_3DNOW))
1335 target_flags |= MASK_3DNOW;
1336 if (processor_alias_table[i].flags & PTA_3DNOW_A
1337 && !(target_flags_explicit & MASK_3DNOW_A))
1338 target_flags |= MASK_3DNOW_A;
1339 if (processor_alias_table[i].flags & PTA_SSE
1340 && !(target_flags_explicit & MASK_SSE))
1341 target_flags |= MASK_SSE;
1342 if (processor_alias_table[i].flags & PTA_SSE2
1343 && !(target_flags_explicit & MASK_SSE2))
1344 target_flags |= MASK_SSE2;
1345 if (processor_alias_table[i].flags & PTA_SSE3
1346 && !(target_flags_explicit & MASK_SSE3))
1347 target_flags |= MASK_SSE3;
1348 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1349 x86_prefetch_sse = true;
1350 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1351 error ("CPU you selected does not support x86-64 "
1352 "instruction set");
1353 break;
1356 if (i == pta_size)
1357 error ("bad value (%s) for -march= switch", ix86_arch_string);
1359 for (i = 0; i < pta_size; i++)
1360 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1362 ix86_tune = processor_alias_table[i].processor;
1363 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1365 if (ix86_tune_defaulted)
1367 ix86_tune_string = "x86-64";
1368 for (i = 0; i < pta_size; i++)
1369 if (! strcmp (ix86_tune_string,
1370 processor_alias_table[i].name))
1371 break;
1372 ix86_tune = processor_alias_table[i].processor;
1374 else
1375 error ("CPU you selected does not support x86-64 "
1376 "instruction set");
1378 /* Intel CPUs have always interpreted SSE prefetch instructions as
1379 NOPs; so, we can enable SSE prefetch instructions even when
1380 -mtune (rather than -march) points us to a processor that has them.
1381 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1382 higher processors. */
1383 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1384 x86_prefetch_sse = true;
1385 break;
1387 if (i == pta_size)
1388 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1390 if (optimize_size)
1391 ix86_cost = &size_cost;
1392 else
1393 ix86_cost = processor_target_table[ix86_tune].cost;
1394 target_flags |= processor_target_table[ix86_tune].target_enable;
1395 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1397 /* Arrange to set up i386_stack_locals for all functions. */
1398 init_machine_status = ix86_init_machine_status;
1400 /* Validate -mregparm= value. */
1401 if (ix86_regparm_string)
1403 i = atoi (ix86_regparm_string);
1404 if (i < 0 || i > REGPARM_MAX)
1405 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1406 else
1407 ix86_regparm = i;
1409 else
1410 if (TARGET_64BIT)
1411 ix86_regparm = REGPARM_MAX;
1413 /* If the user has provided any of the -malign-* options,
1414 warn and use that value only if -falign-* is not set.
1415 Remove this code in GCC 3.2 or later. */
1416 if (ix86_align_loops_string)
1418 warning (0, "-malign-loops is obsolete, use -falign-loops");
1419 if (align_loops == 0)
1421 i = atoi (ix86_align_loops_string);
1422 if (i < 0 || i > MAX_CODE_ALIGN)
1423 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1424 else
1425 align_loops = 1 << i;
1429 if (ix86_align_jumps_string)
1431 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
1432 if (align_jumps == 0)
1434 i = atoi (ix86_align_jumps_string);
1435 if (i < 0 || i > MAX_CODE_ALIGN)
1436 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1437 else
1438 align_jumps = 1 << i;
1442 if (ix86_align_funcs_string)
1444 warning (0, "-malign-functions is obsolete, use -falign-functions");
1445 if (align_functions == 0)
1447 i = atoi (ix86_align_funcs_string);
1448 if (i < 0 || i > MAX_CODE_ALIGN)
1449 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1450 else
1451 align_functions = 1 << i;
1455 /* Default align_* from the processor table. */
1456 if (align_loops == 0)
1458 align_loops = processor_target_table[ix86_tune].align_loop;
1459 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1461 if (align_jumps == 0)
1463 align_jumps = processor_target_table[ix86_tune].align_jump;
1464 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1466 if (align_functions == 0)
1468 align_functions = processor_target_table[ix86_tune].align_func;
1471 /* Validate -mpreferred-stack-boundary= value, or provide default.
1472 The default of 128 bits is for Pentium III's SSE __m128, but we
1473 don't want additional code to keep the stack aligned when
1474 optimizing for code size. */
1475 ix86_preferred_stack_boundary = (optimize_size
1476 ? TARGET_64BIT ? 128 : 32
1477 : 128);
1478 if (ix86_preferred_stack_boundary_string)
1480 i = atoi (ix86_preferred_stack_boundary_string);
1481 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1482 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1483 TARGET_64BIT ? 4 : 2);
1484 else
1485 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
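/* Worked example: -mpreferred-stack-boundary=4 gives (1 << 4) * BITS_PER_UNIT
   = 128 bits (16 bytes), the alignment wanted for SSE __m128; the 32-bit
   minimum of 2 gives a 32-bit (4-byte) boundary.  */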
1488 /* Validate -mbranch-cost= value, or provide default. */
1489 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1490 if (ix86_branch_cost_string)
1492 i = atoi (ix86_branch_cost_string);
1493 if (i < 0 || i > 5)
1494 error ("-mbranch-cost=%d is not between 0 and 5", i);
1495 else
1496 ix86_branch_cost = i;
1499 if (ix86_tls_dialect_string)
1501 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1502 ix86_tls_dialect = TLS_DIALECT_GNU;
1503 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1504 ix86_tls_dialect = TLS_DIALECT_SUN;
1505 else
1506 error ("bad value (%s) for -mtls-dialect= switch",
1507 ix86_tls_dialect_string);
1510 /* Keep nonleaf frame pointers. */
1511 if (flag_omit_frame_pointer)
1512 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1513 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1514 flag_omit_frame_pointer = 1;
1516 /* If we're doing fast math, we don't care about comparison order
1517 wrt NaNs. This lets us use a shorter comparison sequence. */
1518 if (flag_unsafe_math_optimizations)
1519 target_flags &= ~MASK_IEEE_FP;
1521 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1522 since the insns won't need emulation. */
1523 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1524 target_flags &= ~MASK_NO_FANCY_MATH_387;
1526 /* Likewise, if the target doesn't have a 387, or we've specified
1527 software floating point, don't use 387 inline intrinsics. */
1528 if (!TARGET_80387)
1529 target_flags |= MASK_NO_FANCY_MATH_387;
1531 /* Turn on SSE2 builtins for -msse3. */
1532 if (TARGET_SSE3)
1533 target_flags |= MASK_SSE2;
1535 /* Turn on SSE builtins for -msse2. */
1536 if (TARGET_SSE2)
1537 target_flags |= MASK_SSE;
1539 /* Turn on MMX builtins for -msse. */
1540 if (TARGET_SSE)
1542 target_flags |= MASK_MMX & ~target_flags_explicit;
1543 x86_prefetch_sse = true;
1546 /* Turn on MMX builtins for 3Dnow. */
1547 if (TARGET_3DNOW)
1548 target_flags |= MASK_MMX;
1550 if (TARGET_64BIT)
1552 if (TARGET_ALIGN_DOUBLE)
1553 error ("-malign-double makes no sense in the 64bit mode");
1554 if (TARGET_RTD)
1555 error ("-mrtd calling convention not supported in the 64bit mode");
1557 /* Enable by default the SSE and MMX builtins. Do allow the user to
1558 explicitly disable any of these. In particular, disabling SSE and
1559 MMX for kernel code is extremely useful. */
1560 target_flags
1561 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1562 & ~target_flags_explicit);
1564 else
1566 /* The i386 ABI does not specify a red zone.  It still makes sense to use it
1567 when the programmer takes care to keep the stack from being destroyed.  */
1568 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1569 target_flags |= MASK_NO_RED_ZONE;
1572 ix86_fpmath = TARGET_FPMATH_DEFAULT;
1574 if (ix86_fpmath_string != 0)
1576 if (! strcmp (ix86_fpmath_string, "387"))
1577 ix86_fpmath = FPMATH_387;
1578 else if (! strcmp (ix86_fpmath_string, "sse"))
1580 if (!TARGET_SSE)
1582 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1583 ix86_fpmath = FPMATH_387;
1585 else
1586 ix86_fpmath = FPMATH_SSE;
1588 else if (! strcmp (ix86_fpmath_string, "387,sse")
1589 || ! strcmp (ix86_fpmath_string, "sse,387"))
1591 if (!TARGET_SSE)
1593 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1594 ix86_fpmath = FPMATH_387;
1596 else if (!TARGET_80387)
1598 warning (0, "387 instruction set disabled, using SSE arithmetics");
1599 ix86_fpmath = FPMATH_SSE;
1601 else
1602 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1604 else
1605 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1608 /* If the i387 is disabled, then do not return values in it. */
1609 if (!TARGET_80387)
1610 target_flags &= ~MASK_FLOAT_RETURNS;
1612 if ((x86_accumulate_outgoing_args & TUNEMASK)
1613 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1614 && !optimize_size)
1615 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1617 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1619 char *p;
1620 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1621 p = strchr (internal_label_prefix, 'X');
1622 internal_label_prefix_len = p - internal_label_prefix;
1623 *p = '\0';
1626 /* When the scheduling description is not available, disable the scheduler pass
1627 so it won't slow down compilation and make x87 code slower.  */
1628 if (!TARGET_SCHEDULE)
1629 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1632 void
1633 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1635 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1636 make the problem with not enough registers even worse. */
1637 #ifdef INSN_SCHEDULING
1638 if (level > 1)
1639 flag_schedule_insns = 0;
1640 #endif
1642 if (TARGET_MACHO)
1643 /* The Darwin libraries never set errno, so we might as well
1644 avoid calling them when that's the only reason we would. */
1645 flag_errno_math = 0;
1647 /* The default values of these switches depend on TARGET_64BIT,
1648 which is not known at this point.  Mark these values with 2 and
1649 let the user override them.  If no command line option specifies
1650 them, we will set the defaults in override_options.  */
1651 if (optimize >= 1)
1652 flag_omit_frame_pointer = 2;
1653 flag_pcc_struct_return = 2;
1654 flag_asynchronous_unwind_tables = 2;
1655 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1656 SUBTARGET_OPTIMIZATION_OPTIONS;
1657 #endif
1660 /* Table of valid machine attributes. */
1661 const struct attribute_spec ix86_attribute_table[] =
1663 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1664 /* Stdcall attribute says callee is responsible for popping arguments
1665 if they are not variable. */
1666 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1667 /* Fastcall attribute says callee is responsible for popping arguments
1668 if they are not variable. */
1669 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1670 /* Cdecl attribute says the callee is a normal C declaration */
1671 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1672 /* Regparm attribute specifies how many integer arguments are to be
1673 passed in registers. */
1674 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
1675 /* Sseregparm attribute says we are using x86_64 calling conventions
1676 for FP arguments. */
1677 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1678 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1679 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1680 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1681 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1682 #endif
1683 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1684 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1685 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1686 SUBTARGET_ATTRIBUTE_TABLE,
1687 #endif
1688 { NULL, 0, 0, false, false, false, NULL }
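/* Illustrative only (hypothetical user code, not part of GCC): the attributes
   in the table above appear in user sources roughly as

       int __attribute__((stdcall))     f (int a, int b);   callee pops its args
       int __attribute__((fastcall))    g (int a, int b);   a in %ecx, b in %edx
       int __attribute__((regparm (3))) h (int a, int b, int c);
                                                            a, b, c in %eax, %edx, %ecx
       float __attribute__((sseregparm)) s (float x);       x in %xmm0 on ia32

   each of which is vetted by ix86_handle_cconv_attribute below.  */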
1691 /* Decide whether we can make a sibling call to a function. DECL is the
1692 declaration of the function being targeted by the call and EXP is the
1693 CALL_EXPR representing the call. */
1695 static bool
1696 ix86_function_ok_for_sibcall (tree decl, tree exp)
1698 tree func;
1700 /* If we are generating position-independent code, we cannot sibcall
1701 optimize any indirect call, or a direct call to a global function,
1702 as the PLT requires %ebx be live. */
1703 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1704 return false;
1706 if (decl)
1707 func = decl;
1708 else
1709 func = NULL;
1711 /* If we are returning floats on the 80387 register stack, we cannot
1712 make a sibcall from a function that doesn't return a float to a
1713 function that does or, conversely, from a function that does return
1714 a float to a function that doesn't; the necessary stack adjustment
1715 would not be executed. */
1716 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp), func))
1717 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
1718 cfun->decl)))
1719 return false;
1721 /* If this call is indirect, we'll need to be able to use a call-clobbered
1722 register for the address of the target function. Make sure that all
1723 such registers are not used for passing parameters. */
1724 if (!decl && !TARGET_64BIT)
1726 tree type;
1728 /* We're looking at the CALL_EXPR; we need the type of the function.  */
1729 type = TREE_OPERAND (exp, 0); /* pointer expression */
1730 type = TREE_TYPE (type); /* pointer type */
1731 type = TREE_TYPE (type); /* function type */
1733 if (ix86_function_regparm (type, NULL) >= 3)
1735 /* ??? Need to count the actual number of registers to be used,
1736 not the possible number of registers. Fix later. */
1737 return false;
1741 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1742 /* Dllimport'd functions are also called indirectly. */
1743 if (decl && lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl))
1744 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
1745 return false;
1746 #endif
1748 /* Otherwise okay. That also includes certain types of indirect calls. */
1749 return true;
1752 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
1753 calling convention attributes;
1754 arguments as in struct attribute_spec.handler. */
1756 static tree
1757 ix86_handle_cconv_attribute (tree *node, tree name,
1758 tree args,
1759 int flags ATTRIBUTE_UNUSED,
1760 bool *no_add_attrs)
1762 if (TREE_CODE (*node) != FUNCTION_TYPE
1763 && TREE_CODE (*node) != METHOD_TYPE
1764 && TREE_CODE (*node) != FIELD_DECL
1765 && TREE_CODE (*node) != TYPE_DECL)
1767 warning (OPT_Wattributes, "%qs attribute only applies to functions",
1768 IDENTIFIER_POINTER (name));
1769 *no_add_attrs = true;
1770 return NULL_TREE;
1773 /* Can combine regparm with all attributes but fastcall. */
1774 if (is_attribute_p ("regparm", name))
1776 tree cst;
1778 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1780 error ("fastcall and regparm attributes are not compatible");
1783 cst = TREE_VALUE (args);
1784 if (TREE_CODE (cst) != INTEGER_CST)
1786 warning (OPT_Wattributes,
1787 "%qs attribute requires an integer constant argument",
1788 IDENTIFIER_POINTER (name));
1789 *no_add_attrs = true;
1791 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1793 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
1794 IDENTIFIER_POINTER (name), REGPARM_MAX);
1795 *no_add_attrs = true;
1798 return NULL_TREE;
1801 if (TARGET_64BIT)
1803 warning (OPT_Wattributes, "%qs attribute ignored",
1804 IDENTIFIER_POINTER (name));
1805 *no_add_attrs = true;
1806 return NULL_TREE;
1809 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
1810 if (is_attribute_p ("fastcall", name))
1812 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
1814 error ("fastcall and cdecl attributes are not compatible");
1816 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1818 error ("fastcall and stdcall attributes are not compatible");
1820 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1822 error ("fastcall and regparm attributes are not compatible");
1826 /* Can combine stdcall with fastcall (redundant), regparm and
1827 sseregparm. */
1828 else if (is_attribute_p ("stdcall", name))
1830 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
1832 error ("stdcall and cdecl attributes are not compatible");
1834 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1836 error ("stdcall and fastcall attributes are not compatible");
1840 /* Can combine cdecl with regparm and sseregparm. */
1841 else if (is_attribute_p ("cdecl", name))
1843 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1845 error ("stdcall and cdecl attributes are not compatible");
1847 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1849 error ("fastcall and cdecl attributes are not compatible");
1853 /* Can combine sseregparm with all attributes. */
1855 return NULL_TREE;
1858 /* Return 0 if the attributes for two types are incompatible, 1 if they
1859 are compatible, and 2 if they are nearly compatible (which causes a
1860 warning to be generated). */
1862 static int
1863 ix86_comp_type_attributes (tree type1, tree type2)
1865 /* Check for mismatch of non-default calling convention. */
1866 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1868 if (TREE_CODE (type1) != FUNCTION_TYPE)
1869 return 1;
1871 /* Check for mismatched fastcall/regparm types. */
1872 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1873 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1874 || (ix86_function_regparm (type1, NULL)
1875 != ix86_function_regparm (type2, NULL)))
1876 return 0;
1878 /* Check for mismatched sseregparm types. */
1879 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
1880 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
1881 return 0;
1883 /* Check for mismatched return types (cdecl vs stdcall). */
1884 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1885 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1886 return 0;
1888 return 1;
1891 /* Return the regparm value for a function with the indicated TYPE and DECL.
1892 DECL may be NULL when calling function indirectly
1893 or considering a libcall. */
1895 static int
1896 ix86_function_regparm (tree type, tree decl)
1898 tree attr;
1899 int regparm = ix86_regparm;
1900 bool user_convention = false;
1902 if (!TARGET_64BIT)
1904 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1905 if (attr)
1907 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1908 user_convention = true;
1911 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1913 regparm = 2;
1914 user_convention = true;
1917 /* Use register calling convention for local functions when possible. */
1918 if (!TARGET_64BIT && !user_convention && decl
1919 && flag_unit_at_a_time && !profile_flag)
1921 struct cgraph_local_info *i = cgraph_local_info (decl);
1922 if (i && i->local)
1924 /* We can't use regparm(3) for nested functions as these pass the
1925 static chain pointer in the third register.  */
1926 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1927 regparm = 2;
1928 else
1929 regparm = 3;
1933 return regparm;
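/* Illustrative sketch of the local-function optimization above (assumptions
   marked): given a hypothetical

       static int sum3 (int a, int b, int c) { return a + b + c; }

   that cgraph considers local (roughly: not visible outside the unit, address
   never taken), compiling with -funit-at-a-time and no profiling silently
   upgrades it to regparm(3), so a, b and c arrive in %eax, %edx and %ecx; a
   nested function keeps regparm(2) because %ecx carries the static chain.  */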
1936 /* Return 1 if we can pass up to 8 SFmode arguments, or 2 if we can also
1937 pass DFmode arguments, in SSE registers for a function with the indicated
1938 TYPE and DECL.  DECL may be NULL when calling the function indirectly
1939 or when considering a libcall.  Return 0 otherwise.  */
1941 static int
1942 ix86_function_sseregparm (tree type, tree decl)
1944 /* Use SSE registers to pass SFmode and DFmode arguments if requested
1945 by the sseregparm attribute. */
1946 if (type
1947 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type)))
1949 if (!TARGET_SSE)
1951 if (decl)
1952 error ("Calling %qD with attribute sseregparm without "
1953 "SSE/SSE2 enabled", decl);
1954 else
1955 error ("Calling %qT with attribute sseregparm without "
1956 "SSE/SSE2 enabled", type);
1957 return 0;
1960 return 2;
1963 /* For local functions, pass SFmode (and DFmode for SSE2) arguments
1964 in SSE registers even for 32-bit mode and not just 3, but up to
1965 8 SSE arguments in registers. */
1966 if (!TARGET_64BIT && decl
1967 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
1969 struct cgraph_local_info *i = cgraph_local_info (decl);
1970 if (i && i->local)
1971 return TARGET_SSE2 ? 2 : 1;
1974 return 0;
1977 /* Return true if EAX is live at the start of the function. Used by
1978 ix86_expand_prologue to determine if we need special help before
1979 calling allocate_stack_worker. */
1981 static bool
1982 ix86_eax_live_at_start_p (void)
1984 /* Cheat. Don't bother working forward from ix86_function_regparm
1985 to the function type to whether an actual argument is located in
1986 eax. Instead just look at cfg info, which is still close enough
1987 to correct at this point. This gives false positives for broken
1988 functions that might use uninitialized data that happens to be
1989 allocated in eax, but who cares? */
1990 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
1993 /* Value is the number of bytes of arguments automatically
1994 popped when returning from a subroutine call.
1995 FUNDECL is the declaration node of the function (as a tree),
1996 FUNTYPE is the data type of the function (as a tree),
1997 or for a library call it is an identifier node for the subroutine name.
1998 SIZE is the number of bytes of arguments passed on the stack.
2000 On the 80386, the RTD insn may be used to pop them if the number
2001 of args is fixed, but if the number is variable then the caller
2002 must pop them all. RTD can't be used for library calls now
2003 because the library is compiled with the Unix compiler.
2004 Use of RTD is a selectable option, since it is incompatible with
2005 standard Unix calling sequences. If the option is not selected,
2006 the caller must always pop the args.
2008 The attribute stdcall is equivalent to RTD on a per module basis. */
2011 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2013 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2015 /* Cdecl functions override -mrtd, and never pop the stack. */
2016 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2018 /* Stdcall and fastcall functions will pop the stack if not
2019 variable args. */
2020 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2021 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2022 rtd = 1;
2024 if (rtd
2025 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2026 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2027 == void_type_node)))
2028 return size;
2031 /* Lose any fake structure return argument if it is passed on the stack. */
2032 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2033 && !TARGET_64BIT
2034 && !KEEP_AGGREGATE_RETURN_POINTER)
2036 int nregs = ix86_function_regparm (funtype, fundecl);
2038 if (!nregs)
2039 return GET_MODE_SIZE (Pmode);
2042 return 0;
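/* Worked example, for illustration only: for a hypothetical

       void __attribute__((stdcall)) f (int a, int b);

   the argument list ends in void_type_node, so the function above returns SIZE
   (8 bytes here) and the callee pops its arguments with `ret $8'; adding a
   trailing ellipsis makes it return 0 and the caller pops instead.  */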
2045 /* Argument support functions. */
2047 /* Return true when register may be used to pass function parameters. */
2048 bool
2049 ix86_function_arg_regno_p (int regno)
2051 int i;
2052 if (!TARGET_64BIT)
2053 return (regno < REGPARM_MAX
2054 || (TARGET_MMX && MMX_REGNO_P (regno)
2055 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2056 || (TARGET_SSE && SSE_REGNO_P (regno)
2057 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2059 if (TARGET_SSE && SSE_REGNO_P (regno)
2060 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2061 return true;
2062 /* RAX is used as hidden argument to va_arg functions. */
2063 if (!regno)
2064 return true;
2065 for (i = 0; i < REGPARM_MAX; i++)
2066 if (regno == x86_64_int_parameter_registers[i])
2067 return true;
2068 return false;
2071 /* Return if we do not know how to pass TYPE solely in registers. */
2073 static bool
2074 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2076 if (must_pass_in_stack_var_size_or_pad (mode, type))
2077 return true;
2079 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2080 The layout_type routine is crafty and tries to trick us into passing
2081 currently unsupported vector types on the stack by using TImode. */
2082 return (!TARGET_64BIT && mode == TImode
2083 && type && TREE_CODE (type) != VECTOR_TYPE);
2086 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2087 for a call to a function whose data type is FNTYPE.
2088 For a library call, FNTYPE is 0. */
2090 void
2091 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2092 tree fntype, /* tree ptr for function decl */
2093 rtx libname, /* SYMBOL_REF of library name or 0 */
2094 tree fndecl)
2096 static CUMULATIVE_ARGS zero_cum;
2097 tree param, next_param;
2099 if (TARGET_DEBUG_ARG)
2101 fprintf (stderr, "\ninit_cumulative_args (");
2102 if (fntype)
2103 fprintf (stderr, "fntype code = %s, ret code = %s",
2104 tree_code_name[(int) TREE_CODE (fntype)],
2105 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2106 else
2107 fprintf (stderr, "no fntype");
2109 if (libname)
2110 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2113 *cum = zero_cum;
2115 /* Set up the number of registers to use for passing arguments. */
2116 cum->nregs = ix86_regparm;
2117 if (TARGET_SSE)
2118 cum->sse_nregs = SSE_REGPARM_MAX;
2119 if (TARGET_MMX)
2120 cum->mmx_nregs = MMX_REGPARM_MAX;
2121 cum->warn_sse = true;
2122 cum->warn_mmx = true;
2123 cum->maybe_vaarg = false;
2125 /* Use ecx and edx registers if function has fastcall attribute,
2126 else look for regparm information. */
2127 if (fntype && !TARGET_64BIT)
2129 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2131 cum->nregs = 2;
2132 cum->fastcall = 1;
2134 else
2135 cum->nregs = ix86_function_regparm (fntype, fndecl);
2138 /* Set up the number of SSE registers used for passing SFmode
2139 and DFmode arguments. Warn for mismatching ABI. */
2140 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2142 /* Determine if this function has variable arguments. This is
2143 indicated by the last argument being 'void_type_node' if there
2144 are no variable arguments. If there are variable arguments, then
2145 we won't pass anything in registers in 32-bit mode. */
2147 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2149 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2150 param != 0; param = next_param)
2152 next_param = TREE_CHAIN (param);
2153 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2155 if (!TARGET_64BIT)
2157 cum->nregs = 0;
2158 cum->sse_nregs = 0;
2159 cum->mmx_nregs = 0;
2160 cum->warn_sse = 0;
2161 cum->warn_mmx = 0;
2162 cum->fastcall = 0;
2163 cum->float_in_sse = 0;
2165 cum->maybe_vaarg = true;
2169 if ((!fntype && !libname)
2170 || (fntype && !TYPE_ARG_TYPES (fntype)))
2171 cum->maybe_vaarg = true;
2173 if (TARGET_DEBUG_ARG)
2174 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2176 return;
2179 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2180 But in the case of vector types, it is some vector mode.
2182 When we have only some of our vector isa extensions enabled, then there
2183 are some modes for which vector_mode_supported_p is false. For these
2184 modes, the generic vector support in gcc will choose some non-vector mode
2185 in order to implement the type. By computing the natural mode, we'll
2186 select the proper ABI location for the operand and not depend on whatever
2187 the middle-end decides to do with these vector types. */
2189 static enum machine_mode
2190 type_natural_mode (tree type)
2192 enum machine_mode mode = TYPE_MODE (type);
2194 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2196 HOST_WIDE_INT size = int_size_in_bytes (type);
2197 if ((size == 8 || size == 16)
2198 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2199 && TYPE_VECTOR_SUBPARTS (type) > 1)
2201 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2203 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2204 mode = MIN_MODE_VECTOR_FLOAT;
2205 else
2206 mode = MIN_MODE_VECTOR_INT;
2208 /* Get the mode which has this inner mode and number of units. */
2209 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2210 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2211 && GET_MODE_INNER (mode) == innermode)
2212 return mode;
2214 gcc_unreachable ();
2218 return mode;
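/* Illustrative example (hypothetical user type): for

       typedef int v2si __attribute__ ((vector_size (8)));

   TYPE_MODE may be a non-vector mode when MMX is disabled, but the search above
   still yields V2SImode (two SImode units in 8 bytes), so the argument-passing
   code below sees the same natural mode either way.  */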
2221 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2222 this may not agree with the mode that the type system has chosen for the
2223 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2224 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2226 static rtx
2227 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2228 unsigned int regno)
2230 rtx tmp;
2232 if (orig_mode != BLKmode)
2233 tmp = gen_rtx_REG (orig_mode, regno);
2234 else
2236 tmp = gen_rtx_REG (mode, regno);
2237 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2238 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2241 return tmp;
2244 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
2245 of this code is to classify each 8bytes of incoming argument by the register
2246 class and assign registers accordingly. */
2248 /* Return the union class of CLASS1 and CLASS2.
2249 See the x86-64 PS ABI for details. */
2251 static enum x86_64_reg_class
2252 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2254 /* Rule #1: If both classes are equal, this is the resulting class. */
2255 if (class1 == class2)
2256 return class1;
2258 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2259 the other class. */
2260 if (class1 == X86_64_NO_CLASS)
2261 return class2;
2262 if (class2 == X86_64_NO_CLASS)
2263 return class1;
2265 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2266 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2267 return X86_64_MEMORY_CLASS;
2269 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2270 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2271 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2272 return X86_64_INTEGERSI_CLASS;
2273 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2274 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2275 return X86_64_INTEGER_CLASS;
2277 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2278 MEMORY is used. */
2279 if (class1 == X86_64_X87_CLASS
2280 || class1 == X86_64_X87UP_CLASS
2281 || class1 == X86_64_COMPLEX_X87_CLASS
2282 || class2 == X86_64_X87_CLASS
2283 || class2 == X86_64_X87UP_CLASS
2284 || class2 == X86_64_COMPLEX_X87_CLASS)
2285 return X86_64_MEMORY_CLASS;
2287 /* Rule #6: Otherwise class SSE is used. */
2288 return X86_64_SSE_CLASS;
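/* Worked example, for illustration only: classifying the hypothetical type

       struct s { int i; float f; };       8 bytes, a single eightbyte

   gives X86_64_INTEGERSI_CLASS for the int and X86_64_SSE_CLASS for the float
   (it is not 64-bit aligned, so it misses the SSESF case); rule #4 above merges
   them into an integer class, so the whole struct is passed in one general
   purpose register.  */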
2291 /* Classify the argument of type TYPE and mode MODE.
2292 CLASSES will be filled by the register class used to pass each word
2293 of the operand. The number of words is returned. In case the parameter
2294 should be passed in memory, 0 is returned. As a special case for zero
2295 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2297 BIT_OFFSET is used internally for handling records and specifies the
2298 offset of the argument in bits modulo 256 to avoid overflow cases.
2300 See the x86-64 PS ABI for details.
2303 static int
2304 classify_argument (enum machine_mode mode, tree type,
2305 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2307 HOST_WIDE_INT bytes =
2308 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2309 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2311 /* Variable sized entities are always passed/returned in memory. */
2312 if (bytes < 0)
2313 return 0;
2315 if (mode != VOIDmode
2316 && targetm.calls.must_pass_in_stack (mode, type))
2317 return 0;
2319 if (type && AGGREGATE_TYPE_P (type))
2321 int i;
2322 tree field;
2323 enum x86_64_reg_class subclasses[MAX_CLASSES];
2325 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2326 if (bytes > 16)
2327 return 0;
2329 for (i = 0; i < words; i++)
2330 classes[i] = X86_64_NO_CLASS;
2332 /* Zero sized arrays or structures are NO_CLASS.  We return 0 to
2333 signal the memory class, so handle them as a special case.  */
2334 if (!words)
2336 classes[0] = X86_64_NO_CLASS;
2337 return 1;
2340 /* Classify each field of record and merge classes. */
2341 switch (TREE_CODE (type))
2343 case RECORD_TYPE:
2344 /* For classes first merge in the fields of the subclasses.  */
2345 if (TYPE_BINFO (type))
2347 tree binfo, base_binfo;
2348 int basenum;
2350 for (binfo = TYPE_BINFO (type), basenum = 0;
2351 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2353 int num;
2354 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2355 tree type = BINFO_TYPE (base_binfo);
2357 num = classify_argument (TYPE_MODE (type),
2358 type, subclasses,
2359 (offset + bit_offset) % 256);
2360 if (!num)
2361 return 0;
2362 for (i = 0; i < num; i++)
2364 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2365 classes[i + pos] =
2366 merge_classes (subclasses[i], classes[i + pos]);
2370 /* And now merge the fields of the structure.  */
2371 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2373 if (TREE_CODE (field) == FIELD_DECL)
2375 int num;
2377 /* Bitfields are always classified as integer. Handle them
2378 early, since later code would consider them to be
2379 misaligned integers. */
2380 if (DECL_BIT_FIELD (field))
2382 for (i = int_bit_position (field) / 8 / 8;
2383 i < (int_bit_position (field)
2384 + tree_low_cst (DECL_SIZE (field), 0)
2385 + 63) / 8 / 8; i++)
2386 classes[i] =
2387 merge_classes (X86_64_INTEGER_CLASS,
2388 classes[i]);
2390 else
2392 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2393 TREE_TYPE (field), subclasses,
2394 (int_bit_position (field)
2395 + bit_offset) % 256);
2396 if (!num)
2397 return 0;
2398 for (i = 0; i < num; i++)
2400 int pos =
2401 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2402 classes[i + pos] =
2403 merge_classes (subclasses[i], classes[i + pos]);
2408 break;
2410 case ARRAY_TYPE:
2411 /* Arrays are handled as small records. */
2413 int num;
2414 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2415 TREE_TYPE (type), subclasses, bit_offset);
2416 if (!num)
2417 return 0;
2419 /* The partial classes are now full classes. */
2420 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2421 subclasses[0] = X86_64_SSE_CLASS;
2422 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2423 subclasses[0] = X86_64_INTEGER_CLASS;
2425 for (i = 0; i < words; i++)
2426 classes[i] = subclasses[i % num];
2428 break;
2430 case UNION_TYPE:
2431 case QUAL_UNION_TYPE:
2432 /* Unions are similar to RECORD_TYPE but the offset is always 0.  */
2435 /* Unions are not derived. */
2436 gcc_assert (!TYPE_BINFO (type)
2437 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2438 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2440 if (TREE_CODE (field) == FIELD_DECL)
2442 int num;
2443 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2444 TREE_TYPE (field), subclasses,
2445 bit_offset);
2446 if (!num)
2447 return 0;
2448 for (i = 0; i < num; i++)
2449 classes[i] = merge_classes (subclasses[i], classes[i]);
2452 break;
2454 default:
2455 gcc_unreachable ();
2458 /* Final merger cleanup. */
2459 for (i = 0; i < words; i++)
2461 /* If one class is MEMORY, everything should be passed in
2462 memory. */
2463 if (classes[i] == X86_64_MEMORY_CLASS)
2464 return 0;
2466 /* The X86_64_SSEUP_CLASS should always be preceded by
2467 X86_64_SSE_CLASS.  */
2468 if (classes[i] == X86_64_SSEUP_CLASS
2469 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2470 classes[i] = X86_64_SSE_CLASS;
2472 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2473 if (classes[i] == X86_64_X87UP_CLASS
2474 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2475 classes[i] = X86_64_SSE_CLASS;
2477 return words;
2480 /* Compute the alignment needed.  We align all types to their natural
2481 boundaries, with the exception of XFmode, which is aligned to 128 bits.  */
2482 if (mode != VOIDmode && mode != BLKmode)
2484 int mode_alignment = GET_MODE_BITSIZE (mode);
2486 if (mode == XFmode)
2487 mode_alignment = 128;
2488 else if (mode == XCmode)
2489 mode_alignment = 256;
2490 if (COMPLEX_MODE_P (mode))
2491 mode_alignment /= 2;
2492 /* Misaligned fields are always returned in memory. */
2493 if (bit_offset % mode_alignment)
2494 return 0;
2497 /* For V1xx modes, just use the base mode.  */
2498 if (VECTOR_MODE_P (mode)
2499 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2500 mode = GET_MODE_INNER (mode);
2502 /* Classification of atomic types. */
2503 switch (mode)
2505 case DImode:
2506 case SImode:
2507 case HImode:
2508 case QImode:
2509 case CSImode:
2510 case CHImode:
2511 case CQImode:
2512 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2513 classes[0] = X86_64_INTEGERSI_CLASS;
2514 else
2515 classes[0] = X86_64_INTEGER_CLASS;
2516 return 1;
2517 case CDImode:
2518 case TImode:
2519 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2520 return 2;
2521 case CTImode:
2522 return 0;
2523 case SFmode:
2524 if (!(bit_offset % 64))
2525 classes[0] = X86_64_SSESF_CLASS;
2526 else
2527 classes[0] = X86_64_SSE_CLASS;
2528 return 1;
2529 case DFmode:
2530 classes[0] = X86_64_SSEDF_CLASS;
2531 return 1;
2532 case XFmode:
2533 classes[0] = X86_64_X87_CLASS;
2534 classes[1] = X86_64_X87UP_CLASS;
2535 return 2;
2536 case TFmode:
2537 classes[0] = X86_64_SSE_CLASS;
2538 classes[1] = X86_64_SSEUP_CLASS;
2539 return 2;
2540 case SCmode:
2541 classes[0] = X86_64_SSE_CLASS;
2542 return 1;
2543 case DCmode:
2544 classes[0] = X86_64_SSEDF_CLASS;
2545 classes[1] = X86_64_SSEDF_CLASS;
2546 return 2;
2547 case XCmode:
2548 classes[0] = X86_64_COMPLEX_X87_CLASS;
2549 return 1;
2550 case TCmode:
2551 /* This mode is larger than 16 bytes.  */
2552 return 0;
2553 case V4SFmode:
2554 case V4SImode:
2555 case V16QImode:
2556 case V8HImode:
2557 case V2DFmode:
2558 case V2DImode:
2559 classes[0] = X86_64_SSE_CLASS;
2560 classes[1] = X86_64_SSEUP_CLASS;
2561 return 2;
2562 case V2SFmode:
2563 case V2SImode:
2564 case V4HImode:
2565 case V8QImode:
2566 classes[0] = X86_64_SSE_CLASS;
2567 return 1;
2568 case BLKmode:
2569 case VOIDmode:
2570 return 0;
2571 default:
2572 gcc_assert (VECTOR_MODE_P (mode));
2574 if (bytes > 16)
2575 return 0;
2577 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2579 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2580 classes[0] = X86_64_INTEGERSI_CLASS;
2581 else
2582 classes[0] = X86_64_INTEGER_CLASS;
2583 classes[1] = X86_64_INTEGER_CLASS;
2584 return 1 + (bytes > 8);
2588 /* Examine the argument and return the number of registers required in each
2589 class.  Return 0 iff the parameter should be passed in memory.  */
2590 static int
2591 examine_argument (enum machine_mode mode, tree type, int in_return,
2592 int *int_nregs, int *sse_nregs)
2594 enum x86_64_reg_class class[MAX_CLASSES];
2595 int n = classify_argument (mode, type, class, 0);
2597 *int_nregs = 0;
2598 *sse_nregs = 0;
2599 if (!n)
2600 return 0;
2601 for (n--; n >= 0; n--)
2602 switch (class[n])
2604 case X86_64_INTEGER_CLASS:
2605 case X86_64_INTEGERSI_CLASS:
2606 (*int_nregs)++;
2607 break;
2608 case X86_64_SSE_CLASS:
2609 case X86_64_SSESF_CLASS:
2610 case X86_64_SSEDF_CLASS:
2611 (*sse_nregs)++;
2612 break;
2613 case X86_64_NO_CLASS:
2614 case X86_64_SSEUP_CLASS:
2615 break;
2616 case X86_64_X87_CLASS:
2617 case X86_64_X87UP_CLASS:
2618 if (!in_return)
2619 return 0;
2620 break;
2621 case X86_64_COMPLEX_X87_CLASS:
2622 return in_return ? 2 : 0;
2623 case X86_64_MEMORY_CLASS:
2624 gcc_unreachable ();
2626 return 1;
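/* Worked example, for illustration only: for the hypothetical type

       struct p { double d; long l; };     16 bytes, two eightbytes

   classify_argument produces { X86_64_SSEDF_CLASS, X86_64_INTEGER_CLASS },
   so the loop above reports *sse_nregs = 1 and *int_nregs = 1; the struct
   travels in one SSE and one integer register when both are still free,
   and in memory otherwise.  */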
2629 /* Construct container for the argument used by GCC interface. See
2630 FUNCTION_ARG for the detailed description. */
2632 static rtx
2633 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2634 tree type, int in_return, int nintregs, int nsseregs,
2635 const int *intreg, int sse_regno)
2637 enum machine_mode tmpmode;
2638 int bytes =
2639 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2640 enum x86_64_reg_class class[MAX_CLASSES];
2641 int n;
2642 int i;
2643 int nexps = 0;
2644 int needed_sseregs, needed_intregs;
2645 rtx exp[MAX_CLASSES];
2646 rtx ret;
2648 n = classify_argument (mode, type, class, 0);
2649 if (TARGET_DEBUG_ARG)
2651 if (!n)
2652 fprintf (stderr, "Memory class\n");
2653 else
2655 fprintf (stderr, "Classes:");
2656 for (i = 0; i < n; i++)
2658 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2660 fprintf (stderr, "\n");
2663 if (!n)
2664 return NULL;
2665 if (!examine_argument (mode, type, in_return, &needed_intregs,
2666 &needed_sseregs))
2667 return NULL;
2668 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2669 return NULL;
2671 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2672 some less clueful developer tries to use floating-point anyway. */
2673 if (needed_sseregs && !TARGET_SSE)
2675 static bool issued_error;
2676 if (!issued_error)
2678 issued_error = true;
2679 if (in_return)
2680 error ("SSE register return with SSE disabled");
2681 else
2682 error ("SSE register argument with SSE disabled");
2684 return NULL;
2687 /* First construct the simple cases.  Avoid SCmode, since we want to use
2688 a single register to pass this type.  */
2689 if (n == 1 && mode != SCmode)
2690 switch (class[0])
2692 case X86_64_INTEGER_CLASS:
2693 case X86_64_INTEGERSI_CLASS:
2694 return gen_rtx_REG (mode, intreg[0]);
2695 case X86_64_SSE_CLASS:
2696 case X86_64_SSESF_CLASS:
2697 case X86_64_SSEDF_CLASS:
2698 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2699 case X86_64_X87_CLASS:
2700 case X86_64_COMPLEX_X87_CLASS:
2701 return gen_rtx_REG (mode, FIRST_STACK_REG);
2702 case X86_64_NO_CLASS:
2703 /* Zero sized array, struct or class. */
2704 return NULL;
2705 default:
2706 gcc_unreachable ();
2708 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2709 && mode != BLKmode)
2710 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2711 if (n == 2
2712 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2713 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2714 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2715 && class[1] == X86_64_INTEGER_CLASS
2716 && (mode == CDImode || mode == TImode || mode == TFmode)
2717 && intreg[0] + 1 == intreg[1])
2718 return gen_rtx_REG (mode, intreg[0]);
2720 /* Otherwise figure out the entries of the PARALLEL. */
2721 for (i = 0; i < n; i++)
2723 switch (class[i])
2725 case X86_64_NO_CLASS:
2726 break;
2727 case X86_64_INTEGER_CLASS:
2728 case X86_64_INTEGERSI_CLASS:
2729 /* Merge TImodes on aligned occasions here too. */
2730 if (i * 8 + 8 > bytes)
2731 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2732 else if (class[i] == X86_64_INTEGERSI_CLASS)
2733 tmpmode = SImode;
2734 else
2735 tmpmode = DImode;
2736 /* We've requested 24 bytes for which we don't have a mode.  Use DImode.  */
2737 if (tmpmode == BLKmode)
2738 tmpmode = DImode;
2739 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2740 gen_rtx_REG (tmpmode, *intreg),
2741 GEN_INT (i*8));
2742 intreg++;
2743 break;
2744 case X86_64_SSESF_CLASS:
2745 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2746 gen_rtx_REG (SFmode,
2747 SSE_REGNO (sse_regno)),
2748 GEN_INT (i*8));
2749 sse_regno++;
2750 break;
2751 case X86_64_SSEDF_CLASS:
2752 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2753 gen_rtx_REG (DFmode,
2754 SSE_REGNO (sse_regno)),
2755 GEN_INT (i*8));
2756 sse_regno++;
2757 break;
2758 case X86_64_SSE_CLASS:
2759 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2760 tmpmode = TImode;
2761 else
2762 tmpmode = DImode;
2763 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2764 gen_rtx_REG (tmpmode,
2765 SSE_REGNO (sse_regno)),
2766 GEN_INT (i*8));
2767 if (tmpmode == TImode)
2768 i++;
2769 sse_regno++;
2770 break;
2771 default:
2772 gcc_unreachable ();
2776 /* Empty aligned struct, union or class. */
2777 if (nexps == 0)
2778 return NULL;
2780 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2781 for (i = 0; i < nexps; i++)
2782 XVECEXP (ret, 0, i) = exp [i];
2783 return ret;
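/* Worked example, for illustration only: for the same hypothetical
   struct p { double d; long l; } none of the simple cases match, so the
   loop above builds roughly

       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI di)   (const_int 8))])

   assuming the first SSE and integer argument registers are still free;
   each offset records where the piece lives within the value.  */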
2786 /* Update the data in CUM to advance over an argument
2787 of mode MODE and data type TYPE.
2788 (TYPE is null for libcalls where that information may not be available.) */
2790 void
2791 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2792 tree type, int named)
2794 int bytes =
2795 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2796 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2798 if (type)
2799 mode = type_natural_mode (type);
2801 if (TARGET_DEBUG_ARG)
2802 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
2803 "mode=%s, named=%d)\n\n",
2804 words, cum->words, cum->nregs, cum->sse_nregs,
2805 GET_MODE_NAME (mode), named);
2807 if (TARGET_64BIT)
2809 int int_nregs, sse_nregs;
2810 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2811 cum->words += words;
2812 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2814 cum->nregs -= int_nregs;
2815 cum->sse_nregs -= sse_nregs;
2816 cum->regno += int_nregs;
2817 cum->sse_regno += sse_nregs;
2819 else
2820 cum->words += words;
2822 else
2824 switch (mode)
2826 default:
2827 break;
2829 case BLKmode:
2830 if (bytes < 0)
2831 break;
2832 /* FALLTHRU */
2834 case DImode:
2835 case SImode:
2836 case HImode:
2837 case QImode:
2838 cum->words += words;
2839 cum->nregs -= words;
2840 cum->regno += words;
2842 if (cum->nregs <= 0)
2844 cum->nregs = 0;
2845 cum->regno = 0;
2847 break;
2849 case DFmode:
2850 if (cum->float_in_sse < 2)
2851 break;
2852 case SFmode:
2853 if (cum->float_in_sse < 1)
2854 break;
2855 /* FALLTHRU */
2857 case TImode:
2858 case V16QImode:
2859 case V8HImode:
2860 case V4SImode:
2861 case V2DImode:
2862 case V4SFmode:
2863 case V2DFmode:
2864 if (!type || !AGGREGATE_TYPE_P (type))
2866 cum->sse_words += words;
2867 cum->sse_nregs -= 1;
2868 cum->sse_regno += 1;
2869 if (cum->sse_nregs <= 0)
2871 cum->sse_nregs = 0;
2872 cum->sse_regno = 0;
2875 break;
2877 case V8QImode:
2878 case V4HImode:
2879 case V2SImode:
2880 case V2SFmode:
2881 if (!type || !AGGREGATE_TYPE_P (type))
2883 cum->mmx_words += words;
2884 cum->mmx_nregs -= 1;
2885 cum->mmx_regno += 1;
2886 if (cum->mmx_nregs <= 0)
2888 cum->mmx_nregs = 0;
2889 cum->mmx_regno = 0;
2892 break;
2897 /* Define where to put the arguments to a function.
2898 Value is zero to push the argument on the stack,
2899 or a hard register in which to store the argument.
2901 MODE is the argument's machine mode.
2902 TYPE is the data type of the argument (as a tree).
2903 This is null for libcalls where that information may
2904 not be available.
2905 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2906 the preceding args and about the function being called.
2907 NAMED is nonzero if this argument is a named parameter
2908 (otherwise it is an extra parameter matching an ellipsis). */
2911 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
2912 tree type, int named)
2914 enum machine_mode mode = orig_mode;
2915 rtx ret = NULL_RTX;
2916 int bytes =
2917 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2918 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2919 static bool warnedsse, warnedmmx;
2921 /* To simplify the code below, represent vector types with a vector mode
2922 even if MMX/SSE are not active. */
2923 if (type && TREE_CODE (type) == VECTOR_TYPE)
2924 mode = type_natural_mode (type);
2926 /* Handle a hidden AL argument containing the number of SSE registers used
2927 by varargs x86-64 functions.  For the i386 ABI just return constm1_rtx to
2928 avoid any AL settings.  */
2929 if (mode == VOIDmode)
2931 if (TARGET_64BIT)
2932 return GEN_INT (cum->maybe_vaarg
2933 ? (cum->sse_nregs < 0
2934 ? SSE_REGPARM_MAX
2935 : cum->sse_regno)
2936 : -1);
2937 else
2938 return constm1_rtx;
2940 if (TARGET_64BIT)
2941 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
2942 cum->sse_nregs,
2943 &x86_64_int_parameter_registers [cum->regno],
2944 cum->sse_regno);
2945 else
2946 switch (mode)
2948 /* For now, pass fp/complex values on the stack. */
2949 default:
2950 break;
2952 case BLKmode:
2953 if (bytes < 0)
2954 break;
2955 /* FALLTHRU */
2956 case DImode:
2957 case SImode:
2958 case HImode:
2959 case QImode:
2960 if (words <= cum->nregs)
2962 int regno = cum->regno;
2964 /* Fastcall allocates the first two DWORD (SImode) or
2965 smaller arguments to ECX and EDX. */
2966 if (cum->fastcall)
2968 if (mode == BLKmode || mode == DImode)
2969 break;
2971 /* ECX, not EAX, is the first allocated register.  */
2972 if (regno == 0)
2973 regno = 2;
2975 ret = gen_rtx_REG (mode, regno);
2977 break;
2978 case DFmode:
2979 if (cum->float_in_sse < 2)
2980 break;
2981 case SFmode:
2982 if (cum->float_in_sse < 1)
2983 break;
2984 /* FALLTHRU */
2985 case TImode:
2986 case V16QImode:
2987 case V8HImode:
2988 case V4SImode:
2989 case V2DImode:
2990 case V4SFmode:
2991 case V2DFmode:
2992 if (!type || !AGGREGATE_TYPE_P (type))
2994 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2996 warnedsse = true;
2997 warning (0, "SSE vector argument without SSE enabled "
2998 "changes the ABI");
3000 if (cum->sse_nregs)
3001 ret = gen_reg_or_parallel (mode, orig_mode,
3002 cum->sse_regno + FIRST_SSE_REG);
3004 break;
3005 case V8QImode:
3006 case V4HImode:
3007 case V2SImode:
3008 case V2SFmode:
3009 if (!type || !AGGREGATE_TYPE_P (type))
3011 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3013 warnedmmx = true;
3014 warning (0, "MMX vector argument without MMX enabled "
3015 "changes the ABI");
3017 if (cum->mmx_nregs)
3018 ret = gen_reg_or_parallel (mode, orig_mode,
3019 cum->mmx_regno + FIRST_MMX_REG);
3021 break;
3024 if (TARGET_DEBUG_ARG)
3026 fprintf (stderr,
3027 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3028 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3030 if (ret)
3031 print_simple_rtl (stderr, ret);
3032 else
3033 fprintf (stderr, ", stack");
3035 fprintf (stderr, " )\n");
3038 return ret;
3041 /* A C expression that indicates when an argument must be passed by
3042 reference. If nonzero for an argument, a copy of that argument is
3043 made in memory and a pointer to the argument is passed instead of
3044 the argument itself. The pointer is passed in whatever way is
3045 appropriate for passing a pointer to that type. */
3047 static bool
3048 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3049 enum machine_mode mode ATTRIBUTE_UNUSED,
3050 tree type, bool named ATTRIBUTE_UNUSED)
3052 if (!TARGET_64BIT)
3053 return 0;
3055 if (type && int_size_in_bytes (type) == -1)
3057 if (TARGET_DEBUG_ARG)
3058 fprintf (stderr, "function_arg_pass_by_reference\n");
3059 return 1;
3062 return 0;
3065 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3066 ABI. Only called if TARGET_SSE. */
3067 static bool
3068 contains_128bit_aligned_vector_p (tree type)
3070 enum machine_mode mode = TYPE_MODE (type);
3071 if (SSE_REG_MODE_P (mode)
3072 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3073 return true;
3074 if (TYPE_ALIGN (type) < 128)
3075 return false;
3077 if (AGGREGATE_TYPE_P (type))
3079 /* Walk the aggregates recursively. */
3080 switch (TREE_CODE (type))
3082 case RECORD_TYPE:
3083 case UNION_TYPE:
3084 case QUAL_UNION_TYPE:
3086 tree field;
3088 if (TYPE_BINFO (type))
3090 tree binfo, base_binfo;
3091 int i;
3093 for (binfo = TYPE_BINFO (type), i = 0;
3094 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3095 if (contains_128bit_aligned_vector_p
3096 (BINFO_TYPE (base_binfo)))
3097 return true;
3099 /* And now merge the fields of the structure.  */
3100 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3102 if (TREE_CODE (field) == FIELD_DECL
3103 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3104 return true;
3106 break;
3109 case ARRAY_TYPE:
3110 /* Just for use if some languages pass arrays by value.  */
3111 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3112 return true;
3114 default:
3115 gcc_unreachable ();
3118 return false;
3121 /* Gives the alignment boundary, in bits, of an argument with the
3122 specified mode and type. */
3125 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3127 int align;
3128 if (type)
3129 align = TYPE_ALIGN (type);
3130 else
3131 align = GET_MODE_ALIGNMENT (mode);
3132 if (align < PARM_BOUNDARY)
3133 align = PARM_BOUNDARY;
3134 if (!TARGET_64BIT)
3136 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3137 make an exception for SSE modes since these require 128bit
3138 alignment.
3140 The handling here differs from field_alignment. ICC aligns MMX
3141 arguments to 4 byte boundaries, while structure fields are aligned
3142 to 8 byte boundaries. */
3143 if (!TARGET_SSE)
3144 align = PARM_BOUNDARY;
3145 else if (!type)
3147 if (!SSE_REG_MODE_P (mode))
3148 align = PARM_BOUNDARY;
3150 else
3152 if (!contains_128bit_aligned_vector_p (type))
3153 align = PARM_BOUNDARY;
3156 if (align > 128)
3157 align = 128;
3158 return align;
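/* Worked example, for illustration only: on the 32-bit ABI a hypothetical

       void f (int a, __m128 v);

   gives v a 128 bit boundary here when SSE is enabled (its type contains a
   128-bit aligned vector), while plain scalars such as a keep PARM_BOUNDARY
   (32 bits); with -mno-sse even v is demoted to PARM_BOUNDARY, as the comment
   above explains.  */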
3161 /* Return true if N is a possible register number of function value. */
3162 bool
3163 ix86_function_value_regno_p (int regno)
3165 if (regno == 0
3166 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3167 || (regno == FIRST_SSE_REG && TARGET_SSE))
3168 return true;
3170 if (!TARGET_64BIT
3171 && (regno == FIRST_MMX_REG && TARGET_MMX))
3172 return true;
3174 return false;
3177 /* Define how to find the value returned by a function.
3178 VALTYPE is the data type of the value (as a tree).
3179 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3180 otherwise, FUNC is 0. */
3182 ix86_function_value (tree valtype, tree func)
3184 enum machine_mode natmode = type_natural_mode (valtype);
3186 if (TARGET_64BIT)
3188 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3189 1, REGPARM_MAX, SSE_REGPARM_MAX,
3190 x86_64_int_return_registers, 0);
3191 /* For zero sized structures, construct_container returns NULL, but we
3192 need to keep the rest of the compiler happy by returning a meaningful value.  */
3193 if (!ret)
3194 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3195 return ret;
3197 else
3198 return gen_rtx_REG (TYPE_MODE (valtype), ix86_value_regno (natmode, func));
3201 /* Return false iff type is returned in memory. */
3203 ix86_return_in_memory (tree type)
3205 int needed_intregs, needed_sseregs, size;
3206 enum machine_mode mode = type_natural_mode (type);
3208 if (TARGET_64BIT)
3209 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3211 if (mode == BLKmode)
3212 return 1;
3214 size = int_size_in_bytes (type);
3216 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3217 return 0;
3219 if (VECTOR_MODE_P (mode) || mode == TImode)
3221 /* User-created vectors small enough to fit in EAX. */
3222 if (size < 8)
3223 return 0;
3225 /* MMX/3dNow values are returned in MM0,
3226 except when MMX doesn't exist.  */
3227 if (size == 8)
3228 return (TARGET_MMX ? 0 : 1);
3230 /* SSE values are returned in XMM0, except when it doesn't exist. */
3231 if (size == 16)
3232 return (TARGET_SSE ? 0 : 1);
3235 if (mode == XFmode)
3236 return 0;
3238 if (size > 12)
3239 return 1;
3240 return 0;
3243 /* When returning SSE vector types, we have a choice of either
3244 (1) being abi incompatible with a -march switch, or
3245 (2) generating an error.
3246 Given no good solution, I think the safest thing is one warning.
3247 The user won't be able to use -Werror, but....
3249 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3250 called in response to actually generating a caller or callee that
3251 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3252 via aggregate_value_p for general type probing from tree-ssa. */
3254 static rtx
3255 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3257 static bool warnedsse, warnedmmx;
3259 if (type)
3261 /* Look at the return type of the function, not the function type. */
3262 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3264 if (!TARGET_SSE && !warnedsse)
3266 if (mode == TImode
3267 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3269 warnedsse = true;
3270 warning (0, "SSE vector return without SSE enabled "
3271 "changes the ABI");
3275 if (!TARGET_MMX && !warnedmmx)
3277 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3279 warnedmmx = true;
3280 warning (0, "MMX vector return without MMX enabled "
3281 "changes the ABI");
3286 return NULL;
3289 /* Define how to find the value returned by a library function
3290 assuming the value has mode MODE. */
3292 ix86_libcall_value (enum machine_mode mode)
3294 if (TARGET_64BIT)
3296 switch (mode)
3298 case SFmode:
3299 case SCmode:
3300 case DFmode:
3301 case DCmode:
3302 case TFmode:
3303 return gen_rtx_REG (mode, FIRST_SSE_REG);
3304 case XFmode:
3305 case XCmode:
3306 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3307 case TCmode:
3308 return NULL;
3309 default:
3310 return gen_rtx_REG (mode, 0);
3313 else
3314 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL));
3317 /* Given a mode, return the register to use for a return value. */
3319 static int
3320 ix86_value_regno (enum machine_mode mode, tree func)
3322 gcc_assert (!TARGET_64BIT);
3324 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3325 we prevent this case when mmx is not available. */
3326 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3327 return FIRST_MMX_REG;
3329 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3330 we prevent this case when sse is not available. */
3331 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3332 return FIRST_SSE_REG;
3334 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3335 if (GET_MODE_CLASS (mode) != MODE_FLOAT || !TARGET_FLOAT_RETURNS_IN_80387)
3336 return 0;
3338 /* Floating point return values in %st(0), except for local functions when
3339 SSE math is enabled or for functions with sseregparm attribute. */
3340 if (func && (mode == SFmode || mode == DFmode))
3342 int sse_level = ix86_function_sseregparm (TREE_TYPE (func), func);
3343 if ((sse_level >= 1 && mode == SFmode)
3344 || (sse_level == 2 && mode == DFmode))
3345 return FIRST_SSE_REG;
3348 return FIRST_FLOAT_REG;
3351 /* Create the va_list data type. */
3353 static tree
3354 ix86_build_builtin_va_list (void)
3356 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3358 /* For i386 we use a plain pointer to the argument area.  */
3359 if (!TARGET_64BIT)
3360 return build_pointer_type (char_type_node);
3362 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3363 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3365 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3366 unsigned_type_node);
3367 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3368 unsigned_type_node);
3369 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3370 ptr_type_node);
3371 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3372 ptr_type_node);
3374 va_list_gpr_counter_field = f_gpr;
3375 va_list_fpr_counter_field = f_fpr;
3377 DECL_FIELD_CONTEXT (f_gpr) = record;
3378 DECL_FIELD_CONTEXT (f_fpr) = record;
3379 DECL_FIELD_CONTEXT (f_ovf) = record;
3380 DECL_FIELD_CONTEXT (f_sav) = record;
3382 TREE_CHAIN (record) = type_decl;
3383 TYPE_NAME (record) = type_decl;
3384 TYPE_FIELDS (record) = f_gpr;
3385 TREE_CHAIN (f_gpr) = f_fpr;
3386 TREE_CHAIN (f_fpr) = f_ovf;
3387 TREE_CHAIN (f_ovf) = f_sav;
3389 layout_type (record);
3391 /* The correct type is an array type of one element. */
3392 return build_array_type (record, build_index_type (size_zero_node));
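/* For reference, an illustrative C rendering (not code GCC emits) of the
   record built above:

       typedef struct __va_list_tag {
         unsigned int gp_offset;       byte offset of the next GPR in reg_save_area
         unsigned int fp_offset;       byte offset of the next SSE reg in reg_save_area
         void *overflow_arg_area;      next stack-passed argument
         void *reg_save_area;          register save block laid down by the prologue
       } __va_list_tag;

   with va_list itself being a one-element array of this tag, as returned
   above.  */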
3395 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3397 static void
3398 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3399 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3400 int no_rtl)
3402 CUMULATIVE_ARGS next_cum;
3403 rtx save_area = NULL_RTX, mem;
3404 rtx label;
3405 rtx label_ref;
3406 rtx tmp_reg;
3407 rtx nsse_reg;
3408 int set;
3409 tree fntype;
3410 int stdarg_p;
3411 int i;
3413 if (!TARGET_64BIT)
3414 return;
3416 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3417 return;
3419 /* Indicate that we need to allocate space on the stack for the varargs save area.  */
3420 ix86_save_varrargs_registers = 1;
3422 cfun->stack_alignment_needed = 128;
3424 fntype = TREE_TYPE (current_function_decl);
3425 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3426 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3427 != void_type_node));
3429 /* For varargs, we do not want to skip the dummy va_dcl argument.
3430 For stdargs, we do want to skip the last named argument. */
3431 next_cum = *cum;
3432 if (stdarg_p)
3433 function_arg_advance (&next_cum, mode, type, 1);
3435 if (!no_rtl)
3436 save_area = frame_pointer_rtx;
3438 set = get_varargs_alias_set ();
3440 for (i = next_cum.regno;
3441 i < ix86_regparm
3442 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3443 i++)
3445 mem = gen_rtx_MEM (Pmode,
3446 plus_constant (save_area, i * UNITS_PER_WORD));
3447 set_mem_alias_set (mem, set);
3448 emit_move_insn (mem, gen_rtx_REG (Pmode,
3449 x86_64_int_parameter_registers[i]));
3452 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3454 /* Now emit code to save the SSE registers.  The AX parameter contains the
3455 number of SSE parameter registers used to call this function.  We use the
3456 sse_prologue_save insn template, which produces a computed jump across
3457 the SSE saves.  We need some preparation work to get this working.  */
3459 label = gen_label_rtx ();
3460 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3462 /* Compute address to jump to :
3463 label - 5*eax + nnamed_sse_arguments*5 */
3464 tmp_reg = gen_reg_rtx (Pmode);
3465 nsse_reg = gen_reg_rtx (Pmode);
3466 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3467 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3468 gen_rtx_MULT (Pmode, nsse_reg,
3469 GEN_INT (4))));
3470 if (next_cum.sse_regno)
3471 emit_move_insn
3472 (nsse_reg,
3473 gen_rtx_CONST (DImode,
3474 gen_rtx_PLUS (DImode,
3475 label_ref,
3476 GEN_INT (next_cum.sse_regno * 4))));
3477 else
3478 emit_move_insn (nsse_reg, label_ref);
3479 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3481 /* Compute the address of the memory block we save into. We always use a
3482 pointer pointing 127 bytes after the first byte to store - this is needed
3483 to keep the instruction size limited to 4 bytes. */
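/* Presumably the point of the 127-byte bias: x86 memory operands with a
   signed 8-bit displacement cover the range -128..127, so by pointing
   TMP_REG near the middle of the save area every store emitted by
   sse_prologue_save can use a one-byte displacement, which is how each
   instruction stays within the fixed 4-byte size the computed jump
   above assumes.  */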
3484 tmp_reg = gen_reg_rtx (Pmode);
3485 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3486 plus_constant (save_area,
3487 8 * REGPARM_MAX + 127)));
3488 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3489 set_mem_alias_set (mem, set);
3490 set_mem_align (mem, BITS_PER_WORD);
3492 /* And finally do the dirty job! */
3493 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3494 GEN_INT (next_cum.sse_regno), label));
3499 /* Implement va_start. */
3501 void
3502 ix86_va_start (tree valist, rtx nextarg)
3504 HOST_WIDE_INT words, n_gpr, n_fpr;
3505 tree f_gpr, f_fpr, f_ovf, f_sav;
3506 tree gpr, fpr, ovf, sav, t;
3508 /* Only the 64-bit target needs something special. */
3509 if (!TARGET_64BIT)
3511 std_expand_builtin_va_start (valist, nextarg);
3512 return;
3515 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3516 f_fpr = TREE_CHAIN (f_gpr);
3517 f_ovf = TREE_CHAIN (f_fpr);
3518 f_sav = TREE_CHAIN (f_ovf);
3520 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3521 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3522 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3523 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3524 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3526 /* Count number of gp and fp argument registers used. */
3527 words = current_function_args_info.words;
3528 n_gpr = current_function_args_info.regno;
3529 n_fpr = current_function_args_info.sse_regno;
3531 if (TARGET_DEBUG_ARG)
3532 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3533 (int) words, (int) n_gpr, (int) n_fpr);
3535 if (cfun->va_list_gpr_size)
3537 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3538 build_int_cst (NULL_TREE, n_gpr * 8));
3539 TREE_SIDE_EFFECTS (t) = 1;
3540 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3543 if (cfun->va_list_fpr_size)
3545 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3546 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3547 TREE_SIDE_EFFECTS (t) = 1;
3548 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
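/* At this point gp_offset and fp_offset are byte offsets into the register
   save area laid out by the prologue: the first REGPARM_MAX * 8 bytes hold
   the integer argument registers, followed by 16 bytes per SSE argument
   register -- hence the 8 * REGPARM_MAX bias applied to fp_offset above.  */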
3551 /* Find the overflow area. */
3552 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3553 if (words != 0)
3554 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3555 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3556 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3557 TREE_SIDE_EFFECTS (t) = 1;
3558 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3560 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3562 /* Find the register save area.
3563 The prologue of the function saves it right above the stack frame. */
3564 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3565 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3566 TREE_SIDE_EFFECTS (t) = 1;
3567 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3571 /* Implement va_arg. */
3573 tree
3574 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3576 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3577 tree f_gpr, f_fpr, f_ovf, f_sav;
3578 tree gpr, fpr, ovf, sav, t;
3579 int size, rsize;
3580 tree lab_false, lab_over = NULL_TREE;
3581 tree addr, t2;
3582 rtx container;
3583 int indirect_p = 0;
3584 tree ptrtype;
3585 enum machine_mode nat_mode;
3587 /* Only the 64-bit target needs something special. */
3588 if (!TARGET_64BIT)
3589 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3591 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3592 f_fpr = TREE_CHAIN (f_gpr);
3593 f_ovf = TREE_CHAIN (f_fpr);
3594 f_sav = TREE_CHAIN (f_ovf);
3596 valist = build_va_arg_indirect_ref (valist);
3597 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3598 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3599 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3600 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3602 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3603 if (indirect_p)
3604 type = build_pointer_type (type);
3605 size = int_size_in_bytes (type);
3606 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3608 nat_mode = type_natural_mode (type);
3609 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3610 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3612 /* Pull the value out of the saved registers. */
3614 addr = create_tmp_var (ptr_type_node, "addr");
3615 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3617 if (container)
3619 int needed_intregs, needed_sseregs;
3620 bool need_temp;
3621 tree int_addr, sse_addr;
3623 lab_false = create_artificial_label ();
3624 lab_over = create_artificial_label ();
3626 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3628 need_temp = (!REG_P (container)
3629 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3630 || TYPE_ALIGN (type) > 128));
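/* Roughly: a temporary is needed when the value is passed in more than one
   register slot and its required alignment exceeds what the save area slots
   guarantee (8-byte slots for integer registers, 16-byte slots for SSE
   registers), or - as checked below - when the pieces do not sit in
   strictly consecutive slots.  */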
3632 /* In case we are passing a structure, verify that it is a consecutive block
3633 in the register save area. If not, we need to do moves. */
3634 if (!need_temp && !REG_P (container))
3636 /* Verify that all registers are strictly consecutive. */
3637 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3639 int i;
3641 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3643 rtx slot = XVECEXP (container, 0, i);
3644 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3645 || INTVAL (XEXP (slot, 1)) != i * 16)
3646 need_temp = 1;
3649 else
3651 int i;
3653 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3655 rtx slot = XVECEXP (container, 0, i);
3656 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3657 || INTVAL (XEXP (slot, 1)) != i * 8)
3658 need_temp = 1;
3662 if (!need_temp)
3664 int_addr = addr;
3665 sse_addr = addr;
3667 else
3669 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3670 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3671 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3672 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3675 /* First ensure that we fit completely in registers. */
3676 if (needed_intregs)
3678 t = build_int_cst (TREE_TYPE (gpr),
3679 (REGPARM_MAX - needed_intregs + 1) * 8);
3680 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3681 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3682 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3683 gimplify_and_add (t, pre_p);
3685 if (needed_sseregs)
3687 t = build_int_cst (TREE_TYPE (fpr),
3688 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3689 + REGPARM_MAX * 8);
3690 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3691 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3692 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3693 gimplify_and_add (t, pre_p);
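/* Both tests above compare the running byte offsets against the end of
   their part of the save area: gp_offset can grow to REGPARM_MAX * 8 and
   fp_offset to REGPARM_MAX * 8 + SSE_REGPARM_MAX * 16, so reaching
   (max - needed + 1) * slot_size means fewer than the needed number of free
   slots remain and the value must be fetched from the overflow area instead
   (the lab_false path).  */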
3696 /* Compute index to start of area used for integer regs. */
3697 if (needed_intregs)
3699 /* int_addr = gpr + sav; */
3700 t = fold_convert (ptr_type_node, gpr);
3701 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3702 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3703 gimplify_and_add (t, pre_p);
3705 if (needed_sseregs)
3707 /* sse_addr = fpr + sav; */
3708 t = fold_convert (ptr_type_node, fpr);
3709 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3710 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3711 gimplify_and_add (t, pre_p);
3713 if (need_temp)
3715 int i;
3716 tree temp = create_tmp_var (type, "va_arg_tmp");
3718 /* addr = &temp; */
3719 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3720 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3721 gimplify_and_add (t, pre_p);
3723 for (i = 0; i < XVECLEN (container, 0); i++)
3725 rtx slot = XVECEXP (container, 0, i);
3726 rtx reg = XEXP (slot, 0);
3727 enum machine_mode mode = GET_MODE (reg);
3728 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3729 tree addr_type = build_pointer_type (piece_type);
3730 tree src_addr, src;
3731 int src_offset;
3732 tree dest_addr, dest;
3734 if (SSE_REGNO_P (REGNO (reg)))
3736 src_addr = sse_addr;
3737 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3739 else
3741 src_addr = int_addr;
3742 src_offset = REGNO (reg) * 8;
3744 src_addr = fold_convert (addr_type, src_addr);
3745 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3746 size_int (src_offset)));
3747 src = build_va_arg_indirect_ref (src_addr);
3749 dest_addr = fold_convert (addr_type, addr);
3750 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3751 size_int (INTVAL (XEXP (slot, 1)))));
3752 dest = build_va_arg_indirect_ref (dest_addr);
3754 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3755 gimplify_and_add (t, pre_p);
3759 if (needed_intregs)
3761 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3762 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
3763 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3764 gimplify_and_add (t, pre_p);
3766 if (needed_sseregs)
3768 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3769 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
3770 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3771 gimplify_and_add (t, pre_p);
3774 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3775 gimplify_and_add (t, pre_p);
3777 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3778 append_to_statement_list (t, pre_p);
3781 /* ... otherwise out of the overflow area. */
3783 /* Care for on-stack alignment if needed. */
3784 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3785 t = ovf;
3786 else
3788 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3789 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3790 build_int_cst (TREE_TYPE (ovf), align - 1));
3791 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3792 build_int_cst (TREE_TYPE (t), -align));
3794 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3796 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3797 gimplify_and_add (t2, pre_p);
3799 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3800 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
3801 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3802 gimplify_and_add (t, pre_p);
3804 if (container)
3806 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3807 append_to_statement_list (t, pre_p);
3810 ptrtype = build_pointer_type (type);
3811 addr = fold_convert (ptrtype, addr);
3813 if (indirect_p)
3814 addr = build_va_arg_indirect_ref (addr);
3815 return build_va_arg_indirect_ref (addr);
3818 /* Return nonzero if OPNUM's MEM should be matched
3819 in movabs* patterns. */
3822 ix86_check_movabs (rtx insn, int opnum)
3824 rtx set, mem;
3826 set = PATTERN (insn);
3827 if (GET_CODE (set) == PARALLEL)
3828 set = XVECEXP (set, 0, 0);
3829 gcc_assert (GET_CODE (set) == SET);
3830 mem = XEXP (set, opnum);
3831 while (GET_CODE (mem) == SUBREG)
3832 mem = SUBREG_REG (mem);
3833 gcc_assert (GET_CODE (mem) == MEM);
3834 return (volatile_ok || !MEM_VOLATILE_P (mem));
3837 /* Initialize the table of extra 80387 mathematical constants. */
3839 static void
3840 init_ext_80387_constants (void)
3842 static const char * cst[5] =
3844 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3845 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3846 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3847 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3848 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3850 int i;
3852 for (i = 0; i < 5; i++)
3854 real_from_string (&ext_80387_constants_table[i], cst[i]);
3855 /* Ensure each constant is rounded to XFmode precision. */
3856 real_convert (&ext_80387_constants_table[i],
3857 XFmode, &ext_80387_constants_table[i]);
3860 ext_80387_constants_init = 1;
3863 /* Return true if the constant is something that can be loaded with
3864 a special instruction. */
3867 standard_80387_constant_p (rtx x)
3869 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3870 return -1;
3872 if (x == CONST0_RTX (GET_MODE (x)))
3873 return 1;
3874 if (x == CONST1_RTX (GET_MODE (x)))
3875 return 2;
3877 /* For XFmode constants, try to find a special 80387 instruction when
3878 optimizing for size or on those CPUs that benefit from them. */
3879 if (GET_MODE (x) == XFmode
3880 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3882 REAL_VALUE_TYPE r;
3883 int i;
3885 if (! ext_80387_constants_init)
3886 init_ext_80387_constants ();
3888 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3889 for (i = 0; i < 5; i++)
3890 if (real_identical (&r, &ext_80387_constants_table[i]))
3891 return i + 3;
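/* The values 3..7 returned here correspond to entries 0..4 of
   ext_80387_constants_table and are mapped to fldlg2, fldln2, fldl2e,
   fldl2t and fldpi by standard_80387_constant_opcode below.  */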
3894 return 0;
3897 /* Return the opcode of the special instruction to be used to load
3898 the constant X. */
3900 const char *
3901 standard_80387_constant_opcode (rtx x)
3903 switch (standard_80387_constant_p (x))
3905 case 1:
3906 return "fldz";
3907 case 2:
3908 return "fld1";
3909 case 3:
3910 return "fldlg2";
3911 case 4:
3912 return "fldln2";
3913 case 5:
3914 return "fldl2e";
3915 case 6:
3916 return "fldl2t";
3917 case 7:
3918 return "fldpi";
3919 default:
3920 gcc_unreachable ();
3924 /* Return the CONST_DOUBLE representing the 80387 constant that is
3925 loaded by the specified special instruction. The argument IDX
3926 matches the return value from standard_80387_constant_p. */
3929 standard_80387_constant_rtx (int idx)
3931 int i;
3933 if (! ext_80387_constants_init)
3934 init_ext_80387_constants ();
3936 switch (idx)
3938 case 3:
3939 case 4:
3940 case 5:
3941 case 6:
3942 case 7:
3943 i = idx - 3;
3944 break;
3946 default:
3947 gcc_unreachable ();
3950 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
3951 XFmode);
3954 /* Return 1 if X is an FP constant that we can load into an SSE register without using memory. */
3957 standard_sse_constant_p (rtx x)
3959 if (x == const0_rtx)
3960 return 1;
3961 return (x == CONST0_RTX (GET_MODE (x)));
3964 /* Returns 1 if OP contains a symbol reference */
3967 symbolic_reference_mentioned_p (rtx op)
3969 const char *fmt;
3970 int i;
3972 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3973 return 1;
3975 fmt = GET_RTX_FORMAT (GET_CODE (op));
3976 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3978 if (fmt[i] == 'E')
3980 int j;
3982 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3983 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3984 return 1;
3987 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3988 return 1;
3991 return 0;
3994 /* Return 1 if it is appropriate to emit `ret' instructions in the
3995 body of a function. Do this only if the epilogue is simple, needing a
3996 couple of insns. Prior to reloading, we can't tell how many registers
3997 must be saved, so return 0 then. Return 0 if there is no frame
3998 marker to de-allocate. */
4001 ix86_can_use_return_insn_p (void)
4003 struct ix86_frame frame;
4005 if (! reload_completed || frame_pointer_needed)
4006 return 0;
4008 /* Don't allow more than 32K bytes of pop, since that's all we can do
4009 with one instruction. */
4010 if (current_function_pops_args
4011 && current_function_args_size >= 32768)
4012 return 0;
4014 ix86_compute_frame_layout (&frame);
4015 return frame.to_allocate == 0 && frame.nregs == 0;
4018 /* Value should be nonzero if functions must have frame pointers.
4019 Zero means the frame pointer need not be set up (and parms may
4020 be accessed via the stack pointer) in functions that seem suitable. */
4023 ix86_frame_pointer_required (void)
4025 /* If we accessed previous frames, then the generated code expects
4026 to be able to access the saved ebp value in our frame. */
4027 if (cfun->machine->accesses_prev_frame)
4028 return 1;
4030 /* Several x86 OSes need a frame pointer for other reasons,
4031 usually pertaining to setjmp. */
4032 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4033 return 1;
4035 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4036 the frame pointer by default. Turn it back on now if we've not
4037 got a leaf function. */
4038 if (TARGET_OMIT_LEAF_FRAME_POINTER
4039 && (!current_function_is_leaf))
4040 return 1;
4042 if (current_function_profile)
4043 return 1;
4045 return 0;
4048 /* Record that the current function accesses previous call frames. */
4050 void
4051 ix86_setup_frame_addresses (void)
4053 cfun->machine->accesses_prev_frame = 1;
4056 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
4057 # define USE_HIDDEN_LINKONCE 1
4058 #else
4059 # define USE_HIDDEN_LINKONCE 0
4060 #endif
4062 static int pic_labels_used;
4064 /* Fills in the label name that should be used for a pc thunk for
4065 the given register. */
4067 static void
4068 get_pc_thunk_name (char name[32], unsigned int regno)
4070 if (USE_HIDDEN_LINKONCE)
4071 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4072 else
4073 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4077 /* This function generates the pc thunks used for -fpic; each one loads its
4078 register with the return address of the caller and then returns. */
4080 void
4081 ix86_file_end (void)
4083 rtx xops[2];
4084 int regno;
4086 for (regno = 0; regno < 8; ++regno)
4088 char name[32];
4090 if (! ((pic_labels_used >> regno) & 1))
4091 continue;
4093 get_pc_thunk_name (name, regno);
4095 if (USE_HIDDEN_LINKONCE)
4097 tree decl;
4099 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4100 error_mark_node);
4101 TREE_PUBLIC (decl) = 1;
4102 TREE_STATIC (decl) = 1;
4103 DECL_ONE_ONLY (decl) = 1;
4105 (*targetm.asm_out.unique_section) (decl, 0);
4106 named_section (decl, NULL, 0);
4108 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4109 fputs ("\t.hidden\t", asm_out_file);
4110 assemble_name (asm_out_file, name);
4111 fputc ('\n', asm_out_file);
4112 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4114 else
4116 text_section ();
4117 ASM_OUTPUT_LABEL (asm_out_file, name);
4120 xops[0] = gen_rtx_REG (SImode, regno);
4121 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4122 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4123 output_asm_insn ("ret", xops);
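/* Each thunk emitted above is just, in illustrative AT&T syntax:

     __i686.get_pc_thunk.reg:
             movl  (%esp), %reg
             ret

   i.e. it copies its own return address - the address of the instruction
   following the caller's call - into REG.  */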
4126 if (NEED_INDICATE_EXEC_STACK)
4127 file_end_indicate_exec_stack ();
4130 /* Emit code for the SET_GOT patterns. */
4132 const char *
4133 output_set_got (rtx dest)
4135 rtx xops[3];
4137 xops[0] = dest;
4138 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4140 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4142 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4144 if (!flag_pic)
4145 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4146 else
4147 output_asm_insn ("call\t%a2", xops);
4149 #if TARGET_MACHO
4150 /* Output the "canonical" label name ("Lxx$pb") here too. This
4151 is what will be referred to by the Mach-O PIC subsystem. */
4152 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4153 #endif
4154 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4155 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4157 if (flag_pic)
4158 output_asm_insn ("pop{l}\t%0", xops);
4160 else
4162 char name[32];
4163 get_pc_thunk_name (name, REGNO (dest));
4164 pic_labels_used |= 1 << REGNO (dest);
4166 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4167 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4168 output_asm_insn ("call\t%X2", xops);
4171 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4172 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4173 else if (!TARGET_MACHO)
4174 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
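/* Putting the pieces together: for -fpic without
   TARGET_DEEP_BRANCH_PREDICTION this roughly expands to the classic
   sequence (illustrative, destination %ebx):

     call  1f
   1: popl  %ebx
     addl  $_GLOBAL_OFFSET_TABLE_+[.-1b], %ebx

   With TARGET_DEEP_BRANCH_PREDICTION the popl is replaced by a call to the
   matching __i686.get_pc_thunk.* helper, so the CPU's return-stack
   predictor sees a paired call/ret instead of a call whose return address
   is popped off by hand.  */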
4176 return "";
4179 /* Generate an "push" pattern for input ARG. */
4181 static rtx
4182 gen_push (rtx arg)
4184 return gen_rtx_SET (VOIDmode,
4185 gen_rtx_MEM (Pmode,
4186 gen_rtx_PRE_DEC (Pmode,
4187 stack_pointer_rtx)),
4188 arg);
4191 /* Return >= 0 if there is an unused call-clobbered register available
4192 for the entire function. */
4194 static unsigned int
4195 ix86_select_alt_pic_regnum (void)
4197 if (current_function_is_leaf && !current_function_profile)
4199 int i;
4200 for (i = 2; i >= 0; --i)
4201 if (!regs_ever_live[i])
4202 return i;
4205 return INVALID_REGNUM;
4208 /* Return 1 if we need to save REGNO. */
4209 static int
4210 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4212 if (pic_offset_table_rtx
4213 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4214 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4215 || current_function_profile
4216 || current_function_calls_eh_return
4217 || current_function_uses_const_pool))
4219 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4220 return 0;
4221 return 1;
4224 if (current_function_calls_eh_return && maybe_eh_return)
4226 unsigned i;
4227 for (i = 0; ; i++)
4229 unsigned test = EH_RETURN_DATA_REGNO (i);
4230 if (test == INVALID_REGNUM)
4231 break;
4232 if (test == regno)
4233 return 1;
4237 return (regs_ever_live[regno]
4238 && !call_used_regs[regno]
4239 && !fixed_regs[regno]
4240 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4243 /* Return number of registers to be saved on the stack. */
4245 static int
4246 ix86_nsaved_regs (void)
4248 int nregs = 0;
4249 int regno;
4251 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4252 if (ix86_save_reg (regno, true))
4253 nregs++;
4254 return nregs;
4257 /* Return the offset between two registers, one to be eliminated, and the other
4258 its replacement, at the start of a routine. */
4260 HOST_WIDE_INT
4261 ix86_initial_elimination_offset (int from, int to)
4263 struct ix86_frame frame;
4264 ix86_compute_frame_layout (&frame);
4266 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4267 return frame.hard_frame_pointer_offset;
4268 else if (from == FRAME_POINTER_REGNUM
4269 && to == HARD_FRAME_POINTER_REGNUM)
4270 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4271 else
4273 gcc_assert (to == STACK_POINTER_REGNUM);
4275 if (from == ARG_POINTER_REGNUM)
4276 return frame.stack_pointer_offset;
4278 gcc_assert (from == FRAME_POINTER_REGNUM);
4279 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4285 /* Fill the structure ix86_frame describing the frame of the function currently being compiled. */
4285 static void
4286 ix86_compute_frame_layout (struct ix86_frame *frame)
4288 HOST_WIDE_INT total_size;
4289 unsigned int stack_alignment_needed;
4290 HOST_WIDE_INT offset;
4291 unsigned int preferred_alignment;
4292 HOST_WIDE_INT size = get_frame_size ();
4294 frame->nregs = ix86_nsaved_regs ();
4295 total_size = size;
4297 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4298 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4300 /* During a reload iteration the number of registers saved can change.
4301 Recompute the value as needed. Do not recompute when the number of registers
4302 didn't change, as reload makes multiple calls to the function and does not
4303 expect the decision to change within a single iteration. */
4304 if (!optimize_size
4305 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4307 int count = frame->nregs;
4309 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4310 /* The fast prologue uses move instead of push to save registers. This
4311 is significantly longer, but also executes faster as modern hardware
4312 can execute the moves in parallel, but can't do that for push/pop.
4314 Be careful about choosing which prologue to emit: when the function takes
4315 many instructions to execute we may use the slow version, as well as when
4316 the function is known to be outside a hot spot (this is known with profile
4317 feedback only). Weight the size of the function by the number of registers
4318 to save, as it is cheap to use one or two push instructions but very
4319 slow to use many of them. */
4320 if (count)
4321 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4322 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4323 || (flag_branch_probabilities
4324 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4325 cfun->machine->use_fast_prologue_epilogue = false;
4326 else
4327 cfun->machine->use_fast_prologue_epilogue
4328 = !expensive_function_p (count);
4330 if (TARGET_PROLOGUE_USING_MOVE
4331 && cfun->machine->use_fast_prologue_epilogue)
4332 frame->save_regs_using_mov = true;
4333 else
4334 frame->save_regs_using_mov = false;
4337 /* Skip return address and saved base pointer. */
4338 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4340 frame->hard_frame_pointer_offset = offset;
4342 /* Do some sanity checking of stack_alignment_needed and
4343 preferred_alignment, since the i386 port is the only one using these
4344 features, which may break easily. */
4346 gcc_assert (!size || stack_alignment_needed);
4347 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4348 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4349 gcc_assert (stack_alignment_needed
4350 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4352 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4353 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4355 /* Register save area */
4356 offset += frame->nregs * UNITS_PER_WORD;
4358 /* Va-arg area */
4359 if (ix86_save_varrargs_registers)
4361 offset += X86_64_VARARGS_SIZE;
4362 frame->va_arg_size = X86_64_VARARGS_SIZE;
4364 else
4365 frame->va_arg_size = 0;
4367 /* Align start of frame for local function. */
4368 frame->padding1 = ((offset + stack_alignment_needed - 1)
4369 & -stack_alignment_needed) - offset;
4371 offset += frame->padding1;
4373 /* Frame pointer points here. */
4374 frame->frame_pointer_offset = offset;
4376 offset += size;
4378 /* Add the outgoing arguments area. This can be skipped if we eliminated
4379 all the function calls as dead code.
4380 Skipping is however impossible when the function calls alloca, since the
4381 alloca expander assumes that the last current_function_outgoing_args_size
4382 bytes of the stack frame are unused. */
4383 if (ACCUMULATE_OUTGOING_ARGS
4384 && (!current_function_is_leaf || current_function_calls_alloca))
4386 offset += current_function_outgoing_args_size;
4387 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4389 else
4390 frame->outgoing_arguments_size = 0;
4392 /* Align stack boundary. Only needed if we're calling another function
4393 or using alloca. */
4394 if (!current_function_is_leaf || current_function_calls_alloca)
4395 frame->padding2 = ((offset + preferred_alignment - 1)
4396 & -preferred_alignment) - offset;
4397 else
4398 frame->padding2 = 0;
4400 offset += frame->padding2;
4402 /* We've reached end of stack frame. */
4403 frame->stack_pointer_offset = offset;
4405 /* Size the prologue needs to allocate. */
4406 frame->to_allocate =
4407 (size + frame->padding1 + frame->padding2
4408 + frame->outgoing_arguments_size + frame->va_arg_size);
4410 if ((!frame->to_allocate && frame->nregs <= 1)
4411 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4412 frame->save_regs_using_mov = false;
4414 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4415 && current_function_is_leaf)
4417 frame->red_zone_size = frame->to_allocate;
4418 if (frame->save_regs_using_mov)
4419 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4420 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4421 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4423 else
4424 frame->red_zone_size = 0;
4425 frame->to_allocate -= frame->red_zone_size;
4426 frame->stack_pointer_offset -= frame->red_zone_size;
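/* An illustrative sketch of the resulting frame layout, from higher to
   lower addresses, with the offsets computed above:

       incoming return address
       saved %ebp (if frame_pointer_needed)    <- hard_frame_pointer_offset
       saved registers (nregs words)
       va-arg register save area (when varargs registers are saved)
       padding1
       local variables                         <- frame_pointer_offset
       outgoing argument area (if accumulated)
       padding2                                <- stack_pointer_offset

   to_allocate is the part below the saved registers that the prologue still
   has to subtract from the stack pointer; in 64-bit leaf functions a
   trailing portion of it may instead live in the red zone (the area below
   the stack pointer that the ABI guarantees will not be clobbered), which
   is what the red_zone_size adjustment above accounts for.  */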
4427 #if 0
4428 fprintf (stderr, "nregs: %i\n", frame->nregs);
4429 fprintf (stderr, "size: %i\n", size);
4430 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4431 fprintf (stderr, "padding1: %i\n", frame->padding1);
4432 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4433 fprintf (stderr, "padding2: %i\n", frame->padding2);
4434 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4435 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4436 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4437 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4438 frame->hard_frame_pointer_offset);
4439 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4440 #endif
4443 /* Emit code to save registers in the prologue. */
4445 static void
4446 ix86_emit_save_regs (void)
4448 int regno;
4449 rtx insn;
4451 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4452 if (ix86_save_reg (regno, true))
4454 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4455 RTX_FRAME_RELATED_P (insn) = 1;
4459 /* Emit code to save registers using MOV insns. The first register
4460 is stored at POINTER + OFFSET. */
4461 static void
4462 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4464 int regno;
4465 rtx insn;
4467 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4468 if (ix86_save_reg (regno, true))
4470 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4471 Pmode, offset),
4472 gen_rtx_REG (Pmode, regno));
4473 RTX_FRAME_RELATED_P (insn) = 1;
4474 offset += UNITS_PER_WORD;
4478 /* Expand a prologue or epilogue stack adjustment.
4479 The pattern exists to put a dependency on all ebp-based memory accesses.
4480 STYLE should be negative if instructions should be marked as frame related,
4481 zero if the %r11 register is live and cannot be freely used, and positive
4482 otherwise. */
4484 static void
4485 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4487 rtx insn;
4489 if (! TARGET_64BIT)
4490 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4491 else if (x86_64_immediate_operand (offset, DImode))
4492 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4493 else
4495 rtx r11;
4496 /* r11 is used by indirect sibcall return as well, set before the
4497 epilogue and used after the epilogue. ATM indirect sibcall
4498 shouldn't be used together with huge frame sizes in one
4499 function because of the frame_size check in sibcall.c. */
4500 gcc_assert (style);
4501 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4502 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4503 if (style < 0)
4504 RTX_FRAME_RELATED_P (insn) = 1;
4505 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4506 offset));
4508 if (style < 0)
4509 RTX_FRAME_RELATED_P (insn) = 1;
4512 /* Expand the prologue into a bunch of separate insns. */
4514 void
4515 ix86_expand_prologue (void)
4517 rtx insn;
4518 bool pic_reg_used;
4519 struct ix86_frame frame;
4520 HOST_WIDE_INT allocate;
4522 ix86_compute_frame_layout (&frame);
4524 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4525 slower on all targets. Also sdb doesn't like it. */
4527 if (frame_pointer_needed)
4529 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4530 RTX_FRAME_RELATED_P (insn) = 1;
4532 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4533 RTX_FRAME_RELATED_P (insn) = 1;
4536 allocate = frame.to_allocate;
4538 if (!frame.save_regs_using_mov)
4539 ix86_emit_save_regs ();
4540 else
4541 allocate += frame.nregs * UNITS_PER_WORD;
4543 /* When using the red zone we may start register saving before allocating
4544 the stack frame, saving one cycle of the prologue. */
4545 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4546 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4547 : stack_pointer_rtx,
4548 -frame.nregs * UNITS_PER_WORD);
4550 if (allocate == 0)
4552 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4553 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4554 GEN_INT (-allocate), -1);
4555 else
4557 /* Only valid for Win32. */
4558 rtx eax = gen_rtx_REG (SImode, 0);
4559 bool eax_live = ix86_eax_live_at_start_p ();
4560 rtx t;
4562 gcc_assert (!TARGET_64BIT);
4564 if (eax_live)
4566 emit_insn (gen_push (eax));
4567 allocate -= 4;
4570 emit_move_insn (eax, GEN_INT (allocate));
4572 insn = emit_insn (gen_allocate_stack_worker (eax));
4573 RTX_FRAME_RELATED_P (insn) = 1;
4574 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4575 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4576 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4577 t, REG_NOTES (insn));
4579 if (eax_live)
4581 if (frame_pointer_needed)
4582 t = plus_constant (hard_frame_pointer_rtx,
4583 allocate
4584 - frame.to_allocate
4585 - frame.nregs * UNITS_PER_WORD);
4586 else
4587 t = plus_constant (stack_pointer_rtx, allocate);
4588 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
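/* The stack-probing path above hands the allocation size to the helper in
   %eax, which the helper clobbers; if %eax is live on entry (it can carry
   an incoming argument under regparm-style conventions), it is pushed
   beforehand and reloaded here from the slot that push created, which now
   sits ALLOCATE bytes above the adjusted stack pointer.  */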
4592 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4594 if (!frame_pointer_needed || !frame.to_allocate)
4595 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4596 else
4597 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4598 -frame.nregs * UNITS_PER_WORD);
4601 pic_reg_used = false;
4602 if (pic_offset_table_rtx
4603 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4604 || current_function_profile))
4606 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4608 if (alt_pic_reg_used != INVALID_REGNUM)
4609 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4611 pic_reg_used = true;
4614 if (pic_reg_used)
4616 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4618 /* Even with accurate pre-reload life analysis, we can wind up
4619 deleting all references to the pic register after reload.
4620 Consider if cross-jumping unifies two sides of a branch
4621 controlled by a comparison vs the only read from a global.
4622 In which case, allow the set_got to be deleted, though we're
4623 too late to do anything about the ebx save in the prologue. */
4624 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4627 /* Prevent function calls from being scheduled before the call to mcount.
4628 In the pic_reg_used case, make sure that the got load isn't deleted. */
4629 if (current_function_profile)
4630 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4633 /* Emit code to restore saved registers using MOV insns. First register
4634 is restored from POINTER + OFFSET. */
4635 static void
4636 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4637 int maybe_eh_return)
4639 int regno;
4640 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4642 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4643 if (ix86_save_reg (regno, maybe_eh_return))
4645 /* Ensure that adjust_address won't be forced to produce a pointer
4646 out of the range allowed by the x86-64 instruction set. */
4647 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4649 rtx r11;
4651 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4652 emit_move_insn (r11, GEN_INT (offset));
4653 emit_insn (gen_adddi3 (r11, r11, pointer));
4654 base_address = gen_rtx_MEM (Pmode, r11);
4655 offset = 0;
4657 emit_move_insn (gen_rtx_REG (Pmode, regno),
4658 adjust_address (base_address, Pmode, offset));
4659 offset += UNITS_PER_WORD;
4663 /* Restore function stack, frame, and registers. */
4665 void
4666 ix86_expand_epilogue (int style)
4668 int regno;
4669 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4670 struct ix86_frame frame;
4671 HOST_WIDE_INT offset;
4673 ix86_compute_frame_layout (&frame);
4675 /* Calculate start of saved registers relative to ebp. Special care
4676 must be taken for the normal return case of a function using
4677 eh_return: the eax and edx registers are marked as saved, but not
4678 restored along this path. */
4679 offset = frame.nregs;
4680 if (current_function_calls_eh_return && style != 2)
4681 offset -= 2;
4682 offset *= -UNITS_PER_WORD;
4684 /* If we're only restoring one register and sp is not valid, then
4685 use a move instruction to restore the register, since it's
4686 less work than reloading sp and popping the register.
4688 The default code results in a stack adjustment using an add/lea instruction,
4689 while this code results in a LEAVE instruction (or discrete equivalent),
4690 so it is profitable in some other cases as well, especially when there
4691 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4692 is set and there is exactly one register to pop. This heuristic may need
4693 some tuning in the future. */
4694 if ((!sp_valid && frame.nregs <= 1)
4695 || (TARGET_EPILOGUE_USING_MOVE
4696 && cfun->machine->use_fast_prologue_epilogue
4697 && (frame.nregs > 1 || frame.to_allocate))
4698 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4699 || (frame_pointer_needed && TARGET_USE_LEAVE
4700 && cfun->machine->use_fast_prologue_epilogue
4701 && frame.nregs == 1)
4702 || current_function_calls_eh_return)
4704 /* Restore registers. We can use ebp or esp to address the memory
4705 locations. If both are available, default to ebp, since offsets
4706 are known to be small. The only exception is esp pointing directly to
4707 the end of the block of saved registers, where we may simplify the
4708 addressing mode. */
4710 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4711 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4712 frame.to_allocate, style == 2);
4713 else
4714 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4715 offset, style == 2);
4717 /* eh_return epilogues need %ecx added to the stack pointer. */
4718 if (style == 2)
4720 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4722 if (frame_pointer_needed)
4724 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4725 tmp = plus_constant (tmp, UNITS_PER_WORD);
4726 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4728 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4729 emit_move_insn (hard_frame_pointer_rtx, tmp);
4731 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4732 const0_rtx, style);
4734 else
4736 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4737 tmp = plus_constant (tmp, (frame.to_allocate
4738 + frame.nregs * UNITS_PER_WORD));
4739 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4742 else if (!frame_pointer_needed)
4743 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4744 GEN_INT (frame.to_allocate
4745 + frame.nregs * UNITS_PER_WORD),
4746 style);
4747 /* If not an i386, mov & pop is faster than "leave". */
4748 else if (TARGET_USE_LEAVE || optimize_size
4749 || !cfun->machine->use_fast_prologue_epilogue)
4750 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4751 else
4753 pro_epilogue_adjust_stack (stack_pointer_rtx,
4754 hard_frame_pointer_rtx,
4755 const0_rtx, style);
4756 if (TARGET_64BIT)
4757 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4758 else
4759 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4762 else
4764 /* First step is to deallocate the stack frame so that we can
4765 pop the registers. */
4766 if (!sp_valid)
4768 gcc_assert (frame_pointer_needed);
4769 pro_epilogue_adjust_stack (stack_pointer_rtx,
4770 hard_frame_pointer_rtx,
4771 GEN_INT (offset), style);
4773 else if (frame.to_allocate)
4774 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4775 GEN_INT (frame.to_allocate), style);
4777 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4778 if (ix86_save_reg (regno, false))
4780 if (TARGET_64BIT)
4781 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4782 else
4783 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4785 if (frame_pointer_needed)
4787 /* The LEAVE instruction results in shorter dependency chains on CPUs that
4788 are able to grok it fast. */
4789 if (TARGET_USE_LEAVE)
4790 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4791 else if (TARGET_64BIT)
4792 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4793 else
4794 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4798 /* Sibcall epilogues don't want a return instruction. */
4799 if (style == 0)
4800 return;
4802 if (current_function_pops_args && current_function_args_size)
4804 rtx popc = GEN_INT (current_function_pops_args);
4806 /* i386 can only pop 64K bytes. If asked to pop more, pop
4807 return address, do explicit add, and jump indirectly to the
4808 caller. */
4810 if (current_function_pops_args >= 65536)
4812 rtx ecx = gen_rtx_REG (SImode, 2);
4814 /* There is no "pascal" calling convention in the 64-bit ABI. */
4815 gcc_assert (!TARGET_64BIT);
4817 emit_insn (gen_popsi1 (ecx));
4818 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4819 emit_jump_insn (gen_return_indirect_internal (ecx));
4821 else
4822 emit_jump_insn (gen_return_pop_internal (popc));
4824 else
4825 emit_jump_insn (gen_return_internal ());
4828 /* Reset any state that compiling the function may have modified. */
4830 static void
4831 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4832 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4834 if (pic_offset_table_rtx)
4835 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
4838 /* Extract the parts of an RTL expression that is a valid memory address
4839 for an instruction. Return 0 if the structure of the address is
4840 grossly off. Return -1 if the address contains ASHIFT, so it is not
4841 strictly valid, but is still used for computing the length of a lea instruction. */
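/* In other words, the routine splits an address into the canonical x86 form
   base + index*scale + disp (plus an optional segment override).  As an
   illustrative example, an RTX such as

     (plus (plus (mult (reg B) (const_int 4)) (reg A)) (const_int 8))

   decomposes into base = A, index = B, scale = 4, disp = 8.  */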
4844 ix86_decompose_address (rtx addr, struct ix86_address *out)
4846 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
4847 rtx base_reg, index_reg;
4848 HOST_WIDE_INT scale = 1;
4849 rtx scale_rtx = NULL_RTX;
4850 int retval = 1;
4851 enum ix86_address_seg seg = SEG_DEFAULT;
4853 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4854 base = addr;
4855 else if (GET_CODE (addr) == PLUS)
4857 rtx addends[4], op;
4858 int n = 0, i;
4860 op = addr;
4863 if (n >= 4)
4864 return 0;
4865 addends[n++] = XEXP (op, 1);
4866 op = XEXP (op, 0);
4868 while (GET_CODE (op) == PLUS);
4869 if (n >= 4)
4870 return 0;
4871 addends[n] = op;
4873 for (i = n; i >= 0; --i)
4875 op = addends[i];
4876 switch (GET_CODE (op))
4878 case MULT:
4879 if (index)
4880 return 0;
4881 index = XEXP (op, 0);
4882 scale_rtx = XEXP (op, 1);
4883 break;
4885 case UNSPEC:
4886 if (XINT (op, 1) == UNSPEC_TP
4887 && TARGET_TLS_DIRECT_SEG_REFS
4888 && seg == SEG_DEFAULT)
4889 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4890 else
4891 return 0;
4892 break;
4894 case REG:
4895 case SUBREG:
4896 if (!base)
4897 base = op;
4898 else if (!index)
4899 index = op;
4900 else
4901 return 0;
4902 break;
4904 case CONST:
4905 case CONST_INT:
4906 case SYMBOL_REF:
4907 case LABEL_REF:
4908 if (disp)
4909 return 0;
4910 disp = op;
4911 break;
4913 default:
4914 return 0;
4918 else if (GET_CODE (addr) == MULT)
4920 index = XEXP (addr, 0); /* index*scale */
4921 scale_rtx = XEXP (addr, 1);
4923 else if (GET_CODE (addr) == ASHIFT)
4925 rtx tmp;
4927 /* We're called for lea too, which implements ashift on occasion. */
4928 index = XEXP (addr, 0);
4929 tmp = XEXP (addr, 1);
4930 if (GET_CODE (tmp) != CONST_INT)
4931 return 0;
4932 scale = INTVAL (tmp);
4933 if ((unsigned HOST_WIDE_INT) scale > 3)
4934 return 0;
4935 scale = 1 << scale;
4936 retval = -1;
4938 else
4939 disp = addr; /* displacement */
4941 /* Extract the integral value of scale. */
4942 if (scale_rtx)
4944 if (GET_CODE (scale_rtx) != CONST_INT)
4945 return 0;
4946 scale = INTVAL (scale_rtx);
4949 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
4950 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
4952 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
4953 if (base_reg && index_reg && scale == 1
4954 && (index_reg == arg_pointer_rtx
4955 || index_reg == frame_pointer_rtx
4956 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
4958 rtx tmp;
4959 tmp = base, base = index, index = tmp;
4960 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
4963 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4964 if ((base_reg == hard_frame_pointer_rtx
4965 || base_reg == frame_pointer_rtx
4966 || base_reg == arg_pointer_rtx) && !disp)
4967 disp = const0_rtx;
4969 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
4970 Avoid this by transforming it to [%esi+0]. */
4971 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4972 && base_reg && !index_reg && !disp
4973 && REG_P (base_reg)
4974 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
4975 disp = const0_rtx;
4977 /* Special case: encode reg+reg instead of reg*2. */
4978 if (!base && index && scale && scale == 2)
4979 base = index, base_reg = index_reg, scale = 1;
4981 /* Special case: scaling cannot be encoded without base or displacement. */
4982 if (!base && !disp && index && scale != 1)
4983 disp = const0_rtx;
4985 out->base = base;
4986 out->index = index;
4987 out->disp = disp;
4988 out->scale = scale;
4989 out->seg = seg;
4991 return retval;
4994 /* Return the cost of the memory address x.
4995 For i386, it is better to use a complex address than let gcc copy
4996 the address into a reg and make a new pseudo. But not if the address
4997 requires two regs - that would mean more pseudos with longer
4998 lifetimes. */
4999 static int
5000 ix86_address_cost (rtx x)
5002 struct ix86_address parts;
5003 int cost = 1;
5004 int ok = ix86_decompose_address (x, &parts);
5006 gcc_assert (ok);
5008 if (parts.base && GET_CODE (parts.base) == SUBREG)
5009 parts.base = SUBREG_REG (parts.base);
5010 if (parts.index && GET_CODE (parts.index) == SUBREG)
5011 parts.index = SUBREG_REG (parts.index);
5013 /* More complex memory references are better. */
5014 if (parts.disp && parts.disp != const0_rtx)
5015 cost--;
5016 if (parts.seg != SEG_DEFAULT)
5017 cost--;
5019 /* Attempt to minimize number of registers in the address. */
5020 if ((parts.base
5021 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5022 || (parts.index
5023 && (!REG_P (parts.index)
5024 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5025 cost++;
5027 if (parts.base
5028 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5029 && parts.index
5030 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5031 && parts.base != parts.index)
5032 cost++;
5034 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
5035 since its predecode logic can't detect the length of such instructions
5036 and decoding degenerates to vector decoding. Increase the cost of such
5037 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
5038 to split such addresses or even to refuse them entirely.
5040 The following addressing modes are affected:
5041 [base+scale*index]
5042 [scale*index+disp]
5043 [base+index]
5045 The first and last cases may be avoidable by explicitly coding the zero in
5046 the memory address, but I don't have an AMD-K6 machine handy to check this
5047 theory. */
5049 if (TARGET_K6
5050 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5051 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5052 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5053 cost += 10;
5055 return cost;
5058 /* If X is a machine specific address (i.e. a symbol or label being
5059 referenced as a displacement from the GOT implemented using an
5060 UNSPEC), then return the base term. Otherwise return X. */
5063 ix86_find_base_term (rtx x)
5065 rtx term;
5067 if (TARGET_64BIT)
5069 if (GET_CODE (x) != CONST)
5070 return x;
5071 term = XEXP (x, 0);
5072 if (GET_CODE (term) == PLUS
5073 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5074 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5075 term = XEXP (term, 0);
5076 if (GET_CODE (term) != UNSPEC
5077 || XINT (term, 1) != UNSPEC_GOTPCREL)
5078 return x;
5080 term = XVECEXP (term, 0, 0);
5082 if (GET_CODE (term) != SYMBOL_REF
5083 && GET_CODE (term) != LABEL_REF)
5084 return x;
5086 return term;
5089 term = ix86_delegitimize_address (x);
5091 if (GET_CODE (term) != SYMBOL_REF
5092 && GET_CODE (term) != LABEL_REF)
5093 return x;
5095 return term;
5098 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
5099 this is used to form addresses of local data when -fPIC is in
5100 use. */
5102 static bool
5103 darwin_local_data_pic (rtx disp)
5105 if (GET_CODE (disp) == MINUS)
5107 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5108 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5109 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5111 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5112 if (! strcmp (sym_name, "<pic base>"))
5113 return true;
5117 return false;
5120 /* Determine if a given RTX is a valid constant. We already know this
5121 satisfies CONSTANT_P. */
5123 bool
5124 legitimate_constant_p (rtx x)
5126 switch (GET_CODE (x))
5128 case CONST:
5129 x = XEXP (x, 0);
5131 if (GET_CODE (x) == PLUS)
5133 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5134 return false;
5135 x = XEXP (x, 0);
5138 if (TARGET_MACHO && darwin_local_data_pic (x))
5139 return true;
5141 /* Only some unspecs are valid as "constants". */
5142 if (GET_CODE (x) == UNSPEC)
5143 switch (XINT (x, 1))
5145 case UNSPEC_TPOFF:
5146 case UNSPEC_NTPOFF:
5147 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5148 case UNSPEC_DTPOFF:
5149 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
5150 default:
5151 return false;
5154 /* We must have drilled down to a symbol. */
5155 if (!symbolic_operand (x, Pmode))
5156 return false;
5157 /* FALLTHRU */
5159 case SYMBOL_REF:
5160 /* TLS symbols are never valid. */
5161 if (tls_symbolic_operand (x, Pmode))
5162 return false;
5163 break;
5165 default:
5166 break;
5169 /* Otherwise we handle everything else in the move patterns. */
5170 return true;
5173 /* Determine if it's legal to put X into the constant pool. This
5174 is not possible for the address of thread-local symbols, which
5175 is checked above. */
5177 static bool
5178 ix86_cannot_force_const_mem (rtx x)
5180 return !legitimate_constant_p (x);
5183 /* Determine if a given RTX is a valid constant address. */
5185 bool
5186 constant_address_p (rtx x)
5188 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5191 /* Nonzero if the constant value X is a legitimate general operand
5192 when generating PIC code. It is given that flag_pic is on and
5193 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5195 bool
5196 legitimate_pic_operand_p (rtx x)
5198 rtx inner;
5200 switch (GET_CODE (x))
5202 case CONST:
5203 inner = XEXP (x, 0);
5205 /* Only some unspecs are valid as "constants". */
5206 if (GET_CODE (inner) == UNSPEC)
5207 switch (XINT (inner, 1))
5209 case UNSPEC_TPOFF:
5210 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5211 default:
5212 return false;
5214 /* FALLTHRU */
5216 case SYMBOL_REF:
5217 case LABEL_REF:
5218 return legitimate_pic_address_disp_p (x);
5220 default:
5221 return true;
5225 /* Determine if a given CONST RTX is a valid memory displacement
5226 in PIC mode. */
5229 legitimate_pic_address_disp_p (rtx disp)
5231 bool saw_plus;
5233 /* In 64bit mode we can allow direct addresses of symbols and labels
5234 when they are not dynamic symbols. */
5235 if (TARGET_64BIT)
5237 /* TLS references should always be enclosed in UNSPEC. */
5238 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5239 return 0;
5240 if (GET_CODE (disp) == SYMBOL_REF
5241 && ix86_cmodel == CM_SMALL_PIC
5242 && SYMBOL_REF_LOCAL_P (disp))
5243 return 1;
5244 if (GET_CODE (disp) == LABEL_REF)
5245 return 1;
5246 if (GET_CODE (disp) == CONST
5247 && GET_CODE (XEXP (disp, 0)) == PLUS)
5249 rtx op0 = XEXP (XEXP (disp, 0), 0);
5250 rtx op1 = XEXP (XEXP (disp, 0), 1);
5252 /* TLS references should always be enclosed in UNSPEC. */
5253 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5254 return 0;
5255 if (((GET_CODE (op0) == SYMBOL_REF
5256 && ix86_cmodel == CM_SMALL_PIC
5257 && SYMBOL_REF_LOCAL_P (op0))
5258 || GET_CODE (op0) == LABEL_REF)
5259 && GET_CODE (op1) == CONST_INT
5260 && INTVAL (op1) < 16*1024*1024
5261 && INTVAL (op1) >= -16*1024*1024)
5262 return 1;
5265 if (GET_CODE (disp) != CONST)
5266 return 0;
5267 disp = XEXP (disp, 0);
5269 if (TARGET_64BIT)
5271 /* It is not safe to allow PLUS expressions here; this limits the allowed
5272 distance of GOT references, and we should not need these anyway. */
5273 if (GET_CODE (disp) != UNSPEC
5274 || XINT (disp, 1) != UNSPEC_GOTPCREL)
5275 return 0;
5277 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5278 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5279 return 0;
5280 return 1;
5283 saw_plus = false;
5284 if (GET_CODE (disp) == PLUS)
5286 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5287 return 0;
5288 disp = XEXP (disp, 0);
5289 saw_plus = true;
5292 if (TARGET_MACHO && darwin_local_data_pic (disp))
5293 return 1;
5295 if (GET_CODE (disp) != UNSPEC)
5296 return 0;
5298 switch (XINT (disp, 1))
5300 case UNSPEC_GOT:
5301 if (saw_plus)
5302 return false;
5303 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5304 case UNSPEC_GOTOFF:
5305 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5306 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5307 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5308 return false;
5309 case UNSPEC_GOTTPOFF:
5310 case UNSPEC_GOTNTPOFF:
5311 case UNSPEC_INDNTPOFF:
5312 if (saw_plus)
5313 return false;
5314 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5315 case UNSPEC_NTPOFF:
5316 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5317 case UNSPEC_DTPOFF:
5318 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5321 return 0;
5324 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5325 memory address for an instruction. The MODE argument is the machine mode
5326 for the MEM expression that wants to use this address.
5328 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5329 convert common non-canonical forms to canonical form so that they will
5330 be recognized. */
5333 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5335 struct ix86_address parts;
5336 rtx base, index, disp;
5337 HOST_WIDE_INT scale;
5338 const char *reason = NULL;
5339 rtx reason_rtx = NULL_RTX;
5341 if (TARGET_DEBUG_ADDR)
5343 fprintf (stderr,
5344 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5345 GET_MODE_NAME (mode), strict);
5346 debug_rtx (addr);
5349 if (ix86_decompose_address (addr, &parts) <= 0)
5351 reason = "decomposition failed";
5352 goto report_error;
5355 base = parts.base;
5356 index = parts.index;
5357 disp = parts.disp;
5358 scale = parts.scale;
5360 /* Validate the base register.
5362 Don't allow SUBREGs that span more than a word here. They can lead to spill
5363 failures when the base is one word out of a two-word structure, which is
5364 represented internally as a DImode int. */
5366 if (base)
5368 rtx reg;
5369 reason_rtx = base;
5371 if (REG_P (base))
5372 reg = base;
5373 else if (GET_CODE (base) == SUBREG
5374 && REG_P (SUBREG_REG (base))
5375 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5376 <= UNITS_PER_WORD)
5377 reg = SUBREG_REG (base);
5378 else
5380 reason = "base is not a register";
5381 goto report_error;
5384 if (GET_MODE (base) != Pmode)
5386 reason = "base is not in Pmode";
5387 goto report_error;
5390 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5391 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5393 reason = "base is not valid";
5394 goto report_error;
5398 /* Validate the index register.
5400 Don't allow SUBREGs that span more than a word here -- same as above. */
5402 if (index)
5404 rtx reg;
5405 reason_rtx = index;
5407 if (REG_P (index))
5408 reg = index;
5409 else if (GET_CODE (index) == SUBREG
5410 && REG_P (SUBREG_REG (index))
5411 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5412 <= UNITS_PER_WORD)
5413 reg = SUBREG_REG (index);
5414 else
5416 reason = "index is not a register";
5417 goto report_error;
5420 if (GET_MODE (index) != Pmode)
5422 reason = "index is not in Pmode";
5423 goto report_error;
5426 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5427 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5429 reason = "index is not valid";
5430 goto report_error;
5434 /* Validate scale factor. */
5435 if (scale != 1)
5437 reason_rtx = GEN_INT (scale);
5438 if (!index)
5440 reason = "scale without index";
5441 goto report_error;
5444 if (scale != 2 && scale != 4 && scale != 8)
5446 reason = "scale is not a valid multiplier";
5447 goto report_error;
5451 /* Validate displacement. */
5452 if (disp)
5454 reason_rtx = disp;
5456 if (GET_CODE (disp) == CONST
5457 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5458 switch (XINT (XEXP (disp, 0), 1))
5460 case UNSPEC_GOT:
5461 case UNSPEC_GOTOFF:
5462 case UNSPEC_GOTPCREL:
5463 gcc_assert (flag_pic);
5464 goto is_legitimate_pic;
5466 case UNSPEC_GOTTPOFF:
5467 case UNSPEC_GOTNTPOFF:
5468 case UNSPEC_INDNTPOFF:
5469 case UNSPEC_NTPOFF:
5470 case UNSPEC_DTPOFF:
5471 break;
5473 default:
5474 reason = "invalid address unspec";
5475 goto report_error;
5478 else if (flag_pic && (SYMBOLIC_CONST (disp)
5479 #if TARGET_MACHO
5480 && !machopic_operand_p (disp)
5481 #endif
5484 is_legitimate_pic:
5485 if (TARGET_64BIT && (index || base))
5487 /* foo@dtpoff(%rX) is ok. */
5488 if (GET_CODE (disp) != CONST
5489 || GET_CODE (XEXP (disp, 0)) != PLUS
5490 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5491 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5492 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5493 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5495 reason = "non-constant pic memory reference";
5496 goto report_error;
5499 else if (! legitimate_pic_address_disp_p (disp))
5501 reason = "displacement is an invalid pic construct";
5502 goto report_error;
5505 /* This code used to verify that a symbolic pic displacement
5506 includes the pic_offset_table_rtx register.
5508 While this is a good idea, unfortunately these constructs may
5509 be created by the "adds using lea" optimization for incorrect
5510 code like:
5512 int a;
5513 int foo(int i)
5515 return *(&a+i);
5518 This code is nonsensical, but results in addressing the
5519 GOT table with a pic_offset_table_rtx base. We can't
5520 just refuse it easily, since it gets matched by the
5521 "addsi3" pattern, which later gets split into an lea when
5522 the output register differs from the input. While this
5523 could be handled by a separate addsi pattern for this case
5524 that never results in an lea, disabling this test seems to
5525 be the easier and correct fix for the crash. */
5527 else if (GET_CODE (disp) != LABEL_REF
5528 && GET_CODE (disp) != CONST_INT
5529 && (GET_CODE (disp) != CONST
5530 || !legitimate_constant_p (disp))
5531 && (GET_CODE (disp) != SYMBOL_REF
5532 || !legitimate_constant_p (disp)))
5534 reason = "displacement is not constant";
5535 goto report_error;
5537 else if (TARGET_64BIT
5538 && !x86_64_immediate_operand (disp, VOIDmode))
5540 reason = "displacement is out of range";
5541 goto report_error;
5545 /* Everything looks valid. */
5546 if (TARGET_DEBUG_ADDR)
5547 fprintf (stderr, "Success.\n");
5548 return TRUE;
5550 report_error:
5551 if (TARGET_DEBUG_ADDR)
5553 fprintf (stderr, "Error: %s\n", reason);
5554 debug_rtx (reason_rtx);
5556 return FALSE;
5559 /* Return a unique alias set for the GOT. */
5561 static HOST_WIDE_INT
5562 ix86_GOT_alias_set (void)
5564 static HOST_WIDE_INT set = -1;
5565 if (set == -1)
5566 set = new_alias_set ();
5567 return set;
5570 /* Return a legitimate reference for ORIG (an address) using the
5571 register REG. If REG is 0, a new pseudo is generated.
5573 There are two types of references that must be handled:
5575 1. Global data references must load the address from the GOT, via
5576 the PIC reg. An insn is emitted to do this load, and the reg is
5577 returned.
5579 2. Static data references, constant pool addresses, and code labels
5580 compute the address as an offset from the GOT, whose base is in
5581 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5582 differentiate them from global data objects. The returned
5583 address is the PIC reg + an unspec constant.
5585 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5586 reg also appears in the address. */
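/* As a rough illustration of the two cases on ia32: a global symbol is
   reached through the GOT, e.g. "movl foo@GOT(%ebx), %reg", while a
   local/static symbol is computed relative to the GOT base, e.g.
   "leal bar@GOTOFF(%ebx), %reg"; on x86-64 the global case instead
   uses "movq foo@GOTPCREL(%rip), %reg".  */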
5588 static rtx
5589 legitimize_pic_address (rtx orig, rtx reg)
5591 rtx addr = orig;
5592 rtx new = orig;
5593 rtx base;
5595 #if TARGET_MACHO
5596 if (reg == 0)
5597 reg = gen_reg_rtx (Pmode);
5598 /* Use the generic Mach-O PIC machinery. */
5599 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5600 #endif
5602 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5603 new = addr;
5604 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5606 /* This symbol may be referenced via a displacement from the PIC
5607 base address (@GOTOFF). */
5609 if (reload_in_progress)
5610 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5611 if (GET_CODE (addr) == CONST)
5612 addr = XEXP (addr, 0);
5613 if (GET_CODE (addr) == PLUS)
5615 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5616 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5618 else
5619 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5620 new = gen_rtx_CONST (Pmode, new);
5621 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5623 if (reg != 0)
5625 emit_move_insn (reg, new);
5626 new = reg;
5629 else if (GET_CODE (addr) == SYMBOL_REF)
5631 if (TARGET_64BIT)
5633 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5634 new = gen_rtx_CONST (Pmode, new);
5635 new = gen_const_mem (Pmode, new);
5636 set_mem_alias_set (new, ix86_GOT_alias_set ());
5638 if (reg == 0)
5639 reg = gen_reg_rtx (Pmode);
5640 /* Use gen_movsi directly; otherwise the address is loaded
5641 into a register for CSE. We don't want to CSE these addresses;
5642 instead we CSE addresses from the GOT table, so skip this. */
5643 emit_insn (gen_movsi (reg, new));
5644 new = reg;
5646 else
5648 /* This symbol must be referenced via a load from the
5649 Global Offset Table (@GOT). */
5651 if (reload_in_progress)
5652 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5653 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5654 new = gen_rtx_CONST (Pmode, new);
5655 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5656 new = gen_const_mem (Pmode, new);
5657 set_mem_alias_set (new, ix86_GOT_alias_set ());
5659 if (reg == 0)
5660 reg = gen_reg_rtx (Pmode);
5661 emit_move_insn (reg, new);
5662 new = reg;
5665 else
5667 if (GET_CODE (addr) == CONST)
5669 addr = XEXP (addr, 0);
5671 /* We must match stuff we generated before. Assume the only
5672 unspecs that can get here are ours. Not that we could do
5673 anything with them anyway.... */
5674 if (GET_CODE (addr) == UNSPEC
5675 || (GET_CODE (addr) == PLUS
5676 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5677 return orig;
5678 gcc_assert (GET_CODE (addr) == PLUS);
5680 if (GET_CODE (addr) == PLUS)
5682 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5684 /* Check first to see if this is a constant offset from a @GOTOFF
5685 symbol reference. */
5686 if (local_symbolic_operand (op0, Pmode)
5687 && GET_CODE (op1) == CONST_INT)
5689 if (!TARGET_64BIT)
5691 if (reload_in_progress)
5692 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5693 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5694 UNSPEC_GOTOFF);
5695 new = gen_rtx_PLUS (Pmode, new, op1);
5696 new = gen_rtx_CONST (Pmode, new);
5697 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5699 if (reg != 0)
5701 emit_move_insn (reg, new);
5702 new = reg;
5705 else
5707 if (INTVAL (op1) < -16*1024*1024
5708 || INTVAL (op1) >= 16*1024*1024)
5709 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
5712 else
5714 base = legitimize_pic_address (XEXP (addr, 0), reg);
5715 new = legitimize_pic_address (XEXP (addr, 1),
5716 base == reg ? NULL_RTX : reg);
5718 if (GET_CODE (new) == CONST_INT)
5719 new = plus_constant (base, INTVAL (new));
5720 else
5722 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5724 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5725 new = XEXP (new, 1);
5727 new = gen_rtx_PLUS (Pmode, base, new);
5732 return new;
5735 /* Load the thread pointer. If TO_REG is true, force it into a register. */
5737 static rtx
5738 get_thread_pointer (int to_reg)
5740 rtx tp, reg, insn;
5742 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5743 if (!to_reg)
5744 return tp;
5746 reg = gen_reg_rtx (Pmode);
5747 insn = gen_rtx_SET (VOIDmode, reg, tp);
5748 insn = emit_insn (insn);
5750 return reg;
5753 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5754 false if we expect this to be used for a memory address and true if
5755 we expect to load the address into a register. */
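/* Roughly, the addresses produced here end up as, e.g., the thread
   pointer plus x@NTPOFF for the local-exec model (emitted as
   %gs:x@ntpoff on ia32 with GNU TLS, %fs: on x86-64), or a load of
   x@GOTTPOFF / x@GOTNTPOFF from the GOT for the initial-exec model;
   the dynamic models call the tls_get_addr helper instead.  */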
5757 static rtx
5758 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5760 rtx dest, base, off, pic;
5761 int type;
5763 switch (model)
5765 case TLS_MODEL_GLOBAL_DYNAMIC:
5766 dest = gen_reg_rtx (Pmode);
5767 if (TARGET_64BIT)
5769 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5771 start_sequence ();
5772 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5773 insns = get_insns ();
5774 end_sequence ();
5776 emit_libcall_block (insns, dest, rax, x);
5778 else
5779 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5780 break;
5782 case TLS_MODEL_LOCAL_DYNAMIC:
5783 base = gen_reg_rtx (Pmode);
5784 if (TARGET_64BIT)
5786 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5788 start_sequence ();
5789 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5790 insns = get_insns ();
5791 end_sequence ();
5793 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5794 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5795 emit_libcall_block (insns, base, rax, note);
5797 else
5798 emit_insn (gen_tls_local_dynamic_base_32 (base));
5800 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5801 off = gen_rtx_CONST (Pmode, off);
5803 return gen_rtx_PLUS (Pmode, base, off);
5805 case TLS_MODEL_INITIAL_EXEC:
5806 if (TARGET_64BIT)
5808 pic = NULL;
5809 type = UNSPEC_GOTNTPOFF;
5811 else if (flag_pic)
5813 if (reload_in_progress)
5814 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5815 pic = pic_offset_table_rtx;
5816 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5818 else if (!TARGET_GNU_TLS)
5820 pic = gen_reg_rtx (Pmode);
5821 emit_insn (gen_set_got (pic));
5822 type = UNSPEC_GOTTPOFF;
5824 else
5826 pic = NULL;
5827 type = UNSPEC_INDNTPOFF;
5830 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5831 off = gen_rtx_CONST (Pmode, off);
5832 if (pic)
5833 off = gen_rtx_PLUS (Pmode, pic, off);
5834 off = gen_const_mem (Pmode, off);
5835 set_mem_alias_set (off, ix86_GOT_alias_set ());
5837 if (TARGET_64BIT || TARGET_GNU_TLS)
5839 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5840 off = force_reg (Pmode, off);
5841 return gen_rtx_PLUS (Pmode, base, off);
5843 else
5845 base = get_thread_pointer (true);
5846 dest = gen_reg_rtx (Pmode);
5847 emit_insn (gen_subsi3 (dest, base, off));
5849 break;
5851 case TLS_MODEL_LOCAL_EXEC:
5852 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5853 (TARGET_64BIT || TARGET_GNU_TLS)
5854 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5855 off = gen_rtx_CONST (Pmode, off);
5857 if (TARGET_64BIT || TARGET_GNU_TLS)
5859 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5860 return gen_rtx_PLUS (Pmode, base, off);
5862 else
5864 base = get_thread_pointer (true);
5865 dest = gen_reg_rtx (Pmode);
5866 emit_insn (gen_subsi3 (dest, base, off));
5868 break;
5870 default:
5871 gcc_unreachable ();
5874 return dest;
5877 /* Try machine-dependent ways of modifying an illegitimate address
5878 to be legitimate. If we find one, return the new, valid address.
5879 This macro is used in only one place: `memory_address' in explow.c.
5881 OLDX is the address as it was before break_out_memory_refs was called.
5882 In some cases it is useful to look at this to decide what needs to be done.
5884 MODE and WIN are passed so that this macro can use
5885 GO_IF_LEGITIMATE_ADDRESS.
5887 It is always safe for this macro to do nothing. It exists to recognize
5888 opportunities to optimize the output.
5890 For the 80386, we handle X+REG by loading X into a register R and
5891 using R+REG. R will go in a general reg and indexing will be used.
5892 However, if REG is a broken-out memory address or multiplication,
5893 nothing needs to be done because REG can certainly go in a general reg.
5895 When -fpic is used, special handling is needed for symbolic references.
5896 See comments by legitimize_pic_address in i386.c for details. */
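/* A small example of the canonicalizations done below: an address like
   (plus (reg) (ashift (reg) (const_int 2))) is rewritten into
   (plus (mult (reg) (const_int 4)) (reg)), since the hardware encodes
   a scale of 1, 2, 4 or 8 rather than a shift count, and the MULT is
   moved to the first operand.  */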
5899 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5901 int changed = 0;
5902 unsigned log;
5904 if (TARGET_DEBUG_ADDR)
5906 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5907 GET_MODE_NAME (mode));
5908 debug_rtx (x);
5911 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5912 if (log)
5913 return legitimize_tls_address (x, log, false);
5914 if (GET_CODE (x) == CONST
5915 && GET_CODE (XEXP (x, 0)) == PLUS
5916 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5917 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5919 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5920 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5923 if (flag_pic && SYMBOLIC_CONST (x))
5924 return legitimize_pic_address (x, 0);
5926 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
5927 if (GET_CODE (x) == ASHIFT
5928 && GET_CODE (XEXP (x, 1)) == CONST_INT
5929 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
5931 changed = 1;
5932 log = INTVAL (XEXP (x, 1));
5933 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5934 GEN_INT (1 << log));
5937 if (GET_CODE (x) == PLUS)
5939 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5941 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5942 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5943 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
5945 changed = 1;
5946 log = INTVAL (XEXP (XEXP (x, 0), 1));
5947 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5948 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5949 GEN_INT (1 << log));
5952 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5953 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5954 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
5956 changed = 1;
5957 log = INTVAL (XEXP (XEXP (x, 1), 1));
5958 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5959 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5960 GEN_INT (1 << log));
5963 /* Put multiply first if it isn't already. */
5964 if (GET_CODE (XEXP (x, 1)) == MULT)
5966 rtx tmp = XEXP (x, 0);
5967 XEXP (x, 0) = XEXP (x, 1);
5968 XEXP (x, 1) = tmp;
5969 changed = 1;
5972 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5973 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5974 created by virtual register instantiation, register elimination, and
5975 similar optimizations. */
5976 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5978 changed = 1;
5979 x = gen_rtx_PLUS (Pmode,
5980 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5981 XEXP (XEXP (x, 1), 0)),
5982 XEXP (XEXP (x, 1), 1));
5985 /* Canonicalize
5986 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5987 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5988 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5989 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5990 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5991 && CONSTANT_P (XEXP (x, 1)))
5993 rtx constant;
5994 rtx other = NULL_RTX;
5996 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5998 constant = XEXP (x, 1);
5999 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6001 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6003 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6004 other = XEXP (x, 1);
6006 else
6007 constant = 0;
6009 if (constant)
6011 changed = 1;
6012 x = gen_rtx_PLUS (Pmode,
6013 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6014 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6015 plus_constant (other, INTVAL (constant)));
6019 if (changed && legitimate_address_p (mode, x, FALSE))
6020 return x;
6022 if (GET_CODE (XEXP (x, 0)) == MULT)
6024 changed = 1;
6025 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6028 if (GET_CODE (XEXP (x, 1)) == MULT)
6030 changed = 1;
6031 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6034 if (changed
6035 && GET_CODE (XEXP (x, 1)) == REG
6036 && GET_CODE (XEXP (x, 0)) == REG)
6037 return x;
6039 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6041 changed = 1;
6042 x = legitimize_pic_address (x, 0);
6045 if (changed && legitimate_address_p (mode, x, FALSE))
6046 return x;
6048 if (GET_CODE (XEXP (x, 0)) == REG)
6050 rtx temp = gen_reg_rtx (Pmode);
6051 rtx val = force_operand (XEXP (x, 1), temp);
6052 if (val != temp)
6053 emit_move_insn (temp, val);
6055 XEXP (x, 1) = temp;
6056 return x;
6059 else if (GET_CODE (XEXP (x, 1)) == REG)
6061 rtx temp = gen_reg_rtx (Pmode);
6062 rtx val = force_operand (XEXP (x, 0), temp);
6063 if (val != temp)
6064 emit_move_insn (temp, val);
6066 XEXP (x, 0) = temp;
6067 return x;
6071 return x;
6074 /* Print an integer constant expression in assembler syntax. Addition
6075 and subtraction are the only arithmetic that may appear in these
6076 expressions. FILE is the stdio stream to write to, X is the rtx, and
6077 CODE is the operand print code from the output string. */
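/* E.g. given (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)) this
   prints "foo@GOTOFF", while a PLUS is printed with the integer
   constant first, so (plus (symbol_ref "foo") (const_int 4)) prints
   "4+foo".  */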
6079 static void
6080 output_pic_addr_const (FILE *file, rtx x, int code)
6082 char buf[256];
6084 switch (GET_CODE (x))
6086 case PC:
6087 gcc_assert (flag_pic);
6088 putc ('.', file);
6089 break;
6091 case SYMBOL_REF:
6092 assemble_name (file, XSTR (x, 0));
6093 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6094 fputs ("@PLT", file);
6095 break;
6097 case LABEL_REF:
6098 x = XEXP (x, 0);
6099 /* FALLTHRU */
6100 case CODE_LABEL:
6101 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6102 assemble_name (asm_out_file, buf);
6103 break;
6105 case CONST_INT:
6106 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6107 break;
6109 case CONST:
6110 /* This used to output parentheses around the expression,
6111 but that does not work on the 386 (either ATT or BSD assembler). */
6112 output_pic_addr_const (file, XEXP (x, 0), code);
6113 break;
6115 case CONST_DOUBLE:
6116 if (GET_MODE (x) == VOIDmode)
6118 /* We can use %d if the number is <32 bits and positive. */
6119 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6120 fprintf (file, "0x%lx%08lx",
6121 (unsigned long) CONST_DOUBLE_HIGH (x),
6122 (unsigned long) CONST_DOUBLE_LOW (x));
6123 else
6124 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6126 else
6127 /* We can't handle floating point constants;
6128 PRINT_OPERAND must handle them. */
6129 output_operand_lossage ("floating constant misused");
6130 break;
6132 case PLUS:
6133 /* Some assemblers need integer constants to appear first. */
6134 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6136 output_pic_addr_const (file, XEXP (x, 0), code);
6137 putc ('+', file);
6138 output_pic_addr_const (file, XEXP (x, 1), code);
6140 else
6142 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6143 output_pic_addr_const (file, XEXP (x, 1), code);
6144 putc ('+', file);
6145 output_pic_addr_const (file, XEXP (x, 0), code);
6147 break;
6149 case MINUS:
6150 if (!TARGET_MACHO)
6151 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6152 output_pic_addr_const (file, XEXP (x, 0), code);
6153 putc ('-', file);
6154 output_pic_addr_const (file, XEXP (x, 1), code);
6155 if (!TARGET_MACHO)
6156 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6157 break;
6159 case UNSPEC:
6160 gcc_assert (XVECLEN (x, 0) == 1);
6161 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6162 switch (XINT (x, 1))
6164 case UNSPEC_GOT:
6165 fputs ("@GOT", file);
6166 break;
6167 case UNSPEC_GOTOFF:
6168 fputs ("@GOTOFF", file);
6169 break;
6170 case UNSPEC_GOTPCREL:
6171 fputs ("@GOTPCREL(%rip)", file);
6172 break;
6173 case UNSPEC_GOTTPOFF:
6174 /* FIXME: This might be @TPOFF in Sun ld too. */
6175 fputs ("@GOTTPOFF", file);
6176 break;
6177 case UNSPEC_TPOFF:
6178 fputs ("@TPOFF", file);
6179 break;
6180 case UNSPEC_NTPOFF:
6181 if (TARGET_64BIT)
6182 fputs ("@TPOFF", file);
6183 else
6184 fputs ("@NTPOFF", file);
6185 break;
6186 case UNSPEC_DTPOFF:
6187 fputs ("@DTPOFF", file);
6188 break;
6189 case UNSPEC_GOTNTPOFF:
6190 if (TARGET_64BIT)
6191 fputs ("@GOTTPOFF(%rip)", file);
6192 else
6193 fputs ("@GOTNTPOFF", file);
6194 break;
6195 case UNSPEC_INDNTPOFF:
6196 fputs ("@INDNTPOFF", file);
6197 break;
6198 default:
6199 output_operand_lossage ("invalid UNSPEC as operand");
6200 break;
6202 break;
6204 default:
6205 output_operand_lossage ("invalid expression as operand");
6209 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6210 We need to emit DTP-relative relocations. */
6212 static void
6213 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6215 fputs (ASM_LONG, file);
6216 output_addr_const (file, x);
6217 fputs ("@DTPOFF", file);
6218 switch (size)
6220 case 4:
6221 break;
6222 case 8:
6223 fputs (", 0", file);
6224 break;
6225 default:
6226 gcc_unreachable ();
6230 /* In the name of slightly smaller debug output, and to cater to
6231 general assembler lossage, recognize PIC+GOTOFF and turn it back
6232 into a direct symbol reference. */
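/* E.g. (plus (reg) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))),
   where the register is the PIC register, is turned back into plain
   (symbol_ref "foo"); on x86-64 a GOT load through @GOTPCREL is
   handled the same way.  */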
6234 static rtx
6235 ix86_delegitimize_address (rtx orig_x)
6237 rtx x = orig_x, y;
6239 if (GET_CODE (x) == MEM)
6240 x = XEXP (x, 0);
6242 if (TARGET_64BIT)
6244 if (GET_CODE (x) != CONST
6245 || GET_CODE (XEXP (x, 0)) != UNSPEC
6246 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6247 || GET_CODE (orig_x) != MEM)
6248 return orig_x;
6249 return XVECEXP (XEXP (x, 0), 0, 0);
6252 if (GET_CODE (x) != PLUS
6253 || GET_CODE (XEXP (x, 1)) != CONST)
6254 return orig_x;
6256 if (GET_CODE (XEXP (x, 0)) == REG
6257 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6258 /* %ebx + GOT/GOTOFF */
6259 y = NULL;
6260 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6262 /* %ebx + %reg * scale + GOT/GOTOFF */
6263 y = XEXP (x, 0);
6264 if (GET_CODE (XEXP (y, 0)) == REG
6265 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6266 y = XEXP (y, 1);
6267 else if (GET_CODE (XEXP (y, 1)) == REG
6268 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6269 y = XEXP (y, 0);
6270 else
6271 return orig_x;
6272 if (GET_CODE (y) != REG
6273 && GET_CODE (y) != MULT
6274 && GET_CODE (y) != ASHIFT)
6275 return orig_x;
6277 else
6278 return orig_x;
6280 x = XEXP (XEXP (x, 1), 0);
6281 if (GET_CODE (x) == UNSPEC
6282 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6283 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6285 if (y)
6286 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6287 return XVECEXP (x, 0, 0);
6290 if (GET_CODE (x) == PLUS
6291 && GET_CODE (XEXP (x, 0)) == UNSPEC
6292 && GET_CODE (XEXP (x, 1)) == CONST_INT
6293 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6294 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6295 && GET_CODE (orig_x) != MEM)))
6297 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6298 if (y)
6299 return gen_rtx_PLUS (Pmode, y, x);
6300 return x;
6303 return orig_x;
6306 static void
6307 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6308 int fp, FILE *file)
6310 const char *suffix;
6312 if (mode == CCFPmode || mode == CCFPUmode)
6314 enum rtx_code second_code, bypass_code;
6315 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6316 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
6317 code = ix86_fp_compare_code_to_integer (code);
6318 mode = CCmode;
6320 if (reverse)
6321 code = reverse_condition (code);
6323 switch (code)
6325 case EQ:
6326 suffix = "e";
6327 break;
6328 case NE:
6329 suffix = "ne";
6330 break;
6331 case GT:
6332 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
6333 suffix = "g";
6334 break;
6335 case GTU:
6336 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6337 Those same assemblers have the same but opposite lossage on cmov. */
6338 gcc_assert (mode == CCmode);
6339 suffix = fp ? "nbe" : "a";
6340 break;
6341 case LT:
6342 switch (mode)
6344 case CCNOmode:
6345 case CCGOCmode:
6346 suffix = "s";
6347 break;
6349 case CCmode:
6350 case CCGCmode:
6351 suffix = "l";
6352 break;
6354 default:
6355 gcc_unreachable ();
6357 break;
6358 case LTU:
6359 gcc_assert (mode == CCmode);
6360 suffix = "b";
6361 break;
6362 case GE:
6363 switch (mode)
6365 case CCNOmode:
6366 case CCGOCmode:
6367 suffix = "ns";
6368 break;
6370 case CCmode:
6371 case CCGCmode:
6372 suffix = "ge";
6373 break;
6375 default:
6376 gcc_unreachable ();
6378 break;
6379 case GEU:
6380 /* ??? As above. */
6381 gcc_assert (mode == CCmode);
6382 suffix = fp ? "nb" : "ae";
6383 break;
6384 case LE:
6385 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
6386 suffix = "le";
6387 break;
6388 case LEU:
6389 gcc_assert (mode == CCmode);
6390 suffix = "be";
6391 break;
6392 case UNORDERED:
6393 suffix = fp ? "u" : "p";
6394 break;
6395 case ORDERED:
6396 suffix = fp ? "nu" : "np";
6397 break;
6398 default:
6399 gcc_unreachable ();
6401 fputs (suffix, file);
6404 /* Print the name of register X to FILE based on its machine mode and number.
6405 If CODE is 'w', pretend the mode is HImode.
6406 If CODE is 'b', pretend the mode is QImode.
6407 If CODE is 'k', pretend the mode is SImode.
6408 If CODE is 'q', pretend the mode is DImode.
6409 If CODE is 'h', pretend the reg is the 'high' byte register.
6410 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
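/* E.g. for (reg:DI 0) in 64-bit code, code 'k' prints "%eax", code 'w'
   prints "%ax", code 'b' prints "%al" and code 'q' prints "%rax"; the
   REX registers print as "%r8".."%r15", with a "b", "w" or "d" suffix
   for the narrower widths.  */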
6412 void
6413 print_reg (rtx x, int code, FILE *file)
6415 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
6416 && REGNO (x) != FRAME_POINTER_REGNUM
6417 && REGNO (x) != FLAGS_REG
6418 && REGNO (x) != FPSR_REG);
6420 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6421 putc ('%', file);
6423 if (code == 'w' || MMX_REG_P (x))
6424 code = 2;
6425 else if (code == 'b')
6426 code = 1;
6427 else if (code == 'k')
6428 code = 4;
6429 else if (code == 'q')
6430 code = 8;
6431 else if (code == 'y')
6432 code = 3;
6433 else if (code == 'h')
6434 code = 0;
6435 else
6436 code = GET_MODE_SIZE (GET_MODE (x));
6438 /* Irritatingly, AMD extended registers use a different naming convention
6439 from the normal registers. */
6440 if (REX_INT_REG_P (x))
6442 gcc_assert (TARGET_64BIT);
6443 switch (code)
6445 case 0:
6446 error ("extended registers have no high halves");
6447 break;
6448 case 1:
6449 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6450 break;
6451 case 2:
6452 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6453 break;
6454 case 4:
6455 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6456 break;
6457 case 8:
6458 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6459 break;
6460 default:
6461 error ("unsupported operand size for extended register");
6462 break;
6464 return;
6466 switch (code)
6468 case 3:
6469 if (STACK_TOP_P (x))
6471 fputs ("st(0)", file);
6472 break;
6474 /* FALLTHRU */
6475 case 8:
6476 case 4:
6477 case 12:
6478 if (! ANY_FP_REG_P (x))
6479 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6480 /* FALLTHRU */
6481 case 16:
6482 case 2:
6483 normal:
6484 fputs (hi_reg_name[REGNO (x)], file);
6485 break;
6486 case 1:
6487 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6488 goto normal;
6489 fputs (qi_reg_name[REGNO (x)], file);
6490 break;
6491 case 0:
6492 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6493 goto normal;
6494 fputs (qi_high_reg_name[REGNO (x)], file);
6495 break;
6496 default:
6497 gcc_unreachable ();
6501 /* Locate some local-dynamic symbol still in use by this function
6502 so that we can print its name in some tls_local_dynamic_base
6503 pattern. */
6505 static const char *
6506 get_some_local_dynamic_name (void)
6508 rtx insn;
6510 if (cfun->machine->some_ld_name)
6511 return cfun->machine->some_ld_name;
6513 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6514 if (INSN_P (insn)
6515 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6516 return cfun->machine->some_ld_name;
6518 gcc_unreachable ();
6521 static int
6522 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6524 rtx x = *px;
6526 if (GET_CODE (x) == SYMBOL_REF
6527 && local_dynamic_symbolic_operand (x, Pmode))
6529 cfun->machine->some_ld_name = XSTR (x, 0);
6530 return 1;
6533 return 0;
6536 /* Meaning of CODE:
6537 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6538 C -- print opcode suffix for set/cmov insn.
6539 c -- like C, but print reversed condition
6540 F,f -- likewise, but for floating-point.
6541 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6542 otherwise nothing
6543 R -- print the prefix for register names.
6544 z -- print the opcode suffix for the size of the current operand.
6545 * -- print a star (in certain assembler syntax)
6546 A -- print an absolute memory reference.
6547 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6548 s -- print a shift double count, followed by the assembler's argument
6549 delimiter.
6550 b -- print the QImode name of the register for the indicated operand.
6551 %b0 would print %al if operands[0] is reg 0.
6552 w -- likewise, print the HImode name of the register.
6553 k -- likewise, print the SImode name of the register.
6554 q -- likewise, print the DImode name of the register.
6555 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6556 y -- print "st(0)" instead of "st" as a register.
6557 D -- print condition for SSE cmp instruction.
6558 P -- if PIC, print an @PLT suffix.
6559 X -- don't print any sort of PIC '@' suffix for a symbol.
6560 & -- print some in-use local-dynamic symbol name.
6561 H -- print a memory address offset by 8; used for SSE high-parts.
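   For example, with operands[0] = (reg:DI 0), "%k0" prints "%eax",
   "%b0" prints "%al" and "%q0" prints "%rax", while "%z1" appends the
   387 size suffix (e.g. "s", "l" or "t") derived from operand 1's
   mode.  */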
6564 void
6565 print_operand (FILE *file, rtx x, int code)
6567 if (code)
6569 switch (code)
6571 case '*':
6572 if (ASSEMBLER_DIALECT == ASM_ATT)
6573 putc ('*', file);
6574 return;
6576 case '&':
6577 assemble_name (file, get_some_local_dynamic_name ());
6578 return;
6580 case 'A':
6581 switch (ASSEMBLER_DIALECT)
6583 case ASM_ATT:
6584 putc ('*', file);
6585 break;
6587 case ASM_INTEL:
6588 /* Intel syntax. For absolute addresses, registers should not
6589 be surrounded by brackets. */
6590 if (GET_CODE (x) != REG)
6592 putc ('[', file);
6593 PRINT_OPERAND (file, x, 0);
6594 putc (']', file);
6595 return;
6597 break;
6599 default:
6600 gcc_unreachable ();
6603 PRINT_OPERAND (file, x, 0);
6604 return;
6607 case 'L':
6608 if (ASSEMBLER_DIALECT == ASM_ATT)
6609 putc ('l', file);
6610 return;
6612 case 'W':
6613 if (ASSEMBLER_DIALECT == ASM_ATT)
6614 putc ('w', file);
6615 return;
6617 case 'B':
6618 if (ASSEMBLER_DIALECT == ASM_ATT)
6619 putc ('b', file);
6620 return;
6622 case 'Q':
6623 if (ASSEMBLER_DIALECT == ASM_ATT)
6624 putc ('l', file);
6625 return;
6627 case 'S':
6628 if (ASSEMBLER_DIALECT == ASM_ATT)
6629 putc ('s', file);
6630 return;
6632 case 'T':
6633 if (ASSEMBLER_DIALECT == ASM_ATT)
6634 putc ('t', file);
6635 return;
6637 case 'z':
6638 /* 387 opcodes don't get size suffixes if the operands are
6639 registers. */
6640 if (STACK_REG_P (x))
6641 return;
6643 /* Likewise if using Intel opcodes. */
6644 if (ASSEMBLER_DIALECT == ASM_INTEL)
6645 return;
6647 /* Derive the size suffix of the op from the size of the operand. */
6648 switch (GET_MODE_SIZE (GET_MODE (x)))
6650 case 2:
6651 #ifdef HAVE_GAS_FILDS_FISTS
6652 putc ('s', file);
6653 #endif
6654 return;
6656 case 4:
6657 if (GET_MODE (x) == SFmode)
6659 putc ('s', file);
6660 return;
6662 else
6663 putc ('l', file);
6664 return;
6666 case 12:
6667 case 16:
6668 putc ('t', file);
6669 return;
6671 case 8:
6672 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6674 #ifdef GAS_MNEMONICS
6675 putc ('q', file);
6676 #else
6677 putc ('l', file);
6678 putc ('l', file);
6679 #endif
6681 else
6682 putc ('l', file);
6683 return;
6685 default:
6686 gcc_unreachable ();
6689 case 'b':
6690 case 'w':
6691 case 'k':
6692 case 'q':
6693 case 'h':
6694 case 'y':
6695 case 'X':
6696 case 'P':
6697 break;
6699 case 's':
6700 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6702 PRINT_OPERAND (file, x, 0);
6703 putc (',', file);
6705 return;
6707 case 'D':
6708 /* A little bit of brain damage here. The SSE compare instructions
6709 use completely different names for the comparisons than the
6710 fp conditional moves do. */
6711 switch (GET_CODE (x))
6713 case EQ:
6714 case UNEQ:
6715 fputs ("eq", file);
6716 break;
6717 case LT:
6718 case UNLT:
6719 fputs ("lt", file);
6720 break;
6721 case LE:
6722 case UNLE:
6723 fputs ("le", file);
6724 break;
6725 case UNORDERED:
6726 fputs ("unord", file);
6727 break;
6728 case NE:
6729 case LTGT:
6730 fputs ("neq", file);
6731 break;
6732 case UNGE:
6733 case GE:
6734 fputs ("nlt", file);
6735 break;
6736 case UNGT:
6737 case GT:
6738 fputs ("nle", file);
6739 break;
6740 case ORDERED:
6741 fputs ("ord", file);
6742 break;
6743 default:
6744 gcc_unreachable ();
6746 return;
6747 case 'O':
6748 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6749 if (ASSEMBLER_DIALECT == ASM_ATT)
6751 switch (GET_MODE (x))
6753 case HImode: putc ('w', file); break;
6754 case SImode:
6755 case SFmode: putc ('l', file); break;
6756 case DImode:
6757 case DFmode: putc ('q', file); break;
6758 default: gcc_unreachable ();
6760 putc ('.', file);
6762 #endif
6763 return;
6764 case 'C':
6765 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
6766 return;
6767 case 'F':
6768 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6769 if (ASSEMBLER_DIALECT == ASM_ATT)
6770 putc ('.', file);
6771 #endif
6772 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
6773 return;
6775 /* Like above, but reverse condition */
6776 case 'c':
6777 /* Check to see if argument to %c is really a constant
6778 and not a condition code which needs to be reversed. */
6779 if (!COMPARISON_P (x))
6781 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
6782 return;
6784 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
6785 return;
6786 case 'f':
6787 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6788 if (ASSEMBLER_DIALECT == ASM_ATT)
6789 putc ('.', file);
6790 #endif
6791 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
6792 return;
6794 case 'H':
6795 /* It doesn't actually matter what mode we use here, as we're
6796 only going to use this for printing. */
6797 x = adjust_address_nv (x, DImode, 8);
6798 break;
6800 case '+':
6802 rtx x;
6804 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
6805 return;
6807 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
6808 if (x)
6810 int pred_val = INTVAL (XEXP (x, 0));
6812 if (pred_val < REG_BR_PROB_BASE * 45 / 100
6813 || pred_val > REG_BR_PROB_BASE * 55 / 100)
6815 int taken = pred_val > REG_BR_PROB_BASE / 2;
6816 int cputaken = final_forward_branch_p (current_output_insn) == 0;
6818 /* Emit hints only in the case where the default branch prediction
6819 heuristics would fail. */
6820 if (taken != cputaken)
6822 /* We use the 3e (DS) prefix for taken branches and
6823 the 2e (CS) prefix for not-taken branches. */
6824 if (taken)
6825 fputs ("ds ; ", file);
6826 else
6827 fputs ("cs ; ", file);
6831 return;
6833 default:
6834 output_operand_lossage ("invalid operand code '%c'", code);
6838 if (GET_CODE (x) == REG)
6839 print_reg (x, code, file);
6841 else if (GET_CODE (x) == MEM)
6843 /* No `byte ptr' prefix for call instructions. */
6844 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
6846 const char * size;
6847 switch (GET_MODE_SIZE (GET_MODE (x)))
6849 case 1: size = "BYTE"; break;
6850 case 2: size = "WORD"; break;
6851 case 4: size = "DWORD"; break;
6852 case 8: size = "QWORD"; break;
6853 case 12: size = "XWORD"; break;
6854 case 16: size = "XMMWORD"; break;
6855 default:
6856 gcc_unreachable ();
6859 /* Check for explicit size override (codes 'b', 'w' and 'k') */
6860 if (code == 'b')
6861 size = "BYTE";
6862 else if (code == 'w')
6863 size = "WORD";
6864 else if (code == 'k')
6865 size = "DWORD";
6867 fputs (size, file);
6868 fputs (" PTR ", file);
6871 x = XEXP (x, 0);
6872 /* Avoid (%rip) for call operands. */
6873 if (CONSTANT_ADDRESS_P (x) && code == 'P'
6874 && GET_CODE (x) != CONST_INT)
6875 output_addr_const (file, x);
6876 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
6877 output_operand_lossage ("invalid constraints for operand");
6878 else
6879 output_address (x);
6882 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
6884 REAL_VALUE_TYPE r;
6885 long l;
6887 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6888 REAL_VALUE_TO_TARGET_SINGLE (r, l);
6890 if (ASSEMBLER_DIALECT == ASM_ATT)
6891 putc ('$', file);
6892 fprintf (file, "0x%08lx", l);
6895 /* These float cases don't actually occur as immediate operands. */
6896 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
6898 char dstr[30];
6900 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6901 fprintf (file, "%s", dstr);
6904 else if (GET_CODE (x) == CONST_DOUBLE
6905 && GET_MODE (x) == XFmode)
6907 char dstr[30];
6909 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6910 fprintf (file, "%s", dstr);
6913 else
6915 /* We have patterns that allow zero sets of memory, for instance.
6916 In 64-bit mode, we should probably support all 8-byte vectors,
6917 since we can in fact encode that into an immediate. */
6918 if (GET_CODE (x) == CONST_VECTOR)
6920 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
6921 x = const0_rtx;
6924 if (code != 'P')
6926 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
6928 if (ASSEMBLER_DIALECT == ASM_ATT)
6929 putc ('$', file);
6931 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
6932 || GET_CODE (x) == LABEL_REF)
6934 if (ASSEMBLER_DIALECT == ASM_ATT)
6935 putc ('$', file);
6936 else
6937 fputs ("OFFSET FLAT:", file);
6940 if (GET_CODE (x) == CONST_INT)
6941 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6942 else if (flag_pic)
6943 output_pic_addr_const (file, x, code);
6944 else
6945 output_addr_const (file, x);
6949 /* Print a memory operand whose address is ADDR. */
6951 void
6952 print_operand_address (FILE *file, rtx addr)
6954 struct ix86_address parts;
6955 rtx base, index, disp;
6956 int scale;
6957 int ok = ix86_decompose_address (addr, &parts);
6959 gcc_assert (ok);
6961 base = parts.base;
6962 index = parts.index;
6963 disp = parts.disp;
6964 scale = parts.scale;
6966 switch (parts.seg)
6968 case SEG_DEFAULT:
6969 break;
6970 case SEG_FS:
6971 case SEG_GS:
6972 if (USER_LABEL_PREFIX[0] == 0)
6973 putc ('%', file);
6974 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
6975 break;
6976 default:
6977 gcc_unreachable ();
6980 if (!base && !index)
6982 /* A displacement-only address requires special attention. */
6984 if (GET_CODE (disp) == CONST_INT)
6986 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
6988 if (USER_LABEL_PREFIX[0] == 0)
6989 putc ('%', file);
6990 fputs ("ds:", file);
6992 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
6994 else if (flag_pic)
6995 output_pic_addr_const (file, disp, 0);
6996 else
6997 output_addr_const (file, disp);
6999 /* Use the one byte shorter RIP-relative addressing for 64-bit mode. */
7000 if (TARGET_64BIT
7001 && ((GET_CODE (disp) == SYMBOL_REF
7002 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
7003 || GET_CODE (disp) == LABEL_REF
7004 || (GET_CODE (disp) == CONST
7005 && GET_CODE (XEXP (disp, 0)) == PLUS
7006 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
7007 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
7008 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
7009 fputs ("(%rip)", file);
7011 else
7013 if (ASSEMBLER_DIALECT == ASM_ATT)
7015 if (disp)
7017 if (flag_pic)
7018 output_pic_addr_const (file, disp, 0);
7019 else if (GET_CODE (disp) == LABEL_REF)
7020 output_asm_label (disp);
7021 else
7022 output_addr_const (file, disp);
7025 putc ('(', file);
7026 if (base)
7027 print_reg (base, 0, file);
7028 if (index)
7030 putc (',', file);
7031 print_reg (index, 0, file);
7032 if (scale != 1)
7033 fprintf (file, ",%d", scale);
7035 putc (')', file);
7037 else
7039 rtx offset = NULL_RTX;
7041 if (disp)
7043 /* Pull out the offset of a symbol; print any symbol itself. */
7044 if (GET_CODE (disp) == CONST
7045 && GET_CODE (XEXP (disp, 0)) == PLUS
7046 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7048 offset = XEXP (XEXP (disp, 0), 1);
7049 disp = gen_rtx_CONST (VOIDmode,
7050 XEXP (XEXP (disp, 0), 0));
7053 if (flag_pic)
7054 output_pic_addr_const (file, disp, 0);
7055 else if (GET_CODE (disp) == LABEL_REF)
7056 output_asm_label (disp);
7057 else if (GET_CODE (disp) == CONST_INT)
7058 offset = disp;
7059 else
7060 output_addr_const (file, disp);
7063 putc ('[', file);
7064 if (base)
7066 print_reg (base, 0, file);
7067 if (offset)
7069 if (INTVAL (offset) >= 0)
7070 putc ('+', file);
7071 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7074 else if (offset)
7075 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7076 else
7077 putc ('0', file);
7079 if (index)
7081 putc ('+', file);
7082 print_reg (index, 0, file);
7083 if (scale != 1)
7084 fprintf (file, "*%d", scale);
7086 putc (']', file);
7091 bool
7092 output_addr_const_extra (FILE *file, rtx x)
7094 rtx op;
7096 if (GET_CODE (x) != UNSPEC)
7097 return false;
7099 op = XVECEXP (x, 0, 0);
7100 switch (XINT (x, 1))
7102 case UNSPEC_GOTTPOFF:
7103 output_addr_const (file, op);
7104 /* FIXME: This might be @TPOFF in Sun ld. */
7105 fputs ("@GOTTPOFF", file);
7106 break;
7107 case UNSPEC_TPOFF:
7108 output_addr_const (file, op);
7109 fputs ("@TPOFF", file);
7110 break;
7111 case UNSPEC_NTPOFF:
7112 output_addr_const (file, op);
7113 if (TARGET_64BIT)
7114 fputs ("@TPOFF", file);
7115 else
7116 fputs ("@NTPOFF", file);
7117 break;
7118 case UNSPEC_DTPOFF:
7119 output_addr_const (file, op);
7120 fputs ("@DTPOFF", file);
7121 break;
7122 case UNSPEC_GOTNTPOFF:
7123 output_addr_const (file, op);
7124 if (TARGET_64BIT)
7125 fputs ("@GOTTPOFF(%rip)", file);
7126 else
7127 fputs ("@GOTNTPOFF", file);
7128 break;
7129 case UNSPEC_INDNTPOFF:
7130 output_addr_const (file, op);
7131 fputs ("@INDNTPOFF", file);
7132 break;
7134 default:
7135 return false;
7138 return true;
7141 /* Split one or more DImode RTL references into pairs of SImode
7142 references. The RTL can be REG, offsettable MEM, integer constant, or
7143 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7144 split and "num" is its length. lo_half and hi_half are output arrays
7145 that parallel "operands". */
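/* E.g. a DImode MEM at address A is split into SImode MEMs at A and
   A + 4 (low word first), while a DImode REG or constant is split with
   simplify_gen_subreg at byte offsets 0 and 4.  */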
7147 void
7148 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7150 while (num--)
7152 rtx op = operands[num];
7154 /* simplify_subreg refuses to split volatile memory addresses,
7155 but we still have to handle them. */
7156 if (GET_CODE (op) == MEM)
7158 lo_half[num] = adjust_address (op, SImode, 0);
7159 hi_half[num] = adjust_address (op, SImode, 4);
7161 else
7163 lo_half[num] = simplify_gen_subreg (SImode, op,
7164 GET_MODE (op) == VOIDmode
7165 ? DImode : GET_MODE (op), 0);
7166 hi_half[num] = simplify_gen_subreg (SImode, op,
7167 GET_MODE (op) == VOIDmode
7168 ? DImode : GET_MODE (op), 4);
7172 /* Split one or more TImode RTL references into pairs of DImode
7173 references. The RTL can be REG, offsettable MEM, integer constant, or
7174 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
7175 split and "num" is its length. lo_half and hi_half are output arrays
7176 that parallel "operands". */
7178 void
7179 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7181 while (num--)
7183 rtx op = operands[num];
7185 /* simplify_subreg refuses to split volatile memory addresses, but we
7186 still have to handle them. */
7187 if (GET_CODE (op) == MEM)
7189 lo_half[num] = adjust_address (op, DImode, 0);
7190 hi_half[num] = adjust_address (op, DImode, 8);
7192 else
7194 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7195 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
7200 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7201 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7202 is the expression of the binary operation. The output may either be
7203 emitted here, or returned to the caller, like all output_* functions.
7205 There is no guarantee that the operands are the same mode, as they
7206 might be within FLOAT or FLOAT_EXTEND expressions. */
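/* E.g. for an SFmode addition with SSE registers this returns
   "addss\t{%2, %0|%0, %2}", while the 387 forms are built from "fadd",
   "fsub", "fmul" or "fdiv" plus the popping/reversed variants chosen
   below.  */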
7208 #ifndef SYSV386_COMPAT
7209 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7210 wants to fix the assemblers because that causes incompatibility
7211 with gcc. No-one wants to fix gcc because that causes
7212 incompatibility with assemblers... You can use the option of
7213 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7214 #define SYSV386_COMPAT 1
7215 #endif
7217 const char *
7218 output_387_binary_op (rtx insn, rtx *operands)
7220 static char buf[30];
7221 const char *p;
7222 const char *ssep;
7223 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
7225 #ifdef ENABLE_CHECKING
7226 /* Even if we do not want to check the inputs, this documents the input
7227 constraints, which helps in understanding the following code. */
7228 if (STACK_REG_P (operands[0])
7229 && ((REG_P (operands[1])
7230 && REGNO (operands[0]) == REGNO (operands[1])
7231 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7232 || (REG_P (operands[2])
7233 && REGNO (operands[0]) == REGNO (operands[2])
7234 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7235 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7236 ; /* ok */
7237 else
7238 gcc_assert (is_sse);
7239 #endif
7241 switch (GET_CODE (operands[3]))
7243 case PLUS:
7244 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7245 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7246 p = "fiadd";
7247 else
7248 p = "fadd";
7249 ssep = "add";
7250 break;
7252 case MINUS:
7253 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7254 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7255 p = "fisub";
7256 else
7257 p = "fsub";
7258 ssep = "sub";
7259 break;
7261 case MULT:
7262 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7263 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7264 p = "fimul";
7265 else
7266 p = "fmul";
7267 ssep = "mul";
7268 break;
7270 case DIV:
7271 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7272 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7273 p = "fidiv";
7274 else
7275 p = "fdiv";
7276 ssep = "div";
7277 break;
7279 default:
7280 gcc_unreachable ();
7283 if (is_sse)
7285 strcpy (buf, ssep);
7286 if (GET_MODE (operands[0]) == SFmode)
7287 strcat (buf, "ss\t{%2, %0|%0, %2}");
7288 else
7289 strcat (buf, "sd\t{%2, %0|%0, %2}");
7290 return buf;
7292 strcpy (buf, p);
7294 switch (GET_CODE (operands[3]))
7296 case MULT:
7297 case PLUS:
7298 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7300 rtx temp = operands[2];
7301 operands[2] = operands[1];
7302 operands[1] = temp;
7305 /* We know operands[0] == operands[1]. */
7307 if (GET_CODE (operands[2]) == MEM)
7309 p = "%z2\t%2";
7310 break;
7313 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7315 if (STACK_TOP_P (operands[0]))
7316 /* How is it that we are storing to a dead operand[2]?
7317 Well, presumably operands[1] is dead too. We can't
7318 store the result to st(0) as st(0) gets popped on this
7319 instruction. Instead store to operands[2] (which I
7320 think has to be st(1)). st(1) will be popped later.
7321 gcc <= 2.8.1 didn't have this check and generated
7322 assembly code that the Unixware assembler rejected. */
7323 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7324 else
7325 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7326 break;
7329 if (STACK_TOP_P (operands[0]))
7330 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7331 else
7332 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7333 break;
7335 case MINUS:
7336 case DIV:
7337 if (GET_CODE (operands[1]) == MEM)
7339 p = "r%z1\t%1";
7340 break;
7343 if (GET_CODE (operands[2]) == MEM)
7345 p = "%z2\t%2";
7346 break;
7349 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7351 #if SYSV386_COMPAT
7352 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7353 derived assemblers, confusingly reverse the direction of
7354 the operation for fsub{r} and fdiv{r} when the
7355 destination register is not st(0). The Intel assembler
7356 doesn't have this brain damage. Read !SYSV386_COMPAT to
7357 figure out what the hardware really does. */
7358 if (STACK_TOP_P (operands[0]))
7359 p = "{p\t%0, %2|rp\t%2, %0}";
7360 else
7361 p = "{rp\t%2, %0|p\t%0, %2}";
7362 #else
7363 if (STACK_TOP_P (operands[0]))
7364 /* As above for fmul/fadd, we can't store to st(0). */
7365 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7366 else
7367 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7368 #endif
7369 break;
7372 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7374 #if SYSV386_COMPAT
7375 if (STACK_TOP_P (operands[0]))
7376 p = "{rp\t%0, %1|p\t%1, %0}";
7377 else
7378 p = "{p\t%1, %0|rp\t%0, %1}";
7379 #else
7380 if (STACK_TOP_P (operands[0]))
7381 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7382 else
7383 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7384 #endif
7385 break;
7388 if (STACK_TOP_P (operands[0]))
7390 if (STACK_TOP_P (operands[1]))
7391 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7392 else
7393 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7394 break;
7396 else if (STACK_TOP_P (operands[1]))
7398 #if SYSV386_COMPAT
7399 p = "{\t%1, %0|r\t%0, %1}";
7400 #else
7401 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7402 #endif
7404 else
7406 #if SYSV386_COMPAT
7407 p = "{r\t%2, %0|\t%0, %2}";
7408 #else
7409 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7410 #endif
7412 break;
7414 default:
7415 gcc_unreachable ();
7418 strcat (buf, p);
7419 return buf;
7422 /* Return needed mode for entity in optimize_mode_switching pass. */
7425 ix86_mode_needed (int entity, rtx insn)
7427 enum attr_i387_cw mode;
7429 /* The mode UNINITIALIZED is used to store the control word after a
7430 function call or ASM pattern. The mode ANY specifies that the function
7431 has no requirements on the control word and makes no changes in the
7432 bits we are interested in. */
7434 if (CALL_P (insn)
7435 || (NONJUMP_INSN_P (insn)
7436 && (asm_noperands (PATTERN (insn)) >= 0
7437 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
7438 return I387_CW_UNINITIALIZED;
7440 if (recog_memoized (insn) < 0)
7441 return I387_CW_ANY;
7443 mode = get_attr_i387_cw (insn);
7445 switch (entity)
7447 case I387_TRUNC:
7448 if (mode == I387_CW_TRUNC)
7449 return mode;
7450 break;
7452 case I387_FLOOR:
7453 if (mode == I387_CW_FLOOR)
7454 return mode;
7455 break;
7457 case I387_CEIL:
7458 if (mode == I387_CW_CEIL)
7459 return mode;
7460 break;
7462 case I387_MASK_PM:
7463 if (mode == I387_CW_MASK_PM)
7464 return mode;
7465 break;
7467 default:
7468 gcc_unreachable ();
7471 return I387_CW_ANY;
7474 /* Output code to initialize control word copies used by trunc?f?i and
7475 rounding patterns. CURRENT_MODE is set to the current control word,
7476 while NEW_MODE is set to the new control word. */
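/* The constants used below follow the 387 control word layout: bits 10
   and 11 are the rounding control (00 = to nearest, 01 = down, 10 = up,
   11 = toward zero), hence the 0x0400, 0x0800 and 0x0c00 masks, and
   bit 5 (0x0020) masks the precision exception for nearbyint.  */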
7478 void
7479 emit_i387_cw_initialization (int mode)
7481 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
7482 rtx new_mode;
7484 int slot;
7486 rtx reg = gen_reg_rtx (HImode);
7488 emit_insn (gen_x86_fnstcw_1 (stored_mode));
7489 emit_move_insn (reg, stored_mode);
7491 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
7493 switch (mode)
7495 case I387_CW_TRUNC:
7496 /* round toward zero (truncate) */
7497 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7498 slot = SLOT_CW_TRUNC;
7499 break;
7501 case I387_CW_FLOOR:
7502 /* round down toward -oo */
7503 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7504 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7505 slot = SLOT_CW_FLOOR;
7506 break;
7508 case I387_CW_CEIL:
7509 /* round up toward +oo */
7510 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7511 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7512 slot = SLOT_CW_CEIL;
7513 break;
7515 case I387_CW_MASK_PM:
7516 /* mask precision exception for nearbyint() */
7517 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7518 slot = SLOT_CW_MASK_PM;
7519 break;
7521 default:
7522 gcc_unreachable ();
7525 else
7527 switch (mode)
7529 case I387_CW_TRUNC:
7530 /* round toward zero (truncate) */
7531 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7532 slot = SLOT_CW_TRUNC;
7533 break;
7535 case I387_CW_FLOOR:
7536 /* round down toward -oo */
7537 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7538 slot = SLOT_CW_FLOOR;
7539 break;
7541 case I387_CW_CEIL:
7542 /* round up toward +oo */
7543 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7544 slot = SLOT_CW_CEIL;
7545 break;
7547 case I387_CW_MASK_PM:
7548 /* mask precision exception for nearbyint() */
7549 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7550 slot = SLOT_CW_MASK_PM;
7551 break;
7553 default:
7554 gcc_unreachable ();
7558 gcc_assert (slot < MAX_386_STACK_LOCALS);
7560 new_mode = assign_386_stack_local (HImode, slot);
7561 emit_move_insn (new_mode, reg);
7564 /* Output code for INSN to convert a float to a signed int. OPERANDS
7565 are the insn operands. The output may be [HSD]Imode and the input
7566 operand may be [SDX]Fmode. */
7568 const char *
7569 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
7571 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7572 int dimode_p = GET_MODE (operands[0]) == DImode;
7573 int round_mode = get_attr_i387_cw (insn);
7575 /* Jump through a hoop or two for DImode, since the hardware has no
7576 non-popping instruction. We used to do this a different way, but
7577 that was somewhat fragile and broke with post-reload splitters. */
7578 if ((dimode_p || fisttp) && !stack_top_dies)
7579 output_asm_insn ("fld\t%y1", operands);
7581 gcc_assert (STACK_TOP_P (operands[1]));
7582 gcc_assert (GET_CODE (operands[0]) == MEM);
7584 if (fisttp)
7585 output_asm_insn ("fisttp%z0\t%0", operands);
7586 else
7588 if (round_mode != I387_CW_ANY)
7589 output_asm_insn ("fldcw\t%3", operands);
7590 if (stack_top_dies || dimode_p)
7591 output_asm_insn ("fistp%z0\t%0", operands);
7592 else
7593 output_asm_insn ("fist%z0\t%0", operands);
7594 if (round_mode != I387_CW_ANY)
7595 output_asm_insn ("fldcw\t%2", operands);
7598 return "";
7601 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7602 should be used. UNORDERED_P is true when fucom should be used. */
7604 const char *
7605 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7607 int stack_top_dies;
7608 rtx cmp_op0, cmp_op1;
7609 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
7611 if (eflags_p)
7613 cmp_op0 = operands[0];
7614 cmp_op1 = operands[1];
7616 else
7618 cmp_op0 = operands[1];
7619 cmp_op1 = operands[2];
7622 if (is_sse)
7624 if (GET_MODE (operands[0]) == SFmode)
7625 if (unordered_p)
7626 return "ucomiss\t{%1, %0|%0, %1}";
7627 else
7628 return "comiss\t{%1, %0|%0, %1}";
7629 else
7630 if (unordered_p)
7631 return "ucomisd\t{%1, %0|%0, %1}";
7632 else
7633 return "comisd\t{%1, %0|%0, %1}";
7636 gcc_assert (STACK_TOP_P (cmp_op0));
7638 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7640 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
7642 if (stack_top_dies)
7644 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
7645 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
7647 else
7648 return "ftst\n\tfnstsw\t%0";
7651 if (STACK_REG_P (cmp_op1)
7652 && stack_top_dies
7653 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7654 && REGNO (cmp_op1) != FIRST_STACK_REG)
7656 /* If the top of the 387 stack dies, and the other operand
7657 is also a stack register that dies, then this must be a
7658 `fcompp' float compare. */
7660 if (eflags_p)
7662 /* There is no double-popping fcomi variant. Fortunately,
7663 eflags is immune to the fstp's cc clobbering. */
7664 if (unordered_p)
7665 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7666 else
7667 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7668 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7670 else
7672 if (unordered_p)
7673 return "fucompp\n\tfnstsw\t%0";
7674 else
7675 return "fcompp\n\tfnstsw\t%0";
7678 else
7680 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
7682 static const char * const alt[16] =
7684 "fcom%z2\t%y2\n\tfnstsw\t%0",
7685 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7686 "fucom%z2\t%y2\n\tfnstsw\t%0",
7687 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7689 "ficom%z2\t%y2\n\tfnstsw\t%0",
7690 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7691 NULL,
7692 NULL,
7694 "fcomi\t{%y1, %0|%0, %y1}",
7695 "fcomip\t{%y1, %0|%0, %y1}",
7696 "fucomi\t{%y1, %0|%0, %y1}",
7697 "fucomip\t{%y1, %0|%0, %y1}",
7699 NULL,
7700 NULL,
7701 NULL,
7702 NULL
7705 int mask;
7706 const char *ret;
7708 mask = eflags_p << 3;
7709 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
7710 mask |= unordered_p << 1;
7711 mask |= stack_top_dies;
7713 gcc_assert (mask < 16);
7714 ret = alt[mask];
7715 gcc_assert (ret);
7717 return ret;
7721 void
7722 ix86_output_addr_vec_elt (FILE *file, int value)
7724 const char *directive = ASM_LONG;
7726 #ifdef ASM_QUAD
7727 if (TARGET_64BIT)
7728 directive = ASM_QUAD;
7729 #else
7730 gcc_assert (!TARGET_64BIT);
7731 #endif
7733 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
7736 void
7737 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
7739 if (TARGET_64BIT)
7740 fprintf (file, "%s%s%d-%s%d\n",
7741 ASM_LONG, LPREFIX, value, LPREFIX, rel);
7742 else if (HAVE_AS_GOTOFF_IN_DATA)
7743 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
7744 #if TARGET_MACHO
7745 else if (TARGET_MACHO)
7747 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
7748 machopic_output_function_base_name (file);
7749 fprintf(file, "\n");
7751 #endif
7752 else
7753 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
7754 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
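/* The last form above emits, e.g.,
   "\t.long\t_GLOBAL_OFFSET_TABLE_+[.-.L42]", while the GOTOFF form
   emits "\t.long\t.L42@GOTOFF" when the assembler accepts @GOTOFF
   relocations in data.  */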
7757 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
7758 for the target. */
7760 void
7761 ix86_expand_clear (rtx dest)
7763 rtx tmp;
7765 /* We play register width games, which are only valid after reload. */
7766 gcc_assert (reload_completed);
7768 /* Avoid HImode and its attendant prefix byte. */
7769 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
7770 dest = gen_rtx_REG (SImode, REGNO (dest));
7772 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
7774 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
7775 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
7777 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
7778 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
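/* The clobber is needed because "xor reg,reg" modifies the flags
   register, whereas "mov $0,reg" does not; the parallel records that
   side effect so later passes do not assume the flags survive.  */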
7781 emit_insn (tmp);
7784 /* X is an unchanging MEM. If it is a constant pool reference, return
7785 the constant pool rtx, else NULL. */
7787 rtx
7788 maybe_get_pool_constant (rtx x)
7790 x = ix86_delegitimize_address (XEXP (x, 0));
7792 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
7793 return get_pool_constant (x);
7795 return NULL_RTX;
7798 void
7799 ix86_expand_move (enum machine_mode mode, rtx operands[])
7801 int strict = (reload_in_progress || reload_completed);
7802 rtx op0, op1;
7803 enum tls_model model;
7805 op0 = operands[0];
7806 op1 = operands[1];
7808 if (GET_CODE (op1) == SYMBOL_REF)
7810 model = SYMBOL_REF_TLS_MODEL (op1);
7811 if (model)
7813 op1 = legitimize_tls_address (op1, model, true);
7814 op1 = force_operand (op1, op0);
7815 if (op1 == op0)
7816 return;
7819 else if (GET_CODE (op1) == CONST
7820 && GET_CODE (XEXP (op1, 0)) == PLUS
7821 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
7823 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
7824 if (model)
7826 rtx addend = XEXP (XEXP (op1, 0), 1);
7827 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
7828 op1 = force_operand (op1, NULL);
7829 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
7830 op0, 1, OPTAB_DIRECT);
7831 if (op1 == op0)
7832 return;
7836 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
7838 #if TARGET_MACHO
7839 if (MACHOPIC_PURE)
7841 rtx temp = ((reload_in_progress
7842 || ((op0 && GET_CODE (op0) == REG)
7843 && mode == Pmode))
7844 ? op0 : gen_reg_rtx (Pmode));
7845 op1 = machopic_indirect_data_reference (op1, temp);
7846 op1 = machopic_legitimize_pic_address (op1, mode,
7847 temp == op1 ? 0 : temp);
7849 else if (MACHOPIC_INDIRECT)
7850 op1 = machopic_indirect_data_reference (op1, 0);
7851 if (op0 == op1)
7852 return;
7853 #else
7854 if (GET_CODE (op0) == MEM)
7855 op1 = force_reg (Pmode, op1);
7856 else
7857 op1 = legitimize_address (op1, op1, Pmode);
7858 #endif /* TARGET_MACHO */
7860 else
7862 if (GET_CODE (op0) == MEM
7863 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
7864 || !push_operand (op0, mode))
7865 && GET_CODE (op1) == MEM)
7866 op1 = force_reg (mode, op1);
7868 if (push_operand (op0, mode)
7869 && ! general_no_elim_operand (op1, mode))
7870 op1 = copy_to_mode_reg (mode, op1);
7872 /* Force large constants in 64-bit compilation into a register
7873 to get them CSEed. */
7874 if (TARGET_64BIT && mode == DImode
7875 && immediate_operand (op1, mode)
7876 && !x86_64_zext_immediate_operand (op1, VOIDmode)
7877 && !register_operand (op0, mode)
7878 && optimize && !reload_completed && !reload_in_progress)
7879 op1 = copy_to_mode_reg (mode, op1);
7881 if (FLOAT_MODE_P (mode))
7883 /* If we are loading a floating point constant to a register,
7884 force the value to memory now, since we'll get better code
7885 out of the back end. */
7887 if (strict)
7889 else if (GET_CODE (op1) == CONST_DOUBLE)
7891 op1 = validize_mem (force_const_mem (mode, op1));
7892 if (!register_operand (op0, mode))
7894 rtx temp = gen_reg_rtx (mode);
7895 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
7896 emit_move_insn (op0, temp);
7897 return;
7903 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7906 void
7907 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
7909 rtx op0 = operands[0], op1 = operands[1];
7911 /* Force constants other than zero into memory. We do not know how
7912 the instructions used to build constants modify the upper 64 bits
7913 of the register, once we have that information we may be able
7914 to handle some of them more efficiently. */
7915 if ((reload_in_progress | reload_completed) == 0
7916 && register_operand (op0, mode)
7917 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
7918 op1 = validize_mem (force_const_mem (mode, op1));
7920 /* Make operand1 a register if it isn't already. */
7921 if (!no_new_pseudos
7922 && !register_operand (op0, mode)
7923 && !register_operand (op1, mode))
7925 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
7926 return;
7929 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
7932 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
7933 straight to ix86_expand_vector_move. */
7935 void
7936 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
7938 rtx op0, op1, m;
7940 op0 = operands[0];
7941 op1 = operands[1];
7943 if (MEM_P (op1))
7945 /* If we're optimizing for size, movups is the smallest. */
7946 if (optimize_size)
7948 op0 = gen_lowpart (V4SFmode, op0);
7949 op1 = gen_lowpart (V4SFmode, op1);
7950 emit_insn (gen_sse_movups (op0, op1));
7951 return;
7954 /* ??? If we have typed data, then it would appear that using
7955 movdqu is the only way to get unaligned data loaded with
7956 integer type. */
7957 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
7959 op0 = gen_lowpart (V16QImode, op0);
7960 op1 = gen_lowpart (V16QImode, op1);
7961 emit_insn (gen_sse2_movdqu (op0, op1));
7962 return;
7965 if (TARGET_SSE2 && mode == V2DFmode)
7967 rtx zero;
7969 /* When SSE registers are split into halves, we can avoid
7970 writing to the top half twice. */
7971 if (TARGET_SSE_SPLIT_REGS)
7973 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
7974 zero = op0;
7976 else
7978 /* ??? Not sure about the best option for the Intel chips.
7979 The following would seem to satisfy; the register is
7980 entirely cleared, breaking the dependency chain. We
7981 then store to the upper half, with a dependency depth
7982 of one. A rumor has it that Intel recommends two movsd
7983 followed by an unpacklpd, but this is unconfirmed. And
7984 given that the dependency depth of the unpacklpd would
7985 still be one, I'm not sure why this would be better. */
7986 zero = CONST0_RTX (V2DFmode);
7989 m = adjust_address (op1, DFmode, 0);
7990 emit_insn (gen_sse2_loadlpd (op0, zero, m));
7991 m = adjust_address (op1, DFmode, 8);
7992 emit_insn (gen_sse2_loadhpd (op0, op0, m));
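/* The two loads above typically assemble to a "movsd" of the low
   half followed by a "movhpd" of the high half, avoiding a single
   unaligned 16-byte access.  */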
7994 else
7996 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
7997 emit_move_insn (op0, CONST0_RTX (mode));
7998 else
7999 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8001 if (mode != V4SFmode)
8002 op0 = gen_lowpart (V4SFmode, op0);
8003 m = adjust_address (op1, V2SFmode, 0);
8004 emit_insn (gen_sse_loadlps (op0, op0, m));
8005 m = adjust_address (op1, V2SFmode, 8);
8006 emit_insn (gen_sse_loadhps (op0, op0, m));
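/* Likewise for single precision: this becomes a "movlps"/"movhps"
   pair, each moving 8 bytes, so no 16-byte-aligned access is
   required.  */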
8009 else if (MEM_P (op0))
8011 /* If we're optimizing for size, movups is the smallest. */
8012 if (optimize_size)
8014 op0 = gen_lowpart (V4SFmode, op0);
8015 op1 = gen_lowpart (V4SFmode, op1);
8016 emit_insn (gen_sse_movups (op0, op1));
8017 return;
8020 /* ??? Similar to the above, only less clear because of the
8021 "typeless stores".  */
8022 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
8023 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8025 op0 = gen_lowpart (V16QImode, op0);
8026 op1 = gen_lowpart (V16QImode, op1);
8027 emit_insn (gen_sse2_movdqu (op0, op1));
8028 return;
8031 if (TARGET_SSE2 && mode == V2DFmode)
8033 m = adjust_address (op0, DFmode, 0);
8034 emit_insn (gen_sse2_storelpd (m, op1));
8035 m = adjust_address (op0, DFmode, 8);
8036 emit_insn (gen_sse2_storehpd (m, op1));
8038 else
8040 if (mode != V4SFmode)
8041 op1 = gen_lowpart (V4SFmode, op1);
8042 m = adjust_address (op0, V2SFmode, 0);
8043 emit_insn (gen_sse_storelps (m, op1));
8044 m = adjust_address (op0, V2SFmode, 8);
8045 emit_insn (gen_sse_storehps (m, op1));
8048 else
8049 gcc_unreachable ();
8052 /* Expand a push in MODE. This is some mode for which we do not support
8053 proper push instructions, at least from the registers that we expect
8054 the value to live in. */
8056 void
8057 ix86_expand_push (enum machine_mode mode, rtx x)
8059 rtx tmp;
8061 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
8062 GEN_INT (-GET_MODE_SIZE (mode)),
8063 stack_pointer_rtx, 1, OPTAB_DIRECT);
8064 if (tmp != stack_pointer_rtx)
8065 emit_move_insn (stack_pointer_rtx, tmp);
8067 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
8068 emit_move_insn (tmp, x);
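/* In effect this emits the equivalent of
     sub  $<size>, %esp
     mov  <x>, (%esp)
   for modes that cannot use a plain push instruction from the
   registers the value is expected to live in.  */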
8071 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
8072 destination to use for the operation. If different from the true
8073 destination in operands[0], a copy operation will be required. */
8075 rtx
8076 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
8077 rtx operands[])
8079 int matching_memory;
8080 rtx src1, src2, dst;
8082 dst = operands[0];
8083 src1 = operands[1];
8084 src2 = operands[2];
8086 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8087 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8088 && (rtx_equal_p (dst, src2)
8089 || immediate_operand (src1, mode)))
8091 rtx temp = src1;
8092 src1 = src2;
8093 src2 = temp;
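/* E.g. "dst = 5 + x" or "dst = x + dst" is canonicalized here to
   "dst = x + 5" or "dst = dst + x", so the destination can match the
   first source operand.  */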
8096 /* If the destination is memory, and we do not have matching source
8097 operands, do things in registers. */
8098 matching_memory = 0;
8099 if (GET_CODE (dst) == MEM)
8101 if (rtx_equal_p (dst, src1))
8102 matching_memory = 1;
8103 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8104 && rtx_equal_p (dst, src2))
8105 matching_memory = 2;
8106 else
8107 dst = gen_reg_rtx (mode);
8110 /* Both source operands cannot be in memory. */
8111 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8113 if (matching_memory != 2)
8114 src2 = force_reg (mode, src2);
8115 else
8116 src1 = force_reg (mode, src1);
8119 /* If the operation is not commutative, source 1 cannot be a constant
8120 or non-matching memory. */
8121 if ((CONSTANT_P (src1)
8122 || (!matching_memory && GET_CODE (src1) == MEM))
8123 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8124 src1 = force_reg (mode, src1);
8126 /* If optimizing, copy to regs to improve CSE */
8127 if (optimize && ! no_new_pseudos)
8129 if (GET_CODE (dst) == MEM)
8130 dst = gen_reg_rtx (mode);
8131 if (GET_CODE (src1) == MEM)
8132 src1 = force_reg (mode, src1);
8133 if (GET_CODE (src2) == MEM)
8134 src2 = force_reg (mode, src2);
8137 src1 = operands[1] = src1;
8138 src2 = operands[2] = src2;
8139 return dst;
8142 /* Similarly, but assume that the destination has already been
8143 set up properly. */
8145 void
8146 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
8147 enum machine_mode mode, rtx operands[])
8149 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
8150 gcc_assert (dst == operands[0]);
8153 /* Attempt to expand a binary operator. Make the expansion closer to the
8154 actual machine than just general_operand, which would allow 3 separate
8155 memory references (one output, two inputs) in a single insn. */
8157 void
8158 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8159 rtx operands[])
8161 rtx src1, src2, dst, op, clob;
8163 dst = ix86_fixup_binary_operands (code, mode, operands);
8164 src1 = operands[1];
8165 src2 = operands[2];
8167 /* Emit the instruction. */
8169 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8170 if (reload_in_progress)
8172 /* Reload doesn't know about the flags register, and doesn't know that
8173 it doesn't want to clobber it. We can only do this with PLUS. */
8174 gcc_assert (code == PLUS);
8175 emit_insn (op);
8177 else
8179 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8180 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8183 /* Fix up the destination if needed. */
8184 if (dst != operands[0])
8185 emit_move_insn (operands[0], dst);
8188 /* Return TRUE or FALSE depending on whether the binary operator meets the
8189 appropriate constraints. */
8191 int
8192 ix86_binary_operator_ok (enum rtx_code code,
8193 enum machine_mode mode ATTRIBUTE_UNUSED,
8194 rtx operands[3])
8196 /* Both source operands cannot be in memory. */
8197 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8198 return 0;
8199 /* If the operation is not commutative, source 1 cannot be a constant. */
8200 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8201 return 0;
8202 /* If the destination is memory, we must have a matching source operand. */
8203 if (GET_CODE (operands[0]) == MEM
8204 && ! (rtx_equal_p (operands[0], operands[1])
8205 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8206 && rtx_equal_p (operands[0], operands[2]))))
8207 return 0;
8208 /* If the operation is not commutative and source 1 is memory, we must
8209 have a matching destination. */
8210 if (GET_CODE (operands[1]) == MEM
8211 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
8212 && ! rtx_equal_p (operands[0], operands[1]))
8213 return 0;
8214 return 1;
8217 /* Attempt to expand a unary operator. Make the expansion closer to the
8218 actual machine than just general_operand, which would allow 2 separate
8219 memory references (one output, one input) in a single insn. */
8221 void
8222 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8223 rtx operands[])
8225 int matching_memory;
8226 rtx src, dst, op, clob;
8228 dst = operands[0];
8229 src = operands[1];
8231 /* If the destination is memory, and we do not have matching source
8232 operands, do things in registers. */
8233 matching_memory = 0;
8234 if (MEM_P (dst))
8236 if (rtx_equal_p (dst, src))
8237 matching_memory = 1;
8238 else
8239 dst = gen_reg_rtx (mode);
8242 /* When source operand is memory, destination must match. */
8243 if (MEM_P (src) && !matching_memory)
8244 src = force_reg (mode, src);
8246 /* If optimizing, copy to regs to improve CSE. */
8247 if (optimize && ! no_new_pseudos)
8249 if (GET_CODE (dst) == MEM)
8250 dst = gen_reg_rtx (mode);
8251 if (GET_CODE (src) == MEM)
8252 src = force_reg (mode, src);
8255 /* Emit the instruction. */
8257 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8258 if (reload_in_progress || code == NOT)
8260 /* Reload doesn't know about the flags register, and doesn't know that
8261 it doesn't want to clobber it. */
8262 gcc_assert (code == NOT);
8263 emit_insn (op);
8265 else
8267 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8268 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8271 /* Fix up the destination if needed. */
8272 if (dst != operands[0])
8273 emit_move_insn (operands[0], dst);
8276 /* Return TRUE or FALSE depending on whether the unary operator meets the
8277 appropriate constraints. */
8279 int
8280 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8281 enum machine_mode mode ATTRIBUTE_UNUSED,
8282 rtx operands[2] ATTRIBUTE_UNUSED)
8284 /* If one of operands is memory, source and destination must match. */
8285 if ((GET_CODE (operands[0]) == MEM
8286 || GET_CODE (operands[1]) == MEM)
8287 && ! rtx_equal_p (operands[0], operands[1]))
8288 return FALSE;
8289 return TRUE;
8292 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
8293 Create a mask for the sign bit in MODE for an SSE register. If VECT is
8294 true, then replicate the mask for all elements of the vector register.
8295 If INVERT is true, then create a mask excluding the sign bit. */
8297 static rtx
8298 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
8300 enum machine_mode vec_mode;
8301 HOST_WIDE_INT hi, lo;
8302 int shift = 63;
8303 rtvec v;
8304 rtx mask;
8306 /* Find the sign bit, sign extended to 2*HWI. */
8307 if (mode == SFmode)
8308 lo = 0x80000000, hi = lo < 0;
8309 else if (HOST_BITS_PER_WIDE_INT >= 64)
8310 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8311 else
8312 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8314 if (invert)
8315 lo = ~lo, hi = ~hi;
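/* E.g. for DFmode the mask is 0x8000000000000000 (just the sign
   bit); with INVERT it becomes 0x7fffffffffffffff, i.e. everything
   but the sign bit, which is what ABS needs.  */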
8317 /* Force this value into the low part of a fp vector constant. */
8318 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8319 mask = gen_lowpart (mode, mask);
8321 if (mode == SFmode)
8323 if (vect)
8324 v = gen_rtvec (4, mask, mask, mask, mask);
8325 else
8326 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8327 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8328 vec_mode = V4SFmode;
8330 else
8332 if (vect)
8333 v = gen_rtvec (2, mask, mask);
8334 else
8335 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8336 vec_mode = V2DFmode;
8339 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
8342 /* Generate code for floating point ABS or NEG. */
8344 void
8345 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
8346 rtx operands[])
8348 rtx mask, set, use, clob, dst, src;
8349 bool matching_memory;
8350 bool use_sse = false;
8351 bool vector_mode = VECTOR_MODE_P (mode);
8352 enum machine_mode elt_mode = mode;
8354 if (vector_mode)
8356 elt_mode = GET_MODE_INNER (mode);
8357 use_sse = true;
8359 else if (TARGET_SSE_MATH)
8360 use_sse = SSE_FLOAT_MODE_P (mode);
8362 /* NEG and ABS performed with SSE use bitwise mask operations.
8363 Create the appropriate mask now. */
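/* NEG then becomes an XOR with the sign-bit mask and ABS an AND with
   the inverted mask, e.g. "xorps MASK, %xmm0" or "andps MASK, %xmm0"
   for single precision values.  */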
8364 if (use_sse)
8365 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
8366 else
8368 /* When not using SSE, we don't use the mask, but prefer to keep the
8369 same general form of the insn pattern to reduce duplication when
8370 it comes time to split. */
8371 mask = const0_rtx;
8374 dst = operands[0];
8375 src = operands[1];
8377 /* If the destination is memory, and we don't have matching source
8378 operands, do things in registers. */
8379 matching_memory = false;
8380 if (MEM_P (dst))
8382 if (rtx_equal_p (dst, src) && (!optimize || no_new_pseudos))
8383 matching_memory = true;
8384 else
8385 dst = gen_reg_rtx (mode);
8387 if (MEM_P (src) && !matching_memory)
8388 src = force_reg (mode, src);
8390 if (vector_mode)
8392 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8393 set = gen_rtx_SET (VOIDmode, dst, set);
8394 emit_insn (set);
8396 else
8398 set = gen_rtx_fmt_e (code, mode, src);
8399 set = gen_rtx_SET (VOIDmode, dst, set);
8400 use = gen_rtx_USE (VOIDmode, mask);
8401 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8402 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8405 if (dst != operands[0])
8406 emit_move_insn (operands[0], dst);
8409 /* Expand a copysign operation. Special case operand 0 being a constant. */
8411 void
8412 ix86_expand_copysign (rtx operands[])
8414 enum machine_mode mode, vmode;
8415 rtx dest, op0, op1, mask, nmask;
8417 dest = operands[0];
8418 op0 = operands[1];
8419 op1 = operands[2];
8421 mode = GET_MODE (dest);
8422 vmode = mode == SFmode ? V4SFmode : V2DFmode;
8424 if (GET_CODE (op0) == CONST_DOUBLE)
8426 rtvec v;
8428 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
8429 op0 = simplify_unary_operation (ABS, mode, op0, mode);
8431 if (op0 == CONST0_RTX (mode))
8432 op0 = CONST0_RTX (vmode);
8433 else
8435 if (mode == SFmode)
8436 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
8437 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8438 else
8439 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
8440 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
8443 mask = ix86_build_signbit_mask (mode, 0, 0);
8445 if (mode == SFmode)
8446 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
8447 else
8448 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
8450 else
8452 nmask = ix86_build_signbit_mask (mode, 0, 1);
8453 mask = ix86_build_signbit_mask (mode, 0, 0);
8455 if (mode == SFmode)
8456 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
8457 else
8458 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
8462 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
8463 be a constant, and so has already been expanded into a vector constant. */
8465 void
8466 ix86_split_copysign_const (rtx operands[])
8468 enum machine_mode mode, vmode;
8469 rtx dest, op0, op1, mask, x;
8471 dest = operands[0];
8472 op0 = operands[1];
8473 op1 = operands[2];
8474 mask = operands[3];
8476 mode = GET_MODE (dest);
8477 vmode = GET_MODE (mask);
8479 dest = simplify_gen_subreg (vmode, dest, mode, 0);
8480 x = gen_rtx_AND (vmode, dest, mask);
8481 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8483 if (op0 != CONST0_RTX (vmode))
8485 x = gen_rtx_IOR (vmode, dest, op0);
8486 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8490 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
8491 so we have to do two masks. */
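/* The split computes, in vector registers,
     dest = (op0 & ~signmask) | (op1 & signmask)
   i.e. the magnitude of op0 combined with the sign of op1.  */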
8493 void
8494 ix86_split_copysign_var (rtx operands[])
8496 enum machine_mode mode, vmode;
8497 rtx dest, scratch, op0, op1, mask, nmask, x;
8499 dest = operands[0];
8500 scratch = operands[1];
8501 op0 = operands[2];
8502 op1 = operands[3];
8503 nmask = operands[4];
8504 mask = operands[5];
8506 mode = GET_MODE (dest);
8507 vmode = GET_MODE (mask);
8509 if (rtx_equal_p (op0, op1))
8511 /* Shouldn't happen often (it's useless, obviously), but when it does
8512 we'd generate incorrect code if we continue below. */
8513 emit_move_insn (dest, op0);
8514 return;
8517 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
8519 gcc_assert (REGNO (op1) == REGNO (scratch));
8521 x = gen_rtx_AND (vmode, scratch, mask);
8522 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8524 dest = mask;
8525 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8526 x = gen_rtx_NOT (vmode, dest);
8527 x = gen_rtx_AND (vmode, x, op0);
8528 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8530 else
8532 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
8534 x = gen_rtx_AND (vmode, scratch, mask);
8536 else /* alternative 2,4 */
8538 gcc_assert (REGNO (mask) == REGNO (scratch));
8539 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
8540 x = gen_rtx_AND (vmode, scratch, op1);
8542 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8544 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
8546 dest = simplify_gen_subreg (vmode, op0, mode, 0);
8547 x = gen_rtx_AND (vmode, dest, nmask);
8549 else /* alternative 3,4 */
8551 gcc_assert (REGNO (nmask) == REGNO (dest));
8552 dest = nmask;
8553 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8554 x = gen_rtx_AND (vmode, dest, op0);
8556 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8559 x = gen_rtx_IOR (vmode, dest, scratch);
8560 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8563 /* Return TRUE or FALSE depending on whether the first SET in INSN
8564 has source and destination with matching CC modes, and that the
8565 CC mode is at least as constrained as REQ_MODE. */
8567 int
8568 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
8570 rtx set;
8571 enum machine_mode set_mode;
8573 set = PATTERN (insn);
8574 if (GET_CODE (set) == PARALLEL)
8575 set = XVECEXP (set, 0, 0);
8576 gcc_assert (GET_CODE (set) == SET);
8577 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
8579 set_mode = GET_MODE (SET_DEST (set));
8580 switch (set_mode)
8582 case CCNOmode:
8583 if (req_mode != CCNOmode
8584 && (req_mode != CCmode
8585 || XEXP (SET_SRC (set), 1) != const0_rtx))
8586 return 0;
8587 break;
8588 case CCmode:
8589 if (req_mode == CCGCmode)
8590 return 0;
8591 /* FALLTHRU */
8592 case CCGCmode:
8593 if (req_mode == CCGOCmode || req_mode == CCNOmode)
8594 return 0;
8595 /* FALLTHRU */
8596 case CCGOCmode:
8597 if (req_mode == CCZmode)
8598 return 0;
8599 /* FALLTHRU */
8600 case CCZmode:
8601 break;
8603 default:
8604 gcc_unreachable ();
8607 return (GET_MODE (SET_SRC (set)) == set_mode);
8610 /* Generate insn patterns to do an integer compare of OPERANDS. */
8612 static rtx
8613 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
8615 enum machine_mode cmpmode;
8616 rtx tmp, flags;
8618 cmpmode = SELECT_CC_MODE (code, op0, op1);
8619 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
8621 /* This is very simple, but making the interface the same as in the
8622 FP case makes the rest of the code easier. */
8623 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
8624 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
8626 /* Return the test that should be put into the flags user, i.e.
8627 the bcc, scc, or cmov instruction. */
8628 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
8631 /* Figure out whether to use ordered or unordered fp comparisons.
8632 Return the appropriate mode to use. */
8634 enum machine_mode
8635 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
8637 /* ??? In order to make all comparisons reversible, we do all comparisons
8638 non-trapping when compiling for IEEE. Once gcc is able to distinguish
8639 between the trapping and nontrapping forms of all comparisons, we can
8640 make inequality comparisons trapping again, since that results in better
8641 code when using FCOM based compares. */
8642 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
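/* CCFPUmode comparisons are emitted with the fucom or ucomi family,
   which do not raise an invalid-operand exception on quiet NaNs;
   CCFPmode allows the trapping fcom / comi forms.  */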
8645 enum machine_mode
8646 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
8648 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8649 return ix86_fp_compare_mode (code);
8650 switch (code)
8652 /* Only zero flag is needed. */
8653 case EQ: /* ZF=0 */
8654 case NE: /* ZF!=0 */
8655 return CCZmode;
8656 /* Codes needing carry flag. */
8657 case GEU: /* CF=0 */
8658 case GTU: /* CF=0 & ZF=0 */
8659 case LTU: /* CF=1 */
8660 case LEU: /* CF=1 | ZF=1 */
8661 return CCmode;
8662 /* Codes possibly doable only with sign flag when
8663 comparing against zero. */
8664 case GE: /* SF=OF or SF=0 */
8665 case LT: /* SF<>OF or SF=1 */
8666 if (op1 == const0_rtx)
8667 return CCGOCmode;
8668 else
8669 /* For other cases Carry flag is not required. */
8670 return CCGCmode;
8671 /* Codes doable only with the sign flag when comparing
8672 against zero, but for which there is no jump instruction,
8673 so we use relational tests against overflow,
8674 which thus needs to be zero. */
8675 case GT: /* ZF=0 & SF=OF */
8676 case LE: /* ZF=1 | SF<>OF */
8677 if (op1 == const0_rtx)
8678 return CCNOmode;
8679 else
8680 return CCGCmode;
8681 /* The strcmp pattern does (use flags), and combine may ask us for the
8682 proper mode. */
8683 case USE:
8684 return CCmode;
8685 default:
8686 gcc_unreachable ();
8690 /* Return the fixed registers used for condition codes. */
8692 static bool
8693 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
8695 *p1 = FLAGS_REG;
8696 *p2 = FPSR_REG;
8697 return true;
8700 /* If two condition code modes are compatible, return a condition code
8701 mode which is compatible with both. Otherwise, return
8702 VOIDmode. */
8704 static enum machine_mode
8705 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
8707 if (m1 == m2)
8708 return m1;
8710 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
8711 return VOIDmode;
8713 if ((m1 == CCGCmode && m2 == CCGOCmode)
8714 || (m1 == CCGOCmode && m2 == CCGCmode))
8715 return CCGCmode;
8717 switch (m1)
8719 default:
8720 gcc_unreachable ();
8722 case CCmode:
8723 case CCGCmode:
8724 case CCGOCmode:
8725 case CCNOmode:
8726 case CCZmode:
8727 switch (m2)
8729 default:
8730 return VOIDmode;
8732 case CCmode:
8733 case CCGCmode:
8734 case CCGOCmode:
8735 case CCNOmode:
8736 case CCZmode:
8737 return CCmode;
8740 case CCFPmode:
8741 case CCFPUmode:
8742 /* These are only compatible with themselves, which we already
8743 checked above. */
8744 return VOIDmode;
8748 /* Return true if we should use an FCOMI instruction for this fp comparison. */
8750 int
8751 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
8753 enum rtx_code swapped_code = swap_condition (code);
8754 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
8755 || (ix86_fp_comparison_cost (swapped_code)
8756 == ix86_fp_comparison_fcomi_cost (swapped_code)));
8759 /* Swap, force into registers, or otherwise massage the two operands
8760 to a fp comparison. The operands are updated in place; the new
8761 comparison code is returned. */
8763 static enum rtx_code
8764 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
8766 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
8767 rtx op0 = *pop0, op1 = *pop1;
8768 enum machine_mode op_mode = GET_MODE (op0);
8769 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
8771 /* All of the unordered compare instructions only work on registers.
8772 The same is true of the fcomi compare instructions. The same is
8773 true of the XFmode compare instructions if not comparing with
8774 zero (ftst insn is used in this case). */
8776 if (!is_sse
8777 && (fpcmp_mode == CCFPUmode
8778 || (op_mode == XFmode
8779 && ! (standard_80387_constant_p (op0) == 1
8780 || standard_80387_constant_p (op1) == 1))
8781 || ix86_use_fcomi_compare (code)))
8783 op0 = force_reg (op_mode, op0);
8784 op1 = force_reg (op_mode, op1);
8786 else
8788 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
8789 things around if they appear profitable, otherwise force op0
8790 into a register. */
8792 if (standard_80387_constant_p (op0) == 0
8793 || (GET_CODE (op0) == MEM
8794 && ! (standard_80387_constant_p (op1) == 0
8795 || GET_CODE (op1) == MEM)))
8797 rtx tmp;
8798 tmp = op0, op0 = op1, op1 = tmp;
8799 code = swap_condition (code);
8802 if (GET_CODE (op0) != REG)
8803 op0 = force_reg (op_mode, op0);
8805 if (CONSTANT_P (op1))
8807 int tmp = standard_80387_constant_p (op1);
8808 if (tmp == 0)
8809 op1 = validize_mem (force_const_mem (op_mode, op1));
8810 else if (tmp == 1)
8812 if (TARGET_CMOVE)
8813 op1 = force_reg (op_mode, op1);
8815 else
8816 op1 = force_reg (op_mode, op1);
8820 /* Try to rearrange the comparison to make it cheaper. */
8821 if (ix86_fp_comparison_cost (code)
8822 > ix86_fp_comparison_cost (swap_condition (code))
8823 && (GET_CODE (op1) == REG || !no_new_pseudos))
8825 rtx tmp;
8826 tmp = op0, op0 = op1, op1 = tmp;
8827 code = swap_condition (code);
8828 if (GET_CODE (op0) != REG)
8829 op0 = force_reg (op_mode, op0);
8832 *pop0 = op0;
8833 *pop1 = op1;
8834 return code;
8837 /* Convert the comparison codes we use to represent FP comparisons to the
8838 integer code that will result in a proper branch. Return UNKNOWN if no
8839 such code is available. */
8841 enum rtx_code
8842 ix86_fp_compare_code_to_integer (enum rtx_code code)
8844 switch (code)
8846 case GT:
8847 return GTU;
8848 case GE:
8849 return GEU;
8850 case ORDERED:
8851 case UNORDERED:
8852 return code;
8853 break;
8854 case UNEQ:
8855 return EQ;
8856 break;
8857 case UNLT:
8858 return LTU;
8859 break;
8860 case UNLE:
8861 return LEU;
8862 break;
8863 case LTGT:
8864 return NE;
8865 break;
8866 default:
8867 return UNKNOWN;
8871 /* Split comparison code CODE into comparisons we can do using branch
8872 instructions. BYPASS_CODE is the comparison code for the branch that
8873 will branch around FIRST_CODE and SECOND_CODE. If one of the branches
8874 is not required, its value is set to UNKNOWN.
8875 We never require more than two branches. */
8877 void
8878 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8879 enum rtx_code *first_code,
8880 enum rtx_code *second_code)
8882 *first_code = code;
8883 *bypass_code = UNKNOWN;
8884 *second_code = UNKNOWN;
8886 /* The fcomi comparison sets flags as follows:
8888 cmp ZF PF CF
8889 > 0 0 0
8890 < 0 0 1
8891 = 1 0 0
8892 un 1 1 1 */
8894 switch (code)
8896 case GT: /* GTU - CF=0 & ZF=0 */
8897 case GE: /* GEU - CF=0 */
8898 case ORDERED: /* PF=0 */
8899 case UNORDERED: /* PF=1 */
8900 case UNEQ: /* EQ - ZF=1 */
8901 case UNLT: /* LTU - CF=1 */
8902 case UNLE: /* LEU - CF=1 | ZF=1 */
8903 case LTGT: /* EQ - ZF=0 */
8904 break;
8905 case LT: /* LTU - CF=1 - fails on unordered */
8906 *first_code = UNLT;
8907 *bypass_code = UNORDERED;
8908 break;
8909 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8910 *first_code = UNLE;
8911 *bypass_code = UNORDERED;
8912 break;
8913 case EQ: /* EQ - ZF=1 - fails on unordered */
8914 *first_code = UNEQ;
8915 *bypass_code = UNORDERED;
8916 break;
8917 case NE: /* NE - ZF=0 - fails on unordered */
8918 *first_code = LTGT;
8919 *second_code = UNORDERED;
8920 break;
8921 case UNGE: /* GEU - CF=0 - fails on unordered */
8922 *first_code = GE;
8923 *second_code = UNORDERED;
8924 break;
8925 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8926 *first_code = GT;
8927 *second_code = UNORDERED;
8928 break;
8929 default:
8930 gcc_unreachable ();
8932 if (!TARGET_IEEE_FP)
8934 *second_code = UNKNOWN;
8935 *bypass_code = UNKNOWN;
8939 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
8940 All of the following functions use the number of instructions as the cost metric.
8941 In the future this should be tweaked to compute bytes for optimize_size and to
8942 take into account the performance of various instructions on various CPUs. */
8943 static int
8944 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8946 if (!TARGET_IEEE_FP)
8947 return 4;
8948 /* The cost of code output by ix86_expand_fp_compare. */
8949 switch (code)
8951 case UNLE:
8952 case UNLT:
8953 case LTGT:
8954 case GT:
8955 case GE:
8956 case UNORDERED:
8957 case ORDERED:
8958 case UNEQ:
8959 return 4;
8960 break;
8961 case LT:
8962 case NE:
8963 case EQ:
8964 case UNGE:
8965 return 5;
8966 break;
8967 case LE:
8968 case UNGT:
8969 return 6;
8970 break;
8971 default:
8972 gcc_unreachable ();
8976 /* Return cost of comparison done using fcomi operation.
8977 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8978 static int
8979 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8981 enum rtx_code bypass_code, first_code, second_code;
8982 /* Return an arbitrarily high cost when the instruction is not supported -
8983 this prevents gcc from using it. */
8984 if (!TARGET_CMOVE)
8985 return 1024;
8986 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8987 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
8990 /* Return cost of comparison done using sahf operation.
8991 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8992 static int
8993 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8995 enum rtx_code bypass_code, first_code, second_code;
8996 /* Return an arbitrarily high cost when the instruction is not preferred -
8997 this prevents gcc from using it. */
8998 if (!TARGET_USE_SAHF && !optimize_size)
8999 return 1024;
9000 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9001 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
9004 /* Compute cost of the comparison done using any method.
9005 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9006 static int
9007 ix86_fp_comparison_cost (enum rtx_code code)
9009 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9010 int min;
9012 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9013 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9015 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9016 if (min > sahf_cost)
9017 min = sahf_cost;
9018 if (min > fcomi_cost)
9019 min = fcomi_cost;
9020 return min;
9023 /* Generate insn patterns to do a floating point compare of OPERANDS. */
9025 static rtx
9026 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9027 rtx *second_test, rtx *bypass_test)
9029 enum machine_mode fpcmp_mode, intcmp_mode;
9030 rtx tmp, tmp2;
9031 int cost = ix86_fp_comparison_cost (code);
9032 enum rtx_code bypass_code, first_code, second_code;
9034 fpcmp_mode = ix86_fp_compare_mode (code);
9035 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9037 if (second_test)
9038 *second_test = NULL_RTX;
9039 if (bypass_test)
9040 *bypass_test = NULL_RTX;
9042 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9044 /* Do fcomi/sahf based test when profitable. */
9045 if ((bypass_code == UNKNOWN || bypass_test)
9046 && (second_code == UNKNOWN || second_test)
9047 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9049 if (TARGET_CMOVE)
9051 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9052 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9053 tmp);
9054 emit_insn (tmp);
9056 else
9058 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9059 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9060 if (!scratch)
9061 scratch = gen_reg_rtx (HImode);
9062 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9063 emit_insn (gen_x86_sahf_1 (scratch));
9066 /* The FP codes work out to act like unsigned. */
9067 intcmp_mode = fpcmp_mode;
9068 code = first_code;
9069 if (bypass_code != UNKNOWN)
9070 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9071 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9072 const0_rtx);
9073 if (second_code != UNKNOWN)
9074 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9075 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9076 const0_rtx);
9078 else
9080 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9081 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9082 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9083 if (!scratch)
9084 scratch = gen_reg_rtx (HImode);
9085 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9087 /* In the unordered case, we have to check C2 for NaN's, which
9088 doesn't happen to work out to anything nice combination-wise.
9089 So do some bit twiddling on the value we've got in AH to come
9090 up with an appropriate set of condition codes. */
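/* In the FNSTSW result copied to AH, C0 is bit 0 (0x01), C2 is bit 2
   (0x04) and C3 is bit 6 (0x40); the 0x45 mask used below therefore
   tests C0, C2 and C3 together.  */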
9092 intcmp_mode = CCNOmode;
9093 switch (code)
9095 case GT:
9096 case UNGT:
9097 if (code == GT || !TARGET_IEEE_FP)
9099 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9100 code = EQ;
9102 else
9104 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9105 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9106 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9107 intcmp_mode = CCmode;
9108 code = GEU;
9110 break;
9111 case LT:
9112 case UNLT:
9113 if (code == LT && TARGET_IEEE_FP)
9115 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9116 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9117 intcmp_mode = CCmode;
9118 code = EQ;
9120 else
9122 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9123 code = NE;
9125 break;
9126 case GE:
9127 case UNGE:
9128 if (code == GE || !TARGET_IEEE_FP)
9130 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9131 code = EQ;
9133 else
9135 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9136 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9137 GEN_INT (0x01)));
9138 code = NE;
9140 break;
9141 case LE:
9142 case UNLE:
9143 if (code == LE && TARGET_IEEE_FP)
9145 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9146 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9147 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9148 intcmp_mode = CCmode;
9149 code = LTU;
9151 else
9153 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9154 code = NE;
9156 break;
9157 case EQ:
9158 case UNEQ:
9159 if (code == EQ && TARGET_IEEE_FP)
9161 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9162 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9163 intcmp_mode = CCmode;
9164 code = EQ;
9166 else
9168 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9169 code = NE;
9170 break;
9172 break;
9173 case NE:
9174 case LTGT:
9175 if (code == NE && TARGET_IEEE_FP)
9177 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9178 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9179 GEN_INT (0x40)));
9180 code = NE;
9182 else
9184 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9185 code = EQ;
9187 break;
9189 case UNORDERED:
9190 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9191 code = NE;
9192 break;
9193 case ORDERED:
9194 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9195 code = EQ;
9196 break;
9198 default:
9199 gcc_unreachable ();
9203 /* Return the test that should be put into the flags user, i.e.
9204 the bcc, scc, or cmov instruction. */
9205 return gen_rtx_fmt_ee (code, VOIDmode,
9206 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9207 const0_rtx);
9210 rtx
9211 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9213 rtx op0, op1, ret;
9214 op0 = ix86_compare_op0;
9215 op1 = ix86_compare_op1;
9217 if (second_test)
9218 *second_test = NULL_RTX;
9219 if (bypass_test)
9220 *bypass_test = NULL_RTX;
9222 if (ix86_compare_emitted)
9224 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9225 ix86_compare_emitted = NULL_RTX;
9227 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9228 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9229 second_test, bypass_test);
9230 else
9231 ret = ix86_expand_int_compare (code, op0, op1);
9233 return ret;
9236 /* Return true if the CODE will result in nontrivial jump sequence. */
9237 bool
9238 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9240 enum rtx_code bypass_code, first_code, second_code;
9241 if (!TARGET_CMOVE)
9242 return true;
9243 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9244 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9247 void
9248 ix86_expand_branch (enum rtx_code code, rtx label)
9250 rtx tmp;
9252 switch (GET_MODE (ix86_compare_op0))
9254 case QImode:
9255 case HImode:
9256 case SImode:
9257 simple:
9258 tmp = ix86_expand_compare (code, NULL, NULL);
9259 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9260 gen_rtx_LABEL_REF (VOIDmode, label),
9261 pc_rtx);
9262 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9263 return;
9265 case SFmode:
9266 case DFmode:
9267 case XFmode:
9269 rtvec vec;
9270 int use_fcomi;
9271 enum rtx_code bypass_code, first_code, second_code;
9273 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9274 &ix86_compare_op1);
9276 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9278 /* Check whether we will use the natural sequence with one jump. If
9279 so, we can expand the jump early. Otherwise delay expansion by
9280 creating a compound insn so as not to confuse the optimizers. */
9281 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9282 && TARGET_CMOVE)
9284 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9285 gen_rtx_LABEL_REF (VOIDmode, label),
9286 pc_rtx, NULL_RTX, NULL_RTX);
9288 else
9290 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9291 ix86_compare_op0, ix86_compare_op1);
9292 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9293 gen_rtx_LABEL_REF (VOIDmode, label),
9294 pc_rtx);
9295 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9297 use_fcomi = ix86_use_fcomi_compare (code);
9298 vec = rtvec_alloc (3 + !use_fcomi);
9299 RTVEC_ELT (vec, 0) = tmp;
9300 RTVEC_ELT (vec, 1)
9301 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9302 RTVEC_ELT (vec, 2)
9303 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9304 if (! use_fcomi)
9305 RTVEC_ELT (vec, 3)
9306 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9308 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9310 return;
9313 case DImode:
9314 if (TARGET_64BIT)
9315 goto simple;
9316 /* Expand DImode branch into multiple compare+branch. */
9318 rtx lo[2], hi[2], label2;
9319 enum rtx_code code1, code2, code3;
9321 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9323 tmp = ix86_compare_op0;
9324 ix86_compare_op0 = ix86_compare_op1;
9325 ix86_compare_op1 = tmp;
9326 code = swap_condition (code);
9328 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9329 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9331 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9332 avoid two branches. This costs one extra insn, so disable when
9333 optimizing for size. */
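/* E.g. a 64-bit "a == b" compare on ia32 is expanded as
     t = (hi(a) ^ hi(b)) | (lo(a) ^ lo(b));  if (t == 0) goto label;
   which needs only one conditional branch.  */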
9335 if ((code == EQ || code == NE)
9336 && (!optimize_size
9337 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9339 rtx xor0, xor1;
9341 xor1 = hi[0];
9342 if (hi[1] != const0_rtx)
9343 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
9344 NULL_RTX, 0, OPTAB_WIDEN);
9346 xor0 = lo[0];
9347 if (lo[1] != const0_rtx)
9348 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
9349 NULL_RTX, 0, OPTAB_WIDEN);
9351 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
9352 NULL_RTX, 0, OPTAB_WIDEN);
9354 ix86_compare_op0 = tmp;
9355 ix86_compare_op1 = const0_rtx;
9356 ix86_expand_branch (code, label);
9357 return;
9360 /* Otherwise, if we are doing a less-than or greater-or-equal-than
9361 comparison, op1 is a constant, and the low word is zero, then we can
9362 just examine the high word. */
9364 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9365 switch (code)
9367 case LT: case LTU: case GE: case GEU:
9368 ix86_compare_op0 = hi[0];
9369 ix86_compare_op1 = hi[1];
9370 ix86_expand_branch (code, label);
9371 return;
9372 default:
9373 break;
9376 /* Otherwise, we need two or three jumps. */
9378 label2 = gen_label_rtx ();
9380 code1 = code;
9381 code2 = swap_condition (code);
9382 code3 = unsigned_condition (code);
9384 switch (code)
9386 case LT: case GT: case LTU: case GTU:
9387 break;
9389 case LE: code1 = LT; code2 = GT; break;
9390 case GE: code1 = GT; code2 = LT; break;
9391 case LEU: code1 = LTU; code2 = GTU; break;
9392 case GEU: code1 = GTU; code2 = LTU; break;
9394 case EQ: code1 = UNKNOWN; code2 = NE; break;
9395 case NE: code2 = UNKNOWN; break;
9397 default:
9398 gcc_unreachable ();
9402 * a < b =>
9403 * if (hi(a) < hi(b)) goto true;
9404 * if (hi(a) > hi(b)) goto false;
9405 * if (lo(a) < lo(b)) goto true;
9406 * false:
9409 ix86_compare_op0 = hi[0];
9410 ix86_compare_op1 = hi[1];
9412 if (code1 != UNKNOWN)
9413 ix86_expand_branch (code1, label);
9414 if (code2 != UNKNOWN)
9415 ix86_expand_branch (code2, label2);
9417 ix86_compare_op0 = lo[0];
9418 ix86_compare_op1 = lo[1];
9419 ix86_expand_branch (code3, label);
9421 if (code2 != UNKNOWN)
9422 emit_label (label2);
9423 return;
9426 default:
9427 gcc_unreachable ();
9431 /* Split branch based on floating point condition. */
9432 void
9433 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9434 rtx target1, rtx target2, rtx tmp, rtx pushed)
9436 rtx second, bypass;
9437 rtx label = NULL_RTX;
9438 rtx condition;
9439 int bypass_probability = -1, second_probability = -1, probability = -1;
9440 rtx i;
9442 if (target2 != pc_rtx)
9444 rtx tmp = target2;
9445 code = reverse_condition_maybe_unordered (code);
9446 target2 = target1;
9447 target1 = tmp;
9450 condition = ix86_expand_fp_compare (code, op1, op2,
9451 tmp, &second, &bypass);
9453 /* Remove pushed operand from stack. */
9454 if (pushed)
9455 ix86_free_from_memory (GET_MODE (pushed));
9457 if (split_branch_probability >= 0)
9459 /* Distribute the probabilities across the jumps.
9460 Assume that BYPASS and SECOND always test
9461 for UNORDERED. */
9462 probability = split_branch_probability;
9464 /* A value of 1 is low enough to make it unnecessary to update the
9465 probability. Later we may run some experiments and see
9466 if unordered values are more frequent in practice. */
9467 if (bypass)
9468 bypass_probability = 1;
9469 if (second)
9470 second_probability = 1;
9472 if (bypass != NULL_RTX)
9474 label = gen_label_rtx ();
9475 i = emit_jump_insn (gen_rtx_SET
9476 (VOIDmode, pc_rtx,
9477 gen_rtx_IF_THEN_ELSE (VOIDmode,
9478 bypass,
9479 gen_rtx_LABEL_REF (VOIDmode,
9480 label),
9481 pc_rtx)));
9482 if (bypass_probability >= 0)
9483 REG_NOTES (i)
9484 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9485 GEN_INT (bypass_probability),
9486 REG_NOTES (i));
9488 i = emit_jump_insn (gen_rtx_SET
9489 (VOIDmode, pc_rtx,
9490 gen_rtx_IF_THEN_ELSE (VOIDmode,
9491 condition, target1, target2)));
9492 if (probability >= 0)
9493 REG_NOTES (i)
9494 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9495 GEN_INT (probability),
9496 REG_NOTES (i));
9497 if (second != NULL_RTX)
9499 i = emit_jump_insn (gen_rtx_SET
9500 (VOIDmode, pc_rtx,
9501 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9502 target2)));
9503 if (second_probability >= 0)
9504 REG_NOTES (i)
9505 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9506 GEN_INT (second_probability),
9507 REG_NOTES (i));
9509 if (label != NULL_RTX)
9510 emit_label (label);
9513 int
9514 ix86_expand_setcc (enum rtx_code code, rtx dest)
9516 rtx ret, tmp, tmpreg, equiv;
9517 rtx second_test, bypass_test;
9519 if (GET_MODE (ix86_compare_op0) == DImode
9520 && !TARGET_64BIT)
9521 return 0; /* FAIL */
9523 gcc_assert (GET_MODE (dest) == QImode);
9525 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9526 PUT_MODE (ret, QImode);
9528 tmp = dest;
9529 tmpreg = dest;
9531 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9532 if (bypass_test || second_test)
9534 rtx test = second_test;
9535 int bypass = 0;
9536 rtx tmp2 = gen_reg_rtx (QImode);
9537 if (bypass_test)
9539 gcc_assert (!second_test);
9540 test = bypass_test;
9541 bypass = 1;
9542 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9544 PUT_MODE (test, QImode);
9545 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9547 if (bypass)
9548 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9549 else
9550 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9553 /* Attach a REG_EQUAL note describing the comparison result. */
9554 if (ix86_compare_op0 && ix86_compare_op1)
9556 equiv = simplify_gen_relational (code, QImode,
9557 GET_MODE (ix86_compare_op0),
9558 ix86_compare_op0, ix86_compare_op1);
9559 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9562 return 1; /* DONE */
9565 /* Expand comparison setting or clearing carry flag. Return true when
9566 successful and set pop for the operation. */
9567 static bool
9568 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9570 enum machine_mode mode =
9571 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9573 /* Do not handle DImode compares, which go through a special path. Also
9574 we can't deal with FP compares yet; this would be possible to add. */
9575 if ((mode == DImode && !TARGET_64BIT))
9576 return false;
9577 if (FLOAT_MODE_P (mode))
9579 rtx second_test = NULL, bypass_test = NULL;
9580 rtx compare_op, compare_seq;
9582 /* Shortcut: the following common codes never translate into carry flag compares. */
9583 if (code == EQ || code == NE || code == UNEQ || code == LTGT
9584 || code == ORDERED || code == UNORDERED)
9585 return false;
9587 /* These comparisons require zero flag; swap operands so they won't. */
9588 if ((code == GT || code == UNLE || code == LE || code == UNGT)
9589 && !TARGET_IEEE_FP)
9591 rtx tmp = op0;
9592 op0 = op1;
9593 op1 = tmp;
9594 code = swap_condition (code);
9597 /* Try to expand the comparison and verify that we end up with a carry
9598 flag based comparison. This fails to be true only when we decide to
9599 expand the comparison using arithmetic, which is not a common scenario. */
9600 start_sequence ();
9601 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9602 &second_test, &bypass_test);
9603 compare_seq = get_insns ();
9604 end_sequence ();
9606 if (second_test || bypass_test)
9607 return false;
9608 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9609 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9610 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
9611 else
9612 code = GET_CODE (compare_op);
9613 if (code != LTU && code != GEU)
9614 return false;
9615 emit_insn (compare_seq);
9616 *pop = compare_op;
9617 return true;
9619 if (!INTEGRAL_MODE_P (mode))
9620 return false;
9621 switch (code)
9623 case LTU:
9624 case GEU:
9625 break;
9627 /* Convert a==0 into (unsigned)a<1. */
9628 case EQ:
9629 case NE:
9630 if (op1 != const0_rtx)
9631 return false;
9632 op1 = const1_rtx;
9633 code = (code == EQ ? LTU : GEU);
9634 break;
9636 /* Convert a>b into b<a or a>=b-1. */
9637 case GTU:
9638 case LEU:
9639 if (GET_CODE (op1) == CONST_INT)
9641 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
9642 /* Bail out on overflow. We could still swap the operands, but that
9643 would force loading the constant into a register. */
9644 if (op1 == const0_rtx
9645 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
9646 return false;
9647 code = (code == GTU ? GEU : LTU);
9649 else
9651 rtx tmp = op1;
9652 op1 = op0;
9653 op0 = tmp;
9654 code = (code == GTU ? LTU : GEU);
9656 break;
9658 /* Convert a>=0 into (unsigned)a<0x80000000. */
9659 case LT:
9660 case GE:
9661 if (mode == DImode || op1 != const0_rtx)
9662 return false;
9663 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9664 code = (code == LT ? GEU : LTU);
9665 break;
9666 case LE:
9667 case GT:
9668 if (mode == DImode || op1 != constm1_rtx)
9669 return false;
9670 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9671 code = (code == LE ? GEU : LTU);
9672 break;
9674 default:
9675 return false;
9677 /* Swapping operands may cause a constant to appear as the first operand. */
9678 if (!nonimmediate_operand (op0, VOIDmode))
9680 if (no_new_pseudos)
9681 return false;
9682 op0 = force_reg (mode, op0);
9684 ix86_compare_op0 = op0;
9685 ix86_compare_op1 = op1;
9686 *pop = ix86_expand_compare (code, NULL, NULL);
9687 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
9688 return true;
9691 int
9692 ix86_expand_int_movcc (rtx operands[])
9694 enum rtx_code code = GET_CODE (operands[1]), compare_code;
9695 rtx compare_seq, compare_op;
9696 rtx second_test, bypass_test;
9697 enum machine_mode mode = GET_MODE (operands[0]);
9698 bool sign_bit_compare_p = false;
9700 start_sequence ();
9701 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9702 compare_seq = get_insns ();
9703 end_sequence ();
9705 compare_code = GET_CODE (compare_op);
9707 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
9708 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
9709 sign_bit_compare_p = true;
9711 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
9712 HImode insns, we'd be swallowed in word prefix ops. */
9714 if ((mode != HImode || TARGET_FAST_PREFIX)
9715 && (mode != DImode || TARGET_64BIT)
9716 && GET_CODE (operands[2]) == CONST_INT
9717 && GET_CODE (operands[3]) == CONST_INT)
9719 rtx out = operands[0];
9720 HOST_WIDE_INT ct = INTVAL (operands[2]);
9721 HOST_WIDE_INT cf = INTVAL (operands[3]);
9722 HOST_WIDE_INT diff;
9724 diff = ct - cf;
9725 /* Sign bit compares are better done using shifts than
9726 sbb. */
9727 if (sign_bit_compare_p
9728 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9729 ix86_compare_op1, &compare_op))
9731 /* Detect overlap between destination and compare sources. */
9732 rtx tmp = out;
9734 if (!sign_bit_compare_p)
9736 bool fpcmp = false;
9738 compare_code = GET_CODE (compare_op);
9740 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9741 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9743 fpcmp = true;
9744 compare_code = ix86_fp_compare_code_to_integer (compare_code);
9747 /* To simplify the rest of the code, restrict to the GEU case. */
9748 if (compare_code == LTU)
9750 HOST_WIDE_INT tmp = ct;
9751 ct = cf;
9752 cf = tmp;
9753 compare_code = reverse_condition (compare_code);
9754 code = reverse_condition (code);
9756 else
9758 if (fpcmp)
9759 PUT_CODE (compare_op,
9760 reverse_condition_maybe_unordered
9761 (GET_CODE (compare_op)));
9762 else
9763 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9765 diff = ct - cf;
9767 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
9768 || reg_overlap_mentioned_p (out, ix86_compare_op1))
9769 tmp = gen_reg_rtx (mode);
9771 if (mode == DImode)
9772 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
9773 else
9774 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
9776 else
9778 if (code == GT || code == GE)
9779 code = reverse_condition (code);
9780 else
9782 HOST_WIDE_INT tmp = ct;
9783 ct = cf;
9784 cf = tmp;
9785 diff = ct - cf;
9787 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
9788 ix86_compare_op1, VOIDmode, 0, -1);
9791 if (diff == 1)
9794 * cmpl op0,op1
9795 * sbbl dest,dest
9796 * [addl dest, ct]
9798 * Size 5 - 8.
9800 if (ct)
9801 tmp = expand_simple_binop (mode, PLUS,
9802 tmp, GEN_INT (ct),
9803 copy_rtx (tmp), 1, OPTAB_DIRECT);
9805 else if (cf == -1)
9808 * cmpl op0,op1
9809 * sbbl dest,dest
9810 * orl $ct, dest
9812 * Size 8.
9814 tmp = expand_simple_binop (mode, IOR,
9815 tmp, GEN_INT (ct),
9816 copy_rtx (tmp), 1, OPTAB_DIRECT);
9818 else if (diff == -1 && ct)
9821 * cmpl op0,op1
9822 * sbbl dest,dest
9823 * notl dest
9824 * [addl dest, cf]
9826 * Size 8 - 11.
9828 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9829 if (cf)
9830 tmp = expand_simple_binop (mode, PLUS,
9831 copy_rtx (tmp), GEN_INT (cf),
9832 copy_rtx (tmp), 1, OPTAB_DIRECT);
9834 else
9837 * cmpl op0,op1
9838 * sbbl dest,dest
9839 * [notl dest]
9840 * andl cf - ct, dest
9841 * [addl dest, ct]
9843 * Size 8 - 11.
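*
* Worked example (added for exposition, not in the original source):
* selecting between ct == 5 and cf == 20, the sbb mask is -1 or 0,
* "andl $15" (cf - ct) leaves 15 or 0, and "addl $5" then gives 20 or 5.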
9846 if (cf == 0)
9848 cf = ct;
9849 ct = 0;
9850 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9853 tmp = expand_simple_binop (mode, AND,
9854 copy_rtx (tmp),
9855 gen_int_mode (cf - ct, mode),
9856 copy_rtx (tmp), 1, OPTAB_DIRECT);
9857 if (ct)
9858 tmp = expand_simple_binop (mode, PLUS,
9859 copy_rtx (tmp), GEN_INT (ct),
9860 copy_rtx (tmp), 1, OPTAB_DIRECT);
9863 if (!rtx_equal_p (tmp, out))
9864 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9866 return 1; /* DONE */
9869 if (diff < 0)
9871 HOST_WIDE_INT tmp;
9872 tmp = ct, ct = cf, cf = tmp;
9873 diff = -diff;
9874 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9876 /* We may be reversing an unordered compare to a normal compare, which
9877 is not valid in general (we may convert a non-trapping condition
9878 into a trapping one); however, on i386 we currently emit all
9879 comparisons unordered. */
9880 compare_code = reverse_condition_maybe_unordered (compare_code);
9881 code = reverse_condition_maybe_unordered (code);
9883 else
9885 compare_code = reverse_condition (compare_code);
9886 code = reverse_condition (code);
9890 compare_code = UNKNOWN;
9891 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9892 && GET_CODE (ix86_compare_op1) == CONST_INT)
9894 if (ix86_compare_op1 == const0_rtx
9895 && (code == LT || code == GE))
9896 compare_code = code;
9897 else if (ix86_compare_op1 == constm1_rtx)
9899 if (code == LE)
9900 compare_code = LT;
9901 else if (code == GT)
9902 compare_code = GE;
9906 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9907 if (compare_code != UNKNOWN
9908 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9909 && (cf == -1 || ct == -1))
9911 /* If lea code below could be used, only optimize
9912 if it results in a 2 insn sequence. */
9914 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9915 || diff == 3 || diff == 5 || diff == 9)
9916 || (compare_code == LT && ct == -1)
9917 || (compare_code == GE && cf == -1))
9920 * notl op1 (if necessary)
9921 * sarl $31, op1
9922 * orl cf, op1
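*
* Illustrative example (added for exposition, not in the original
* source): for dest = (x < 0) ? -1 : 12, "sarl $31" turns x into -1
* or 0 and "orl $12" then yields -1 or 12, with no branch.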
9924 if (ct != -1)
9926 cf = ct;
9927 ct = -1;
9928 code = reverse_condition (code);
9931 out = emit_store_flag (out, code, ix86_compare_op0,
9932 ix86_compare_op1, VOIDmode, 0, -1);
9934 out = expand_simple_binop (mode, IOR,
9935 out, GEN_INT (cf),
9936 out, 1, OPTAB_DIRECT);
9937 if (out != operands[0])
9938 emit_move_insn (operands[0], out);
9940 return 1; /* DONE */
9945 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9946 || diff == 3 || diff == 5 || diff == 9)
9947 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9948 && (mode != DImode
9949 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
9952 * xorl dest,dest
9953 * cmpl op1,op2
9954 * setcc dest
9955 * lea cf(dest*(ct-cf)),dest
9957 * Size 14.
9959 * This also catches the degenerate setcc-only case.
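*
* Worked example (added for exposition, not in the original source):
* with ct == 13 and cf == 4 (diff == 9), setcc leaves 0 or 1 in dest
* and "leal 4(%eax,%eax,8), %eax" computes dest*9 + 4, i.e. 13 or 4.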
9962 rtx tmp;
9963 int nops;
9965 out = emit_store_flag (out, code, ix86_compare_op0,
9966 ix86_compare_op1, VOIDmode, 0, 1);
9968 nops = 0;
9969 /* On x86_64 the lea instruction operates on Pmode, so we need
9970 to do the arithmetic in the proper mode to match. */
9971 if (diff == 1)
9972 tmp = copy_rtx (out);
9973 else
9975 rtx out1;
9976 out1 = copy_rtx (out);
9977 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9978 nops++;
9979 if (diff & 1)
9981 tmp = gen_rtx_PLUS (mode, tmp, out1);
9982 nops++;
9985 if (cf != 0)
9987 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9988 nops++;
9990 if (!rtx_equal_p (tmp, out))
9992 if (nops == 1)
9993 out = force_operand (tmp, copy_rtx (out));
9994 else
9995 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9997 if (!rtx_equal_p (out, operands[0]))
9998 emit_move_insn (operands[0], copy_rtx (out));
10000 return 1; /* DONE */
10004 * General case: Jumpful:
10005 * xorl dest,dest cmpl op1, op2
10006 * cmpl op1, op2 movl ct, dest
10007 * setcc dest jcc 1f
10008 * decl dest movl cf, dest
10009 * andl (cf-ct),dest 1:
10010 * addl ct,dest
10012 * Size 20. Size 14.
10014 * This is reasonably steep, but branch mispredict costs are
10015 * high on modern cpus, so consider failing only if optimizing
10016 * for space.
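*
* Worked example (added for exposition, not in the original source):
* with ct == 7 and cf == 42, setcc gives 1 or 0, decl gives 0 or -1,
* "andl $35" (cf - ct) gives 0 or 35, and "addl $7" gives 7 or 42.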
10019 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10020 && BRANCH_COST >= 2)
10022 if (cf == 0)
10024 cf = ct;
10025 ct = 0;
10026 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10027 /* We may be reversing an unordered compare to a normal compare,
10028 which is not valid in general (we may convert a non-trapping
10029 condition into a trapping one); however, on i386 we currently
10030 emit all comparisons unordered. */
10031 code = reverse_condition_maybe_unordered (code);
10032 else
10034 code = reverse_condition (code);
10035 if (compare_code != UNKNOWN)
10036 compare_code = reverse_condition (compare_code);
10040 if (compare_code != UNKNOWN)
10042 /* notl op1 (if needed)
10043 sarl $31, op1
10044 andl (cf-ct), op1
10045 addl ct, op1
10047 For x < 0 (resp. x <= -1) there will be no notl,
10048 so if possible swap the constants to get rid of the
10049 complement.
10050 True/false will be -1/0 while code below (store flag
10051 followed by decrement) is 0/-1, so the constants need
10052 to be exchanged once more. */
10054 if (compare_code == GE || !cf)
10056 code = reverse_condition (code);
10057 compare_code = LT;
10059 else
10061 HOST_WIDE_INT tmp = cf;
10062 cf = ct;
10063 ct = tmp;
10066 out = emit_store_flag (out, code, ix86_compare_op0,
10067 ix86_compare_op1, VOIDmode, 0, -1);
10069 else
10071 out = emit_store_flag (out, code, ix86_compare_op0,
10072 ix86_compare_op1, VOIDmode, 0, 1);
10074 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10075 copy_rtx (out), 1, OPTAB_DIRECT);
10078 out = expand_simple_binop (mode, AND, copy_rtx (out),
10079 gen_int_mode (cf - ct, mode),
10080 copy_rtx (out), 1, OPTAB_DIRECT);
10081 if (ct)
10082 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10083 copy_rtx (out), 1, OPTAB_DIRECT);
10084 if (!rtx_equal_p (out, operands[0]))
10085 emit_move_insn (operands[0], copy_rtx (out));
10087 return 1; /* DONE */
10091 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10093 /* Try a few things more with specific constants and a variable. */
10095 optab op;
10096 rtx var, orig_out, out, tmp;
10098 if (BRANCH_COST <= 2)
10099 return 0; /* FAIL */
10101 /* If one of the two operands is an interesting constant, load a
10102 constant with the above and mask it in with a logical operation. */
10104 if (GET_CODE (operands[2]) == CONST_INT)
10106 var = operands[3];
10107 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10108 operands[3] = constm1_rtx, op = and_optab;
10109 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10110 operands[3] = const0_rtx, op = ior_optab;
10111 else
10112 return 0; /* FAIL */
10114 else if (GET_CODE (operands[3]) == CONST_INT)
10116 var = operands[2];
10117 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10118 operands[2] = constm1_rtx, op = and_optab;
10119 else if (INTVAL (operands[3]) == -1 && operands[3] != const0_rtx)
10120 operands[2] = const0_rtx, op = ior_optab;
10121 else
10122 return 0; /* FAIL */
10124 else
10125 return 0; /* FAIL */
10127 orig_out = operands[0];
10128 tmp = gen_reg_rtx (mode);
10129 operands[0] = tmp;
10131 /* Recurse to get the constant loaded. */
10132 if (ix86_expand_int_movcc (operands) == 0)
10133 return 0; /* FAIL */
10135 /* Mask in the interesting variable. */
10136 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10137 OPTAB_WIDEN);
10138 if (!rtx_equal_p (out, orig_out))
10139 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10141 return 1; /* DONE */
10145 * For comparison with above,
10147 * movl cf,dest
10148 * movl ct,tmp
10149 * cmpl op1,op2
10150 * cmovcc tmp,dest
10152 * Size 15.
10155 if (! nonimmediate_operand (operands[2], mode))
10156 operands[2] = force_reg (mode, operands[2]);
10157 if (! nonimmediate_operand (operands[3], mode))
10158 operands[3] = force_reg (mode, operands[3]);
10160 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10162 rtx tmp = gen_reg_rtx (mode);
10163 emit_move_insn (tmp, operands[3]);
10164 operands[3] = tmp;
10166 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10168 rtx tmp = gen_reg_rtx (mode);
10169 emit_move_insn (tmp, operands[2]);
10170 operands[2] = tmp;
10173 if (! register_operand (operands[2], VOIDmode)
10174 && (mode == QImode
10175 || ! register_operand (operands[3], VOIDmode)))
10176 operands[2] = force_reg (mode, operands[2]);
10178 if (mode == QImode
10179 && ! register_operand (operands[3], VOIDmode))
10180 operands[3] = force_reg (mode, operands[3]);
10182 emit_insn (compare_seq);
10183 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10184 gen_rtx_IF_THEN_ELSE (mode,
10185 compare_op, operands[2],
10186 operands[3])));
10187 if (bypass_test)
10188 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10189 gen_rtx_IF_THEN_ELSE (mode,
10190 bypass_test,
10191 copy_rtx (operands[3]),
10192 copy_rtx (operands[0]))));
10193 if (second_test)
10194 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10195 gen_rtx_IF_THEN_ELSE (mode,
10196 second_test,
10197 copy_rtx (operands[2]),
10198 copy_rtx (operands[0]))));
10200 return 1; /* DONE */
10203 /* Swap, force into registers, or otherwise massage the two operands
10204 to an sse comparison with a mask result. Thus we differ a bit from
10205 ix86_prepare_fp_compare_args which expects to produce a flags result.
10207 The DEST operand exists to help determine whether to commute commutative
10208 operators. The POP0/POP1 operands are updated in place. The new
10209 comparison code is returned, or UNKNOWN if not implementable. */
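/* Illustrative note (added for exposition, not in the original source):
   for example a GT comparison "a > b" is rewritten here as "b < a"
   (swap_condition turns GT into LT), because only the LT, LE, UNGT and
   UNGE forms are available directly (roughly the cmpltps, cmpleps,
   cmpnleps and cmpnltps encodings).  */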
10211 static enum rtx_code
10212 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10213 rtx *pop0, rtx *pop1)
10215 rtx tmp;
10217 switch (code)
10219 case LTGT:
10220 case UNEQ:
10221 /* We have no LTGT as an operator. We could implement it with
10222 NE & ORDERED, but this requires an extra temporary. It's
10223 not clear that it's worth it. */
10224 return UNKNOWN;
10226 case LT:
10227 case LE:
10228 case UNGT:
10229 case UNGE:
10230 /* These are supported directly. */
10231 break;
10233 case EQ:
10234 case NE:
10235 case UNORDERED:
10236 case ORDERED:
10237 /* For commutative operators, try to canonicalize the destination
10238 operand to be first in the comparison - this helps reload to
10239 avoid extra moves. */
10240 if (!dest || !rtx_equal_p (dest, *pop1))
10241 break;
10242 /* FALLTHRU */
10244 case GE:
10245 case GT:
10246 case UNLE:
10247 case UNLT:
10248 /* These are not supported directly. Swap the comparison operands
10249 to transform into something that is supported. */
10250 tmp = *pop0;
10251 *pop0 = *pop1;
10252 *pop1 = tmp;
10253 code = swap_condition (code);
10254 break;
10256 default:
10257 gcc_unreachable ();
10260 return code;
10263 /* Detect conditional moves that exactly match min/max operational
10264 semantics. Note that this is IEEE safe, as long as we don't
10265 interchange the operands.
10267 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10268 and TRUE if the operation is successful and instructions are emitted. */
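/* Illustrative note (added for exposition, not in the original source):
   the "don't interchange the operands" caveat exists because minss/minps
   are not symmetric - when an operand is a NaN, or for -0.0 vs +0.0,
   the second source operand wins - so "a < b ? a : b" is recognized as
   a min only with the operands kept in exactly this order.  */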
10270 static bool
10271 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10272 rtx cmp_op1, rtx if_true, rtx if_false)
10274 enum machine_mode mode;
10275 bool is_min;
10276 rtx tmp;
10278 if (code == LT)
10280 else if (code == UNGE)
10282 tmp = if_true;
10283 if_true = if_false;
10284 if_false = tmp;
10286 else
10287 return false;
10289 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10290 is_min = true;
10291 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10292 is_min = false;
10293 else
10294 return false;
10296 mode = GET_MODE (dest);
10298 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10299 but MODE may be a vector mode and thus not appropriate. */
10300 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10302 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10303 rtvec v;
10305 if_true = force_reg (mode, if_true);
10306 v = gen_rtvec (2, if_true, if_false);
10307 tmp = gen_rtx_UNSPEC (mode, v, u);
10309 else
10311 code = is_min ? SMIN : SMAX;
10312 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10315 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
10316 return true;
10319 /* Expand an sse vector comparison. Return the register with the result. */
10321 static rtx
10322 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10323 rtx op_true, rtx op_false)
10325 enum machine_mode mode = GET_MODE (dest);
10326 rtx x;
10328 cmp_op0 = force_reg (mode, cmp_op0);
10329 if (!nonimmediate_operand (cmp_op1, mode))
10330 cmp_op1 = force_reg (mode, cmp_op1);
10332 if (optimize
10333 || reg_overlap_mentioned_p (dest, op_true)
10334 || reg_overlap_mentioned_p (dest, op_false))
10335 dest = gen_reg_rtx (mode);
10337 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10338 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10340 return dest;
10343 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10344 operations. This is used for both scalar and vector conditional moves. */
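/* Illustrative sketch (added for exposition, not in the original source):
   the sequence built below is the standard branchless vector select
	dest = (cmp & op_true) | (~cmp & op_false)
   where cmp is a mask of all-ones or all-zeros lanes; with SSE this is
   the familiar andps / andnps / orps triple.  */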
10346 static void
10347 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10349 enum machine_mode mode = GET_MODE (dest);
10350 rtx t2, t3, x;
10352 if (op_false == CONST0_RTX (mode))
10354 op_true = force_reg (mode, op_true);
10355 x = gen_rtx_AND (mode, cmp, op_true);
10356 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10358 else if (op_true == CONST0_RTX (mode))
10360 op_false = force_reg (mode, op_false);
10361 x = gen_rtx_NOT (mode, cmp);
10362 x = gen_rtx_AND (mode, x, op_false);
10363 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10365 else
10367 op_true = force_reg (mode, op_true);
10368 op_false = force_reg (mode, op_false);
10370 t2 = gen_reg_rtx (mode);
10371 if (optimize)
10372 t3 = gen_reg_rtx (mode);
10373 else
10374 t3 = dest;
10376 x = gen_rtx_AND (mode, op_true, cmp);
10377 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10379 x = gen_rtx_NOT (mode, cmp);
10380 x = gen_rtx_AND (mode, x, op_false);
10381 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
10383 x = gen_rtx_IOR (mode, t3, t2);
10384 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10388 /* Expand a floating-point conditional move. Return true if successful. */
10391 ix86_expand_fp_movcc (rtx operands[])
10393 enum machine_mode mode = GET_MODE (operands[0]);
10394 enum rtx_code code = GET_CODE (operands[1]);
10395 rtx tmp, compare_op, second_test, bypass_test;
10397 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
10399 enum machine_mode cmode;
10401 /* Since we have no cmove for sse registers, don't force bad register
10402 allocation just to gain access to it. Deny movcc when the
10403 comparison mode doesn't match the move mode. */
10404 cmode = GET_MODE (ix86_compare_op0);
10405 if (cmode == VOIDmode)
10406 cmode = GET_MODE (ix86_compare_op1);
10407 if (cmode != mode)
10408 return 0;
10410 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10411 &ix86_compare_op0,
10412 &ix86_compare_op1);
10413 if (code == UNKNOWN)
10414 return 0;
10416 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
10417 ix86_compare_op1, operands[2],
10418 operands[3]))
10419 return 1;
10421 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
10422 ix86_compare_op1, operands[2], operands[3]);
10423 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
10424 return 1;
10427 /* The floating point conditional move instructions don't directly
10428 support conditions resulting from a signed integer comparison. */
10430 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10432 /* The floating point conditional move instructions don't directly
10433 support signed integer comparisons. */
10435 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10437 gcc_assert (!second_test && !bypass_test);
10438 tmp = gen_reg_rtx (QImode);
10439 ix86_expand_setcc (code, tmp);
10440 code = NE;
10441 ix86_compare_op0 = tmp;
10442 ix86_compare_op1 = const0_rtx;
10443 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10445 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10447 tmp = gen_reg_rtx (mode);
10448 emit_move_insn (tmp, operands[3]);
10449 operands[3] = tmp;
10451 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10453 tmp = gen_reg_rtx (mode);
10454 emit_move_insn (tmp, operands[2]);
10455 operands[2] = tmp;
10458 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10459 gen_rtx_IF_THEN_ELSE (mode, compare_op,
10460 operands[2], operands[3])));
10461 if (bypass_test)
10462 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10463 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
10464 operands[3], operands[0])));
10465 if (second_test)
10466 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10467 gen_rtx_IF_THEN_ELSE (mode, second_test,
10468 operands[2], operands[0])));
10470 return 1;
10473 /* Expand a floating-point vector conditional move; a vcond operation
10474 rather than a movcc operation. */
10476 bool
10477 ix86_expand_fp_vcond (rtx operands[])
10479 enum rtx_code code = GET_CODE (operands[3]);
10480 rtx cmp;
10482 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10483 &operands[4], &operands[5]);
10484 if (code == UNKNOWN)
10485 return false;
10487 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
10488 operands[5], operands[1], operands[2]))
10489 return true;
10491 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10492 operands[1], operands[2]);
10493 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10494 return true;
10497 /* Expand a signed or unsigned integral vector conditional move. */
10499 bool
10500 ix86_expand_int_vcond (rtx operands[], bool unsignedp)
10502 enum machine_mode mode = GET_MODE (operands[0]);
10503 enum rtx_code code = GET_CODE (operands[3]);
10504 rtx cmp, x;
10506 if (unsignedp)
10507 code = signed_condition (code);
10508 if (code == NE || code == LE || code == GE)
10510 /* Inverse of a supported code. */
10511 x = operands[1];
10512 operands[1] = operands[2];
10513 operands[2] = x;
10514 code = reverse_condition (code);
10516 if (code == LT)
10518 /* Swap of a supported code. */
10519 x = operands[4];
10520 operands[4] = operands[5];
10521 operands[5] = x;
10522 code = swap_condition (code);
10524 gcc_assert (code == EQ || code == GT);
10526 /* Unlike floating-point, we can rely on the optimizers to have already
10527 converted to MIN/MAX expressions, so we don't have to handle that. */
10529 /* Unsigned GT is not directly supported. We can zero-extend QI and
10530 HImode elements to the next wider element size, use a signed compare,
10531 then repack. For three extra instructions, this is definitely a win. */
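/* Worked example (added for exposition, not in the original source):
   comparing the unsigned bytes 0x80 and 0x01, a signed pcmpgtb would
   treat 0x80 as -128 and call it smaller; after zero-extending both to
   0x0080 and 0x0001 the signed 16-bit compare gives the correct
   unsigned answer, and the results are packed back down afterwards.  */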
10532 if (code == GT && unsignedp)
10534 rtx o0l, o0h, o1l, o1h, cl, ch, zero;
10535 enum machine_mode wider;
10536 rtx (*unpackl) (rtx, rtx, rtx);
10537 rtx (*unpackh) (rtx, rtx, rtx);
10538 rtx (*pack) (rtx, rtx, rtx);
10540 switch (mode)
10542 case V16QImode:
10543 wider = V8HImode;
10544 unpackl = gen_sse2_punpcklbw;
10545 unpackh = gen_sse2_punpckhbw;
10546 pack = gen_sse2_packsswb;
10547 break;
10548 case V8HImode:
10549 wider = V4SImode;
10550 unpackl = gen_sse2_punpcklwd;
10551 unpackh = gen_sse2_punpckhwd;
10552 pack = gen_sse2_packssdw;
10553 break;
10554 default:
10555 gcc_unreachable ();
10558 operands[4] = force_reg (mode, operands[4]);
10559 operands[5] = force_reg (mode, operands[5]);
10561 o0l = gen_reg_rtx (wider);
10562 o0h = gen_reg_rtx (wider);
10563 o1l = gen_reg_rtx (wider);
10564 o1h = gen_reg_rtx (wider);
10565 cl = gen_reg_rtx (wider);
10566 ch = gen_reg_rtx (wider);
10567 cmp = gen_reg_rtx (mode);
10568 zero = force_reg (mode, CONST0_RTX (mode));
10570 emit_insn (unpackl (gen_lowpart (mode, o0l), operands[4], zero));
10571 emit_insn (unpackh (gen_lowpart (mode, o0h), operands[4], zero));
10572 emit_insn (unpackl (gen_lowpart (mode, o1l), operands[5], zero));
10573 emit_insn (unpackh (gen_lowpart (mode, o1h), operands[5], zero));
10575 x = gen_rtx_GT (wider, o0l, o1l);
10576 emit_insn (gen_rtx_SET (VOIDmode, cl, x));
10578 x = gen_rtx_GT (wider, o0h, o1h);
10579 emit_insn (gen_rtx_SET (VOIDmode, ch, x));
10581 emit_insn (pack (cmp, cl, ch));
10583 else
10584 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10585 operands[1], operands[2]);
10587 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10588 return true;
10591 /* Expand conditional increment or decrement using adc/sbb instructions.
10592 The default case using setcc followed by the conditional move can be
10593 done by generic code. */
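/* Illustrative example (added for exposition, not in the original source;
   register choices are arbitrary): for unsigned A in %edi and B in %esi,
   "x += (A < B)" can be emitted as
	cmpl	%esi, %edi	; CF = (A < B)
	adcl	$0, %eax	; x += CF
   and the decrement case uses the matching sbbl form, which is
   essentially what this expander emits.  */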
10595 ix86_expand_int_addcc (rtx operands[])
10597 enum rtx_code code = GET_CODE (operands[1]);
10598 rtx compare_op;
10599 rtx val = const0_rtx;
10600 bool fpcmp = false;
10601 enum machine_mode mode = GET_MODE (operands[0]);
10603 if (operands[3] != const1_rtx
10604 && operands[3] != constm1_rtx)
10605 return 0;
10606 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10607 ix86_compare_op1, &compare_op))
10608 return 0;
10609 code = GET_CODE (compare_op);
10611 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10612 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10614 fpcmp = true;
10615 code = ix86_fp_compare_code_to_integer (code);
10618 if (code != LTU)
10620 val = constm1_rtx;
10621 if (fpcmp)
10622 PUT_CODE (compare_op,
10623 reverse_condition_maybe_unordered
10624 (GET_CODE (compare_op)));
10625 else
10626 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10628 PUT_MODE (compare_op, mode);
10630 /* Construct either adc or sbb insn. */
10631 if ((code == LTU) == (operands[3] == constm1_rtx))
10633 switch (GET_MODE (operands[0]))
10635 case QImode:
10636 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
10637 break;
10638 case HImode:
10639 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
10640 break;
10641 case SImode:
10642 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
10643 break;
10644 case DImode:
10645 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10646 break;
10647 default:
10648 gcc_unreachable ();
10651 else
10653 switch (GET_MODE (operands[0]))
10655 case QImode:
10656 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
10657 break;
10658 case HImode:
10659 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
10660 break;
10661 case SImode:
10662 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
10663 break;
10664 case DImode:
10665 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10666 break;
10667 default:
10668 gcc_unreachable ();
10671 return 1; /* DONE */
10675 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
10676 works for floating point parameters and non-offsettable memories.
10677 For pushes, it returns just stack offsets; the values will be saved
10678 in the right order. At most three parts are generated. */
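/* Illustrative example (added for exposition, not in the original source):
   on a 32-bit target an XFmode long double (80 significant bits stored in
   12 bytes) in offsettable memory is returned as three SImode words at
   offsets 0, 4 and 8, while a DFmode operand yields two words at offsets
   0 and 4.  */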
10680 static int
10681 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
10683 int size;
10685 if (!TARGET_64BIT)
10686 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
10687 else
10688 size = (GET_MODE_SIZE (mode) + 4) / 8;
10690 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
10691 gcc_assert (size >= 2 && size <= 3);
10693 /* Optimize constant pool reference to immediates. This is used by fp
10694 moves, that force all constants to memory to allow combining. */
10695 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
10697 rtx tmp = maybe_get_pool_constant (operand);
10698 if (tmp)
10699 operand = tmp;
10702 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
10704 /* The only non-offsettable memories we handle are pushes. */
10705 int ok = push_operand (operand, VOIDmode);
10707 gcc_assert (ok);
10709 operand = copy_rtx (operand);
10710 PUT_MODE (operand, Pmode);
10711 parts[0] = parts[1] = parts[2] = operand;
10712 return size;
10715 if (GET_CODE (operand) == CONST_VECTOR)
10717 enum machine_mode imode = int_mode_for_mode (mode);
10718 /* Caution: if we looked through a constant pool memory above,
10719 the operand may actually have a different mode now. That's
10720 ok, since we want to pun this all the way back to an integer. */
10721 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
10722 gcc_assert (operand != NULL);
10723 mode = imode;
10726 if (!TARGET_64BIT)
10728 if (mode == DImode)
10729 split_di (&operand, 1, &parts[0], &parts[1]);
10730 else
10732 if (REG_P (operand))
10734 gcc_assert (reload_completed);
10735 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
10736 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
10737 if (size == 3)
10738 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
10740 else if (offsettable_memref_p (operand))
10742 operand = adjust_address (operand, SImode, 0);
10743 parts[0] = operand;
10744 parts[1] = adjust_address (operand, SImode, 4);
10745 if (size == 3)
10746 parts[2] = adjust_address (operand, SImode, 8);
10748 else if (GET_CODE (operand) == CONST_DOUBLE)
10750 REAL_VALUE_TYPE r;
10751 long l[4];
10753 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10754 switch (mode)
10756 case XFmode:
10757 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
10758 parts[2] = gen_int_mode (l[2], SImode);
10759 break;
10760 case DFmode:
10761 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
10762 break;
10763 default:
10764 gcc_unreachable ();
10766 parts[1] = gen_int_mode (l[1], SImode);
10767 parts[0] = gen_int_mode (l[0], SImode);
10769 else
10770 gcc_unreachable ();
10773 else
10775 if (mode == TImode)
10776 split_ti (&operand, 1, &parts[0], &parts[1]);
10777 if (mode == XFmode || mode == TFmode)
10779 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
10780 if (REG_P (operand))
10782 gcc_assert (reload_completed);
10783 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
10784 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
10786 else if (offsettable_memref_p (operand))
10788 operand = adjust_address (operand, DImode, 0);
10789 parts[0] = operand;
10790 parts[1] = adjust_address (operand, upper_mode, 8);
10792 else if (GET_CODE (operand) == CONST_DOUBLE)
10794 REAL_VALUE_TYPE r;
10795 long l[4];
10797 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10798 real_to_target (l, &r, mode);
10800 /* Do not use shift by 32 to avoid warning on 32bit systems. */
10801 if (HOST_BITS_PER_WIDE_INT >= 64)
10802 parts[0]
10803 = gen_int_mode
10804 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
10805 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
10806 DImode);
10807 else
10808 parts[0] = immed_double_const (l[0], l[1], DImode);
10810 if (upper_mode == SImode)
10811 parts[1] = gen_int_mode (l[2], SImode);
10812 else if (HOST_BITS_PER_WIDE_INT >= 64)
10813 parts[1]
10814 = gen_int_mode
10815 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
10816 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
10817 DImode);
10818 else
10819 parts[1] = immed_double_const (l[2], l[3], DImode);
10821 else
10822 gcc_unreachable ();
10826 return size;
10829 /* Emit insns to perform a move or push of DI, DF, and XF values.
10830 All required insns are emitted directly (there is no return value).
10831 Operands 2-4 receive the destination parts in the correct order;
10832 operands 5-7 receive the corresponding source parts. */
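/* Illustrative example (added for exposition, not in the original source):
   when splitting a DImode register-to-register move in which the low word
   of the destination is the same register as the high word of the source,
   the high halves must be moved first; the code below detects such
   collisions and reorders the part moves (or, for colliding memory
   addresses, rebuilds the address with an lea) so that nothing is
   clobbered before it has been read.  */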
10834 void
10835 ix86_split_long_move (rtx operands[])
10837 rtx part[2][3];
10838 int nparts;
10839 int push = 0;
10840 int collisions = 0;
10841 enum machine_mode mode = GET_MODE (operands[0]);
10843 /* The DFmode expanders may ask us to move a double.
10844 For a 64-bit target this is a single move. By hiding the fact
10845 here we simplify the i386.md splitters. */
10846 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
10848 /* Optimize constant pool reference to immediates. This is used by
10849 fp moves, that force all constants to memory to allow combining. */
10851 if (GET_CODE (operands[1]) == MEM
10852 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10853 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
10854 operands[1] = get_pool_constant (XEXP (operands[1], 0));
10855 if (push_operand (operands[0], VOIDmode))
10857 operands[0] = copy_rtx (operands[0]);
10858 PUT_MODE (operands[0], Pmode);
10860 else
10861 operands[0] = gen_lowpart (DImode, operands[0]);
10862 operands[1] = gen_lowpart (DImode, operands[1]);
10863 emit_move_insn (operands[0], operands[1]);
10864 return;
10867 /* The only non-offsettable memory we handle is push. */
10868 if (push_operand (operands[0], VOIDmode))
10869 push = 1;
10870 else
10871 gcc_assert (GET_CODE (operands[0]) != MEM
10872 || offsettable_memref_p (operands[0]));
10874 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
10875 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
10877 /* When emitting push, take care for source operands on the stack. */
10878 if (push && GET_CODE (operands[1]) == MEM
10879 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
10881 if (nparts == 3)
10882 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
10883 XEXP (part[1][2], 0));
10884 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
10885 XEXP (part[1][1], 0));
10888 /* We need to do the copy in the right order in case an address register
10889 of the source overlaps the destination. */
10890 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
10892 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
10893 collisions++;
10894 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10895 collisions++;
10896 if (nparts == 3
10897 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
10898 collisions++;
10900 /* Collision in the middle part can be handled by reordering. */
10901 if (collisions == 1 && nparts == 3
10902 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10904 rtx tmp;
10905 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
10906 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
10909 /* If there are more collisions, we can't handle it by reordering.
10910 Do an lea to the last part and use only one colliding move. */
10911 else if (collisions > 1)
10913 rtx base;
10915 collisions = 1;
10917 base = part[0][nparts - 1];
10919 /* Handle the case when the last part isn't valid for lea.
10920 Happens in 64-bit mode storing the 12-byte XFmode. */
10921 if (GET_MODE (base) != Pmode)
10922 base = gen_rtx_REG (Pmode, REGNO (base));
10924 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
10925 part[1][0] = replace_equiv_address (part[1][0], base);
10926 part[1][1] = replace_equiv_address (part[1][1],
10927 plus_constant (base, UNITS_PER_WORD));
10928 if (nparts == 3)
10929 part[1][2] = replace_equiv_address (part[1][2],
10930 plus_constant (base, 8));
10934 if (push)
10936 if (!TARGET_64BIT)
10938 if (nparts == 3)
10940 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
10941 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
10942 emit_move_insn (part[0][2], part[1][2]);
10945 else
10947 /* In 64-bit mode we don't have a 32-bit push available. If this is a
10948 register, that is OK - we will just use the larger counterpart. We also
10949 retype memories - these come from the attempt to avoid a REX prefix on
10950 moving the second half of a TFmode value. */
10951 if (GET_MODE (part[1][1]) == SImode)
10953 switch (GET_CODE (part[1][1]))
10955 case MEM:
10956 part[1][1] = adjust_address (part[1][1], DImode, 0);
10957 break;
10959 case REG:
10960 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
10961 break;
10963 default:
10964 gcc_unreachable ();
10967 if (GET_MODE (part[1][0]) == SImode)
10968 part[1][0] = part[1][1];
10971 emit_move_insn (part[0][1], part[1][1]);
10972 emit_move_insn (part[0][0], part[1][0]);
10973 return;
10976 /* Choose correct order to not overwrite the source before it is copied. */
10977 if ((REG_P (part[0][0])
10978 && REG_P (part[1][1])
10979 && (REGNO (part[0][0]) == REGNO (part[1][1])
10980 || (nparts == 3
10981 && REGNO (part[0][0]) == REGNO (part[1][2]))))
10982 || (collisions > 0
10983 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
10985 if (nparts == 3)
10987 operands[2] = part[0][2];
10988 operands[3] = part[0][1];
10989 operands[4] = part[0][0];
10990 operands[5] = part[1][2];
10991 operands[6] = part[1][1];
10992 operands[7] = part[1][0];
10994 else
10996 operands[2] = part[0][1];
10997 operands[3] = part[0][0];
10998 operands[5] = part[1][1];
10999 operands[6] = part[1][0];
11002 else
11004 if (nparts == 3)
11006 operands[2] = part[0][0];
11007 operands[3] = part[0][1];
11008 operands[4] = part[0][2];
11009 operands[5] = part[1][0];
11010 operands[6] = part[1][1];
11011 operands[7] = part[1][2];
11013 else
11015 operands[2] = part[0][0];
11016 operands[3] = part[0][1];
11017 operands[5] = part[1][0];
11018 operands[6] = part[1][1];
11022 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11023 if (optimize_size)
11025 if (GET_CODE (operands[5]) == CONST_INT
11026 && operands[5] != const0_rtx
11027 && REG_P (operands[2]))
11029 if (GET_CODE (operands[6]) == CONST_INT
11030 && INTVAL (operands[6]) == INTVAL (operands[5]))
11031 operands[6] = operands[2];
11033 if (nparts == 3
11034 && GET_CODE (operands[7]) == CONST_INT
11035 && INTVAL (operands[7]) == INTVAL (operands[5]))
11036 operands[7] = operands[2];
11039 if (nparts == 3
11040 && GET_CODE (operands[6]) == CONST_INT
11041 && operands[6] != const0_rtx
11042 && REG_P (operands[3])
11043 && GET_CODE (operands[7]) == CONST_INT
11044 && INTVAL (operands[7]) == INTVAL (operands[6]))
11045 operands[7] = operands[3];
11048 emit_move_insn (operands[2], operands[5]);
11049 emit_move_insn (operands[3], operands[6]);
11050 if (nparts == 3)
11051 emit_move_insn (operands[4], operands[7]);
11053 return;
11056 /* Helper function of ix86_split_ashldi used to generate an SImode
11057 left shift by a constant, either using a single shift or
11058 a sequence of add instructions. */
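/* Illustrative example (added for exposition, not in the original source):
   a left shift by 2 can be emitted as two "addl %eax, %eax" instructions
   (each add doubles the value) when two adds are no more expensive than
   one "sall $2, %eax" on the CPU being tuned for.  */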
11060 static void
11061 ix86_expand_ashlsi3_const (rtx operand, int count)
11063 if (count == 1)
11064 emit_insn (gen_addsi3 (operand, operand, operand));
11065 else if (!optimize_size
11066 && count * ix86_cost->add <= ix86_cost->shift_const)
11068 int i;
11069 for (i=0; i<count; i++)
11070 emit_insn (gen_addsi3 (operand, operand, operand));
11072 else
11073 emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
11076 void
11077 ix86_split_ashldi (rtx *operands, rtx scratch)
11079 rtx low[2], high[2];
11080 int count;
11082 if (GET_CODE (operands[2]) == CONST_INT)
11084 split_di (operands, 2, low, high);
11085 count = INTVAL (operands[2]) & 63;
11087 if (count >= 32)
11089 emit_move_insn (high[0], low[1]);
11090 emit_move_insn (low[0], const0_rtx);
11092 if (count > 32)
11093 ix86_expand_ashlsi3_const (high[0], count - 32);
11095 else
11097 if (!rtx_equal_p (operands[0], operands[1]))
11098 emit_move_insn (operands[0], operands[1]);
11099 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
11100 ix86_expand_ashlsi3_const (low[0], count);
11102 return;
11105 split_di (operands, 1, low, high);
11107 if (operands[1] == const1_rtx)
11109 /* Assuming we've chosen QImode-capable registers, 1LL << N
11110 can be done with two 32-bit shifts, no branches, no cmoves. */
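/* Worked example (added for exposition, not in the original source): for
   a shift count of 35, bit 5 of the count is set, so the setcc pair below
   initializes low:high to 0:1; shifting each half left by 35 (which the
   hardware masks to 3) then leaves high == 8, i.e. the 64-bit value
   1 << 35.  */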
11111 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11113 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11115 ix86_expand_clear (low[0]);
11116 ix86_expand_clear (high[0]);
11117 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
11119 d = gen_lowpart (QImode, low[0]);
11120 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11121 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11122 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11124 d = gen_lowpart (QImode, high[0]);
11125 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11126 s = gen_rtx_NE (QImode, flags, const0_rtx);
11127 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11130 /* Otherwise, we can get the same results by manually performing
11131 a bit extract operation on bit 5, and then performing the two
11132 shifts. The two methods of getting 0/1 into low/high are exactly
11133 the same size. Avoiding the shift in the bit extract case helps
11134 pentium4 a bit; no one else seems to care much either way. */
11135 else
11137 rtx x;
11139 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11140 x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
11141 else
11142 x = gen_lowpart (SImode, operands[2]);
11143 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11145 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
11146 emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
11147 emit_move_insn (low[0], high[0]);
11148 emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
11151 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
11152 emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
11153 return;
11156 if (operands[1] == constm1_rtx)
11158 /* For -1LL << N, we can avoid the shld instruction, because we
11159 know that we're shifting 0...31 ones into a -1. */
11160 emit_move_insn (low[0], constm1_rtx);
11161 if (optimize_size)
11162 emit_move_insn (high[0], low[0]);
11163 else
11164 emit_move_insn (high[0], constm1_rtx);
11166 else
11168 if (!rtx_equal_p (operands[0], operands[1]))
11169 emit_move_insn (operands[0], operands[1]);
11171 split_di (operands, 1, low, high);
11172 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
11175 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
11177 if (TARGET_CMOVE && scratch)
11179 ix86_expand_clear (scratch);
11180 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
11182 else
11183 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
11186 void
11187 ix86_split_ashrdi (rtx *operands, rtx scratch)
11189 rtx low[2], high[2];
11190 int count;
11192 if (GET_CODE (operands[2]) == CONST_INT)
11194 split_di (operands, 2, low, high);
11195 count = INTVAL (operands[2]) & 63;
11197 if (count == 63)
11199 emit_move_insn (high[0], high[1]);
11200 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
11201 emit_move_insn (low[0], high[0]);
11204 else if (count >= 32)
11206 emit_move_insn (low[0], high[1]);
11207 emit_move_insn (high[0], low[0]);
11208 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
11209 if (count > 32)
11210 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
11212 else
11214 if (!rtx_equal_p (operands[0], operands[1]))
11215 emit_move_insn (operands[0], operands[1]);
11216 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
11217 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
11220 else
11222 if (!rtx_equal_p (operands[0], operands[1]))
11223 emit_move_insn (operands[0], operands[1]);
11225 split_di (operands, 1, low, high);
11227 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
11228 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
11230 if (TARGET_CMOVE && scratch)
11232 emit_move_insn (scratch, high[0]);
11233 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
11234 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
11235 scratch));
11237 else
11238 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
11242 void
11243 ix86_split_lshrdi (rtx *operands, rtx scratch)
11245 rtx low[2], high[2];
11246 int count;
11248 if (GET_CODE (operands[2]) == CONST_INT)
11250 split_di (operands, 2, low, high);
11251 count = INTVAL (operands[2]) & 63;
11253 if (count >= 32)
11255 emit_move_insn (low[0], high[1]);
11256 ix86_expand_clear (high[0]);
11258 if (count > 32)
11259 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
11261 else
11263 if (!rtx_equal_p (operands[0], operands[1]))
11264 emit_move_insn (operands[0], operands[1]);
11265 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
11266 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
11269 else
11271 if (!rtx_equal_p (operands[0], operands[1]))
11272 emit_move_insn (operands[0], operands[1]);
11274 split_di (operands, 1, low, high);
11276 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
11277 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
11279 /* Heh. By reversing the arguments, we can reuse this pattern. */
11280 if (TARGET_CMOVE && scratch)
11282 ix86_expand_clear (scratch);
11283 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
11284 scratch));
11286 else
11287 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
11291 /* Helper function for the string operations below. Tests whether the bits
11292 of VALUE are clear in VARIABLE (i.e. it is suitably aligned); if so, jump to the returned label. */
11293 static rtx
11294 ix86_expand_aligntest (rtx variable, int value)
11296 rtx label = gen_label_rtx ();
11297 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11298 if (GET_MODE (variable) == DImode)
11299 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11300 else
11301 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11302 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
11303 1, label);
11304 return label;
11307 /* Decrease COUNTREG by VALUE. */
11308 static void
11309 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
11311 if (GET_MODE (countreg) == DImode)
11312 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
11313 else
11314 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
11317 /* Zero extend a possibly-SImode EXP to a Pmode register. */
11319 ix86_zero_extend_to_Pmode (rtx exp)
11321 rtx r;
11322 if (GET_MODE (exp) == VOIDmode)
11323 return force_reg (Pmode, exp);
11324 if (GET_MODE (exp) == Pmode)
11325 return copy_to_mode_reg (Pmode, exp);
11326 r = gen_reg_rtx (Pmode);
11327 emit_insn (gen_zero_extendsidi2 (r, exp));
11328 return r;
11331 /* Expand string move (memcpy) operation. Use i386 string operations when
11332 profitable. expand_clrmem contains similar code. */
11334 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
11336 rtx srcreg, destreg, countreg, srcexp, destexp;
11337 enum machine_mode counter_mode;
11338 HOST_WIDE_INT align = 0;
11339 unsigned HOST_WIDE_INT count = 0;
11341 if (GET_CODE (align_exp) == CONST_INT)
11342 align = INTVAL (align_exp);
11344 /* Can't use any of this if the user has appropriated esi or edi. */
11345 if (global_regs[4] || global_regs[5])
11346 return 0;
11348 /* This simple hack avoids all inlining code and simplifies code below. */
11349 if (!TARGET_ALIGN_STRINGOPS)
11350 align = 64;
11352 if (GET_CODE (count_exp) == CONST_INT)
11354 count = INTVAL (count_exp);
11355 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11356 return 0;
11359 /* Figure out proper mode for counter. For 32bits it is always SImode,
11360 for 64bits use SImode when possible, otherwise DImode.
11361 Set count to number of bytes copied when known at compile time. */
11362 if (!TARGET_64BIT
11363 || GET_MODE (count_exp) == SImode
11364 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11365 counter_mode = SImode;
11366 else
11367 counter_mode = DImode;
11369 gcc_assert (counter_mode == SImode || counter_mode == DImode);
11371 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11372 if (destreg != XEXP (dst, 0))
11373 dst = replace_equiv_address_nv (dst, destreg);
11374 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11375 if (srcreg != XEXP (src, 0))
11376 src = replace_equiv_address_nv (src, srcreg);
11378 /* When optimizing for size emit simple rep ; movsb instruction for
11379 counts not divisible by 4, except when (movsl;)*(movsw;)?(movsb;)?
11380 sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
11381 Size of the (movsl;)*(movsw;)?(movsb;)? sequence is
11382 count / 4 + (count & 3), the other sequence is either 4 or 7 bytes,
11383 but we don't know whether upper 24 (resp. 56) bits of %ecx will be
11384 known to be zero or not. The rep; movsb sequence causes higher
11385 register pressure though, so take that into account. */
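/* Worked example (added for exposition, not in the original source): for
   count == 11 the inline form costs 11/4 + (11 & 3) = 2 + 3 = 5 bytes
   (two movsl, one movsw, one movsb) versus 7 bytes for
   "movl $11, %ecx; rep; movsb", so the inline sequence wins here.  */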
11387 if ((!optimize || optimize_size)
11388 && (count == 0
11389 || ((count & 0x03)
11390 && (!optimize_size
11391 || count > 5 * 4
11392 || (count & 3) + count / 4 > 6))))
11394 emit_insn (gen_cld ());
11395 countreg = ix86_zero_extend_to_Pmode (count_exp);
11396 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11397 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
11398 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
11399 destexp, srcexp));
11402 /* For constant aligned (or small unaligned) copies use rep movsl
11403 followed by code copying the rest. For PentiumPro ensure 8 byte
11404 alignment to allow rep movsl acceleration. */
11406 else if (count != 0
11407 && (align >= 8
11408 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11409 || optimize_size || count < (unsigned int) 64))
11411 unsigned HOST_WIDE_INT offset = 0;
11412 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11413 rtx srcmem, dstmem;
11415 emit_insn (gen_cld ());
11416 if (count & ~(size - 1))
11418 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
11420 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
11422 while (offset < (count & ~(size - 1)))
11424 srcmem = adjust_automodify_address_nv (src, movs_mode,
11425 srcreg, offset);
11426 dstmem = adjust_automodify_address_nv (dst, movs_mode,
11427 destreg, offset);
11428 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11429 offset += size;
11432 else
11434 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
11435 & (TARGET_64BIT ? -1 : 0x3fffffff));
11436 countreg = copy_to_mode_reg (counter_mode, countreg);
11437 countreg = ix86_zero_extend_to_Pmode (countreg);
11439 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11440 GEN_INT (size == 4 ? 2 : 3));
11441 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11442 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11444 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11445 countreg, destexp, srcexp));
11446 offset = count & ~(size - 1);
11449 if (size == 8 && (count & 0x04))
11451 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
11452 offset);
11453 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
11454 offset);
11455 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11456 offset += 4;
11458 if (count & 0x02)
11460 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
11461 offset);
11462 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
11463 offset);
11464 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11465 offset += 2;
11467 if (count & 0x01)
11469 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
11470 offset);
11471 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
11472 offset);
11473 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11476 /* The generic code based on the glibc implementation:
11477 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
11478 allowing accelerated copying there)
11479 - copy the data using rep movsl
11480 - copy the rest. */
11481 else
11483 rtx countreg2;
11484 rtx label = NULL;
11485 rtx srcmem, dstmem;
11486 int desired_alignment = (TARGET_PENTIUMPRO
11487 && (count == 0 || count >= (unsigned int) 260)
11488 ? 8 : UNITS_PER_WORD);
11489 /* Get rid of MEM_OFFSETs, they won't be accurate. */
11490 dst = change_address (dst, BLKmode, destreg);
11491 src = change_address (src, BLKmode, srcreg);
11493 /* In case we don't know anything about the alignment, default to
11494 library version, since it is usually equally fast and results in
11495 shorter code.
11497 Also emit call when we know that the count is large and call overhead
11498 will not be important. */
11499 if (!TARGET_INLINE_ALL_STRINGOPS
11500 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11501 return 0;
11503 if (TARGET_SINGLE_STRINGOP)
11504 emit_insn (gen_cld ());
11506 countreg2 = gen_reg_rtx (Pmode);
11507 countreg = copy_to_mode_reg (counter_mode, count_exp);
11509 /* We don't use loops to align destination and to copy parts smaller
11510 than 4 bytes, because gcc is able to optimize such code better (in
11511 the case the destination or the count really is aligned, gcc is often
11512 able to predict the branches) and also it is friendlier to the
11513 hardware branch prediction.
11515 Using loops is beneficial for generic case, because we can
11516 handle small counts using the loops. Many CPUs (such as Athlon)
11517 have large REP prefix setup costs.
11519 This is quite costly. Maybe we can revisit this decision later or
11520 add some customizability to this code. */
11522 if (count == 0 && align < desired_alignment)
11524 label = gen_label_rtx ();
11525 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11526 LEU, 0, counter_mode, 1, label);
11528 if (align <= 1)
11530 rtx label = ix86_expand_aligntest (destreg, 1);
11531 srcmem = change_address (src, QImode, srcreg);
11532 dstmem = change_address (dst, QImode, destreg);
11533 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11534 ix86_adjust_counter (countreg, 1);
11535 emit_label (label);
11536 LABEL_NUSES (label) = 1;
11538 if (align <= 2)
11540 rtx label = ix86_expand_aligntest (destreg, 2);
11541 srcmem = change_address (src, HImode, srcreg);
11542 dstmem = change_address (dst, HImode, destreg);
11543 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11544 ix86_adjust_counter (countreg, 2);
11545 emit_label (label);
11546 LABEL_NUSES (label) = 1;
11548 if (align <= 4 && desired_alignment > 4)
11550 rtx label = ix86_expand_aligntest (destreg, 4);
11551 srcmem = change_address (src, SImode, srcreg);
11552 dstmem = change_address (dst, SImode, destreg);
11553 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11554 ix86_adjust_counter (countreg, 4);
11555 emit_label (label);
11556 LABEL_NUSES (label) = 1;
11559 if (label && desired_alignment > 4 && !TARGET_64BIT)
11561 emit_label (label);
11562 LABEL_NUSES (label) = 1;
11563 label = NULL_RTX;
11565 if (!TARGET_SINGLE_STRINGOP)
11566 emit_insn (gen_cld ());
11567 if (TARGET_64BIT)
11569 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11570 GEN_INT (3)));
11571 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11573 else
11575 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11576 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11578 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11579 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11580 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11581 countreg2, destexp, srcexp));
11583 if (label)
11585 emit_label (label);
11586 LABEL_NUSES (label) = 1;
11588 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11590 srcmem = change_address (src, SImode, srcreg);
11591 dstmem = change_address (dst, SImode, destreg);
11592 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11594 if ((align <= 4 || count == 0) && TARGET_64BIT)
11596 rtx label = ix86_expand_aligntest (countreg, 4);
11597 srcmem = change_address (src, SImode, srcreg);
11598 dstmem = change_address (dst, SImode, destreg);
11599 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11600 emit_label (label);
11601 LABEL_NUSES (label) = 1;
11603 if (align > 2 && count != 0 && (count & 2))
11605 srcmem = change_address (src, HImode, srcreg);
11606 dstmem = change_address (dst, HImode, destreg);
11607 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11609 if (align <= 2 || count == 0)
11611 rtx label = ix86_expand_aligntest (countreg, 2);
11612 srcmem = change_address (src, HImode, srcreg);
11613 dstmem = change_address (dst, HImode, destreg);
11614 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11615 emit_label (label);
11616 LABEL_NUSES (label) = 1;
11618 if (align > 1 && count != 0 && (count & 1))
11620 srcmem = change_address (src, QImode, srcreg);
11621 dstmem = change_address (dst, QImode, destreg);
11622 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11624 if (align <= 1 || count == 0)
11626 rtx label = ix86_expand_aligntest (countreg, 1);
11627 srcmem = change_address (src, QImode, srcreg);
11628 dstmem = change_address (dst, QImode, destreg);
11629 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11630 emit_label (label);
11631 LABEL_NUSES (label) = 1;
11635 return 1;
11638 /* Expand string clear operation (bzero). Use i386 string operations when
11639 profitable. expand_movmem contains similar code. */
11641 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
11643 rtx destreg, zeroreg, countreg, destexp;
11644 enum machine_mode counter_mode;
11645 HOST_WIDE_INT align = 0;
11646 unsigned HOST_WIDE_INT count = 0;
11648 if (GET_CODE (align_exp) == CONST_INT)
11649 align = INTVAL (align_exp);
11651 /* Can't use any of this if the user has appropriated esi. */
11652 if (global_regs[4])
11653 return 0;
11655 /* This simple hack avoids all inlining code and simplifies code below. */
11656 if (!TARGET_ALIGN_STRINGOPS)
11657 align = 32;
11659 if (GET_CODE (count_exp) == CONST_INT)
11661 count = INTVAL (count_exp);
11662 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11663 return 0;
11665 /* Figure out proper mode for counter. For 32bits it is always SImode,
11666 for 64bits use SImode when possible, otherwise DImode.
11667 Set count to number of bytes copied when known at compile time. */
11668 if (!TARGET_64BIT
11669 || GET_MODE (count_exp) == SImode
11670 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11671 counter_mode = SImode;
11672 else
11673 counter_mode = DImode;
11675 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11676 if (destreg != XEXP (dst, 0))
11677 dst = replace_equiv_address_nv (dst, destreg);
11680 /* When optimizing for size emit simple rep ; stosb instruction for
11681 counts not divisible by 4. The movl $N, %ecx; rep; stosb
11682 sequence is 7 bytes long, so if optimizing for size and count is
11683 small enough that some stosl, stosw and stosb instructions without
11684 rep are shorter, fall back into the next if. */
11686 if ((!optimize || optimize_size)
11687 && (count == 0
11688 || ((count & 0x03)
11689 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
11691 emit_insn (gen_cld ());
11693 countreg = ix86_zero_extend_to_Pmode (count_exp);
11694 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
11695 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11696 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
11698 else if (count != 0
11699 && (align >= 8
11700 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11701 || optimize_size || count < (unsigned int) 64))
11703 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11704 unsigned HOST_WIDE_INT offset = 0;
11706 emit_insn (gen_cld ());
11708 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
11709 if (count & ~(size - 1))
11711 unsigned HOST_WIDE_INT repcount;
11712 unsigned int max_nonrep;
11714 repcount = count >> (size == 4 ? 2 : 3);
11715 if (!TARGET_64BIT)
11716 repcount &= 0x3fffffff;
11718 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
11719 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
11720 bytes. In both cases the latter seems to be faster for small
11721 values of N. */
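/* Worked example (added for exposition, not in the original source): with
   size == 4 and repcount == 5, five stosl instructions take 5 bytes versus
   7 bytes for the movl/rep form, so the non-rep sequence is chosen
   (max_nonrep defaults to 7 here, or 3 for Pentium 4/Nocona below).  */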
11722 max_nonrep = size == 4 ? 7 : 4;
11723 if (!optimize_size)
11724 switch (ix86_tune)
11726 case PROCESSOR_PENTIUM4:
11727 case PROCESSOR_NOCONA:
11728 max_nonrep = 3;
11729 break;
11730 default:
11731 break;
11734 if (repcount <= max_nonrep)
11735 while (repcount-- > 0)
11737 rtx mem = adjust_automodify_address_nv (dst,
11738 GET_MODE (zeroreg),
11739 destreg, offset);
11740 emit_insn (gen_strset (destreg, mem, zeroreg));
11741 offset += size;
11743 else
11745 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
11746 countreg = ix86_zero_extend_to_Pmode (countreg);
11747 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11748 GEN_INT (size == 4 ? 2 : 3));
11749 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11750 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
11751 destexp));
11752 offset = count & ~(size - 1);
11755 if (size == 8 && (count & 0x04))
11757 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
11758 offset);
11759 emit_insn (gen_strset (destreg, mem,
11760 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11761 offset += 4;
11763 if (count & 0x02)
11765 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
11766 offset);
11767 emit_insn (gen_strset (destreg, mem,
11768 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11769 offset += 2;
11771 if (count & 0x01)
11773 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
11774 offset);
11775 emit_insn (gen_strset (destreg, mem,
11776 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11779 else
11781 rtx countreg2;
11782 rtx label = NULL;
11783 /* Compute desired alignment of the string operation. */
11784 int desired_alignment = (TARGET_PENTIUMPRO
11785 && (count == 0 || count >= (unsigned int) 260)
11786 ? 8 : UNITS_PER_WORD);
11788 /* In case we don't know anything about the alignment, default to the
11789 library version, since it is usually equally fast and results in
11790 shorter code.
11792 Also emit a call when we know that the count is large and the call overhead
11793 will not be important. */
11794 if (!TARGET_INLINE_ALL_STRINGOPS
11795 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11796 return 0;
11798 if (TARGET_SINGLE_STRINGOP)
11799 emit_insn (gen_cld ());
11801 countreg2 = gen_reg_rtx (Pmode);
11802 countreg = copy_to_mode_reg (counter_mode, count_exp);
11803 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
11804 /* Get rid of MEM_OFFSET, it won't be accurate. */
11805 dst = change_address (dst, BLKmode, destreg);
11807 if (count == 0 && align < desired_alignment)
11809 label = gen_label_rtx ();
11810 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11811 LEU, 0, counter_mode, 1, label);
11813 if (align <= 1)
11815 rtx label = ix86_expand_aligntest (destreg, 1);
11816 emit_insn (gen_strset (destreg, dst,
11817 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11818 ix86_adjust_counter (countreg, 1);
11819 emit_label (label);
11820 LABEL_NUSES (label) = 1;
11822 if (align <= 2)
11824 rtx label = ix86_expand_aligntest (destreg, 2);
11825 emit_insn (gen_strset (destreg, dst,
11826 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11827 ix86_adjust_counter (countreg, 2);
11828 emit_label (label);
11829 LABEL_NUSES (label) = 1;
11831 if (align <= 4 && desired_alignment > 4)
11833 rtx label = ix86_expand_aligntest (destreg, 4);
11834 emit_insn (gen_strset (destreg, dst,
11835 (TARGET_64BIT
11836 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
11837 : zeroreg)));
11838 ix86_adjust_counter (countreg, 4);
11839 emit_label (label);
11840 LABEL_NUSES (label) = 1;
11843 if (label && desired_alignment > 4 && !TARGET_64BIT)
11845 emit_label (label);
11846 LABEL_NUSES (label) = 1;
11847 label = NULL_RTX;
11850 if (!TARGET_SINGLE_STRINGOP)
11851 emit_insn (gen_cld ());
11852 if (TARGET_64BIT)
11854 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11855 GEN_INT (3)));
11856 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
11858 else
11860 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
11861 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
11863 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11864 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
11866 if (label)
11868 emit_label (label);
11869 LABEL_NUSES (label) = 1;
11872 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11873 emit_insn (gen_strset (destreg, dst,
11874 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11875 if (TARGET_64BIT && (align <= 4 || count == 0))
11877 rtx label = ix86_expand_aligntest (countreg, 4);
11878 emit_insn (gen_strset (destreg, dst,
11879 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11880 emit_label (label);
11881 LABEL_NUSES (label) = 1;
11883 if (align > 2 && count != 0 && (count & 2))
11884 emit_insn (gen_strset (destreg, dst,
11885 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11886 if (align <= 2 || count == 0)
11888 rtx label = ix86_expand_aligntest (countreg, 2);
11889 emit_insn (gen_strset (destreg, dst,
11890 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11891 emit_label (label);
11892 LABEL_NUSES (label) = 1;
11894 if (align > 1 && count != 0 && (count & 1))
11895 emit_insn (gen_strset (destreg, dst,
11896 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11897 if (align <= 1 || count == 0)
11899 rtx label = ix86_expand_aligntest (countreg, 1);
11900 emit_insn (gen_strset (destreg, dst,
11901 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11902 emit_label (label);
11903 LABEL_NUSES (label) = 1;
11906 return 1;
11909 /* Expand strlen. */
11911 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
11913 rtx addr, scratch1, scratch2, scratch3, scratch4;
11915 /* The generic case of the strlen expander is long. Avoid
11916 expanding it unless TARGET_INLINE_ALL_STRINGOPS. */
11918 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11919 && !TARGET_INLINE_ALL_STRINGOPS
11920 && !optimize_size
11921 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
11922 return 0;
11924 addr = force_reg (Pmode, XEXP (src, 0));
11925 scratch1 = gen_reg_rtx (Pmode);
11927 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11928 && !optimize_size)
11930 /* It seems that some optimizers do not combine a call like
11931 foo(strlen(bar), strlen(bar));
11932 when the move and the subtraction are done here. They do calculate
11933 the length just once when these instructions are emitted inside of
11934 output_strlen_unroll(). But since &bar[strlen(bar)] is
11935 often used, and one fewer register is live for the lifetime of
11936 output_strlen_unroll(), this is better. */
11938 emit_move_insn (out, addr);
11940 ix86_expand_strlensi_unroll_1 (out, src, align);
11942 /* strlensi_unroll_1 returns the address of the zero at the end of
11943 the string, like memchr(), so compute the length by subtracting
11944 the start address. */
11945 if (TARGET_64BIT)
11946 emit_insn (gen_subdi3 (out, out, addr));
11947 else
11948 emit_insn (gen_subsi3 (out, out, addr));
11950 else
11952 rtx unspec;
11953 scratch2 = gen_reg_rtx (Pmode);
11954 scratch3 = gen_reg_rtx (Pmode);
11955 scratch4 = force_reg (Pmode, constm1_rtx);
11957 emit_move_insn (scratch3, addr);
11958 eoschar = force_reg (QImode, eoschar);
11960 emit_insn (gen_cld ());
11961 src = replace_equiv_address_nv (src, scratch3);
11963 /* If .md starts supporting :P, this can be done in .md. */
11964 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
11965 scratch4), UNSPEC_SCAS);
11966 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
11967 if (TARGET_64BIT)
11969 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
11970 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
11972 else
11974 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
11975 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
11978 return 1;
11981 /* Expand the appropriate insns for doing strlen if not just doing
11982 repnz; scasb
11984 out = result, initialized with the start address
11985 align_rtx = alignment of the address.
11986 scratch = scratch register, initialized with the start address when
11987 not aligned, otherwise undefined
11989 This is just the body. It needs the initializations mentioned above and
11990 some address computing at the end. These things are done in i386.md. */
11992 static void
11993 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
11995 int align;
11996 rtx tmp;
11997 rtx align_2_label = NULL_RTX;
11998 rtx align_3_label = NULL_RTX;
11999 rtx align_4_label = gen_label_rtx ();
12000 rtx end_0_label = gen_label_rtx ();
12001 rtx mem;
12002 rtx tmpreg = gen_reg_rtx (SImode);
12003 rtx scratch = gen_reg_rtx (SImode);
12004 rtx cmp;
12006 align = 0;
12007 if (GET_CODE (align_rtx) == CONST_INT)
12008 align = INTVAL (align_rtx);
12010 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12012 /* Is there a known alignment and is it less than 4? */
12013 if (align < 4)
12015 rtx scratch1 = gen_reg_rtx (Pmode);
12016 emit_move_insn (scratch1, out);
12017 /* Is there a known alignment and is it not 2? */
12018 if (align != 2)
12020 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
12021 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
12023 /* Keep just the 2 lower bits (the mask is 3). */
12024 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12025 NULL_RTX, 0, OPTAB_WIDEN);
12027 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12028 Pmode, 1, align_4_label);
12029 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12030 Pmode, 1, align_2_label);
12031 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12032 Pmode, 1, align_3_label);
12034 else
12036 /* Since the alignment is 2, we have to check 2 or 0 bytes;
12037 check whether the pointer is aligned to a 4-byte boundary. */
12039 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12040 NULL_RTX, 0, OPTAB_WIDEN);
12042 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12043 Pmode, 1, align_4_label);
12046 mem = change_address (src, QImode, out);
12048 /* Now compare the bytes. */
12050 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
12051 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12052 QImode, 1, end_0_label);
12054 /* Increment the address. */
12055 if (TARGET_64BIT)
12056 emit_insn (gen_adddi3 (out, out, const1_rtx));
12057 else
12058 emit_insn (gen_addsi3 (out, out, const1_rtx));
12060 /* Not needed with an alignment of 2. */
12061 if (align != 2)
12063 emit_label (align_2_label);
12065 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12066 end_0_label);
12068 if (TARGET_64BIT)
12069 emit_insn (gen_adddi3 (out, out, const1_rtx));
12070 else
12071 emit_insn (gen_addsi3 (out, out, const1_rtx));
12073 emit_label (align_3_label);
12076 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12077 end_0_label);
12079 if (TARGET_64BIT)
12080 emit_insn (gen_adddi3 (out, out, const1_rtx));
12081 else
12082 emit_insn (gen_addsi3 (out, out, const1_rtx));
12085 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
12086 align this loop; doing so only makes the program larger and does not
12087 speed it up. */
12088 emit_label (align_4_label);
12090 mem = change_address (src, SImode, out);
12091 emit_move_insn (scratch, mem);
12092 if (TARGET_64BIT)
12093 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12094 else
12095 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
12097 /* This formula yields a nonzero result iff one of the bytes is zero.
12098 This saves three branches inside the loop and many cycles. */
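/* Concretely, the insns below compute
     tmpreg = (scratch - 0x01010101) & ~scratch & 0x80808080;
   the lowest set 0x80 bit of tmpreg marks the first zero byte of the word
   just loaded, and tmpreg is zero iff the word contains no zero byte.  */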
12100 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12101 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12102 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12103 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12104 gen_int_mode (0x80808080, SImode)));
12105 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12106 align_4_label);
12108 if (TARGET_CMOVE)
12110 rtx reg = gen_reg_rtx (SImode);
12111 rtx reg2 = gen_reg_rtx (Pmode);
12112 emit_move_insn (reg, tmpreg);
12113 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12115 /* If zero is not in the first two bytes, move two bytes forward. */
12116 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12117 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12118 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12119 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12120 gen_rtx_IF_THEN_ELSE (SImode, tmp,
12121 reg,
12122 tmpreg)));
12123 /* Emit the lea manually to avoid clobbering the flags. */
12124 emit_insn (gen_rtx_SET (SImode, reg2,
12125 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12127 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12128 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12129 emit_insn (gen_rtx_SET (VOIDmode, out,
12130 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12131 reg2,
12132 out)));
12135 else
12137 rtx end_2_label = gen_label_rtx ();
12138 /* Is zero in the first two bytes? */
12140 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12141 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12142 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12143 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12144 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12145 pc_rtx);
12146 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12147 JUMP_LABEL (tmp) = end_2_label;
12149 /* Not in the first two. Move two bytes forward. */
12150 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12151 if (TARGET_64BIT)
12152 emit_insn (gen_adddi3 (out, out, const2_rtx));
12153 else
12154 emit_insn (gen_addsi3 (out, out, const2_rtx));
12156 emit_label (end_2_label);
12160 /* Avoid branch in fixing the byte. */
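/* The low byte of tmpreg has its top bit set iff the zero byte is the
   earlier of the two remaining candidate bytes.  Doubling it moves that
   bit into the carry flag, so OUT -= 3 + CF leaves OUT pointing at the
   terminating zero byte.  */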
12161 tmpreg = gen_lowpart (QImode, tmpreg);
12162 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
12163 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
12164 if (TARGET_64BIT)
12165 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12166 else
12167 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
12169 emit_label (end_0_label);
12172 void
12173 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12174 rtx callarg2 ATTRIBUTE_UNUSED,
12175 rtx pop, int sibcall)
12177 rtx use = NULL, call;
12179 if (pop == const0_rtx)
12180 pop = NULL;
12181 gcc_assert (!TARGET_64BIT || !pop);
12183 #if TARGET_MACHO
12184 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12185 fnaddr = machopic_indirect_call_target (fnaddr);
12186 #else
12187 /* Static functions and indirect calls don't need the pic register. */
12188 if (! TARGET_64BIT && flag_pic
12189 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12190 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12191 use_reg (&use, pic_offset_table_rtx);
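/* The x86-64 ABI passes in %al the number of vector registers used by a
   varargs call; callarg2 holds that count, and a negative value means the
   convention does not apply to this call.  */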
12193 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12195 rtx al = gen_rtx_REG (QImode, 0);
12196 emit_move_insn (al, callarg2);
12197 use_reg (&use, al);
12199 #endif /* TARGET_MACHO */
12201 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12203 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12204 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12206 if (sibcall && TARGET_64BIT
12207 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12209 rtx addr;
12210 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12211 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12212 emit_move_insn (fnaddr, addr);
12213 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12216 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12217 if (retval)
12218 call = gen_rtx_SET (VOIDmode, retval, call);
12219 if (pop)
12221 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12222 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12223 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
12226 call = emit_call_insn (call);
12227 if (use)
12228 CALL_INSN_FUNCTION_USAGE (call) = use;
12232 /* Clear stack slot assignments remembered from previous functions.
12233 This is called from INIT_EXPANDERS once before RTL is emitted for each
12234 function. */
12236 static struct machine_function *
12237 ix86_init_machine_status (void)
12239 struct machine_function *f;
12241 f = ggc_alloc_cleared (sizeof (struct machine_function));
12242 f->use_fast_prologue_epilogue_nregs = -1;
12244 return f;
12247 /* Return a MEM corresponding to a stack slot with mode MODE.
12248 Allocate a new slot if necessary.
12250 The RTL for a function can have several slots available: N is
12251 which slot to use. */
12254 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12256 struct stack_local_entry *s;
12258 gcc_assert (n < MAX_386_STACK_LOCALS);
12260 for (s = ix86_stack_locals; s; s = s->next)
12261 if (s->mode == mode && s->n == n)
12262 return s->rtl;
12264 s = (struct stack_local_entry *)
12265 ggc_alloc (sizeof (struct stack_local_entry));
12266 s->n = n;
12267 s->mode = mode;
12268 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12270 s->next = ix86_stack_locals;
12271 ix86_stack_locals = s;
12272 return s->rtl;
12275 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12277 static GTY(()) rtx ix86_tls_symbol;
12279 ix86_tls_get_addr (void)
12282 if (!ix86_tls_symbol)
12284 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12285 (TARGET_GNU_TLS && !TARGET_64BIT)
12286 ? "___tls_get_addr"
12287 : "__tls_get_addr");
12290 return ix86_tls_symbol;
12293 /* Calculate the length of the memory address in the instruction
12294 encoding. Does not include the one-byte modrm, opcode, or prefix. */
12297 memory_address_length (rtx addr)
12299 struct ix86_address parts;
12300 rtx base, index, disp;
12301 int len;
12302 int ok;
12304 if (GET_CODE (addr) == PRE_DEC
12305 || GET_CODE (addr) == POST_INC
12306 || GET_CODE (addr) == PRE_MODIFY
12307 || GET_CODE (addr) == POST_MODIFY)
12308 return 0;
12310 ok = ix86_decompose_address (addr, &parts);
12311 gcc_assert (ok);
12313 if (parts.base && GET_CODE (parts.base) == SUBREG)
12314 parts.base = SUBREG_REG (parts.base);
12315 if (parts.index && GET_CODE (parts.index) == SUBREG)
12316 parts.index = SUBREG_REG (parts.index);
12318 base = parts.base;
12319 index = parts.index;
12320 disp = parts.disp;
12321 len = 0;
12323 /* Rule of thumb:
12324 - esp as the base always wants an index,
12325 - ebp as the base always wants a displacement. */
12327 /* Register Indirect. */
12328 if (base && !index && !disp)
12330 /* esp (for its index) and ebp (for its displacement) need
12331 the two-byte modrm form. */
12332 if (addr == stack_pointer_rtx
12333 || addr == arg_pointer_rtx
12334 || addr == frame_pointer_rtx
12335 || addr == hard_frame_pointer_rtx)
12336 len = 1;
12339 /* Direct Addressing. */
12340 else if (disp && !base && !index)
12341 len = 4;
12343 else
12345 /* Find the length of the displacement constant. */
12346 if (disp)
12348 if (GET_CODE (disp) == CONST_INT
12349 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
12350 && base)
12351 len = 1;
12352 else
12353 len = 4;
12355 /* ebp always wants a displacement. */
12356 else if (base == hard_frame_pointer_rtx)
12357 len = 1;
12359 /* An index requires the two-byte modrm form.... */
12360 if (index
12361 /* ...like esp, which always wants an index. */
12362 || base == stack_pointer_rtx
12363 || base == arg_pointer_rtx
12364 || base == frame_pointer_rtx)
12365 len += 1;
12368 return len;
12371 /* Compute default value for "length_immediate" attribute. When SHORTFORM
12372 is set, expect that the insn has an 8-bit immediate alternative. */
12374 ix86_attr_length_immediate_default (rtx insn, int shortform)
12376 int len = 0;
12377 int i;
12378 extract_insn_cached (insn);
12379 for (i = recog_data.n_operands - 1; i >= 0; --i)
12380 if (CONSTANT_P (recog_data.operand[i]))
12382 gcc_assert (!len);
12383 if (shortform
12384 && GET_CODE (recog_data.operand[i]) == CONST_INT
12385 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
12386 len = 1;
12387 else
12389 switch (get_attr_mode (insn))
12391 case MODE_QI:
12392 len+=1;
12393 break;
12394 case MODE_HI:
12395 len+=2;
12396 break;
12397 case MODE_SI:
12398 len+=4;
12399 break;
12400 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
12401 case MODE_DI:
12402 len+=4;
12403 break;
12404 default:
12405 fatal_insn ("unknown insn mode", insn);
12409 return len;
12411 /* Compute default value for "length_address" attribute. */
12413 ix86_attr_length_address_default (rtx insn)
12415 int i;
12417 if (get_attr_type (insn) == TYPE_LEA)
12419 rtx set = PATTERN (insn);
12421 if (GET_CODE (set) == PARALLEL)
12422 set = XVECEXP (set, 0, 0);
12424 gcc_assert (GET_CODE (set) == SET);
12426 return memory_address_length (SET_SRC (set));
12429 extract_insn_cached (insn);
12430 for (i = recog_data.n_operands - 1; i >= 0; --i)
12431 if (GET_CODE (recog_data.operand[i]) == MEM)
12433 return memory_address_length (XEXP (recog_data.operand[i], 0));
12434 break;
12436 return 0;
12439 /* Return the maximum number of instructions a cpu can issue. */
12441 static int
12442 ix86_issue_rate (void)
12444 switch (ix86_tune)
12446 case PROCESSOR_PENTIUM:
12447 case PROCESSOR_K6:
12448 return 2;
12450 case PROCESSOR_PENTIUMPRO:
12451 case PROCESSOR_PENTIUM4:
12452 case PROCESSOR_ATHLON:
12453 case PROCESSOR_K8:
12454 case PROCESSOR_NOCONA:
12455 return 3;
12457 default:
12458 return 1;
12462 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
12463 by DEP_INSN and nothing else set by DEP_INSN. */
12465 static int
12466 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12468 rtx set, set2;
12470 /* Simplify the test for uninteresting insns. */
12471 if (insn_type != TYPE_SETCC
12472 && insn_type != TYPE_ICMOV
12473 && insn_type != TYPE_FCMOV
12474 && insn_type != TYPE_IBR)
12475 return 0;
12477 if ((set = single_set (dep_insn)) != 0)
12479 set = SET_DEST (set);
12480 set2 = NULL_RTX;
12482 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
12483 && XVECLEN (PATTERN (dep_insn), 0) == 2
12484 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
12485 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
12487 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
12488 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
12490 else
12491 return 0;
12493 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
12494 return 0;
12496 /* This test is true if the dependent insn reads the flags but
12497 not any other potentially set register. */
12498 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
12499 return 0;
12501 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
12502 return 0;
12504 return 1;
12507 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
12508 address with operands set by DEP_INSN. */
12510 static int
12511 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
12513 rtx addr;
12515 if (insn_type == TYPE_LEA
12516 && TARGET_PENTIUM)
12518 addr = PATTERN (insn);
12520 if (GET_CODE (addr) == PARALLEL)
12521 addr = XVECEXP (addr, 0, 0);
12523 gcc_assert (GET_CODE (addr) == SET);
12525 addr = SET_SRC (addr);
12527 else
12529 int i;
12530 extract_insn_cached (insn);
12531 for (i = recog_data.n_operands - 1; i >= 0; --i)
12532 if (GET_CODE (recog_data.operand[i]) == MEM)
12534 addr = XEXP (recog_data.operand[i], 0);
12535 goto found;
12537 return 0;
12538 found:;
12541 return modified_in_p (addr, dep_insn);
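/* Implementation of the TARGET_SCHED_ADJUST_COST hook: adjust the COST of
   the dependency LINK between INSN and DEP_INSN according to the processor
   we are tuning for.  */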
12544 static int
12545 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
12547 enum attr_type insn_type, dep_insn_type;
12548 enum attr_memory memory;
12549 rtx set, set2;
12550 int dep_insn_code_number;
12552 /* Anti and output dependencies have zero cost on all CPUs. */
12553 if (REG_NOTE_KIND (link) != 0)
12554 return 0;
12556 dep_insn_code_number = recog_memoized (dep_insn);
12558 /* If we can't recognize the insns, we can't really do anything. */
12559 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
12560 return cost;
12562 insn_type = get_attr_type (insn);
12563 dep_insn_type = get_attr_type (dep_insn);
12565 switch (ix86_tune)
12567 case PROCESSOR_PENTIUM:
12568 /* Address Generation Interlock adds a cycle of latency. */
12569 if (ix86_agi_dependant (insn, dep_insn, insn_type))
12570 cost += 1;
12572 /* ??? Compares pair with jump/setcc. */
12573 if (ix86_flags_dependant (insn, dep_insn, insn_type))
12574 cost = 0;
12576 /* Floating point stores require the value to be ready one cycle earlier. */
12577 if (insn_type == TYPE_FMOV
12578 && get_attr_memory (insn) == MEMORY_STORE
12579 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12580 cost += 1;
12581 break;
12583 case PROCESSOR_PENTIUMPRO:
12584 memory = get_attr_memory (insn);
12586 /* INT->FP conversion is expensive. */
12587 if (get_attr_fp_int_src (dep_insn))
12588 cost += 5;
12590 /* There is one cycle of extra latency between an FP op and a store. */
12591 if (insn_type == TYPE_FMOV
12592 && (set = single_set (dep_insn)) != NULL_RTX
12593 && (set2 = single_set (insn)) != NULL_RTX
12594 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
12595 && GET_CODE (SET_DEST (set2)) == MEM)
12596 cost += 1;
12598 /* Model the reorder buffer's ability to hide the latency of a load by
12599 executing it in parallel with the previous instruction when that
12600 instruction is not needed to compute the address. */
12601 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12602 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12604 /* Claim that moves take one cycle, as the core can issue one load
12605 at a time and the next load can start a cycle later. */
12606 if (dep_insn_type == TYPE_IMOV
12607 || dep_insn_type == TYPE_FMOV)
12608 cost = 1;
12609 else if (cost > 1)
12610 cost--;
12612 break;
12614 case PROCESSOR_K6:
12615 memory = get_attr_memory (insn);
12617 /* The esp dependency is resolved before the instruction is really
12618 finished. */
12619 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
12620 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
12621 return 1;
12623 /* INT->FP conversion is expensive. */
12624 if (get_attr_fp_int_src (dep_insn))
12625 cost += 5;
12627 /* Model the reorder buffer's ability to hide the latency of a load by
12628 executing it in parallel with the previous instruction when that
12629 instruction is not needed to compute the address. */
12630 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12631 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12633 /* Claim that moves take one cycle, as the core can issue one load
12634 at a time and the next load can start a cycle later. */
12635 if (dep_insn_type == TYPE_IMOV
12636 || dep_insn_type == TYPE_FMOV)
12637 cost = 1;
12638 else if (cost > 2)
12639 cost -= 2;
12640 else
12641 cost = 1;
12643 break;
12645 case PROCESSOR_ATHLON:
12646 case PROCESSOR_K8:
12647 memory = get_attr_memory (insn);
12649 /* Model the reorder buffer's ability to hide the latency of a load by
12650 executing it in parallel with the previous instruction when that
12651 instruction is not needed to compute the address. */
12652 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12653 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12655 enum attr_unit unit = get_attr_unit (insn);
12656 int loadcost = 3;
12658 /* Because of the difference between the length of integer and
12659 floating unit pipeline preparation stages, the memory operands
12660 for floating point are cheaper.
12662 ??? For Athlon the difference is most probably 2. */
12663 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
12664 loadcost = 3;
12665 else
12666 loadcost = TARGET_ATHLON ? 2 : 0;
12668 if (cost >= loadcost)
12669 cost -= loadcost;
12670 else
12671 cost = 0;
12674 default:
12675 break;
12678 return cost;
12681 /* How many alternative schedules to try. This should be as wide as the
12682 scheduling freedom in the DFA, but no wider. Making this value too
12683 large results in extra work for the scheduler. */
12685 static int
12686 ia32_multipass_dfa_lookahead (void)
12688 if (ix86_tune == PROCESSOR_PENTIUM)
12689 return 2;
12691 if (ix86_tune == PROCESSOR_PENTIUMPRO
12692 || ix86_tune == PROCESSOR_K6)
12693 return 1;
12695 else
12696 return 0;
12700 /* Compute the alignment given to a constant that is being placed in memory.
12701 EXP is the constant and ALIGN is the alignment that the object would
12702 ordinarily have.
12703 The value of this function is used instead of that alignment to align
12704 the object. */
12707 ix86_constant_alignment (tree exp, int align)
12709 if (TREE_CODE (exp) == REAL_CST)
12711 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
12712 return 64;
12713 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
12714 return 128;
12716 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
12717 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
12718 return BITS_PER_WORD;
12720 return align;
12723 /* Compute the alignment for a static variable.
12724 TYPE is the data type, and ALIGN is the alignment that
12725 the object would ordinarily have. The value of this function is used
12726 instead of that alignment to align the object. */
12729 ix86_data_alignment (tree type, int align)
12731 if (AGGREGATE_TYPE_P (type)
12732 && TYPE_SIZE (type)
12733 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12734 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
12735 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
12736 return 256;
12738 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
12739 to a 16-byte boundary. */
12740 if (TARGET_64BIT)
12742 if (AGGREGATE_TYPE_P (type)
12743 && TYPE_SIZE (type)
12744 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12745 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
12746 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12747 return 128;
12750 if (TREE_CODE (type) == ARRAY_TYPE)
12752 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12753 return 64;
12754 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12755 return 128;
12757 else if (TREE_CODE (type) == COMPLEX_TYPE)
12760 if (TYPE_MODE (type) == DCmode && align < 64)
12761 return 64;
12762 if (TYPE_MODE (type) == XCmode && align < 128)
12763 return 128;
12765 else if ((TREE_CODE (type) == RECORD_TYPE
12766 || TREE_CODE (type) == UNION_TYPE
12767 || TREE_CODE (type) == QUAL_UNION_TYPE)
12768 && TYPE_FIELDS (type))
12770 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12771 return 64;
12772 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12773 return 128;
12775 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12776 || TREE_CODE (type) == INTEGER_TYPE)
12778 if (TYPE_MODE (type) == DFmode && align < 64)
12779 return 64;
12780 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12781 return 128;
12784 return align;
12787 /* Compute the alignment for a local variable.
12788 TYPE is the data type, and ALIGN is the alignment that
12789 the object would ordinarily have. The value of this macro is used
12790 instead of that alignment to align the object. */
12793 ix86_local_alignment (tree type, int align)
12795 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
12796 to a 16-byte boundary. */
12797 if (TARGET_64BIT)
12799 if (AGGREGATE_TYPE_P (type)
12800 && TYPE_SIZE (type)
12801 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12802 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
12803 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12804 return 128;
12806 if (TREE_CODE (type) == ARRAY_TYPE)
12808 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12809 return 64;
12810 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12811 return 128;
12813 else if (TREE_CODE (type) == COMPLEX_TYPE)
12815 if (TYPE_MODE (type) == DCmode && align < 64)
12816 return 64;
12817 if (TYPE_MODE (type) == XCmode && align < 128)
12818 return 128;
12820 else if ((TREE_CODE (type) == RECORD_TYPE
12821 || TREE_CODE (type) == UNION_TYPE
12822 || TREE_CODE (type) == QUAL_UNION_TYPE)
12823 && TYPE_FIELDS (type))
12825 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12826 return 64;
12827 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12828 return 128;
12830 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12831 || TREE_CODE (type) == INTEGER_TYPE)
12834 if (TYPE_MODE (type) == DFmode && align < 64)
12835 return 64;
12836 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12837 return 128;
12839 return align;
12842 /* Emit RTL insns to initialize the variable parts of a trampoline.
12843 FNADDR is an RTX for the address of the function's pure code.
12844 CXT is an RTX for the static chain value for the function. */
12845 void
12846 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
12848 if (!TARGET_64BIT)
12850 /* Compute offset from the end of the jmp to the target function. */
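/* The 32-bit trampoline emitted below is 10 bytes:
     b9 <cxt:4>    movl  $CXT, %ecx
     e9 <disp:4>   jmp   FNADDR
   which is why DISP is computed relative to TRAMP + 10.  */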
12851 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
12852 plus_constant (tramp, 10),
12853 NULL_RTX, 1, OPTAB_DIRECT);
12854 emit_move_insn (gen_rtx_MEM (QImode, tramp),
12855 gen_int_mode (0xb9, QImode));
12856 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
12857 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
12858 gen_int_mode (0xe9, QImode));
12859 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
12861 else
12863 int offset = 0;
12864 /* Try to load the address using the shorter movl instead of movabs.
12865 We may want to support movq for kernel mode, but the kernel does not use
12866 trampolines at the moment. */
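/* The HImode/SImode/DImode stores below write, in memory byte order, either
     41 bb <imm32>   movl   $FNADDR, %r11d   (zero-extends into %r11)
   or
     49 bb <imm64>   movabs $FNADDR, %r11
   followed by
     49 ba <imm64>   movabs $CXT, %r10
     49 ff e3        jmp    *%r11  */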
12867 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
12869 fnaddr = copy_to_mode_reg (DImode, fnaddr);
12870 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12871 gen_int_mode (0xbb41, HImode));
12872 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
12873 gen_lowpart (SImode, fnaddr));
12874 offset += 6;
12876 else
12878 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12879 gen_int_mode (0xbb49, HImode));
12880 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12881 fnaddr);
12882 offset += 10;
12884 /* Load static chain using movabs to r10. */
12885 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12886 gen_int_mode (0xba49, HImode));
12887 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12888 cxt);
12889 offset += 10;
12890 /* Jump to r11. */
12891 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12892 gen_int_mode (0xff49, HImode));
12893 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
12894 gen_int_mode (0xe3, QImode));
12895 offset += 3;
12896 gcc_assert (offset <= TRAMPOLINE_SIZE);
12899 #ifdef ENABLE_EXECUTE_STACK
12900 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
12901 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
12902 #endif
12905 /* Codes for all the SSE/MMX builtins. */
12906 enum ix86_builtins
12908 IX86_BUILTIN_ADDPS,
12909 IX86_BUILTIN_ADDSS,
12910 IX86_BUILTIN_DIVPS,
12911 IX86_BUILTIN_DIVSS,
12912 IX86_BUILTIN_MULPS,
12913 IX86_BUILTIN_MULSS,
12914 IX86_BUILTIN_SUBPS,
12915 IX86_BUILTIN_SUBSS,
12917 IX86_BUILTIN_CMPEQPS,
12918 IX86_BUILTIN_CMPLTPS,
12919 IX86_BUILTIN_CMPLEPS,
12920 IX86_BUILTIN_CMPGTPS,
12921 IX86_BUILTIN_CMPGEPS,
12922 IX86_BUILTIN_CMPNEQPS,
12923 IX86_BUILTIN_CMPNLTPS,
12924 IX86_BUILTIN_CMPNLEPS,
12925 IX86_BUILTIN_CMPNGTPS,
12926 IX86_BUILTIN_CMPNGEPS,
12927 IX86_BUILTIN_CMPORDPS,
12928 IX86_BUILTIN_CMPUNORDPS,
12929 IX86_BUILTIN_CMPNEPS,
12930 IX86_BUILTIN_CMPEQSS,
12931 IX86_BUILTIN_CMPLTSS,
12932 IX86_BUILTIN_CMPLESS,
12933 IX86_BUILTIN_CMPNEQSS,
12934 IX86_BUILTIN_CMPNLTSS,
12935 IX86_BUILTIN_CMPNLESS,
12936 IX86_BUILTIN_CMPNGTSS,
12937 IX86_BUILTIN_CMPNGESS,
12938 IX86_BUILTIN_CMPORDSS,
12939 IX86_BUILTIN_CMPUNORDSS,
12940 IX86_BUILTIN_CMPNESS,
12942 IX86_BUILTIN_COMIEQSS,
12943 IX86_BUILTIN_COMILTSS,
12944 IX86_BUILTIN_COMILESS,
12945 IX86_BUILTIN_COMIGTSS,
12946 IX86_BUILTIN_COMIGESS,
12947 IX86_BUILTIN_COMINEQSS,
12948 IX86_BUILTIN_UCOMIEQSS,
12949 IX86_BUILTIN_UCOMILTSS,
12950 IX86_BUILTIN_UCOMILESS,
12951 IX86_BUILTIN_UCOMIGTSS,
12952 IX86_BUILTIN_UCOMIGESS,
12953 IX86_BUILTIN_UCOMINEQSS,
12955 IX86_BUILTIN_CVTPI2PS,
12956 IX86_BUILTIN_CVTPS2PI,
12957 IX86_BUILTIN_CVTSI2SS,
12958 IX86_BUILTIN_CVTSI642SS,
12959 IX86_BUILTIN_CVTSS2SI,
12960 IX86_BUILTIN_CVTSS2SI64,
12961 IX86_BUILTIN_CVTTPS2PI,
12962 IX86_BUILTIN_CVTTSS2SI,
12963 IX86_BUILTIN_CVTTSS2SI64,
12965 IX86_BUILTIN_MAXPS,
12966 IX86_BUILTIN_MAXSS,
12967 IX86_BUILTIN_MINPS,
12968 IX86_BUILTIN_MINSS,
12970 IX86_BUILTIN_LOADUPS,
12971 IX86_BUILTIN_STOREUPS,
12972 IX86_BUILTIN_MOVSS,
12974 IX86_BUILTIN_MOVHLPS,
12975 IX86_BUILTIN_MOVLHPS,
12976 IX86_BUILTIN_LOADHPS,
12977 IX86_BUILTIN_LOADLPS,
12978 IX86_BUILTIN_STOREHPS,
12979 IX86_BUILTIN_STORELPS,
12981 IX86_BUILTIN_MASKMOVQ,
12982 IX86_BUILTIN_MOVMSKPS,
12983 IX86_BUILTIN_PMOVMSKB,
12985 IX86_BUILTIN_MOVNTPS,
12986 IX86_BUILTIN_MOVNTQ,
12988 IX86_BUILTIN_LOADDQU,
12989 IX86_BUILTIN_STOREDQU,
12991 IX86_BUILTIN_PACKSSWB,
12992 IX86_BUILTIN_PACKSSDW,
12993 IX86_BUILTIN_PACKUSWB,
12995 IX86_BUILTIN_PADDB,
12996 IX86_BUILTIN_PADDW,
12997 IX86_BUILTIN_PADDD,
12998 IX86_BUILTIN_PADDQ,
12999 IX86_BUILTIN_PADDSB,
13000 IX86_BUILTIN_PADDSW,
13001 IX86_BUILTIN_PADDUSB,
13002 IX86_BUILTIN_PADDUSW,
13003 IX86_BUILTIN_PSUBB,
13004 IX86_BUILTIN_PSUBW,
13005 IX86_BUILTIN_PSUBD,
13006 IX86_BUILTIN_PSUBQ,
13007 IX86_BUILTIN_PSUBSB,
13008 IX86_BUILTIN_PSUBSW,
13009 IX86_BUILTIN_PSUBUSB,
13010 IX86_BUILTIN_PSUBUSW,
13012 IX86_BUILTIN_PAND,
13013 IX86_BUILTIN_PANDN,
13014 IX86_BUILTIN_POR,
13015 IX86_BUILTIN_PXOR,
13017 IX86_BUILTIN_PAVGB,
13018 IX86_BUILTIN_PAVGW,
13020 IX86_BUILTIN_PCMPEQB,
13021 IX86_BUILTIN_PCMPEQW,
13022 IX86_BUILTIN_PCMPEQD,
13023 IX86_BUILTIN_PCMPGTB,
13024 IX86_BUILTIN_PCMPGTW,
13025 IX86_BUILTIN_PCMPGTD,
13027 IX86_BUILTIN_PMADDWD,
13029 IX86_BUILTIN_PMAXSW,
13030 IX86_BUILTIN_PMAXUB,
13031 IX86_BUILTIN_PMINSW,
13032 IX86_BUILTIN_PMINUB,
13034 IX86_BUILTIN_PMULHUW,
13035 IX86_BUILTIN_PMULHW,
13036 IX86_BUILTIN_PMULLW,
13038 IX86_BUILTIN_PSADBW,
13039 IX86_BUILTIN_PSHUFW,
13041 IX86_BUILTIN_PSLLW,
13042 IX86_BUILTIN_PSLLD,
13043 IX86_BUILTIN_PSLLQ,
13044 IX86_BUILTIN_PSRAW,
13045 IX86_BUILTIN_PSRAD,
13046 IX86_BUILTIN_PSRLW,
13047 IX86_BUILTIN_PSRLD,
13048 IX86_BUILTIN_PSRLQ,
13049 IX86_BUILTIN_PSLLWI,
13050 IX86_BUILTIN_PSLLDI,
13051 IX86_BUILTIN_PSLLQI,
13052 IX86_BUILTIN_PSRAWI,
13053 IX86_BUILTIN_PSRADI,
13054 IX86_BUILTIN_PSRLWI,
13055 IX86_BUILTIN_PSRLDI,
13056 IX86_BUILTIN_PSRLQI,
13058 IX86_BUILTIN_PUNPCKHBW,
13059 IX86_BUILTIN_PUNPCKHWD,
13060 IX86_BUILTIN_PUNPCKHDQ,
13061 IX86_BUILTIN_PUNPCKLBW,
13062 IX86_BUILTIN_PUNPCKLWD,
13063 IX86_BUILTIN_PUNPCKLDQ,
13065 IX86_BUILTIN_SHUFPS,
13067 IX86_BUILTIN_RCPPS,
13068 IX86_BUILTIN_RCPSS,
13069 IX86_BUILTIN_RSQRTPS,
13070 IX86_BUILTIN_RSQRTSS,
13071 IX86_BUILTIN_SQRTPS,
13072 IX86_BUILTIN_SQRTSS,
13074 IX86_BUILTIN_UNPCKHPS,
13075 IX86_BUILTIN_UNPCKLPS,
13077 IX86_BUILTIN_ANDPS,
13078 IX86_BUILTIN_ANDNPS,
13079 IX86_BUILTIN_ORPS,
13080 IX86_BUILTIN_XORPS,
13082 IX86_BUILTIN_EMMS,
13083 IX86_BUILTIN_LDMXCSR,
13084 IX86_BUILTIN_STMXCSR,
13085 IX86_BUILTIN_SFENCE,
13087 /* 3DNow! Original */
13088 IX86_BUILTIN_FEMMS,
13089 IX86_BUILTIN_PAVGUSB,
13090 IX86_BUILTIN_PF2ID,
13091 IX86_BUILTIN_PFACC,
13092 IX86_BUILTIN_PFADD,
13093 IX86_BUILTIN_PFCMPEQ,
13094 IX86_BUILTIN_PFCMPGE,
13095 IX86_BUILTIN_PFCMPGT,
13096 IX86_BUILTIN_PFMAX,
13097 IX86_BUILTIN_PFMIN,
13098 IX86_BUILTIN_PFMUL,
13099 IX86_BUILTIN_PFRCP,
13100 IX86_BUILTIN_PFRCPIT1,
13101 IX86_BUILTIN_PFRCPIT2,
13102 IX86_BUILTIN_PFRSQIT1,
13103 IX86_BUILTIN_PFRSQRT,
13104 IX86_BUILTIN_PFSUB,
13105 IX86_BUILTIN_PFSUBR,
13106 IX86_BUILTIN_PI2FD,
13107 IX86_BUILTIN_PMULHRW,
13109 /* 3DNow! Athlon Extensions */
13110 IX86_BUILTIN_PF2IW,
13111 IX86_BUILTIN_PFNACC,
13112 IX86_BUILTIN_PFPNACC,
13113 IX86_BUILTIN_PI2FW,
13114 IX86_BUILTIN_PSWAPDSI,
13115 IX86_BUILTIN_PSWAPDSF,
13117 /* SSE2 */
13118 IX86_BUILTIN_ADDPD,
13119 IX86_BUILTIN_ADDSD,
13120 IX86_BUILTIN_DIVPD,
13121 IX86_BUILTIN_DIVSD,
13122 IX86_BUILTIN_MULPD,
13123 IX86_BUILTIN_MULSD,
13124 IX86_BUILTIN_SUBPD,
13125 IX86_BUILTIN_SUBSD,
13127 IX86_BUILTIN_CMPEQPD,
13128 IX86_BUILTIN_CMPLTPD,
13129 IX86_BUILTIN_CMPLEPD,
13130 IX86_BUILTIN_CMPGTPD,
13131 IX86_BUILTIN_CMPGEPD,
13132 IX86_BUILTIN_CMPNEQPD,
13133 IX86_BUILTIN_CMPNLTPD,
13134 IX86_BUILTIN_CMPNLEPD,
13135 IX86_BUILTIN_CMPNGTPD,
13136 IX86_BUILTIN_CMPNGEPD,
13137 IX86_BUILTIN_CMPORDPD,
13138 IX86_BUILTIN_CMPUNORDPD,
13139 IX86_BUILTIN_CMPNEPD,
13140 IX86_BUILTIN_CMPEQSD,
13141 IX86_BUILTIN_CMPLTSD,
13142 IX86_BUILTIN_CMPLESD,
13143 IX86_BUILTIN_CMPNEQSD,
13144 IX86_BUILTIN_CMPNLTSD,
13145 IX86_BUILTIN_CMPNLESD,
13146 IX86_BUILTIN_CMPORDSD,
13147 IX86_BUILTIN_CMPUNORDSD,
13148 IX86_BUILTIN_CMPNESD,
13150 IX86_BUILTIN_COMIEQSD,
13151 IX86_BUILTIN_COMILTSD,
13152 IX86_BUILTIN_COMILESD,
13153 IX86_BUILTIN_COMIGTSD,
13154 IX86_BUILTIN_COMIGESD,
13155 IX86_BUILTIN_COMINEQSD,
13156 IX86_BUILTIN_UCOMIEQSD,
13157 IX86_BUILTIN_UCOMILTSD,
13158 IX86_BUILTIN_UCOMILESD,
13159 IX86_BUILTIN_UCOMIGTSD,
13160 IX86_BUILTIN_UCOMIGESD,
13161 IX86_BUILTIN_UCOMINEQSD,
13163 IX86_BUILTIN_MAXPD,
13164 IX86_BUILTIN_MAXSD,
13165 IX86_BUILTIN_MINPD,
13166 IX86_BUILTIN_MINSD,
13168 IX86_BUILTIN_ANDPD,
13169 IX86_BUILTIN_ANDNPD,
13170 IX86_BUILTIN_ORPD,
13171 IX86_BUILTIN_XORPD,
13173 IX86_BUILTIN_SQRTPD,
13174 IX86_BUILTIN_SQRTSD,
13176 IX86_BUILTIN_UNPCKHPD,
13177 IX86_BUILTIN_UNPCKLPD,
13179 IX86_BUILTIN_SHUFPD,
13181 IX86_BUILTIN_LOADUPD,
13182 IX86_BUILTIN_STOREUPD,
13183 IX86_BUILTIN_MOVSD,
13185 IX86_BUILTIN_LOADHPD,
13186 IX86_BUILTIN_LOADLPD,
13188 IX86_BUILTIN_CVTDQ2PD,
13189 IX86_BUILTIN_CVTDQ2PS,
13191 IX86_BUILTIN_CVTPD2DQ,
13192 IX86_BUILTIN_CVTPD2PI,
13193 IX86_BUILTIN_CVTPD2PS,
13194 IX86_BUILTIN_CVTTPD2DQ,
13195 IX86_BUILTIN_CVTTPD2PI,
13197 IX86_BUILTIN_CVTPI2PD,
13198 IX86_BUILTIN_CVTSI2SD,
13199 IX86_BUILTIN_CVTSI642SD,
13201 IX86_BUILTIN_CVTSD2SI,
13202 IX86_BUILTIN_CVTSD2SI64,
13203 IX86_BUILTIN_CVTSD2SS,
13204 IX86_BUILTIN_CVTSS2SD,
13205 IX86_BUILTIN_CVTTSD2SI,
13206 IX86_BUILTIN_CVTTSD2SI64,
13208 IX86_BUILTIN_CVTPS2DQ,
13209 IX86_BUILTIN_CVTPS2PD,
13210 IX86_BUILTIN_CVTTPS2DQ,
13212 IX86_BUILTIN_MOVNTI,
13213 IX86_BUILTIN_MOVNTPD,
13214 IX86_BUILTIN_MOVNTDQ,
13216 /* SSE2 MMX */
13217 IX86_BUILTIN_MASKMOVDQU,
13218 IX86_BUILTIN_MOVMSKPD,
13219 IX86_BUILTIN_PMOVMSKB128,
13221 IX86_BUILTIN_PACKSSWB128,
13222 IX86_BUILTIN_PACKSSDW128,
13223 IX86_BUILTIN_PACKUSWB128,
13225 IX86_BUILTIN_PADDB128,
13226 IX86_BUILTIN_PADDW128,
13227 IX86_BUILTIN_PADDD128,
13228 IX86_BUILTIN_PADDQ128,
13229 IX86_BUILTIN_PADDSB128,
13230 IX86_BUILTIN_PADDSW128,
13231 IX86_BUILTIN_PADDUSB128,
13232 IX86_BUILTIN_PADDUSW128,
13233 IX86_BUILTIN_PSUBB128,
13234 IX86_BUILTIN_PSUBW128,
13235 IX86_BUILTIN_PSUBD128,
13236 IX86_BUILTIN_PSUBQ128,
13237 IX86_BUILTIN_PSUBSB128,
13238 IX86_BUILTIN_PSUBSW128,
13239 IX86_BUILTIN_PSUBUSB128,
13240 IX86_BUILTIN_PSUBUSW128,
13242 IX86_BUILTIN_PAND128,
13243 IX86_BUILTIN_PANDN128,
13244 IX86_BUILTIN_POR128,
13245 IX86_BUILTIN_PXOR128,
13247 IX86_BUILTIN_PAVGB128,
13248 IX86_BUILTIN_PAVGW128,
13250 IX86_BUILTIN_PCMPEQB128,
13251 IX86_BUILTIN_PCMPEQW128,
13252 IX86_BUILTIN_PCMPEQD128,
13253 IX86_BUILTIN_PCMPGTB128,
13254 IX86_BUILTIN_PCMPGTW128,
13255 IX86_BUILTIN_PCMPGTD128,
13257 IX86_BUILTIN_PMADDWD128,
13259 IX86_BUILTIN_PMAXSW128,
13260 IX86_BUILTIN_PMAXUB128,
13261 IX86_BUILTIN_PMINSW128,
13262 IX86_BUILTIN_PMINUB128,
13264 IX86_BUILTIN_PMULUDQ,
13265 IX86_BUILTIN_PMULUDQ128,
13266 IX86_BUILTIN_PMULHUW128,
13267 IX86_BUILTIN_PMULHW128,
13268 IX86_BUILTIN_PMULLW128,
13270 IX86_BUILTIN_PSADBW128,
13271 IX86_BUILTIN_PSHUFHW,
13272 IX86_BUILTIN_PSHUFLW,
13273 IX86_BUILTIN_PSHUFD,
13275 IX86_BUILTIN_PSLLW128,
13276 IX86_BUILTIN_PSLLD128,
13277 IX86_BUILTIN_PSLLQ128,
13278 IX86_BUILTIN_PSRAW128,
13279 IX86_BUILTIN_PSRAD128,
13280 IX86_BUILTIN_PSRLW128,
13281 IX86_BUILTIN_PSRLD128,
13282 IX86_BUILTIN_PSRLQ128,
13283 IX86_BUILTIN_PSLLDQI128,
13284 IX86_BUILTIN_PSLLWI128,
13285 IX86_BUILTIN_PSLLDI128,
13286 IX86_BUILTIN_PSLLQI128,
13287 IX86_BUILTIN_PSRAWI128,
13288 IX86_BUILTIN_PSRADI128,
13289 IX86_BUILTIN_PSRLDQI128,
13290 IX86_BUILTIN_PSRLWI128,
13291 IX86_BUILTIN_PSRLDI128,
13292 IX86_BUILTIN_PSRLQI128,
13294 IX86_BUILTIN_PUNPCKHBW128,
13295 IX86_BUILTIN_PUNPCKHWD128,
13296 IX86_BUILTIN_PUNPCKHDQ128,
13297 IX86_BUILTIN_PUNPCKHQDQ128,
13298 IX86_BUILTIN_PUNPCKLBW128,
13299 IX86_BUILTIN_PUNPCKLWD128,
13300 IX86_BUILTIN_PUNPCKLDQ128,
13301 IX86_BUILTIN_PUNPCKLQDQ128,
13303 IX86_BUILTIN_CLFLUSH,
13304 IX86_BUILTIN_MFENCE,
13305 IX86_BUILTIN_LFENCE,
13307 /* Prescott New Instructions. */
13308 IX86_BUILTIN_ADDSUBPS,
13309 IX86_BUILTIN_HADDPS,
13310 IX86_BUILTIN_HSUBPS,
13311 IX86_BUILTIN_MOVSHDUP,
13312 IX86_BUILTIN_MOVSLDUP,
13313 IX86_BUILTIN_ADDSUBPD,
13314 IX86_BUILTIN_HADDPD,
13315 IX86_BUILTIN_HSUBPD,
13316 IX86_BUILTIN_LDDQU,
13318 IX86_BUILTIN_MONITOR,
13319 IX86_BUILTIN_MWAIT,
13321 IX86_BUILTIN_VEC_INIT_V2SI,
13322 IX86_BUILTIN_VEC_INIT_V4HI,
13323 IX86_BUILTIN_VEC_INIT_V8QI,
13324 IX86_BUILTIN_VEC_EXT_V2DF,
13325 IX86_BUILTIN_VEC_EXT_V2DI,
13326 IX86_BUILTIN_VEC_EXT_V4SF,
13327 IX86_BUILTIN_VEC_EXT_V4SI,
13328 IX86_BUILTIN_VEC_EXT_V8HI,
13329 IX86_BUILTIN_VEC_EXT_V2SI,
13330 IX86_BUILTIN_VEC_EXT_V4HI,
13331 IX86_BUILTIN_VEC_SET_V8HI,
13332 IX86_BUILTIN_VEC_SET_V4HI,
13334 IX86_BUILTIN_MAX
13337 #define def_builtin(MASK, NAME, TYPE, CODE) \
13338 do { \
13339 if ((MASK) & target_flags \
13340 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
13341 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
13342 NULL, NULL_TREE); \
13343 } while (0)
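/* A typical use later in this file looks like (illustrative only):
     def_builtin (MASK_SSE, "__builtin_ia32_loadups",
                  v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
   which registers the builtin only when SSE is enabled for the target.  */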
13345 /* Bits for builtin_description.flag. */
13347 /* Set when we don't support the comparison natively, and should
13348 swap the comparison operands in order to support it. */
13349 #define BUILTIN_DESC_SWAP_OPERANDS 1
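/* One entry per builtin: the target_flags MASK that must be enabled, the
   insn pattern ICODE used to expand it, its user-visible NAME, the
   IX86_BUILTIN_* CODE, the rtx COMPARISON code used by compare builtins,
   and the BUILTIN_DESC_* FLAGs above.  */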
13351 struct builtin_description
13353 const unsigned int mask;
13354 const enum insn_code icode;
13355 const char *const name;
13356 const enum ix86_builtins code;
13357 const enum rtx_code comparison;
13358 const unsigned int flag;
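/* Builtins that expand to the scalar comi/ucomi comparison patterns.  */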
13361 static const struct builtin_description bdesc_comi[] =
13363 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
13364 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
13365 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
13366 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
13367 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
13368 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
13369 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
13370 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
13371 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
13372 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
13373 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
13374 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
13375 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
13376 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
13377 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
13378 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
13379 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
13380 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
13381 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
13382 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
13383 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
13384 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
13385 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
13386 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
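/* Builtins that take two vector operands.  */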
13389 static const struct builtin_description bdesc_2arg[] =
13391 /* SSE */
13392 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
13393 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
13394 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
13395 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
13396 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
13397 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
13398 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
13399 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
13401 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
13402 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
13403 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
13404 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
13405 BUILTIN_DESC_SWAP_OPERANDS },
13406 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
13407 BUILTIN_DESC_SWAP_OPERANDS },
13408 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
13409 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
13410 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
13411 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
13412 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
13413 BUILTIN_DESC_SWAP_OPERANDS },
13414 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
13415 BUILTIN_DESC_SWAP_OPERANDS },
13416 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
13417 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
13418 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
13419 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
13420 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
13421 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
13422 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
13423 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
13424 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
13425 BUILTIN_DESC_SWAP_OPERANDS },
13426 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
13427 BUILTIN_DESC_SWAP_OPERANDS },
13428 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
13430 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
13431 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
13432 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
13433 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
13435 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
13436 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
13437 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
13438 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
13440 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
13441 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
13442 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
13443 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
13444 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
13446 /* MMX */
13447 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
13448 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
13449 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
13450 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
13451 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
13452 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
13453 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
13454 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
13456 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
13457 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
13458 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
13459 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
13460 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
13461 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
13462 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
13463 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
13465 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
13466 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
13467 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
13469 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
13470 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
13471 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
13472 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
13474 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
13475 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
13477 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
13478 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
13479 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
13480 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
13481 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
13482 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
13484 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
13485 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
13486 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
13487 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
13489 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
13490 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
13491 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
13492 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
13493 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
13494 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
13496 /* Special. */
13497 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
13498 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
13499 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
13501 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
13502 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
13503 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
13505 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
13506 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
13507 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
13508 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
13509 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
13510 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
13512 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
13513 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
13514 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
13515 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
13516 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
13517 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
13519 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
13520 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
13521 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
13522 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
13524 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
13525 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
13527 /* SSE2 */
13528 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
13529 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
13530 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
13531 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
13532 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
13533 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
13534 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
13535 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
13537 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
13538 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
13539 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
13540 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
13541 BUILTIN_DESC_SWAP_OPERANDS },
13542 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
13543 BUILTIN_DESC_SWAP_OPERANDS },
13544 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
13545 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
13546 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
13547 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
13548 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
13549 BUILTIN_DESC_SWAP_OPERANDS },
13550 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
13551 BUILTIN_DESC_SWAP_OPERANDS },
13552 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
13553 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
13554 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
13555 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
13556 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
13557 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
13558 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
13559 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
13560 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
13562 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
13563 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
13564 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
13565 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
13567 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
13568 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
13569 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
13570 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
13572 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
13573 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
13574 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
13576 /* SSE2 MMX */
13577 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
13578 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
13579 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
13580 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
13581 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
13582 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
13583 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
13584 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
13586 { MASK_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
13587 { MASK_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
13588 { MASK_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
13589 { MASK_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
13590 { MASK_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
13591 { MASK_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
13592 { MASK_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
13593 { MASK_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
13595 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
13596 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
13598 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
13599 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
13600 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
13601 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
13603 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
13604 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
13606 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
13607 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
13608 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
13609 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
13610 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
13611 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
13613 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
13614 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
13615 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
13616 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
13618 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
13619 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
13620 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
13621 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
13622 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
13623 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
13624 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
13625 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
13627 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
13628 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
13629 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
13631 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
13632 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
13634 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
13635 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
13637 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
13638 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
13639 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
13641 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
13642 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
13643 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
13645 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
13646 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
13648 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
13650 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
13651 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
13652 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
13653 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
13655 /* SSE3 */
13656 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
13657 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
13658 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
13659 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
13660 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
13661 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
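/* Each row above pairs a CPU-feature mask with a named insn pattern, the
   user-visible builtin name (or 0 for builtins registered by hand below),
   the IX86_BUILTIN_* code, an rtx comparison code and a flag word.  The
   sketch below shows the record shape these initializers assume; the
   authoritative definition of struct builtin_description appears earlier in
   this file, so treat these field names as illustrative only.  */
#if 0
struct builtin_description_sketch
{
  unsigned int mask;		/* MASK_MMX, MASK_SSE, MASK_SSE2, ...  */
  enum insn_code icode;		/* CODE_FOR_* pattern used for expansion  */
  const char *name;		/* "__builtin_ia32_*", or 0  */
  enum ix86_builtins code;	/* IX86_BUILTIN_* enumerator  */
  enum rtx_code comparison;	/* EQ, LT, UNGE, ... for the compare rows  */
  unsigned int flag;		/* e.g. BUILTIN_DESC_SWAP_OPERANDS  */
};
#endif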
13664 static const struct builtin_description bdesc_1arg[] =
13666 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
13667 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
13669 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
13670 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
13671 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
13673 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
13674 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
13675 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
13676 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
13677 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
13678 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
13680 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
13681 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
13683 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
13685 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
13686 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
13688 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
13689 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
13690 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
13691 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
13692 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
13694 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
13696 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
13697 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
13698 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
13699 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
13701 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
13702 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
13703 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
13705 /* SSE3 */
13706 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
13707 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
13710 static void
13711 ix86_init_builtins (void)
13713 if (TARGET_MMX)
13714 ix86_init_mmx_sse_builtins ();
13717 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
13718 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
13719 builtins. */
13720 static void
13721 ix86_init_mmx_sse_builtins (void)
13723 const struct builtin_description * d;
13724 size_t i;
13726 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
13727 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13728 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
13729 tree V2DI_type_node
13730 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
13731 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
13732 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
13733 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
13734 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13735 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13736 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
13738 tree pchar_type_node = build_pointer_type (char_type_node);
13739 tree pcchar_type_node = build_pointer_type (
13740 build_type_variant (char_type_node, 1, 0));
13741 tree pfloat_type_node = build_pointer_type (float_type_node);
13742 tree pcfloat_type_node = build_pointer_type (
13743 build_type_variant (float_type_node, 1, 0));
13744 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
13745 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
13746 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
13748 /* Comparisons. */
13749 tree int_ftype_v4sf_v4sf
13750 = build_function_type_list (integer_type_node,
13751 V4SF_type_node, V4SF_type_node, NULL_TREE);
13752 tree v4si_ftype_v4sf_v4sf
13753 = build_function_type_list (V4SI_type_node,
13754 V4SF_type_node, V4SF_type_node, NULL_TREE);
13755 /* MMX/SSE/integer conversions. */
13756 tree int_ftype_v4sf
13757 = build_function_type_list (integer_type_node,
13758 V4SF_type_node, NULL_TREE);
13759 tree int64_ftype_v4sf
13760 = build_function_type_list (long_long_integer_type_node,
13761 V4SF_type_node, NULL_TREE);
13762 tree int_ftype_v8qi
13763 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
13764 tree v4sf_ftype_v4sf_int
13765 = build_function_type_list (V4SF_type_node,
13766 V4SF_type_node, integer_type_node, NULL_TREE);
13767 tree v4sf_ftype_v4sf_int64
13768 = build_function_type_list (V4SF_type_node,
13769 V4SF_type_node, long_long_integer_type_node,
13770 NULL_TREE);
13771 tree v4sf_ftype_v4sf_v2si
13772 = build_function_type_list (V4SF_type_node,
13773 V4SF_type_node, V2SI_type_node, NULL_TREE);
13775 /* Miscellaneous. */
13776 tree v8qi_ftype_v4hi_v4hi
13777 = build_function_type_list (V8QI_type_node,
13778 V4HI_type_node, V4HI_type_node, NULL_TREE);
13779 tree v4hi_ftype_v2si_v2si
13780 = build_function_type_list (V4HI_type_node,
13781 V2SI_type_node, V2SI_type_node, NULL_TREE);
13782 tree v4sf_ftype_v4sf_v4sf_int
13783 = build_function_type_list (V4SF_type_node,
13784 V4SF_type_node, V4SF_type_node,
13785 integer_type_node, NULL_TREE);
13786 tree v2si_ftype_v4hi_v4hi
13787 = build_function_type_list (V2SI_type_node,
13788 V4HI_type_node, V4HI_type_node, NULL_TREE);
13789 tree v4hi_ftype_v4hi_int
13790 = build_function_type_list (V4HI_type_node,
13791 V4HI_type_node, integer_type_node, NULL_TREE);
13792 tree v4hi_ftype_v4hi_di
13793 = build_function_type_list (V4HI_type_node,
13794 V4HI_type_node, long_long_unsigned_type_node,
13795 NULL_TREE);
13796 tree v2si_ftype_v2si_di
13797 = build_function_type_list (V2SI_type_node,
13798 V2SI_type_node, long_long_unsigned_type_node,
13799 NULL_TREE);
13800 tree void_ftype_void
13801 = build_function_type (void_type_node, void_list_node);
13802 tree void_ftype_unsigned
13803 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
13804 tree void_ftype_unsigned_unsigned
13805 = build_function_type_list (void_type_node, unsigned_type_node,
13806 unsigned_type_node, NULL_TREE);
13807 tree void_ftype_pcvoid_unsigned_unsigned
13808 = build_function_type_list (void_type_node, const_ptr_type_node,
13809 unsigned_type_node, unsigned_type_node,
13810 NULL_TREE);
13811 tree unsigned_ftype_void
13812 = build_function_type (unsigned_type_node, void_list_node);
13813 tree v2si_ftype_v4sf
13814 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
13815 /* Loads/stores. */
13816 tree void_ftype_v8qi_v8qi_pchar
13817 = build_function_type_list (void_type_node,
13818 V8QI_type_node, V8QI_type_node,
13819 pchar_type_node, NULL_TREE);
13820 tree v4sf_ftype_pcfloat
13821 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
13822 /* @@@ the type is bogus */
13823 tree v4sf_ftype_v4sf_pv2si
13824 = build_function_type_list (V4SF_type_node,
13825 V4SF_type_node, pv2si_type_node, NULL_TREE);
13826 tree void_ftype_pv2si_v4sf
13827 = build_function_type_list (void_type_node,
13828 pv2si_type_node, V4SF_type_node, NULL_TREE);
13829 tree void_ftype_pfloat_v4sf
13830 = build_function_type_list (void_type_node,
13831 pfloat_type_node, V4SF_type_node, NULL_TREE);
13832 tree void_ftype_pdi_di
13833 = build_function_type_list (void_type_node,
13834 pdi_type_node, long_long_unsigned_type_node,
13835 NULL_TREE);
13836 tree void_ftype_pv2di_v2di
13837 = build_function_type_list (void_type_node,
13838 pv2di_type_node, V2DI_type_node, NULL_TREE);
13839 /* Normal vector unops. */
13840 tree v4sf_ftype_v4sf
13841 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
13843 /* Normal vector binops. */
13844 tree v4sf_ftype_v4sf_v4sf
13845 = build_function_type_list (V4SF_type_node,
13846 V4SF_type_node, V4SF_type_node, NULL_TREE);
13847 tree v8qi_ftype_v8qi_v8qi
13848 = build_function_type_list (V8QI_type_node,
13849 V8QI_type_node, V8QI_type_node, NULL_TREE);
13850 tree v4hi_ftype_v4hi_v4hi
13851 = build_function_type_list (V4HI_type_node,
13852 V4HI_type_node, V4HI_type_node, NULL_TREE);
13853 tree v2si_ftype_v2si_v2si
13854 = build_function_type_list (V2SI_type_node,
13855 V2SI_type_node, V2SI_type_node, NULL_TREE);
13856 tree di_ftype_di_di
13857 = build_function_type_list (long_long_unsigned_type_node,
13858 long_long_unsigned_type_node,
13859 long_long_unsigned_type_node, NULL_TREE);
13861 tree v2si_ftype_v2sf
13862 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
13863 tree v2sf_ftype_v2si
13864 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
13865 tree v2si_ftype_v2si
13866 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
13867 tree v2sf_ftype_v2sf
13868 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
13869 tree v2sf_ftype_v2sf_v2sf
13870 = build_function_type_list (V2SF_type_node,
13871 V2SF_type_node, V2SF_type_node, NULL_TREE);
13872 tree v2si_ftype_v2sf_v2sf
13873 = build_function_type_list (V2SI_type_node,
13874 V2SF_type_node, V2SF_type_node, NULL_TREE);
13875 tree pint_type_node = build_pointer_type (integer_type_node);
13876 tree pdouble_type_node = build_pointer_type (double_type_node);
13877 tree pcdouble_type_node = build_pointer_type (
13878 build_type_variant (double_type_node, 1, 0));
13879 tree int_ftype_v2df_v2df
13880 = build_function_type_list (integer_type_node,
13881 V2DF_type_node, V2DF_type_node, NULL_TREE);
13883 tree ti_ftype_ti_ti
13884 = build_function_type_list (intTI_type_node,
13885 intTI_type_node, intTI_type_node, NULL_TREE);
13886 tree void_ftype_pcvoid
13887 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
13888 tree v4sf_ftype_v4si
13889 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
13890 tree v4si_ftype_v4sf
13891 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
13892 tree v2df_ftype_v4si
13893 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
13894 tree v4si_ftype_v2df
13895 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
13896 tree v2si_ftype_v2df
13897 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
13898 tree v4sf_ftype_v2df
13899 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
13900 tree v2df_ftype_v2si
13901 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
13902 tree v2df_ftype_v4sf
13903 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
13904 tree int_ftype_v2df
13905 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
13906 tree int64_ftype_v2df
13907 = build_function_type_list (long_long_integer_type_node,
13908 V2DF_type_node, NULL_TREE);
13909 tree v2df_ftype_v2df_int
13910 = build_function_type_list (V2DF_type_node,
13911 V2DF_type_node, integer_type_node, NULL_TREE);
13912 tree v2df_ftype_v2df_int64
13913 = build_function_type_list (V2DF_type_node,
13914 V2DF_type_node, long_long_integer_type_node,
13915 NULL_TREE);
13916 tree v4sf_ftype_v4sf_v2df
13917 = build_function_type_list (V4SF_type_node,
13918 V4SF_type_node, V2DF_type_node, NULL_TREE);
13919 tree v2df_ftype_v2df_v4sf
13920 = build_function_type_list (V2DF_type_node,
13921 V2DF_type_node, V4SF_type_node, NULL_TREE);
13922 tree v2df_ftype_v2df_v2df_int
13923 = build_function_type_list (V2DF_type_node,
13924 V2DF_type_node, V2DF_type_node,
13925 integer_type_node,
13926 NULL_TREE);
13927 tree v2df_ftype_v2df_pcdouble
13928 = build_function_type_list (V2DF_type_node,
13929 V2DF_type_node, pcdouble_type_node, NULL_TREE);
13930 tree void_ftype_pdouble_v2df
13931 = build_function_type_list (void_type_node,
13932 pdouble_type_node, V2DF_type_node, NULL_TREE);
13933 tree void_ftype_pint_int
13934 = build_function_type_list (void_type_node,
13935 pint_type_node, integer_type_node, NULL_TREE);
13936 tree void_ftype_v16qi_v16qi_pchar
13937 = build_function_type_list (void_type_node,
13938 V16QI_type_node, V16QI_type_node,
13939 pchar_type_node, NULL_TREE);
13940 tree v2df_ftype_pcdouble
13941 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
13942 tree v2df_ftype_v2df_v2df
13943 = build_function_type_list (V2DF_type_node,
13944 V2DF_type_node, V2DF_type_node, NULL_TREE);
13945 tree v16qi_ftype_v16qi_v16qi
13946 = build_function_type_list (V16QI_type_node,
13947 V16QI_type_node, V16QI_type_node, NULL_TREE);
13948 tree v8hi_ftype_v8hi_v8hi
13949 = build_function_type_list (V8HI_type_node,
13950 V8HI_type_node, V8HI_type_node, NULL_TREE);
13951 tree v4si_ftype_v4si_v4si
13952 = build_function_type_list (V4SI_type_node,
13953 V4SI_type_node, V4SI_type_node, NULL_TREE);
13954 tree v2di_ftype_v2di_v2di
13955 = build_function_type_list (V2DI_type_node,
13956 V2DI_type_node, V2DI_type_node, NULL_TREE);
13957 tree v2di_ftype_v2df_v2df
13958 = build_function_type_list (V2DI_type_node,
13959 V2DF_type_node, V2DF_type_node, NULL_TREE);
13960 tree v2df_ftype_v2df
13961 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
13962 tree v2di_ftype_v2di_int
13963 = build_function_type_list (V2DI_type_node,
13964 V2DI_type_node, integer_type_node, NULL_TREE);
13965 tree v4si_ftype_v4si_int
13966 = build_function_type_list (V4SI_type_node,
13967 V4SI_type_node, integer_type_node, NULL_TREE);
13968 tree v8hi_ftype_v8hi_int
13969 = build_function_type_list (V8HI_type_node,
13970 V8HI_type_node, integer_type_node, NULL_TREE);
13971 tree v8hi_ftype_v8hi_v2di
13972 = build_function_type_list (V8HI_type_node,
13973 V8HI_type_node, V2DI_type_node, NULL_TREE);
13974 tree v4si_ftype_v4si_v2di
13975 = build_function_type_list (V4SI_type_node,
13976 V4SI_type_node, V2DI_type_node, NULL_TREE);
13977 tree v4si_ftype_v8hi_v8hi
13978 = build_function_type_list (V4SI_type_node,
13979 V8HI_type_node, V8HI_type_node, NULL_TREE);
13980 tree di_ftype_v8qi_v8qi
13981 = build_function_type_list (long_long_unsigned_type_node,
13982 V8QI_type_node, V8QI_type_node, NULL_TREE);
13983 tree di_ftype_v2si_v2si
13984 = build_function_type_list (long_long_unsigned_type_node,
13985 V2SI_type_node, V2SI_type_node, NULL_TREE);
13986 tree v2di_ftype_v16qi_v16qi
13987 = build_function_type_list (V2DI_type_node,
13988 V16QI_type_node, V16QI_type_node, NULL_TREE);
13989 tree v2di_ftype_v4si_v4si
13990 = build_function_type_list (V2DI_type_node,
13991 V4SI_type_node, V4SI_type_node, NULL_TREE);
13992 tree int_ftype_v16qi
13993 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
13994 tree v16qi_ftype_pcchar
13995 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
13996 tree void_ftype_pchar_v16qi
13997 = build_function_type_list (void_type_node,
13998 pchar_type_node, V16QI_type_node, NULL_TREE);
14000 tree float80_type;
14001 tree float128_type;
14002 tree ftype;
14004 /* The __float80 type. */
14005 if (TYPE_MODE (long_double_type_node) == XFmode)
14006 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
14007 "__float80");
14008 else
14010 /* long double is not the 80-bit extended type here, so build __float80 as a distinct REAL_TYPE. */
14011 float80_type = make_node (REAL_TYPE);
14012 TYPE_PRECISION (float80_type) = 80;
14013 layout_type (float80_type);
14014 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
14017 float128_type = make_node (REAL_TYPE);
14018 TYPE_PRECISION (float128_type) = 128;
14019 layout_type (float128_type);
14020 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
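/* Once registered, these names can be written directly in user code; a
   purely illustrative sketch (the variable names are not from this file):  */
#if 0
__float80 extended;	/* 80-bit extended-precision type registered above  */
__float128 quad;	/* 128-bit type registered above  */
#endif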
14022 /* Add all builtins that are more or less simple operations on two
14023 operands. */
14024 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14026 /* Use one of the operands; the target can have a different mode for
14027 mask-generating compares. */
14028 enum machine_mode mode;
14029 tree type;
14031 if (d->name == 0)
14032 continue;
14033 mode = insn_data[d->icode].operand[1].mode;
14035 switch (mode)
14037 case V16QImode:
14038 type = v16qi_ftype_v16qi_v16qi;
14039 break;
14040 case V8HImode:
14041 type = v8hi_ftype_v8hi_v8hi;
14042 break;
14043 case V4SImode:
14044 type = v4si_ftype_v4si_v4si;
14045 break;
14046 case V2DImode:
14047 type = v2di_ftype_v2di_v2di;
14048 break;
14049 case V2DFmode:
14050 type = v2df_ftype_v2df_v2df;
14051 break;
14052 case TImode:
14053 type = ti_ftype_ti_ti;
14054 break;
14055 case V4SFmode:
14056 type = v4sf_ftype_v4sf_v4sf;
14057 break;
14058 case V8QImode:
14059 type = v8qi_ftype_v8qi_v8qi;
14060 break;
14061 case V4HImode:
14062 type = v4hi_ftype_v4hi_v4hi;
14063 break;
14064 case V2SImode:
14065 type = v2si_ftype_v2si_v2si;
14066 break;
14067 case DImode:
14068 type = di_ftype_di_di;
14069 break;
14071 default:
14072 gcc_unreachable ();
14075 /* Override for comparisons. */
14076 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14077 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
14078 type = v4si_ftype_v4sf_v4sf;
14080 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
14081 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14082 type = v2di_ftype_v2df_v2df;
14084 def_builtin (d->mask, d->name, type, d->code);
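/* As a concrete instance of the registration loop above (illustrative
   expansion only): the bdesc_2arg row using CODE_FOR_addv2df3 has V2DFmode
   operands, so that iteration amounts to the call sketched below.  */
#if 0
  def_builtin (MASK_SSE2, "__builtin_ia32_addpd",
	       v2df_ftype_v2df_v2df, IX86_BUILTIN_ADDPD);
#endif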
14087 /* Add the remaining MMX insns with somewhat more complicated types. */
14088 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
14089 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
14090 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
14091 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
14093 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
14094 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
14095 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
14097 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
14098 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
14100 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
14101 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
14103 /* comi/ucomi insns. */
14104 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14105 if (d->mask == MASK_SSE2)
14106 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
14107 else
14108 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
14110 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
14111 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
14112 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
14114 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
14115 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
14116 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
14117 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
14118 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
14119 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
14120 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
14121 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
14122 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
14123 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
14124 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
14126 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
14128 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
14129 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
14131 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
14132 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
14133 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
14134 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
14136 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
14137 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
14138 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
14139 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
14141 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
14143 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
14145 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
14146 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
14147 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
14148 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
14149 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
14150 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
14152 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
14154 /* Original 3DNow! */
14155 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
14156 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
14157 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
14158 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
14159 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
14160 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
14161 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
14162 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
14163 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
14164 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
14165 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
14166 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
14167 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
14168 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
14169 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
14170 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
14171 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
14172 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
14173 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
14174 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
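/* The 3DNow! builtins defined above are reached from user code through
   mm3dnow.h-style wrappers; a minimal sketch, assuming a local vector
   typedef (neither the typedef nor the wrapper name comes from this file):  */
#if 0
typedef float sketch_v2sf __attribute__ ((__vector_size__ (8)));

static __inline sketch_v2sf
sketch_pfadd (sketch_v2sf a, sketch_v2sf b)
{
  return __builtin_ia32_pfadd (a, b);	/* packed single-precision add  */
}
#endif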
14176 /* 3DNow! extension as used in the Athlon CPU. */
14177 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
14178 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
14179 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
14180 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
14181 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
14182 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
14184 /* SSE2 */
14185 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
14187 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
14188 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
14190 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
14191 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
14193 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
14194 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
14195 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
14196 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
14197 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
14199 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
14200 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
14201 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
14202 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
14204 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
14205 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
14207 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
14209 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
14210 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
14212 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
14213 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
14214 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
14215 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
14216 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
14218 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
14220 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
14221 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
14222 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
14223 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
14225 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
14226 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
14227 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
14229 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
14230 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
14231 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
14232 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
14234 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
14235 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
14236 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
14238 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
14239 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
14241 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
14242 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
14244 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
14245 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
14246 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
14248 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
14249 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
14250 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
14252 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
14253 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
14255 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
14256 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
14257 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
14258 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
14260 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
14261 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
14262 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
14263 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
14265 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
14266 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
14268 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
14270 /* Prescott New Instructions. */
14271 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
14272 void_ftype_pcvoid_unsigned_unsigned,
14273 IX86_BUILTIN_MONITOR);
14274 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
14275 void_ftype_unsigned_unsigned,
14276 IX86_BUILTIN_MWAIT);
14277 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
14278 v4sf_ftype_v4sf,
14279 IX86_BUILTIN_MOVSHDUP);
14280 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
14281 v4sf_ftype_v4sf,
14282 IX86_BUILTIN_MOVSLDUP);
14283 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
14284 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
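/* The SSE3 monitor/mwait builtins just defined are what pmmintrin.h-style
   wrappers call; a minimal illustrative sketch (the wrapper names below are
   assumptions, not taken from this file):  */
#if 0
static __inline void
sketch_monitor (void const *p, unsigned int ext, unsigned int hints)
{
  __builtin_ia32_monitor (p, ext, hints);
}

static __inline void
sketch_mwait (unsigned int ext, unsigned int hints)
{
  __builtin_ia32_mwait (ext, hints);
}
#endif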
14286 /* Access to the vec_init patterns. */
14287 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
14288 integer_type_node, NULL_TREE);
14289 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
14290 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
14292 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
14293 short_integer_type_node,
14294 short_integer_type_node,
14295 short_integer_type_node, NULL_TREE);
14296 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
14297 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
14299 ftype = build_function_type_list (V8QI_type_node, char_type_node,
14300 char_type_node, char_type_node,
14301 char_type_node, char_type_node,
14302 char_type_node, char_type_node,
14303 char_type_node, NULL_TREE);
14304 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
14305 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
14307 /* Access to the vec_extract patterns. */
14308 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14309 integer_type_node, NULL_TREE);
14310 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
14311 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
14313 ftype = build_function_type_list (long_long_integer_type_node,
14314 V2DI_type_node, integer_type_node,
14315 NULL_TREE);
14316 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
14317 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
14319 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14320 integer_type_node, NULL_TREE);
14321 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
14322 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
14324 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14325 integer_type_node, NULL_TREE);
14326 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
14327 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
14329 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14330 integer_type_node, NULL_TREE);
14331 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
14332 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
14334 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
14335 integer_type_node, NULL_TREE);
14336 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
14337 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
14339 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
14340 integer_type_node, NULL_TREE);
14341 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
14342 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
14344 /* Access to the vec_set patterns. */
14345 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14346 intHI_type_node,
14347 integer_type_node, NULL_TREE);
14348 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
14349 ftype, IX86_BUILTIN_VEC_SET_V8HI);
14351 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
14352 intHI_type_node,
14353 integer_type_node, NULL_TREE);
14354 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
14355 ftype, IX86_BUILTIN_VEC_SET_V4HI);
14358 /* Errors in the source file can cause expand_expr to return const0_rtx
14359 where we expect a vector. To avoid crashing, use one of the vector
14360 clear instructions. */
14361 static rtx
14362 safe_vector_operand (rtx x, enum machine_mode mode)
14364 if (x == const0_rtx)
14365 x = CONST0_RTX (mode);
14366 return x;
14369 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
14371 static rtx
14372 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
14374 rtx pat, xops[3];
14375 tree arg0 = TREE_VALUE (arglist);
14376 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14377 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14378 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14379 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14380 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14381 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
14383 if (VECTOR_MODE_P (mode0))
14384 op0 = safe_vector_operand (op0, mode0);
14385 if (VECTOR_MODE_P (mode1))
14386 op1 = safe_vector_operand (op1, mode1);
14388 if (optimize || !target
14389 || GET_MODE (target) != tmode
14390 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14391 target = gen_reg_rtx (tmode);
14393 if (GET_MODE (op1) == SImode && mode1 == TImode)
14395 rtx x = gen_reg_rtx (V4SImode);
14396 emit_insn (gen_sse2_loadd (x, op1));
14397 op1 = gen_lowpart (TImode, x);
14400 /* The insn must want input operands in the same modes as the
14401 result. */
14402 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
14403 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
14405 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
14406 op0 = copy_to_mode_reg (mode0, op0);
14407 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
14408 op1 = copy_to_mode_reg (mode1, op1);
14410 /* ??? Using ix86_fixup_binary_operands is problematic when
14411 we've got mismatched modes. Fake it. */
14413 xops[0] = target;
14414 xops[1] = op0;
14415 xops[2] = op1;
14417 if (tmode == mode0 && tmode == mode1)
14419 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
14420 op0 = xops[1];
14421 op1 = xops[2];
14423 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
14425 op0 = force_reg (mode0, op0);
14426 op1 = force_reg (mode1, op1);
14427 target = gen_reg_rtx (tmode);
14430 pat = GEN_FCN (icode) (target, op0, op1);
14431 if (! pat)
14432 return 0;
14433 emit_insn (pat);
14434 return target;
14437 /* Subroutine of ix86_expand_builtin to take care of stores. */
14439 static rtx
14440 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
14442 rtx pat;
14443 tree arg0 = TREE_VALUE (arglist);
14444 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14445 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14446 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14447 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
14448 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
14450 if (VECTOR_MODE_P (mode1))
14451 op1 = safe_vector_operand (op1, mode1);
14453 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14454 op1 = copy_to_mode_reg (mode1, op1);
14456 pat = GEN_FCN (icode) (op0, op1);
14457 if (pat)
14458 emit_insn (pat);
14459 return 0;
14462 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
14464 static rtx
14465 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
14466 rtx target, int do_load)
14468 rtx pat;
14469 tree arg0 = TREE_VALUE (arglist);
14470 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14471 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14472 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14474 if (optimize || !target
14475 || GET_MODE (target) != tmode
14476 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14477 target = gen_reg_rtx (tmode);
14478 if (do_load)
14479 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14480 else
14482 if (VECTOR_MODE_P (mode0))
14483 op0 = safe_vector_operand (op0, mode0);
14485 if ((optimize && !register_operand (op0, mode0))
14486 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14487 op0 = copy_to_mode_reg (mode0, op0);
14490 pat = GEN_FCN (icode) (target, op0);
14491 if (! pat)
14492 return 0;
14493 emit_insn (pat);
14494 return target;
14497 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
14498 sqrtss, rsqrtss, rcpss. */
14500 static rtx
14501 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
14503 rtx pat;
14504 tree arg0 = TREE_VALUE (arglist);
14505 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14506 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14507 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14509 if (optimize || !target
14510 || GET_MODE (target) != tmode
14511 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14512 target = gen_reg_rtx (tmode);
14514 if (VECTOR_MODE_P (mode0))
14515 op0 = safe_vector_operand (op0, mode0);
14517 if ((optimize && !register_operand (op0, mode0))
14518 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14519 op0 = copy_to_mode_reg (mode0, op0);
14521 op1 = op0;
14522 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
14523 op1 = copy_to_mode_reg (mode0, op1);
14525 pat = GEN_FCN (icode) (target, op0, op1);
14526 if (! pat)
14527 return 0;
14528 emit_insn (pat);
14529 return target;
14532 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
14534 static rtx
14535 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
14536 rtx target)
14538 rtx pat;
14539 tree arg0 = TREE_VALUE (arglist);
14540 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14541 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14542 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14543 rtx op2;
14544 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
14545 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
14546 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
14547 enum rtx_code comparison = d->comparison;
14549 if (VECTOR_MODE_P (mode0))
14550 op0 = safe_vector_operand (op0, mode0);
14551 if (VECTOR_MODE_P (mode1))
14552 op1 = safe_vector_operand (op1, mode1);
14554 /* Swap operands if we have a comparison that isn't available in
14555 hardware. */
14556 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14558 rtx tmp = gen_reg_rtx (mode1);
14559 emit_move_insn (tmp, op1);
14560 op1 = op0;
14561 op0 = tmp;
14564 if (optimize || !target
14565 || GET_MODE (target) != tmode
14566 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
14567 target = gen_reg_rtx (tmode);
14569 if ((optimize && !register_operand (op0, mode0))
14570 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
14571 op0 = copy_to_mode_reg (mode0, op0);
14572 if ((optimize && !register_operand (op1, mode1))
14573 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
14574 op1 = copy_to_mode_reg (mode1, op1);
14576 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
14577 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
14578 if (! pat)
14579 return 0;
14580 emit_insn (pat);
14581 return target;
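/* Example of the operand swap (fully determined by the tables above):
   "__builtin_ia32_cmpgtpd" is listed in bdesc_2arg with comparison LT and
   BUILTIN_DESC_SWAP_OPERANDS, so __builtin_ia32_cmpgtpd (a, b) is expanded
   here as the cmpltpd pattern applied to (b, a); the other cmpgt/cmpge and
   cmpngt/cmpnge variants work the same way.  */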
14584 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
14586 static rtx
14587 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
14588 rtx target)
14590 rtx pat;
14591 tree arg0 = TREE_VALUE (arglist);
14592 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14593 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14594 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14595 rtx op2;
14596 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
14597 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
14598 enum rtx_code comparison = d->comparison;
14600 if (VECTOR_MODE_P (mode0))
14601 op0 = safe_vector_operand (op0, mode0);
14602 if (VECTOR_MODE_P (mode1))
14603 op1 = safe_vector_operand (op1, mode1);
14605 /* Swap operands if we have a comparison that isn't available in
14606 hardware. */
14607 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
14609 rtx tmp = op1;
14610 op1 = op0;
14611 op0 = tmp;
14614 target = gen_reg_rtx (SImode);
14615 emit_move_insn (target, const0_rtx);
14616 target = gen_rtx_SUBREG (QImode, target, 0);
14618 if ((optimize && !register_operand (op0, mode0))
14619 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14620 op0 = copy_to_mode_reg (mode0, op0);
14621 if ((optimize && !register_operand (op1, mode1))
14622 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14623 op1 = copy_to_mode_reg (mode1, op1);
14625 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
14626 pat = GEN_FCN (d->icode) (op0, op1);
14627 if (! pat)
14628 return 0;
14629 emit_insn (pat);
14630 emit_insn (gen_rtx_SET (VOIDmode,
14631 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
14632 gen_rtx_fmt_ee (comparison, QImode,
14633 SET_DEST (pat),
14634 const0_rtx)));
14636 return SUBREG_REG (target);
14639 /* Return the integer constant in ARG. Constrain it to be in the range
14640 of the subparts of VEC_TYPE; issue an error if not. */
14642 static int
14643 get_element_number (tree vec_type, tree arg)
14645 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14647 if (!host_integerp (arg, 1)
14648 || (elt = tree_low_cst (arg, 1), elt > max))
14650 error ("selector must be an integer constant in the range 0..%i", max);
14651 return 0;
14654 return elt;
14657 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14658 ix86_expand_vector_init. We DO have language-level syntax for this, in
14659 the form of (type){ init-list }. Except that since we can't place emms
14660 instructions from inside the compiler, we can't allow the use of MMX
14661 registers unless the user explicitly asks for it. So we do *not* define
14662 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
14663 we have builtins invoked by mmintrin.h that give us license to emit
14664 these sorts of instructions. */
14666 static rtx
14667 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
14669 enum machine_mode tmode = TYPE_MODE (type);
14670 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
14671 int i, n_elt = GET_MODE_NUNITS (tmode);
14672 rtvec v = rtvec_alloc (n_elt);
14674 gcc_assert (VECTOR_MODE_P (tmode));
14676 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
14678 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14679 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14682 gcc_assert (arglist == NULL);
14684 if (!target || !register_operand (target, tmode))
14685 target = gen_reg_rtx (tmode);
14687 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
14688 return target;
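/* Usage sketch (assumed header mapping, for illustration only): mmintrin.h
   wraps these builtins so that code like

     __m64 four_halves (short a, short b, short c, short d)
     {
       return _mm_set_pi16 (a, b, c, d);
     }

   funnels through __builtin_ia32_vec_init_v4hi into the routine above,
   which collects the four HImode elements into a PARALLEL and hands it to
   ix86_expand_vector_init.  */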
14691 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14692 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
14693 had a language-level syntax for referencing vector elements. */
14695 static rtx
14696 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
14698 enum machine_mode tmode, mode0;
14699 tree arg0, arg1;
14700 int elt;
14701 rtx op0;
14703 arg0 = TREE_VALUE (arglist);
14704 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14706 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14707 elt = get_element_number (TREE_TYPE (arg0), arg1);
14709 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14710 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14711 gcc_assert (VECTOR_MODE_P (mode0));
14713 op0 = force_reg (mode0, op0);
14715 if (optimize || !target || !register_operand (target, tmode))
14716 target = gen_reg_rtx (tmode);
14718 ix86_expand_vector_extract (true, target, op0, elt);
14720 return target;
14723 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
14724 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
14725 a language-level syntax for referencing vector elements. */
14727 static rtx
14728 ix86_expand_vec_set_builtin (tree arglist)
14730 enum machine_mode tmode, mode1;
14731 tree arg0, arg1, arg2;
14732 int elt;
14733 rtx op0, op1;
14735 arg0 = TREE_VALUE (arglist);
14736 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14737 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14739 tmode = TYPE_MODE (TREE_TYPE (arg0));
14740 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14741 gcc_assert (VECTOR_MODE_P (tmode));
14743 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
14744 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
14745 elt = get_element_number (TREE_TYPE (arg0), arg2);
14747 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14748 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14750 op0 = force_reg (tmode, op0);
14751 op1 = force_reg (mode1, op1);
14753 ix86_expand_vector_set (true, op0, op1, elt);
14755 return op0;
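/* Usage sketch (assumed header mapping, for illustration only): an insert
   intrinsic such as

     __m64 replace_half (__m64 v, short x)
     {
       return _mm_insert_pi16 (v, x, 2);
     }

   is expected to arrive here via __builtin_ia32_vec_set_v4hi; ARG2 is the
   element number validated by get_element_number above, and the new value
   is converted to HImode before ix86_expand_vector_set is called.  */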
14758 /* Expand an expression EXP that calls a built-in function,
14759 with result going to TARGET if that's convenient
14760 (and in mode MODE if that's convenient).
14761 SUBTARGET may be used as the target for computing one of EXP's operands.
14762 IGNORE is nonzero if the value is to be ignored. */
14764 static rtx
14765 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
14766 enum machine_mode mode ATTRIBUTE_UNUSED,
14767 int ignore ATTRIBUTE_UNUSED)
14769 const struct builtin_description *d;
14770 size_t i;
14771 enum insn_code icode;
14772 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
14773 tree arglist = TREE_OPERAND (exp, 1);
14774 tree arg0, arg1, arg2;
14775 rtx op0, op1, op2, pat;
14776 enum machine_mode tmode, mode0, mode1, mode2;
14777 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
14779 switch (fcode)
14781 case IX86_BUILTIN_EMMS:
14782 emit_insn (gen_mmx_emms ());
14783 return 0;
14785 case IX86_BUILTIN_SFENCE:
14786 emit_insn (gen_sse_sfence ());
14787 return 0;
14789 case IX86_BUILTIN_MASKMOVQ:
14790 case IX86_BUILTIN_MASKMOVDQU:
14791 icode = (fcode == IX86_BUILTIN_MASKMOVQ
14792 ? CODE_FOR_mmx_maskmovq
14793 : CODE_FOR_sse2_maskmovdqu);
14794 /* Note the arg order is different from the operand order. */
14795 arg1 = TREE_VALUE (arglist);
14796 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
14797 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14798 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14799 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14800 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14801 mode0 = insn_data[icode].operand[0].mode;
14802 mode1 = insn_data[icode].operand[1].mode;
14803 mode2 = insn_data[icode].operand[2].mode;
14805 op0 = force_reg (Pmode, op0);
14806 op0 = gen_rtx_MEM (mode1, op0);
14808 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14809 op0 = copy_to_mode_reg (mode0, op0);
14810 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14811 op1 = copy_to_mode_reg (mode1, op1);
14812 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
14813 op2 = copy_to_mode_reg (mode2, op2);
14814 pat = GEN_FCN (icode) (op0, op1, op2);
14815 if (! pat)
14816 return 0;
14817 emit_insn (pat);
14818 return 0;
14820 case IX86_BUILTIN_SQRTSS:
14821 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
14822 case IX86_BUILTIN_RSQRTSS:
14823 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
14824 case IX86_BUILTIN_RCPSS:
14825 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
14827 case IX86_BUILTIN_LOADUPS:
14828 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
14830 case IX86_BUILTIN_STOREUPS:
14831 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
14833 case IX86_BUILTIN_LOADHPS:
14834 case IX86_BUILTIN_LOADLPS:
14835 case IX86_BUILTIN_LOADHPD:
14836 case IX86_BUILTIN_LOADLPD:
14837 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
14838 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
14839 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
14840 : CODE_FOR_sse2_loadlpd);
14841 arg0 = TREE_VALUE (arglist);
14842 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14843 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14844 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14845 tmode = insn_data[icode].operand[0].mode;
14846 mode0 = insn_data[icode].operand[1].mode;
14847 mode1 = insn_data[icode].operand[2].mode;
14849 op0 = force_reg (mode0, op0);
14850 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
14851 if (optimize || target == 0
14852 || GET_MODE (target) != tmode
14853 || !register_operand (target, tmode))
14854 target = gen_reg_rtx (tmode);
14855 pat = GEN_FCN (icode) (target, op0, op1);
14856 if (! pat)
14857 return 0;
14858 emit_insn (pat);
14859 return target;
14861 case IX86_BUILTIN_STOREHPS:
14862 case IX86_BUILTIN_STORELPS:
14863 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
14864 : CODE_FOR_sse_storelps);
14865 arg0 = TREE_VALUE (arglist);
14866 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14867 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14868 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14869 mode0 = insn_data[icode].operand[0].mode;
14870 mode1 = insn_data[icode].operand[1].mode;
14872 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14873 op1 = force_reg (mode1, op1);
14875 pat = GEN_FCN (icode) (op0, op1);
14876 if (! pat)
14877 return 0;
14878 emit_insn (pat);
14879 return const0_rtx;
14881 case IX86_BUILTIN_MOVNTPS:
14882 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
14883 case IX86_BUILTIN_MOVNTQ:
14884 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
14886 case IX86_BUILTIN_LDMXCSR:
14887 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14888 target = assign_386_stack_local (SImode, SLOT_TEMP);
14889 emit_move_insn (target, op0);
14890 emit_insn (gen_sse_ldmxcsr (target));
14891 return 0;
14893 case IX86_BUILTIN_STMXCSR:
14894 target = assign_386_stack_local (SImode, SLOT_TEMP);
14895 emit_insn (gen_sse_stmxcsr (target));
14896 return copy_to_mode_reg (SImode, target);
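/* Illustration (assumed header mapping): the two MXCSR cases above are what
   code along the lines of

     unsigned int saved = _mm_getcsr ();
     _mm_setcsr (saved | 0x8040);

   expands to via __builtin_ia32_stmxcsr and __builtin_ia32_ldmxcsr; the
   0x8040 mask is purely illustrative.  Both builtins go through a SImode
   stack slot (SLOT_TEMP) because the ldmxcsr and stmxcsr instructions only
   accept memory operands.  */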
14898 case IX86_BUILTIN_SHUFPS:
14899 case IX86_BUILTIN_SHUFPD:
14900 icode = (fcode == IX86_BUILTIN_SHUFPS
14901 ? CODE_FOR_sse_shufps
14902 : CODE_FOR_sse2_shufpd);
14903 arg0 = TREE_VALUE (arglist);
14904 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14905 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14906 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14907 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14908 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14909 tmode = insn_data[icode].operand[0].mode;
14910 mode0 = insn_data[icode].operand[1].mode;
14911 mode1 = insn_data[icode].operand[2].mode;
14912 mode2 = insn_data[icode].operand[3].mode;
14914 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14915 op0 = copy_to_mode_reg (mode0, op0);
14916 if ((optimize && !register_operand (op1, mode1))
14917 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
14918 op1 = copy_to_mode_reg (mode1, op1);
14919 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14921 /* @@@ better error message */
14922 error ("mask must be an immediate");
14923 return gen_reg_rtx (tmode);
14925 if (optimize || target == 0
14926 || GET_MODE (target) != tmode
14927 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14928 target = gen_reg_rtx (tmode);
14929 pat = GEN_FCN (icode) (target, op0, op1, op2);
14930 if (! pat)
14931 return 0;
14932 emit_insn (pat);
14933 return target;
14935 case IX86_BUILTIN_PSHUFW:
14936 case IX86_BUILTIN_PSHUFD:
14937 case IX86_BUILTIN_PSHUFHW:
14938 case IX86_BUILTIN_PSHUFLW:
14939 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
14940 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
14941 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
14942 : CODE_FOR_mmx_pshufw);
14943 arg0 = TREE_VALUE (arglist);
14944 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14945 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14946 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14947 tmode = insn_data[icode].operand[0].mode;
14948 mode1 = insn_data[icode].operand[1].mode;
14949 mode2 = insn_data[icode].operand[2].mode;
14951 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14952 op0 = copy_to_mode_reg (mode1, op0);
14953 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14955 /* @@@ better error message */
14956 error ("mask must be an immediate");
14957 return const0_rtx;
14959 if (target == 0
14960 || GET_MODE (target) != tmode
14961 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14962 target = gen_reg_rtx (tmode);
14963 pat = GEN_FCN (icode) (target, op0, op1);
14964 if (! pat)
14965 return 0;
14966 emit_insn (pat);
14967 return target;
14969 case IX86_BUILTIN_PSLLDQI128:
14970 case IX86_BUILTIN_PSRLDQI128:
14971 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
14972 : CODE_FOR_sse2_lshrti3);
14973 arg0 = TREE_VALUE (arglist);
14974 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14975 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14976 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14977 tmode = insn_data[icode].operand[0].mode;
14978 mode1 = insn_data[icode].operand[1].mode;
14979 mode2 = insn_data[icode].operand[2].mode;
14981 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14983 op0 = copy_to_reg (op0);
14984 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
14986 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14988 error ("shift must be an immediate");
14989 return const0_rtx;
14991 target = gen_reg_rtx (V2DImode);
14992 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
14993 if (! pat)
14994 return 0;
14995 emit_insn (pat);
14996 return target;
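/* Worked example (illustrative): these whole-register shifts take their
   count in bits, which is why emmintrin.h multiplies the byte count by 8,
   roughly

     #define _mm_slli_si128(A, N) \
       ((__m128i) __builtin_ia32_pslldqi128 ((__v2di)(A), (N) * 8))

   so _mm_slli_si128 (x, 3) arrives here with op1 equal to 24.  A
   non-constant count falls into the "shift must be an immediate" error
   above, since there is no variable-count form of these instructions.  */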
14998 case IX86_BUILTIN_FEMMS:
14999 emit_insn (gen_mmx_femms ());
15000 return NULL_RTX;
15002 case IX86_BUILTIN_PAVGUSB:
15003 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15005 case IX86_BUILTIN_PF2ID:
15006 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15008 case IX86_BUILTIN_PFACC:
15009 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15011 case IX86_BUILTIN_PFADD:
15012 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15014 case IX86_BUILTIN_PFCMPEQ:
15015 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15017 case IX86_BUILTIN_PFCMPGE:
15018 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15020 case IX86_BUILTIN_PFCMPGT:
15021 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15023 case IX86_BUILTIN_PFMAX:
15024 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15026 case IX86_BUILTIN_PFMIN:
15027 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15029 case IX86_BUILTIN_PFMUL:
15030 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15032 case IX86_BUILTIN_PFRCP:
15033 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15035 case IX86_BUILTIN_PFRCPIT1:
15036 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15038 case IX86_BUILTIN_PFRCPIT2:
15039 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15041 case IX86_BUILTIN_PFRSQIT1:
15042 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15044 case IX86_BUILTIN_PFRSQRT:
15045 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15047 case IX86_BUILTIN_PFSUB:
15048 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15050 case IX86_BUILTIN_PFSUBR:
15051 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15053 case IX86_BUILTIN_PI2FD:
15054 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15056 case IX86_BUILTIN_PMULHRW:
15057 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15059 case IX86_BUILTIN_PF2IW:
15060 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15062 case IX86_BUILTIN_PFNACC:
15063 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15065 case IX86_BUILTIN_PFPNACC:
15066 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15068 case IX86_BUILTIN_PI2FW:
15069 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15071 case IX86_BUILTIN_PSWAPDSI:
15072 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15074 case IX86_BUILTIN_PSWAPDSF:
15075 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15077 case IX86_BUILTIN_SQRTSD:
15078 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15079 case IX86_BUILTIN_LOADUPD:
15080 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15081 case IX86_BUILTIN_STOREUPD:
15082 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15084 case IX86_BUILTIN_MFENCE:
15085 emit_insn (gen_sse2_mfence ());
15086 return 0;
15087 case IX86_BUILTIN_LFENCE:
15088 emit_insn (gen_sse2_lfence ());
15089 return 0;
15091 case IX86_BUILTIN_CLFLUSH:
15092 arg0 = TREE_VALUE (arglist);
15093 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15094 icode = CODE_FOR_sse2_clflush;
15095 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15096 op0 = copy_to_mode_reg (Pmode, op0);
15098 emit_insn (gen_sse2_clflush (op0));
15099 return 0;
15101 case IX86_BUILTIN_MOVNTPD:
15102 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15103 case IX86_BUILTIN_MOVNTDQ:
15104 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15105 case IX86_BUILTIN_MOVNTI:
15106 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15108 case IX86_BUILTIN_LOADDQU:
15109 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15110 case IX86_BUILTIN_STOREDQU:
15111 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15113 case IX86_BUILTIN_MONITOR:
15114 arg0 = TREE_VALUE (arglist);
15115 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15116 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15117 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15118 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15119 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15120 if (!REG_P (op0))
15121 op0 = copy_to_mode_reg (SImode, op0);
15122 if (!REG_P (op1))
15123 op1 = copy_to_mode_reg (SImode, op1);
15124 if (!REG_P (op2))
15125 op2 = copy_to_mode_reg (SImode, op2);
15126 emit_insn (gen_sse3_monitor (op0, op1, op2));
15127 return 0;
15129 case IX86_BUILTIN_MWAIT:
15130 arg0 = TREE_VALUE (arglist);
15131 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15132 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15133 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15134 if (!REG_P (op0))
15135 op0 = copy_to_mode_reg (SImode, op0);
15136 if (!REG_P (op1))
15137 op1 = copy_to_mode_reg (SImode, op1);
15138 emit_insn (gen_sse3_mwait (op0, op1));
15139 return 0;
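/* Usage sketch (assumed pmmintrin.h mapping): the SSE3 monitor/mwait pair
   above is typically reached from code such as

     _mm_monitor (addr, 0, 0);
     _mm_mwait (0, 0);

   with every operand forced into a register by the copy_to_mode_reg calls
   above before gen_sse3_monitor / gen_sse3_mwait are emitted.  */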
15141 case IX86_BUILTIN_LDDQU:
15142 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15143 target, 1);
15145 case IX86_BUILTIN_VEC_INIT_V2SI:
15146 case IX86_BUILTIN_VEC_INIT_V4HI:
15147 case IX86_BUILTIN_VEC_INIT_V8QI:
15148 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15150 case IX86_BUILTIN_VEC_EXT_V2DF:
15151 case IX86_BUILTIN_VEC_EXT_V2DI:
15152 case IX86_BUILTIN_VEC_EXT_V4SF:
15153 case IX86_BUILTIN_VEC_EXT_V4SI:
15154 case IX86_BUILTIN_VEC_EXT_V8HI:
15155 case IX86_BUILTIN_VEC_EXT_V2SI:
15156 case IX86_BUILTIN_VEC_EXT_V4HI:
15157 return ix86_expand_vec_ext_builtin (arglist, target);
15159 case IX86_BUILTIN_VEC_SET_V8HI:
15160 case IX86_BUILTIN_VEC_SET_V4HI:
15161 return ix86_expand_vec_set_builtin (arglist);
15163 default:
15164 break;
15167 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15168 if (d->code == fcode)
15170 /* Compares are treated specially. */
15171 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15172 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15173 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15174 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15175 return ix86_expand_sse_compare (d, arglist, target);
15177 return ix86_expand_binop_builtin (d->icode, arglist, target);
15180 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15181 if (d->code == fcode)
15182 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15184 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15185 if (d->code == fcode)
15186 return ix86_expand_sse_comi (d, arglist, target);
15188 gcc_unreachable ();
15191 /* Store OPERAND to memory after reload is completed. This means
15192 that we can't easily use assign_stack_local. */
15194 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15196 rtx result;
15198 gcc_assert (reload_completed);
15199 if (TARGET_RED_ZONE)
15201 result = gen_rtx_MEM (mode,
15202 gen_rtx_PLUS (Pmode,
15203 stack_pointer_rtx,
15204 GEN_INT (-RED_ZONE_SIZE)));
15205 emit_move_insn (result, operand);
15207 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15209 switch (mode)
15211 case HImode:
15212 case SImode:
15213 operand = gen_lowpart (DImode, operand);
15214 /* FALLTHRU */
15215 case DImode:
15216 emit_insn (
15217 gen_rtx_SET (VOIDmode,
15218 gen_rtx_MEM (DImode,
15219 gen_rtx_PRE_DEC (DImode,
15220 stack_pointer_rtx)),
15221 operand));
15222 break;
15223 default:
15224 gcc_unreachable ();
15226 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15228 else
15230 switch (mode)
15232 case DImode:
15234 rtx operands[2];
15235 split_di (&operand, 1, operands, operands + 1);
15236 emit_insn (
15237 gen_rtx_SET (VOIDmode,
15238 gen_rtx_MEM (SImode,
15239 gen_rtx_PRE_DEC (Pmode,
15240 stack_pointer_rtx)),
15241 operands[1]));
15242 emit_insn (
15243 gen_rtx_SET (VOIDmode,
15244 gen_rtx_MEM (SImode,
15245 gen_rtx_PRE_DEC (Pmode,
15246 stack_pointer_rtx)),
15247 operands[0]));
15249 break;
15250 case HImode:
15251 /* It is better to store HImodes as SImodes. */
15252 if (!TARGET_PARTIAL_REG_STALL)
15253 operand = gen_lowpart (SImode, operand);
15254 /* FALLTHRU */
15255 case SImode:
15256 emit_insn (
15257 gen_rtx_SET (VOIDmode,
15258 gen_rtx_MEM (GET_MODE (operand),
15259 gen_rtx_PRE_DEC (SImode,
15260 stack_pointer_rtx)),
15261 operand));
15262 break;
15263 default:
15264 gcc_unreachable ();
15266 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15268 return result;
15271 /* Free operand from the memory. */
15272 void
15273 ix86_free_from_memory (enum machine_mode mode)
15275 if (!TARGET_RED_ZONE)
15277 int size;
15279 if (mode == DImode || TARGET_64BIT)
15280 size = 8;
15281 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
15282 size = 2;
15283 else
15284 size = 4;
15285 /* Use LEA to deallocate stack space. In peephole2 it will be converted
15286 to a pop or add instruction if registers are available. */
15287 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15288 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
15289 GEN_INT (size))));
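/* Illustration of the intended pairing (a sketch, not an actual call site):
   a caller that must spill a value temporarily does, in effect,

     mem = ix86_force_to_memory (DImode, operand);
     ... use MEM as a memory operand ...
     ix86_free_from_memory (DImode);

   The size freed here (8, 4 or 2 bytes) mirrors what was pushed above, and
   nothing is released when the red zone was used, because the stack pointer
   was never moved in that case.  */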
15293 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
15294 QImode must go into class Q_REGS.
15295 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
15296 movdf to do mem-to-mem moves through integer regs. */
15297 enum reg_class
15298 ix86_preferred_reload_class (rtx x, enum reg_class class)
15300 /* We're only allowed to return a subclass of CLASS. Many of the
15301 following checks fail for NO_REGS, so eliminate that early. */
15302 if (class == NO_REGS)
15303 return NO_REGS;
15305 /* All classes can load zeros. */
15306 if (x == CONST0_RTX (GET_MODE (x)))
15307 return class;
15309 /* Floating-point constants need more complex checks. */
15310 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
15312 /* General regs can load everything. */
15313 if (reg_class_subset_p (class, GENERAL_REGS))
15314 return class;
15316 /* Floats can load 0 and 1 plus some others. Note that we eliminated
15317 zero above. We only want to wind up preferring 80387 registers if
15318 we plan on doing computation with them. */
15319 if (TARGET_80387
15320 && (TARGET_MIX_SSE_I387
15321 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
15322 && standard_80387_constant_p (x))
15324 /* Limit class to non-sse. */
15325 if (class == FLOAT_SSE_REGS)
15326 return FLOAT_REGS;
15327 if (class == FP_TOP_SSE_REGS)
15328 return FP_TOP_REG;
15329 if (class == FP_SECOND_SSE_REGS)
15330 return FP_SECOND_REG;
15331 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
15332 return class;
15335 return NO_REGS;
15337 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
15338 return NO_REGS;
15339 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
15340 return NO_REGS;
15342 /* Generally when we see PLUS here, it's the function invariant
15343 (plus soft-fp const_int). Which can only be computed into general
15344 regs. */
15345 if (GET_CODE (x) == PLUS)
15346 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
15348 /* QImode constants are easy to load, but non-constant QImode data
15349 must go into Q_REGS. */
15350 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
15352 if (reg_class_subset_p (class, Q_REGS))
15353 return class;
15354 if (reg_class_subset_p (Q_REGS, class))
15355 return Q_REGS;
15356 return NO_REGS;
15359 return class;
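/* Worked examples (illustrative): with the 80387 in use, reloading the
   DFmode constant 1.0 into FLOAT_SSE_REGS is narrowed to FLOAT_REGS above
   because standard_80387_constant_p recognizes it (fld1), whereas an
   arbitrary constant such as 2.5 fails that test and yields NO_REGS,
   forcing it into the constant pool.  Similarly, non-constant QImode data
   requested in GENERAL_REGS is narrowed to Q_REGS so that byte accesses
   stay encodable.  */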
15362 /* If we are copying between general and FP registers, we need a memory
15363 location. The same is true for SSE and MMX registers.
15365 The macro can't work reliably when one of the CLASSES is a class containing
15366 registers from multiple units (SSE, MMX, integer). We avoid this by never
15367 combining those units in a single alternative in the machine description.
15368 Ensure that this constraint holds to avoid unexpected surprises.
15370 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
15371 enforce these sanity checks. */
15374 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
15375 enum machine_mode mode, int strict)
15377 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
15378 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
15379 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
15380 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
15381 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
15382 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
15384 gcc_assert (!strict);
15385 return true;
15388 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
15389 return true;
15391 /* ??? This is a lie. We do have moves between mmx/general, and between
15392 mmx/sse2. But by saying we need secondary memory we discourage the
15393 register allocator from using the mmx registers unless needed. */
15394 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
15395 return true;
15397 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15399 /* SSE1 doesn't have any direct moves from other classes. */
15400 if (!TARGET_SSE2)
15401 return true;
15403 /* If the target says that inter-unit moves are more expensive
15404 than moving through memory, then don't generate them. */
15405 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
15406 return true;
15408 /* Between SSE and general, we have moves no larger than word size. */
15409 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
15410 return true;
15412 /* ??? For the cost of one register reformat penalty, we could use
15413 the same instructions to move SFmode and DFmode data, but the
15414 relevant move patterns don't support those alternatives. */
15415 if (mode == SFmode || mode == DFmode)
15416 return true;
15419 return false;
15422 /* Return true if the registers in CLASS cannot represent the change from
15423 modes FROM to TO. */
15425 bool
15426 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
15427 enum reg_class class)
15429 if (from == to)
15430 return false;
15432 /* x87 registers can't do subreg at all, as all values are reformatted
15433 to extended precision. */
15434 if (MAYBE_FLOAT_CLASS_P (class))
15435 return true;
15437 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
15439 /* Vector registers do not support QI or HImode loads. If we don't
15440 disallow a change to these modes, reload will assume it's ok to
15441 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
15442 the vec_dupv4hi pattern. */
15443 if (GET_MODE_SIZE (from) < 4)
15444 return true;
15446 /* Vector registers do not support subreg with nonzero offsets, which
15447 are otherwise valid for integer registers. Since we can't see
15448 whether we have a nonzero offset from here, prohibit all
15449 nonparadoxical subregs changing size. */
15450 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
15451 return true;
15454 return false;
15457 /* Return the cost of moving data from a register in class CLASS1 to
15458 one in class CLASS2.
15460 It is not required that the cost always equal 2 when FROM is the same as TO;
15461 on some machines it is expensive to move between registers if they are not
15462 general registers. */
15465 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
15466 enum reg_class class2)
15468 /* In case we require secondary memory, compute the cost of the store followed
15469 by a load. In order to avoid bad register allocation choices, we need
15470 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
15472 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
15474 int cost = 1;
15476 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
15477 MEMORY_MOVE_COST (mode, class1, 1));
15478 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
15479 MEMORY_MOVE_COST (mode, class2, 1));
15481 /* In case of copying from a general purpose register we may emit multiple
15482 stores followed by a single load, causing a memory size mismatch stall.
15483 Count this as an arbitrarily high cost of 20.
15484 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
15485 cost += 20;
15487 /* In the case of FP/MMX moves, the registers actually overlap, and we
15488 have to switch modes in order to treat them differently. */
15489 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
15490 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
15491 cost += 20;
15493 return cost;
15496 /* Moves between SSE/MMX and integer unit are expensive. */
15497 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
15498 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15499 return ix86_cost->mmxsse_to_integer;
15500 if (MAYBE_FLOAT_CLASS_P (class1))
15501 return ix86_cost->fp_move;
15502 if (MAYBE_SSE_CLASS_P (class1))
15503 return ix86_cost->sse_move;
15504 if (MAYBE_MMX_CLASS_P (class1))
15505 return ix86_cost->mmx_move;
15506 return 2;
15509 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
15511 bool
15512 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
15514 /* Flags, and only flags, can hold CCmode values. */
15515 if (CC_REGNO_P (regno))
15516 return GET_MODE_CLASS (mode) == MODE_CC;
15517 if (GET_MODE_CLASS (mode) == MODE_CC
15518 || GET_MODE_CLASS (mode) == MODE_RANDOM
15519 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
15520 return 0;
15521 if (FP_REGNO_P (regno))
15522 return VALID_FP_MODE_P (mode);
15523 if (SSE_REGNO_P (regno))
15525 /* We implement the move patterns for all vector modes into and
15526 out of SSE registers, even when no operation instructions
15527 are available. */
15528 return (VALID_SSE_REG_MODE (mode)
15529 || VALID_SSE2_REG_MODE (mode)
15530 || VALID_MMX_REG_MODE (mode)
15531 || VALID_MMX_REG_MODE_3DNOW (mode));
15533 if (MMX_REGNO_P (regno))
15535 /* We implement the move patterns for 3DNOW modes even in MMX mode,
15536 so if the register is available at all, then we can move data of
15537 the given mode into or out of it. */
15538 return (VALID_MMX_REG_MODE (mode)
15539 || VALID_MMX_REG_MODE_3DNOW (mode));
15542 if (mode == QImode)
15544 /* Take care for QImode values - they can be in non-QI regs,
15545 but then they do cause partial register stalls. */
15546 if (regno < 4 || TARGET_64BIT)
15547 return 1;
15548 if (!TARGET_PARTIAL_REG_STALL)
15549 return 1;
15550 return reload_in_progress || reload_completed;
15552 /* We handle both integer and floats in the general purpose registers. */
15553 else if (VALID_INT_MODE_P (mode))
15554 return 1;
15555 else if (VALID_FP_MODE_P (mode))
15556 return 1;
15557 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
15558 on to use that value in smaller contexts, this can easily force a
15559 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
15560 supporting DImode, allow it. */
15561 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
15562 return 1;
15564 return 0;
15567 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
15568 tieable integer mode. */
15570 static bool
15571 ix86_tieable_integer_mode_p (enum machine_mode mode)
15573 switch (mode)
15575 case HImode:
15576 case SImode:
15577 return true;
15579 case QImode:
15580 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
15582 case DImode:
15583 return TARGET_64BIT;
15585 default:
15586 return false;
15590 /* Return true if MODE1 is accessible in a register that can hold MODE2
15591 without copying. That is, all register classes that can hold MODE2
15592 can also hold MODE1. */
15594 bool
15595 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
15597 if (mode1 == mode2)
15598 return true;
15600 if (ix86_tieable_integer_mode_p (mode1)
15601 && ix86_tieable_integer_mode_p (mode2))
15602 return true;
15604 /* MODE2 being XFmode implies fp stack or general regs, which means we
15605 can tie any smaller floating point modes to it. Note that we do not
15606 tie this with TFmode. */
15607 if (mode2 == XFmode)
15608 return mode1 == SFmode || mode1 == DFmode;
15610 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
15611 that we can tie it with SFmode. */
15612 if (mode2 == DFmode)
15613 return mode1 == SFmode;
15615 /* If MODE2 is only appropriate for an SSE register, then tie with
15616 any other mode acceptable to SSE registers. */
15617 if (GET_MODE_SIZE (mode2) >= 8
15618 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
15619 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
15621 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
15622 with any other mode acceptable to MMX registers. */
15623 if (GET_MODE_SIZE (mode2) == 8
15624 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
15625 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
15627 return false;
15630 /* Return the cost of moving data of mode M between a
15631 register and memory. A value of 2 is the default; this cost is
15632 relative to those in `REGISTER_MOVE_COST'.
15634 If moving between registers and memory is more expensive than
15635 between two registers, you should define this macro to express the
15636 relative cost.
15638 Also model the increased cost of moving QImode registers in
15639 non-Q_REGS classes.
15642 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
15644 if (FLOAT_CLASS_P (class))
15646 int index;
15647 switch (mode)
15649 case SFmode:
15650 index = 0;
15651 break;
15652 case DFmode:
15653 index = 1;
15654 break;
15655 case XFmode:
15656 index = 2;
15657 break;
15658 default:
15659 return 100;
15661 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
15663 if (SSE_CLASS_P (class))
15665 int index;
15666 switch (GET_MODE_SIZE (mode))
15668 case 4:
15669 index = 0;
15670 break;
15671 case 8:
15672 index = 1;
15673 break;
15674 case 16:
15675 index = 2;
15676 break;
15677 default:
15678 return 100;
15680 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
15682 if (MMX_CLASS_P (class))
15684 int index;
15685 switch (GET_MODE_SIZE (mode))
15687 case 4:
15688 index = 0;
15689 break;
15690 case 8:
15691 index = 1;
15692 break;
15693 default:
15694 return 100;
15696 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
15698 switch (GET_MODE_SIZE (mode))
15700 case 1:
15701 if (in)
15702 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
15703 : ix86_cost->movzbl_load);
15704 else
15705 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
15706 : ix86_cost->int_store[0] + 4);
15707 break;
15708 case 2:
15709 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
15710 default:
15711 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
15712 if (mode == TFmode)
15713 mode = XFmode;
15714 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
15715 * (((int) GET_MODE_SIZE (mode)
15716 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
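/* Worked example (illustrative): for DImode in an integer class on a
   32-bit target, GET_MODE_SIZE is 8 and UNITS_PER_WORD is 4, so the
   default branch above charges

     (8 + 4 - 1) / 4 = 2

   word-sized moves, i.e. twice int_load[2] or int_store[2].  A TFmode
   value is first treated as XFmode, giving three such moves with the
   usual 12-byte XFmode size.  */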
15720 /* Compute a (partial) cost for rtx X. Return true if the complete
15721 cost has been computed, and false if subexpressions should be
15722 scanned. In either case, *TOTAL contains the cost result. */
15724 static bool
15725 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
15727 enum machine_mode mode = GET_MODE (x);
15729 switch (code)
15731 case CONST_INT:
15732 case CONST:
15733 case LABEL_REF:
15734 case SYMBOL_REF:
15735 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
15736 *total = 3;
15737 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
15738 *total = 2;
15739 else if (flag_pic && SYMBOLIC_CONST (x)
15740 && (!TARGET_64BIT
15741 || (GET_CODE (x) != LABEL_REF
15742 && (GET_CODE (x) != SYMBOL_REF
15743 || !SYMBOL_REF_LOCAL_P (x)))))
15744 *total = 1;
15745 else
15746 *total = 0;
15747 return true;
15749 case CONST_DOUBLE:
15750 if (mode == VOIDmode)
15751 *total = 0;
15752 else
15753 switch (standard_80387_constant_p (x))
15755 case 1: /* 0.0 */
15756 *total = 1;
15757 break;
15758 default: /* Other constants */
15759 *total = 2;
15760 break;
15761 case 0:
15762 case -1:
15763 /* Start with (MEM (SYMBOL_REF)), since that's where
15764 it'll probably end up. Add a penalty for size. */
15765 *total = (COSTS_N_INSNS (1)
15766 + (flag_pic != 0 && !TARGET_64BIT)
15767 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
15768 break;
15770 return true;
15772 case ZERO_EXTEND:
15773 /* The zero extension is often completely free on x86_64, so make
15774 it as cheap as possible. */
15775 if (TARGET_64BIT && mode == DImode
15776 && GET_MODE (XEXP (x, 0)) == SImode)
15777 *total = 1;
15778 else if (TARGET_ZERO_EXTEND_WITH_AND)
15779 *total = COSTS_N_INSNS (ix86_cost->add);
15780 else
15781 *total = COSTS_N_INSNS (ix86_cost->movzx);
15782 return false;
15784 case SIGN_EXTEND:
15785 *total = COSTS_N_INSNS (ix86_cost->movsx);
15786 return false;
15788 case ASHIFT:
15789 if (GET_CODE (XEXP (x, 1)) == CONST_INT
15790 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
15792 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
15793 if (value == 1)
15795 *total = COSTS_N_INSNS (ix86_cost->add);
15796 return false;
15798 if ((value == 2 || value == 3)
15799 && ix86_cost->lea <= ix86_cost->shift_const)
15801 *total = COSTS_N_INSNS (ix86_cost->lea);
15802 return false;
15805 /* FALLTHRU */
15807 case ROTATE:
15808 case ASHIFTRT:
15809 case LSHIFTRT:
15810 case ROTATERT:
15811 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
15813 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15815 if (INTVAL (XEXP (x, 1)) > 32)
15816 *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
15817 else
15818 *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
15820 else
15822 if (GET_CODE (XEXP (x, 1)) == AND)
15823 *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
15824 else
15825 *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
15828 else
15830 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15831 *total = COSTS_N_INSNS (ix86_cost->shift_const);
15832 else
15833 *total = COSTS_N_INSNS (ix86_cost->shift_var);
15835 return false;
15837 case MULT:
15838 if (FLOAT_MODE_P (mode))
15840 *total = COSTS_N_INSNS (ix86_cost->fmul);
15841 return false;
15843 else
15845 rtx op0 = XEXP (x, 0);
15846 rtx op1 = XEXP (x, 1);
15847 int nbits;
15848 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15850 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
15851 for (nbits = 0; value != 0; value &= value - 1)
15852 nbits++;
15854 else
15855 /* This is arbitrary. */
15856 nbits = 7;
15858 /* Compute costs correctly for widening multiplication. */
15859 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
15860 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
15861 == GET_MODE_SIZE (mode))
15863 int is_mulwiden = 0;
15864 enum machine_mode inner_mode = GET_MODE (op0);
15866 if (GET_CODE (op0) == GET_CODE (op1))
15867 is_mulwiden = 1, op1 = XEXP (op1, 0);
15868 else if (GET_CODE (op1) == CONST_INT)
15870 if (GET_CODE (op0) == SIGN_EXTEND)
15871 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
15872 == INTVAL (op1);
15873 else
15874 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
15877 if (is_mulwiden)
15878 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
15881 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
15882 + nbits * ix86_cost->mult_bit)
15883 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
15885 return true;
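/* Worked example (illustrative): the loop above clears the lowest set bit
   on every iteration (value &= value - 1), so a constant multiplier of
   0x28 (binary 101000) goes

     0x28 -> 0x20 -> 0x00

   and NBITS ends up as 2; the multiply is then costed as mult_init plus
   2 * mult_bit.  A non-constant multiplier skips the loop and uses the
   arbitrary NBITS of 7.  */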
15888 case DIV:
15889 case UDIV:
15890 case MOD:
15891 case UMOD:
15892 if (FLOAT_MODE_P (mode))
15893 *total = COSTS_N_INSNS (ix86_cost->fdiv);
15894 else
15895 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
15896 return false;
15898 case PLUS:
15899 if (FLOAT_MODE_P (mode))
15900 *total = COSTS_N_INSNS (ix86_cost->fadd);
15901 else if (GET_MODE_CLASS (mode) == MODE_INT
15902 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
15904 if (GET_CODE (XEXP (x, 0)) == PLUS
15905 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
15906 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
15907 && CONSTANT_P (XEXP (x, 1)))
15909 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
15910 if (val == 2 || val == 4 || val == 8)
15912 *total = COSTS_N_INSNS (ix86_cost->lea);
15913 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15914 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
15915 outer_code);
15916 *total += rtx_cost (XEXP (x, 1), outer_code);
15917 return true;
15920 else if (GET_CODE (XEXP (x, 0)) == MULT
15921 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
15923 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
15924 if (val == 2 || val == 4 || val == 8)
15926 *total = COSTS_N_INSNS (ix86_cost->lea);
15927 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15928 *total += rtx_cost (XEXP (x, 1), outer_code);
15929 return true;
15932 else if (GET_CODE (XEXP (x, 0)) == PLUS)
15934 *total = COSTS_N_INSNS (ix86_cost->lea);
15935 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15936 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15937 *total += rtx_cost (XEXP (x, 1), outer_code);
15938 return true;
15941 /* FALLTHRU */
15943 case MINUS:
15944 if (FLOAT_MODE_P (mode))
15946 *total = COSTS_N_INSNS (ix86_cost->fadd);
15947 return false;
15949 /* FALLTHRU */
15951 case AND:
15952 case IOR:
15953 case XOR:
15954 if (!TARGET_64BIT && mode == DImode)
15956 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
15957 + (rtx_cost (XEXP (x, 0), outer_code)
15958 << (GET_MODE (XEXP (x, 0)) != DImode))
15959 + (rtx_cost (XEXP (x, 1), outer_code)
15960 << (GET_MODE (XEXP (x, 1)) != DImode)));
15961 return true;
15963 /* FALLTHRU */
15965 case NEG:
15966 if (FLOAT_MODE_P (mode))
15968 *total = COSTS_N_INSNS (ix86_cost->fchs);
15969 return false;
15971 /* FALLTHRU */
15973 case NOT:
15974 if (!TARGET_64BIT && mode == DImode)
15975 *total = COSTS_N_INSNS (ix86_cost->add * 2);
15976 else
15977 *total = COSTS_N_INSNS (ix86_cost->add);
15978 return false;
15980 case COMPARE:
15981 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
15982 && XEXP (XEXP (x, 0), 1) == const1_rtx
15983 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
15984 && XEXP (x, 1) == const0_rtx)
15986 /* This kind of construct is implemented using test[bwl].
15987 Treat it as if we had an AND. */
15988 *total = (COSTS_N_INSNS (ix86_cost->add)
15989 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
15990 + rtx_cost (const1_rtx, outer_code));
15991 return true;
15993 return false;
15995 case FLOAT_EXTEND:
15996 if (!TARGET_SSE_MATH
15997 || mode == XFmode
15998 || (mode == DFmode && !TARGET_SSE2))
15999 *total = 0;
16000 return false;
16002 case ABS:
16003 if (FLOAT_MODE_P (mode))
16004 *total = COSTS_N_INSNS (ix86_cost->fabs);
16005 return false;
16007 case SQRT:
16008 if (FLOAT_MODE_P (mode))
16009 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
16010 return false;
16012 case UNSPEC:
16013 if (XINT (x, 1) == UNSPEC_TP)
16014 *total = 0;
16015 return false;
16017 default:
16018 return false;
16022 #if TARGET_MACHO
16024 static int current_machopic_label_num;
16026 /* Given a symbol name and its associated stub, write out the
16027 definition of the stub. */
16029 void
16030 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16032 unsigned int length;
16033 char *binder_name, *symbol_name, lazy_ptr_name[32];
16034 int label = ++current_machopic_label_num;
16036 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16037 symb = (*targetm.strip_name_encoding) (symb);
16039 length = strlen (stub);
16040 binder_name = alloca (length + 32);
16041 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16043 length = strlen (symb);
16044 symbol_name = alloca (length + 32);
16045 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16047 sprintf (lazy_ptr_name, "L%d$lz", label);
16049 if (MACHOPIC_PURE)
16050 machopic_picsymbol_stub_section ();
16051 else
16052 machopic_symbol_stub_section ();
16054 fprintf (file, "%s:\n", stub);
16055 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16057 if (MACHOPIC_PURE)
16059 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16060 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16061 fprintf (file, "\tjmp %%edx\n");
16063 else
16064 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16066 fprintf (file, "%s:\n", binder_name);
16068 if (MACHOPIC_PURE)
16070 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16071 fprintf (file, "\tpushl %%eax\n");
16073 else
16074 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
16076 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16078 machopic_lazy_symbol_ptr_section ();
16079 fprintf (file, "%s:\n", lazy_ptr_name);
16080 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16081 fprintf (file, "\t.long %s\n", binder_name);
16083 #endif /* TARGET_MACHO */
16085 /* Order the registers for the register allocator. */
16087 void
16088 x86_order_regs_for_local_alloc (void)
16090 int pos = 0;
16091 int i;
16093 /* First allocate the local general purpose registers. */
16094 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16095 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16096 reg_alloc_order [pos++] = i;
16098 /* Global general purpose registers. */
16099 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16100 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16101 reg_alloc_order [pos++] = i;
16103 /* x87 registers come first in case we are doing FP math
16104 using them. */
16105 if (!TARGET_SSE_MATH)
16106 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16107 reg_alloc_order [pos++] = i;
16109 /* SSE registers. */
16110 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16111 reg_alloc_order [pos++] = i;
16112 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16113 reg_alloc_order [pos++] = i;
16115 /* x87 registers. */
16116 if (TARGET_SSE_MATH)
16117 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16118 reg_alloc_order [pos++] = i;
16120 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16121 reg_alloc_order [pos++] = i;
16123 /* Initialize the rest of the array, as we do not allocate some registers
16124 at all. */
16125 while (pos < FIRST_PSEUDO_REGISTER)
16126 reg_alloc_order [pos++] = 0;
16129 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16130 struct attribute_spec.handler. */
16131 static tree
16132 ix86_handle_struct_attribute (tree *node, tree name,
16133 tree args ATTRIBUTE_UNUSED,
16134 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16136 tree *type = NULL;
16137 if (DECL_P (*node))
16139 if (TREE_CODE (*node) == TYPE_DECL)
16140 type = &TREE_TYPE (*node);
16142 else
16143 type = node;
16145 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16146 || TREE_CODE (*type) == UNION_TYPE)))
16148 warning (OPT_Wattributes, "%qs attribute ignored",
16149 IDENTIFIER_POINTER (name));
16150 *no_add_attrs = true;
16153 else if ((is_attribute_p ("ms_struct", name)
16154 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16155 || ((is_attribute_p ("gcc_struct", name)
16156 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16158 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16159 IDENTIFIER_POINTER (name));
16160 *no_add_attrs = true;
16163 return NULL_TREE;
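/* Illustration (not from the sources): the handler above accepts

     struct __attribute__ ((ms_struct)) S { char c; int i; };

   warns and drops the attribute when it is applied to something that is
   not a struct or union, and also warns when both layouts are requested
   at once, as in

     struct __attribute__ ((ms_struct, gcc_struct)) T { int i; };

   where the second, conflicting attribute is ignored.  */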
16166 static bool
16167 ix86_ms_bitfield_layout_p (tree record_type)
16169 return (TARGET_MS_BITFIELD_LAYOUT &&
16170 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
16171 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
16174 /* Returns an expression indicating where the this parameter is
16175 located on entry to the FUNCTION. */
16177 static rtx
16178 x86_this_parameter (tree function)
16180 tree type = TREE_TYPE (function);
16182 if (TARGET_64BIT)
16184 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16185 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16188 if (ix86_function_regparm (type, function) > 0)
16190 tree parm;
16192 parm = TYPE_ARG_TYPES (type);
16193 /* Figure out whether or not the function has a variable number of
16194 arguments. */
16195 for (; parm; parm = TREE_CHAIN (parm))
16196 if (TREE_VALUE (parm) == void_type_node)
16197 break;
16198 /* If not, the this parameter is in the first argument. */
16199 if (parm)
16201 int regno = 0;
16202 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16203 regno = 2;
16204 return gen_rtx_REG (SImode, regno);
16208 if (aggregate_value_p (TREE_TYPE (type), type))
16209 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16210 else
16211 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
16214 /* Determine whether x86_output_mi_thunk can succeed. */
16216 static bool
16217 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16218 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16219 HOST_WIDE_INT vcall_offset, tree function)
16221 /* 64-bit can handle anything. */
16222 if (TARGET_64BIT)
16223 return true;
16225 /* For 32-bit, everything's fine if we have one free register. */
16226 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16227 return true;
16229 /* Need a free register for vcall_offset. */
16230 if (vcall_offset)
16231 return false;
16233 /* Need a free register for GOT references. */
16234 if (flag_pic && !(*targetm.binds_local_p) (function))
16235 return false;
16237 /* Otherwise ok. */
16238 return true;
16241 /* Output the assembler code for a thunk function. THUNK_DECL is the
16242 declaration for the thunk function itself, FUNCTION is the decl for
16243 the target function. DELTA is an immediate constant offset to be
16244 added to THIS. If VCALL_OFFSET is nonzero, the word at
16245 *(*this + vcall_offset) should be added to THIS. */
16247 static void
16248 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16249 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16250 HOST_WIDE_INT vcall_offset, tree function)
16252 rtx xops[3];
16253 rtx this = x86_this_parameter (function);
16254 rtx this_reg, tmp;
16256 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16257 pull it in now and let DELTA benefit. */
16258 if (REG_P (this))
16259 this_reg = this;
16260 else if (vcall_offset)
16262 /* Put the this parameter into %eax. */
16263 xops[0] = this;
16264 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16265 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16267 else
16268 this_reg = NULL_RTX;
16270 /* Adjust the this parameter by a fixed constant. */
16271 if (delta)
16273 xops[0] = GEN_INT (delta);
16274 xops[1] = this_reg ? this_reg : this;
16275 if (TARGET_64BIT)
16277 if (!x86_64_general_operand (xops[0], DImode))
16279 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16280 xops[1] = tmp;
16281 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16282 xops[0] = tmp;
16283 xops[1] = this;
16285 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16287 else
16288 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16291 /* Adjust the this parameter by a value stored in the vtable. */
16292 if (vcall_offset)
16294 if (TARGET_64BIT)
16295 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16296 else
16298 int tmp_regno = 2 /* ECX */;
16299 if (lookup_attribute ("fastcall",
16300 TYPE_ATTRIBUTES (TREE_TYPE (function))))
16301 tmp_regno = 0 /* EAX */;
16302 tmp = gen_rtx_REG (SImode, tmp_regno);
16305 xops[0] = gen_rtx_MEM (Pmode, this_reg);
16306 xops[1] = tmp;
16307 if (TARGET_64BIT)
16308 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16309 else
16310 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16312 /* Adjust the this parameter. */
16313 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
16314 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
16316 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
16317 xops[0] = GEN_INT (vcall_offset);
16318 xops[1] = tmp2;
16319 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16320 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
16322 xops[1] = this_reg;
16323 if (TARGET_64BIT)
16324 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16325 else
16326 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16329 /* If necessary, drop THIS back to its stack slot. */
16330 if (this_reg && this_reg != this)
16332 xops[0] = this_reg;
16333 xops[1] = this;
16334 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16337 xops[0] = XEXP (DECL_RTL (function), 0);
16338 if (TARGET_64BIT)
16340 if (!flag_pic || (*targetm.binds_local_p) (function))
16341 output_asm_insn ("jmp\t%P0", xops);
16342 else
16344 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
16345 tmp = gen_rtx_CONST (Pmode, tmp);
16346 tmp = gen_rtx_MEM (QImode, tmp);
16347 xops[0] = tmp;
16348 output_asm_insn ("jmp\t%A0", xops);
16351 else
16353 if (!flag_pic || (*targetm.binds_local_p) (function))
16354 output_asm_insn ("jmp\t%P0", xops);
16355 else
16356 #if TARGET_MACHO
16357 if (TARGET_MACHO)
16359 rtx sym_ref = XEXP (DECL_RTL (function), 0);
16360 tmp = (gen_rtx_SYMBOL_REF
16361 (Pmode,
16362 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
16363 tmp = gen_rtx_MEM (QImode, tmp);
16364 xops[0] = tmp;
16365 output_asm_insn ("jmp\t%0", xops);
16367 else
16368 #endif /* TARGET_MACHO */
16370 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
16371 output_set_got (tmp);
16373 xops[1] = tmp;
16374 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
16375 output_asm_insn ("jmp\t{*}%1", xops);
16380 static void
16381 x86_file_start (void)
16383 default_file_start ();
16384 if (X86_FILE_START_VERSION_DIRECTIVE)
16385 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
16386 if (X86_FILE_START_FLTUSED)
16387 fputs ("\t.global\t__fltused\n", asm_out_file);
16388 if (ix86_asm_dialect == ASM_INTEL)
16389 fputs ("\t.intel_syntax\n", asm_out_file);
16393 x86_field_alignment (tree field, int computed)
16395 enum machine_mode mode;
16396 tree type = TREE_TYPE (field);
16398 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
16399 return computed;
16400 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
16401 ? get_inner_array_type (type) : type);
16402 if (mode == DFmode || mode == DCmode
16403 || GET_MODE_CLASS (mode) == MODE_INT
16404 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
16405 return MIN (32, computed);
16406 return computed;
16409 /* Output assembler code to FILE to increment profiler label # LABELNO
16410 for profiling a function entry. */
16411 void
16412 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
16414 if (TARGET_64BIT)
16415 if (flag_pic)
16417 #ifndef NO_PROFILE_COUNTERS
16418 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
16419 #endif
16420 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
16422 else
16424 #ifndef NO_PROFILE_COUNTERS
16425 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
16426 #endif
16427 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16429 else if (flag_pic)
16431 #ifndef NO_PROFILE_COUNTERS
16432 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
16433 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
16434 #endif
16435 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
16437 else
16439 #ifndef NO_PROFILE_COUNTERS
16440 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
16441 PROFILE_COUNT_REGISTER);
16442 #endif
16443 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16447 /* We don't have exact information about the insn sizes, but we may assume
16448 quite safely that we are informed about all 1 byte insns and memory
16449 address sizes. This is enough to eliminate unnecessary padding in
16450 99% of cases. */
16452 static int
16453 min_insn_size (rtx insn)
16455 int l = 0;
16457 if (!INSN_P (insn) || !active_insn_p (insn))
16458 return 0;
16460 /* Discard alignments we've emitted and jump tables. */
16461 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
16462 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
16463 return 0;
16464 if (GET_CODE (insn) == JUMP_INSN
16465 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
16466 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
16467 return 0;
16469 /* Important case - calls are always 5 bytes.
16470 It is common to have many calls in a row. */
16471 if (GET_CODE (insn) == CALL_INSN
16472 && symbolic_reference_mentioned_p (PATTERN (insn))
16473 && !SIBLING_CALL_P (insn))
16474 return 5;
16475 if (get_attr_length (insn) <= 1)
16476 return 1;
16478 /* For normal instructions we may rely on the sizes of addresses
16479 and the presence of symbol to require 4 bytes of encoding.
16480 This is not the case for jumps where references are PC relative. */
16481 if (GET_CODE (insn) != JUMP_INSN)
16483 l = get_attr_length_address (insn);
16484 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
16485 l = 4;
16487 if (l)
16488 return 1+l;
16489 else
16490 return 2;
16493 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a 16-byte
16494 window. */
16496 static void
16497 ix86_avoid_jump_misspredicts (void)
16499 rtx insn, start = get_insns ();
16500 int nbytes = 0, njumps = 0;
16501 int isjump = 0;
16503 /* Look for all minimal intervals of instructions containing 4 jumps.
16504 The intervals are bounded by START and INSN. NBYTES is the total
16505 size of the instructions in the interval, including INSN and not including
16506 START. When NBYTES is smaller than 16 bytes, it is possible
16507 that the ends of START and INSN fall into the same 16-byte page.
16509 The smallest offset in the page at which INSN can start is the case where
16510 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
16511 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN). */
16513 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
16516 nbytes += min_insn_size (insn);
16517 if (dump_file)
16518 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
16519 INSN_UID (insn), min_insn_size (insn));
16520 if ((GET_CODE (insn) == JUMP_INSN
16521 && GET_CODE (PATTERN (insn)) != ADDR_VEC
16522 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
16523 || GET_CODE (insn) == CALL_INSN)
16524 njumps++;
16525 else
16526 continue;
16528 while (njumps > 3)
16530 start = NEXT_INSN (start);
16531 if ((GET_CODE (start) == JUMP_INSN
16532 && GET_CODE (PATTERN (start)) != ADDR_VEC
16533 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
16534 || GET_CODE (start) == CALL_INSN)
16535 njumps--, isjump = 1;
16536 else
16537 isjump = 0;
16538 nbytes -= min_insn_size (start);
16540 gcc_assert (njumps >= 0);
16541 if (dump_file)
16542 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
16543 INSN_UID (start), INSN_UID (insn), nbytes);
16545 if (njumps == 3 && isjump && nbytes < 16)
16547 int padsize = 15 - nbytes + min_insn_size (insn);
16549 if (dump_file)
16550 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
16551 INSN_UID (insn), padsize);
16552 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
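
/* A stand-alone sketch of the sliding-window bookkeeping performed above,
   operating on plain arrays instead of the insn chain.  The names size[],
   is_jump[] and the reporting are made up for illustration; the real pass
   additionally emits the alignment insn.  */
#if 0
#include <stdio.h>

static void
count_jump_windows (const int *size, const int *is_jump, int n)
{
  int start = -1;		/* index of the insn just before the window */
  int nbytes = 0, njumps = 0, isjump = 0;
  int i;

  for (i = 0; i < n; i++)
    {
      nbytes += size[i];
      if (!is_jump[i])
	continue;
      njumps++;

      /* Shrink the window from the left until it holds at most 3 jumps,
	 remembering whether the last insn pushed out was itself a jump.  */
      while (njumps > 3)
	{
	  start++;
	  if (is_jump[start])
	    njumps--, isjump = 1;
	  else
	    isjump = 0;
	  nbytes -= size[start];
	}

      /* A 4th jump was just pushed out, and it could still share one
	 16-byte fetch window with the remaining 3 jumps.  */
      if (njumps == 3 && isjump && nbytes < 16)
	printf ("pad before insn %d (interval %d..%d, %d bytes)\n",
		i, start, i, nbytes);
    }
}
#endif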
16557 /* AMD Athlon works faster
16558 when RET is not the destination of a conditional jump or directly preceded
16559 by another jump instruction. We avoid the penalty by inserting a NOP just
16560 before the RET instruction in such cases. */
16561 static void
16562 ix86_pad_returns (void)
16564 edge e;
16565 edge_iterator ei;
16567 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
16569 basic_block bb = e->src;
16570 rtx ret = BB_END (bb);
16571 rtx prev;
16572 bool replace = false;
16574 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
16575 || !maybe_hot_bb_p (bb))
16576 continue;
16577 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
16578 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
16579 break;
16580 if (prev && GET_CODE (prev) == CODE_LABEL)
16582 edge e;
16583 edge_iterator ei;
16585 FOR_EACH_EDGE (e, ei, bb->preds)
16586 if (EDGE_FREQUENCY (e) && e->src->index >= 0
16587 && !(e->flags & EDGE_FALLTHRU))
16588 replace = true;
16590 if (!replace)
16592 prev = prev_active_insn (ret);
16593 if (prev
16594 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
16595 || GET_CODE (prev) == CALL_INSN))
16596 replace = true;
16597 /* Empty functions get a branch mispredict even when the jump destination
16598 is not visible to us. */
16599 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
16600 replace = true;
16602 if (replace)
16604 emit_insn_before (gen_return_internal_long (), ret);
16605 delete_insn (ret);
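
/* For illustration: when the pass above fires, a hot return that is
   reached straight from a jump, e.g.

	jne	.L3
	ret

   is replaced by the long return form (return_internal_long), which this
   port typically emits as a REP-prefixed RET:

	jne	.L3
	rep ; ret

   The extra prefix byte is enough to avoid the K8 predictor penalty
   described in the comment above.  */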
16610 /* Implement machine-specific optimizations. We implement padding of returns
16611 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
16612 static void
16613 ix86_reorg (void)
16615 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
16616 ix86_pad_returns ();
16617 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
16618 ix86_avoid_jump_misspredicts ();
16621 /* Return nonzero when a QImode register that must be represented via a REX
16622 prefix is used. */
16623 bool
16624 x86_extended_QIreg_mentioned_p (rtx insn)
16626 int i;
16627 extract_insn_cached (insn);
16628 for (i = 0; i < recog_data.n_operands; i++)
16629 if (REG_P (recog_data.operand[i])
16630 && REGNO (recog_data.operand[i]) >= 4)
16631 return true;
16632 return false;
16635 /* Return nonzero when P points to a register encoded via a REX prefix.
16636 Called via for_each_rtx. */
16637 static int
16638 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
16640 unsigned int regno;
16641 if (!REG_P (*p))
16642 return 0;
16643 regno = REGNO (*p);
16644 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
16647 /* Return true when INSN mentions a register that must be encoded using a REX
16648 prefix. */
16649 bool
16650 x86_extended_reg_mentioned_p (rtx insn)
16652 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
16655 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
16656 optabs would emit if we didn't have TFmode patterns. */
16658 void
16659 x86_emit_floatuns (rtx operands[2])
16661 rtx neglab, donelab, i0, i1, f0, in, out;
16662 enum machine_mode mode, inmode;
16664 inmode = GET_MODE (operands[1]);
16665 gcc_assert (inmode == SImode || inmode == DImode);
16667 out = operands[0];
16668 in = force_reg (inmode, operands[1]);
16669 mode = GET_MODE (out);
16670 neglab = gen_label_rtx ();
16671 donelab = gen_label_rtx ();
16672 i1 = gen_reg_rtx (Pmode);
16673 f0 = gen_reg_rtx (mode);
16675 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
16677 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
16678 emit_jump_insn (gen_jump (donelab));
16679 emit_barrier ();
16681 emit_label (neglab);
16683 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
16684 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
16685 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
16686 expand_float (f0, i0, 0);
16687 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
16689 emit_label (donelab);
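
/* Stand-alone sketch of the conversion strategy expanded above, written
   for a 64-bit unsigned input and a double result.  The helper name is
   made up; it only illustrates the halve-convert-double trick used for
   values whose sign bit is set, where a plain signed conversion would
   produce a negative number.  */
#if 0
#include <stdint.h>

static double
floatuns_sketch (uint64_t x)
{
  if ((int64_t) x >= 0)
    /* Sign bit clear: an ordinary signed conversion gives the right value.  */
    return (double) (int64_t) x;

  /* Halve the value, folding the low bit back in so the final rounding
     is not biased, convert as a signed number, then double the result.  */
  uint64_t half = (x >> 1) | (x & 1);
  return (double) (int64_t) half * 2.0;
}
#endif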
16692 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
16693 with all elements equal to VAR. Return true if successful. */
16695 static bool
16696 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
16697 rtx target, rtx val)
16699 enum machine_mode smode, wsmode, wvmode;
16700 rtx x;
16702 switch (mode)
16704 case V2SImode:
16705 case V2SFmode:
16706 if (!mmx_ok && !TARGET_SSE)
16707 return false;
16708 /* FALLTHRU */
16710 case V2DFmode:
16711 case V2DImode:
16712 case V4SFmode:
16713 case V4SImode:
16714 val = force_reg (GET_MODE_INNER (mode), val);
16715 x = gen_rtx_VEC_DUPLICATE (mode, val);
16716 emit_insn (gen_rtx_SET (VOIDmode, target, x));
16717 return true;
16719 case V4HImode:
16720 if (!mmx_ok)
16721 return false;
16722 if (TARGET_SSE || TARGET_3DNOW_A)
16724 val = gen_lowpart (SImode, val);
16725 x = gen_rtx_TRUNCATE (HImode, val);
16726 x = gen_rtx_VEC_DUPLICATE (mode, x);
16727 emit_insn (gen_rtx_SET (VOIDmode, target, x));
16728 return true;
16730 else
16732 smode = HImode;
16733 wsmode = SImode;
16734 wvmode = V2SImode;
16735 goto widen;
16738 case V8QImode:
16739 if (!mmx_ok)
16740 return false;
16741 smode = QImode;
16742 wsmode = HImode;
16743 wvmode = V4HImode;
16744 goto widen;
16745 case V8HImode:
16746 smode = HImode;
16747 wsmode = SImode;
16748 wvmode = V4SImode;
16749 goto widen;
16750 case V16QImode:
16751 smode = QImode;
16752 wsmode = HImode;
16753 wvmode = V8HImode;
16754 goto widen;
16755 widen:
16756 /* Replicate the value once into the next wider mode and recurse. */
16757 val = convert_modes (wsmode, smode, val, true);
16758 x = expand_simple_binop (wsmode, ASHIFT, val,
16759 GEN_INT (GET_MODE_BITSIZE (smode)),
16760 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16761 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
16763 x = gen_reg_rtx (wvmode);
16764 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
16765 gcc_unreachable ();
16766 emit_move_insn (target, gen_lowpart (mode, x));
16767 return true;
16769 default:
16770 return false;
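
/* Stand-alone sketch of the "widen" step above: replicate a scalar into
   the next wider integer mode with a shift and an IOR, then recurse.
   Broadcasting a byte into a 32-bit word takes two such steps.  The
   function name is made up for illustration.  */
#if 0
#include <stdint.h>

static uint32_t
broadcast_byte_sketch (uint8_t b)
{
  uint16_t two = (uint16_t) ((b << 8) | b);	  /* QImode -> HImode */
  uint32_t four = ((uint32_t) two << 16) | two;	  /* HImode -> SImode */
  return four;
}
#endif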
16774 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
16775 whose low element is VAR, and other elements are zero. Return true
16776 if successful. */
16778 static bool
16779 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
16780 rtx target, rtx var)
16782 enum machine_mode vsimode;
16783 rtx x;
16785 switch (mode)
16787 case V2SFmode:
16788 case V2SImode:
16789 if (!mmx_ok && !TARGET_SSE)
16790 return false;
16791 /* FALLTHRU */
16793 case V2DFmode:
16794 case V2DImode:
16795 var = force_reg (GET_MODE_INNER (mode), var);
16796 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
16797 emit_insn (gen_rtx_SET (VOIDmode, target, x));
16798 return true;
16800 case V4SFmode:
16801 case V4SImode:
16802 var = force_reg (GET_MODE_INNER (mode), var);
16803 x = gen_rtx_VEC_DUPLICATE (mode, var);
16804 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
16805 emit_insn (gen_rtx_SET (VOIDmode, target, x));
16806 return true;
16808 case V8HImode:
16809 case V16QImode:
16810 vsimode = V4SImode;
16811 goto widen;
16812 case V4HImode:
16813 case V8QImode:
16814 if (!mmx_ok)
16815 return false;
16816 vsimode = V2SImode;
16817 goto widen;
16818 widen:
16819 /* Zero extend the variable element to SImode and recurse. */
16820 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
16822 x = gen_reg_rtx (vsimode);
16823 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
16824 gcc_unreachable ();
16826 emit_move_insn (target, gen_lowpart (mode, x));
16827 return true;
16829 default:
16830 return false;
16834 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
16835 consisting of the values in VALS. It is known that all elements
16836 except ONE_VAR are constants. Return true if successful. */
16838 static bool
16839 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
16840 rtx target, rtx vals, int one_var)
16842 rtx var = XVECEXP (vals, 0, one_var);
16843 enum machine_mode wmode;
16844 rtx const_vec, x;
16846 XVECEXP (vals, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
16847 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
16849 switch (mode)
16851 case V2DFmode:
16852 case V2DImode:
16853 case V2SFmode:
16854 case V2SImode:
16855 /* For the two element vectors, it's just as easy to use
16856 the general case. */
16857 return false;
16859 case V4SFmode:
16860 case V4SImode:
16861 case V8HImode:
16862 case V4HImode:
16863 break;
16865 case V16QImode:
16866 wmode = V8HImode;
16867 goto widen;
16868 case V8QImode:
16869 wmode = V4HImode;
16870 goto widen;
16871 widen:
16872 /* There's no way to set one QImode entry easily. Combine
16873 the variable value with its adjacent constant value, and
16874 promote to an HImode set. */
16875 x = XVECEXP (vals, 0, one_var ^ 1);
16876 if (one_var & 1)
16878 var = convert_modes (HImode, QImode, var, true);
16879 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
16880 NULL_RTX, 1, OPTAB_LIB_WIDEN);
16881 x = GEN_INT (INTVAL (x) & 0xff);
16883 else
16885 var = convert_modes (HImode, QImode, var, true);
16886 x = gen_int_mode (INTVAL (x) << 8, HImode);
16888 if (x != const0_rtx)
16889 var = expand_simple_binop (HImode, IOR, var, x, var,
16890 1, OPTAB_LIB_WIDEN);
16892 x = gen_reg_rtx (wmode);
16893 emit_move_insn (x, gen_lowpart (wmode, const_vec));
16894 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
16896 emit_move_insn (target, gen_lowpart (mode, x));
16897 return true;
16899 default:
16900 return false;
16903 emit_move_insn (target, const_vec);
16904 ix86_expand_vector_set (mmx_ok, target, var, one_var);
16905 return true;
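
/* Stand-alone sketch of the QImode pairing done in the "widen" case
   above: the variable byte is combined with its constant neighbour into
   a 16-bit value so that the vector can then be updated with a single
   HImode element store.  Names are made up; the layout assumes the
   little-endian element order used on x86.  */
#if 0
#include <stdint.h>

static uint16_t
pair_byte_sketch (uint8_t var, uint8_t neighbour, int var_has_odd_index)
{
  if (var_has_odd_index)
    /* Odd index: the variable element lands in the high byte.  */
    return (uint16_t) ((var << 8) | neighbour);
  return (uint16_t) ((neighbour << 8) | var);
}
#endif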
16908 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
16909 all values variable, and none identical. */
16911 static void
16912 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
16913 rtx target, rtx vals)
16915 enum machine_mode half_mode = GET_MODE_INNER (mode);
16916 rtx op0 = NULL, op1 = NULL;
16917 bool use_vec_concat = false;
16919 switch (mode)
16921 case V2SFmode:
16922 case V2SImode:
16923 if (!mmx_ok && !TARGET_SSE)
16924 break;
16925 /* FALLTHRU */
16927 case V2DFmode:
16928 case V2DImode:
16929 /* For the two element vectors, we always implement VEC_CONCAT. */
16930 op0 = XVECEXP (vals, 0, 0);
16931 op1 = XVECEXP (vals, 0, 1);
16932 use_vec_concat = true;
16933 break;
16935 case V4SFmode:
16936 half_mode = V2SFmode;
16937 goto half;
16938 case V4SImode:
16939 half_mode = V2SImode;
16940 goto half;
16941 half:
16943 rtvec v;
16945 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
16946 Recurse to load the two halves. */
16948 op0 = gen_reg_rtx (half_mode);
16949 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
16950 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
16952 op1 = gen_reg_rtx (half_mode);
16953 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
16954 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
16956 use_vec_concat = true;
16958 break;
16960 case V8HImode:
16961 case V16QImode:
16962 case V4HImode:
16963 case V8QImode:
16964 break;
16966 default:
16967 gcc_unreachable ();
16970 if (use_vec_concat)
16972 if (!register_operand (op0, half_mode))
16973 op0 = force_reg (half_mode, op0);
16974 if (!register_operand (op1, half_mode))
16975 op1 = force_reg (half_mode, op1);
16977 emit_insn (gen_rtx_SET (VOIDmode, target,
16978 gen_rtx_VEC_CONCAT (mode, op0, op1)));
16980 else
16982 int i, j, n_elts, n_words, n_elt_per_word;
16983 enum machine_mode inner_mode;
16984 rtx words[4], shift;
16986 inner_mode = GET_MODE_INNER (mode);
16987 n_elts = GET_MODE_NUNITS (mode);
16988 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
16989 n_elt_per_word = n_elts / n_words;
16990 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
16992 for (i = 0; i < n_words; ++i)
16994 rtx word = NULL_RTX;
16996 for (j = 0; j < n_elt_per_word; ++j)
16998 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
16999 elt = convert_modes (word_mode, inner_mode, elt, true);
17001 if (j == 0)
17002 word = elt;
17003 else
17005 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
17006 word, 1, OPTAB_LIB_WIDEN);
17007 word = expand_simple_binop (word_mode, IOR, word, elt,
17008 word, 1, OPTAB_LIB_WIDEN);
17012 words[i] = word;
17015 if (n_words == 1)
17016 emit_move_insn (target, gen_lowpart (mode, words[0]));
17017 else if (n_words == 2)
17019 rtx tmp = gen_reg_rtx (mode);
17020 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
17021 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
17022 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
17023 emit_move_insn (target, tmp);
17025 else if (n_words == 4)
17027 rtx tmp = gen_reg_rtx (V4SImode);
17028 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
17029 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
17030 emit_move_insn (target, gen_lowpart (mode, tmp));
17032 else
17033 gcc_unreachable ();
17037 /* Initialize vector TARGET via VALS. Suppress the use of MMX
17038 instructions unless MMX_OK is true. */
17040 void
17041 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
17043 enum machine_mode mode = GET_MODE (target);
17044 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17045 int n_elts = GET_MODE_NUNITS (mode);
17046 int n_var = 0, one_var = -1;
17047 bool all_same = true, all_const_zero = true;
17048 int i;
17049 rtx x;
17051 for (i = 0; i < n_elts; ++i)
17053 x = XVECEXP (vals, 0, i);
17054 if (!CONSTANT_P (x))
17055 n_var++, one_var = i;
17056 else if (x != CONST0_RTX (inner_mode))
17057 all_const_zero = false;
17058 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17059 all_same = false;
17062 /* Constants are best loaded from the constant pool. */
17063 if (n_var == 0)
17065 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
17066 return;
17069 /* If all values are identical, broadcast the value. */
17070 if (all_same
17071 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
17072 XVECEXP (vals, 0, 0)))
17073 return;
17075 /* Values where only one field is non-constant are best loaded from
17076 the pool and overwritten via a move later. */
17077 if (n_var == 1)
17079 if (all_const_zero && one_var == 0
17080 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
17081 XVECEXP (vals, 0, 0)))
17082 return;
17084 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
17085 return;
17088 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
17091 void
17092 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
17094 enum machine_mode mode = GET_MODE (target);
17095 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17096 bool use_vec_merge = false;
17097 rtx tmp;
17099 switch (mode)
17101 case V2SFmode:
17102 case V2SImode:
17103 if (mmx_ok)
17105 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
17106 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
17107 if (elt == 0)
17108 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
17109 else
17110 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
17111 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17112 return;
17114 break;
17116 case V2DFmode:
17117 case V2DImode:
17119 rtx op0, op1;
17121 /* For the two element vectors, we implement a VEC_CONCAT with
17122 the extraction of the other element. */
17124 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
17125 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
17127 if (elt == 0)
17128 op0 = val, op1 = tmp;
17129 else
17130 op0 = tmp, op1 = val;
17132 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
17133 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17135 return;
17137 case V4SFmode:
17138 switch (elt)
17140 case 0:
17141 use_vec_merge = true;
17142 break;
17144 case 1:
17145 /* tmp = target = A B C D */
17146 tmp = copy_to_reg (target);
17147 /* target = A A B B */
17148 emit_insn (gen_sse_unpcklps (target, target, target));
17149 /* target = X A B B */
17150 ix86_expand_vector_set (false, target, val, 0);
17151 /* target = A X C D */
17152 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17153 GEN_INT (1), GEN_INT (0),
17154 GEN_INT (2+4), GEN_INT (3+4)));
17155 return;
17157 case 2:
17158 /* tmp = target = A B C D */
17159 tmp = copy_to_reg (target);
17160 /* tmp = X B C D */
17161 ix86_expand_vector_set (false, tmp, val, 0);
17162 /* target = A B X D */
17163 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17164 GEN_INT (0), GEN_INT (1),
17165 GEN_INT (0+4), GEN_INT (3+4)));
17166 return;
17168 case 3:
17169 /* tmp = target = A B C D */
17170 tmp = copy_to_reg (target);
17171 /* tmp = X B C D */
17172 ix86_expand_vector_set (false, tmp, val, 0);
17173 /* target = A B C X */
17174 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17175 GEN_INT (0), GEN_INT (1),
17176 GEN_INT (2+4), GEN_INT (0+4)));
17177 return;
17179 default:
17180 gcc_unreachable ();
17182 break;
17184 case V4SImode:
17185 /* Element 0 handled by vec_merge below. */
17186 if (elt == 0)
17188 use_vec_merge = true;
17189 break;
17192 if (TARGET_SSE2)
17194 /* With SSE2, use integer shuffles to swap element 0 and ELT,
17195 store into element 0, then shuffle them back. */
17197 rtx order[4];
17199 order[0] = GEN_INT (elt);
17200 order[1] = const1_rtx;
17201 order[2] = const2_rtx;
17202 order[3] = GEN_INT (3);
17203 order[elt] = const0_rtx;
17205 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17206 order[1], order[2], order[3]));
17208 ix86_expand_vector_set (false, target, val, 0);
17210 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17211 order[1], order[2], order[3]));
17213 else
17215 /* For SSE1, we have to reuse the V4SF code. */
17216 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
17217 gen_lowpart (SFmode, val), elt);
17219 return;
17221 case V8HImode:
17222 use_vec_merge = TARGET_SSE2;
17223 break;
17224 case V4HImode:
17225 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17226 break;
17228 case V16QImode:
17229 case V8QImode:
17230 default:
17231 break;
17234 if (use_vec_merge)
17236 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
17237 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
17238 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17240 else
17242 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17244 emit_move_insn (mem, target);
17246 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17247 emit_move_insn (tmp, val);
17249 emit_move_insn (target, mem);
17253 void
17254 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
17256 enum machine_mode mode = GET_MODE (vec);
17257 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17258 bool use_vec_extr = false;
17259 rtx tmp;
17261 switch (mode)
17263 case V2SImode:
17264 case V2SFmode:
17265 if (!mmx_ok)
17266 break;
17267 /* FALLTHRU */
17269 case V2DFmode:
17270 case V2DImode:
17271 use_vec_extr = true;
17272 break;
17274 case V4SFmode:
17275 switch (elt)
17277 case 0:
17278 tmp = vec;
17279 break;
17281 case 1:
17282 case 3:
17283 tmp = gen_reg_rtx (mode);
17284 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
17285 GEN_INT (elt), GEN_INT (elt),
17286 GEN_INT (elt+4), GEN_INT (elt+4)));
17287 break;
17289 case 2:
17290 tmp = gen_reg_rtx (mode);
17291 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
17292 break;
17294 default:
17295 gcc_unreachable ();
17297 vec = tmp;
17298 use_vec_extr = true;
17299 elt = 0;
17300 break;
17302 case V4SImode:
17303 if (TARGET_SSE2)
17305 switch (elt)
17307 case 0:
17308 tmp = vec;
17309 break;
17311 case 1:
17312 case 3:
17313 tmp = gen_reg_rtx (mode);
17314 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
17315 GEN_INT (elt), GEN_INT (elt),
17316 GEN_INT (elt), GEN_INT (elt)));
17317 break;
17319 case 2:
17320 tmp = gen_reg_rtx (mode);
17321 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
17322 break;
17324 default:
17325 gcc_unreachable ();
17327 vec = tmp;
17328 use_vec_extr = true;
17329 elt = 0;
17331 else
17333 /* For SSE1, we have to reuse the V4SF code. */
17334 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
17335 gen_lowpart (V4SFmode, vec), elt);
17336 return;
17338 break;
17340 case V8HImode:
17341 use_vec_extr = TARGET_SSE2;
17342 break;
17343 case V4HImode:
17344 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17345 break;
17347 case V16QImode:
17348 case V8QImode:
17349 /* ??? Could extract the appropriate HImode element and shift. */
17350 default:
17351 break;
17354 if (use_vec_extr)
17356 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
17357 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
17359 /* Let the rtl optimizers know about the zero extension performed. */
17360 if (inner_mode == HImode)
17362 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
17363 target = gen_lowpart (SImode, target);
17366 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17368 else
17370 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17372 emit_move_insn (mem, vec);
17374 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17375 emit_move_insn (target, tmp);
17379 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
17380 pattern to reduce; DEST is the destination; IN is the input vector. */
17382 void
17383 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
17385 rtx tmp1, tmp2, tmp3;
17387 tmp1 = gen_reg_rtx (V4SFmode);
17388 tmp2 = gen_reg_rtx (V4SFmode);
17389 tmp3 = gen_reg_rtx (V4SFmode);
17391 emit_insn (gen_sse_movhlps (tmp1, in, in));
17392 emit_insn (fn (tmp2, tmp1, in));
17394 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
17395 GEN_INT (1), GEN_INT (1),
17396 GEN_INT (1+4), GEN_INT (1+4)));
17397 emit_insn (fn (dest, tmp2, tmp3));
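
/* In scalar terms the sequence above computes, for IN = {a, b, c, d}:

     tmp1 = {c, d, c, d}			movhlps
     tmp2 = {f(c,a), f(d,b), ...}		FN, elementwise
     tmp3 = {f(d,b), f(d,b), ...}		shufps broadcast of element 1
     dest = {f(f(c,a), f(d,b)), ...}		FN again

   so element 0 of DEST holds the reduction of all four inputs.  A plain-C
   sketch of the same dataflow (helper name made up):  */
#if 0
static float
reduce4_sketch (float (*f) (float, float), float a, float b, float c, float d)
{
  return f (f (c, a), f (d, b));
}
#endif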
17400 /* Implements target hook vector_mode_supported_p. */
17401 static bool
17402 ix86_vector_mode_supported_p (enum machine_mode mode)
17404 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
17405 return true;
17406 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
17407 return true;
17408 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
17409 return true;
17410 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
17411 return true;
17412 return false;
17415 /* Worker function for TARGET_MD_ASM_CLOBBERS.
17417 We do this in the new i386 backend to maintain source compatibility
17418 with the old cc0-based compiler. */
17420 static tree
17421 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
17422 tree inputs ATTRIBUTE_UNUSED,
17423 tree clobbers)
17425 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
17426 clobbers);
17427 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
17428 clobbers);
17429 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
17430 clobbers);
17431 return clobbers;
17434 /* Worker function for REVERSE_CONDITION. */
17436 enum rtx_code
17437 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
17439 return (mode != CCFPmode && mode != CCFPUmode
17440 ? reverse_condition (code)
17441 : reverse_condition_maybe_unordered (code));
17444 /* Output code to perform an x87 FP register move, from OPERANDS[1]
17445 to OPERANDS[0]. */
17447 const char *
17448 output_387_reg_move (rtx insn, rtx *operands)
17450 if (REG_P (operands[1])
17451 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
17453 if (REGNO (operands[0]) == FIRST_STACK_REG
17454 && TARGET_USE_FFREEP)
17455 return "ffreep\t%y0";
17456 return "fstp\t%y0";
17458 if (STACK_TOP_P (operands[0]))
17459 return "fld%z1\t%y1";
17460 return "fst\t%y0";
17463 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
17464 the FP status register is set. */
17466 void
17467 ix86_emit_fp_unordered_jump (rtx label)
17469 rtx reg = gen_reg_rtx (HImode);
17470 rtx temp;
17472 emit_insn (gen_x86_fnstsw_1 (reg));
17474 if (TARGET_USE_SAHF)
17476 emit_insn (gen_x86_sahf_1 (reg));
17478 temp = gen_rtx_REG (CCmode, FLAGS_REG);
17479 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
17481 else
17483 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
17485 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17486 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
17489 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
17490 gen_rtx_LABEL_REF (VOIDmode, label),
17491 pc_rtx);
17492 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
17493 emit_jump_insn (temp);
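
/* For illustration, the two sequences produced above look roughly like
   this in AT&T syntax (the status word is read through %ax):

     with SAHF			without SAHF
	fnstsw	%ax		   fnstsw	%ax
	sahf			   testb	$4, %ah
	jp	label		   jne	label

   The 0x04 mask selects the C2 bit of the FP status word; after SAHF the
   same bit lands in the parity flag, which is why the UNORDERED form of
   the jump (JP) is used.  */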
17496 /* Output code to perform a log1p XFmode calculation. */
17498 void ix86_emit_i387_log1p (rtx op0, rtx op1)
17500 rtx label1 = gen_label_rtx ();
17501 rtx label2 = gen_label_rtx ();
17503 rtx tmp = gen_reg_rtx (XFmode);
17504 rtx tmp2 = gen_reg_rtx (XFmode);
17506 emit_insn (gen_absxf2 (tmp, op1));
17507 emit_insn (gen_cmpxf (tmp,
17508 CONST_DOUBLE_FROM_REAL_VALUE (
17509 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
17510 XFmode)));
17511 emit_jump_insn (gen_bge (label1));
17513 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
17514 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
17515 emit_jump (label2);
17517 emit_label (label1);
17518 emit_move_insn (tmp, CONST1_RTX (XFmode));
17519 emit_insn (gen_addxf3 (tmp, op1, tmp));
17520 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
17521 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
17523 emit_label (label2);
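
/* The expansion above follows the usual x87 log1p strategy: for small |x|
   the FYL2XP1 instruction computes y * log2(x + 1) directly from x, which
   avoids the cancellation that forming 1 + x first would cause; for larger
   |x| the plain FYL2X path on 1 + x is accurate enough.  A rough C sketch
   of the same control flow (using double instead of XFmode):  */
#if 0
#include <math.h>

static double
log1p_sketch (double x)
{
  /* 1 - sqrt(2)/2, the same threshold as the constant used above.  */
  const double small = 0.29289321881345247561810596348408353;
  double t;

  if (fabs (x) < small)
    /* FYL2XP1 path: log2 (x + 1) is computed directly from x, so no
       precision is lost by rounding 1 + x beforehand.  */
    return log (2.0) * log2 (1.0 + x);

  /* FYL2X path: form 1 + x explicitly, then take the logarithm.  */
  t = 1.0 + x;
  return log (2.0) * log2 (t);
}
#endif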
17526 /* Solaris named-section hook. Parameters are as for
17527 named_section_real. */
17529 static void
17530 i386_solaris_elf_named_section (const char *name, unsigned int flags,
17531 tree decl)
17533 /* With Binutils 2.15, the "@unwind" marker must be specified on
17534 every occurrence of the ".eh_frame" section, not just the first
17535 one. */
17536 if (TARGET_64BIT
17537 && strcmp (name, ".eh_frame") == 0)
17539 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
17540 flags & SECTION_WRITE ? "aw" : "a");
17541 return;
17543 default_elf_asm_named_section (name, flags, decl);
17546 /* Return the mangling of TYPE if it is an extended fundamental type. */
17548 static const char *
17549 ix86_mangle_fundamental_type (tree type)
17551 switch (TYPE_MODE (type))
17553 case TFmode:
17554 /* __float128 is "g". */
17555 return "g";
17556 case XFmode:
17557 /* "long double" or __float80 is "e". */
17558 return "e";
17559 default:
17560 return NULL;
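
/* For example, under the Itanium C++ ABI a declaration such as

     void f (__float128, long double);

   mangles to _Z1fge: 'g' for the TFmode __float128 operand and 'e' for
   the XFmode long double, matching the strings returned above.  */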
17564 #include "gt-i386.h"