gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20 Boston, MA 02110-1301, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
51 #include "dwarf2.h"
53 #ifndef CHECK_STACK_LIMIT
54 #define CHECK_STACK_LIMIT (-1)
55 #endif
57 /* Return index of given mode in mult and division cost tables. */
58 #define MODE_INDEX(mode) \
59 ((mode) == QImode ? 0 \
60 : (mode) == HImode ? 1 \
61 : (mode) == SImode ? 2 \
62 : (mode) == DImode ? 3 \
63 : 4)
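/* Illustrative note (not part of the original source): MODE_INDEX selects the
   per-mode slot of the five-element cost arrays below, such as the "cost of
   starting a multiply" and "cost of a divide/mod" tables.  Assuming the
   corresponding fields of struct processor_costs are named mult_init and
   divide (the struct itself is defined elsewhere), a typical lookup would be:

     ix86_cost->mult_init[MODE_INDEX (SImode)];   // SImode maps to slot 2
     ix86_cost->divide[MODE_INDEX (DImode)];      // DImode maps to slot 3

   Modes other than QImode/HImode/SImode/DImode fall through to slot 4.  */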
65 /* Processor costs (relative to an add) */
66 static const
67 struct processor_costs size_cost = { /* costs for tuning for size */
68 2, /* cost of an add instruction */
69 3, /* cost of a lea instruction */
70 2, /* variable shift costs */
71 3, /* constant shift costs */
72 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
73 0, /* cost of multiply per each bit set */
74 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
75 3, /* cost of movsx */
76 3, /* cost of movzx */
77 0, /* "large" insn */
78 2, /* MOVE_RATIO */
79 2, /* cost for loading QImode using movzbl */
80 {2, 2, 2}, /* cost of loading integer registers
81 in QImode, HImode and SImode.
82 Relative to reg-reg move (2). */
83 {2, 2, 2}, /* cost of storing integer registers */
84 2, /* cost of reg,reg fld/fst */
85 {2, 2, 2}, /* cost of loading fp registers
86 in SFmode, DFmode and XFmode */
87 {2, 2, 2}, /* cost of loading integer registers */
88 3, /* cost of moving MMX register */
89 {3, 3}, /* cost of loading MMX registers
90 in SImode and DImode */
91 {3, 3}, /* cost of storing MMX registers
92 in SImode and DImode */
93 3, /* cost of moving SSE register */
94 {3, 3, 3}, /* cost of loading SSE registers
95 in SImode, DImode and TImode */
96 {3, 3, 3}, /* cost of storing SSE registers
97 in SImode, DImode and TImode */
98 3, /* MMX or SSE register to integer */
99 0, /* size of prefetch block */
100 0, /* number of parallel prefetches */
101 1, /* Branch cost */
102 2, /* cost of FADD and FSUB insns. */
103 2, /* cost of FMUL instruction. */
104 2, /* cost of FDIV instruction. */
105 2, /* cost of FABS instruction. */
106 2, /* cost of FCHS instruction. */
107 2, /* cost of FSQRT instruction. */
110 /* Processor costs (relative to an add) */
111 static const
112 struct processor_costs i386_cost = { /* 386 specific costs */
113 1, /* cost of an add instruction */
114 1, /* cost of a lea instruction */
115 3, /* variable shift costs */
116 2, /* constant shift costs */
117 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
118 1, /* cost of multiply per each bit set */
119 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
120 3, /* cost of movsx */
121 2, /* cost of movzx */
122 15, /* "large" insn */
123 3, /* MOVE_RATIO */
124 4, /* cost for loading QImode using movzbl */
125 {2, 4, 2}, /* cost of loading integer registers
126 in QImode, HImode and SImode.
127 Relative to reg-reg move (2). */
128 {2, 4, 2}, /* cost of storing integer registers */
129 2, /* cost of reg,reg fld/fst */
130 {8, 8, 8}, /* cost of loading fp registers
131 in SFmode, DFmode and XFmode */
132 {8, 8, 8}, /* cost of loading integer registers */
133 2, /* cost of moving MMX register */
134 {4, 8}, /* cost of loading MMX registers
135 in SImode and DImode */
136 {4, 8}, /* cost of storing MMX registers
137 in SImode and DImode */
138 2, /* cost of moving SSE register */
139 {4, 8, 16}, /* cost of loading SSE registers
140 in SImode, DImode and TImode */
141 {4, 8, 16}, /* cost of storing SSE registers
142 in SImode, DImode and TImode */
143 3, /* MMX or SSE register to integer */
144 0, /* size of prefetch block */
145 0, /* number of parallel prefetches */
146 1, /* Branch cost */
147 23, /* cost of FADD and FSUB insns. */
148 27, /* cost of FMUL instruction. */
149 88, /* cost of FDIV instruction. */
150 22, /* cost of FABS instruction. */
151 24, /* cost of FCHS instruction. */
152 122, /* cost of FSQRT instruction. */
155 static const
156 struct processor_costs i486_cost = { /* 486 specific costs */
157 1, /* cost of an add instruction */
158 1, /* cost of a lea instruction */
159 3, /* variable shift costs */
160 2, /* constant shift costs */
161 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
162 1, /* cost of multiply per each bit set */
163 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
164 3, /* cost of movsx */
165 2, /* cost of movzx */
166 15, /* "large" insn */
167 3, /* MOVE_RATIO */
168 4, /* cost for loading QImode using movzbl */
169 {2, 4, 2}, /* cost of loading integer registers
170 in QImode, HImode and SImode.
171 Relative to reg-reg move (2). */
172 {2, 4, 2}, /* cost of storing integer registers */
173 2, /* cost of reg,reg fld/fst */
174 {8, 8, 8}, /* cost of loading fp registers
175 in SFmode, DFmode and XFmode */
176 {8, 8, 8}, /* cost of loading integer registers */
177 2, /* cost of moving MMX register */
178 {4, 8}, /* cost of loading MMX registers
179 in SImode and DImode */
180 {4, 8}, /* cost of storing MMX registers
181 in SImode and DImode */
182 2, /* cost of moving SSE register */
183 {4, 8, 16}, /* cost of loading SSE registers
184 in SImode, DImode and TImode */
185 {4, 8, 16}, /* cost of storing SSE registers
186 in SImode, DImode and TImode */
187 3, /* MMX or SSE register to integer */
188 0, /* size of prefetch block */
189 0, /* number of parallel prefetches */
190 1, /* Branch cost */
191 8, /* cost of FADD and FSUB insns. */
192 16, /* cost of FMUL instruction. */
193 73, /* cost of FDIV instruction. */
194 3, /* cost of FABS instruction. */
195 3, /* cost of FCHS instruction. */
196 83, /* cost of FSQRT instruction. */
199 static const
200 struct processor_costs pentium_cost = {
201 1, /* cost of an add instruction */
202 1, /* cost of a lea instruction */
203 4, /* variable shift costs */
204 1, /* constant shift costs */
205 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
206 0, /* cost of multiply per each bit set */
207 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
208 3, /* cost of movsx */
209 2, /* cost of movzx */
210 8, /* "large" insn */
211 6, /* MOVE_RATIO */
212 6, /* cost for loading QImode using movzbl */
213 {2, 4, 2}, /* cost of loading integer registers
214 in QImode, HImode and SImode.
215 Relative to reg-reg move (2). */
216 {2, 4, 2}, /* cost of storing integer registers */
217 2, /* cost of reg,reg fld/fst */
218 {2, 2, 6}, /* cost of loading fp registers
219 in SFmode, DFmode and XFmode */
220 {4, 4, 6}, /* cost of loading integer registers */
221 8, /* cost of moving MMX register */
222 {8, 8}, /* cost of loading MMX registers
223 in SImode and DImode */
224 {8, 8}, /* cost of storing MMX registers
225 in SImode and DImode */
226 2, /* cost of moving SSE register */
227 {4, 8, 16}, /* cost of loading SSE registers
228 in SImode, DImode and TImode */
229 {4, 8, 16}, /* cost of storing SSE registers
230 in SImode, DImode and TImode */
231 3, /* MMX or SSE register to integer */
232 0, /* size of prefetch block */
233 0, /* number of parallel prefetches */
234 2, /* Branch cost */
235 3, /* cost of FADD and FSUB insns. */
236 3, /* cost of FMUL instruction. */
237 39, /* cost of FDIV instruction. */
238 1, /* cost of FABS instruction. */
239 1, /* cost of FCHS instruction. */
240 70, /* cost of FSQRT instruction. */
243 static const
244 struct processor_costs pentiumpro_cost = {
245 1, /* cost of an add instruction */
246 1, /* cost of a lea instruction */
247 1, /* variable shift costs */
248 1, /* constant shift costs */
249 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
250 0, /* cost of multiply per each bit set */
251 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
252 1, /* cost of movsx */
253 1, /* cost of movzx */
254 8, /* "large" insn */
255 6, /* MOVE_RATIO */
256 2, /* cost for loading QImode using movzbl */
257 {4, 4, 4}, /* cost of loading integer registers
258 in QImode, HImode and SImode.
259 Relative to reg-reg move (2). */
260 {2, 2, 2}, /* cost of storing integer registers */
261 2, /* cost of reg,reg fld/fst */
262 {2, 2, 6}, /* cost of loading fp registers
263 in SFmode, DFmode and XFmode */
264 {4, 4, 6}, /* cost of loading integer registers */
265 2, /* cost of moving MMX register */
266 {2, 2}, /* cost of loading MMX registers
267 in SImode and DImode */
268 {2, 2}, /* cost of storing MMX registers
269 in SImode and DImode */
270 2, /* cost of moving SSE register */
271 {2, 2, 8}, /* cost of loading SSE registers
272 in SImode, DImode and TImode */
273 {2, 2, 8}, /* cost of storing SSE registers
274 in SImode, DImode and TImode */
275 3, /* MMX or SSE register to integer */
276 32, /* size of prefetch block */
277 6, /* number of parallel prefetches */
278 2, /* Branch cost */
279 3, /* cost of FADD and FSUB insns. */
280 5, /* cost of FMUL instruction. */
281 56, /* cost of FDIV instruction. */
282 2, /* cost of FABS instruction. */
283 2, /* cost of FCHS instruction. */
284 56, /* cost of FSQRT instruction. */
287 static const
288 struct processor_costs k6_cost = {
289 1, /* cost of an add instruction */
290 2, /* cost of a lea instruction */
291 1, /* variable shift costs */
292 1, /* constant shift costs */
293 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
294 0, /* cost of multiply per each bit set */
295 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
296 2, /* cost of movsx */
297 2, /* cost of movzx */
298 8, /* "large" insn */
299 4, /* MOVE_RATIO */
300 3, /* cost for loading QImode using movzbl */
301 {4, 5, 4}, /* cost of loading integer registers
302 in QImode, HImode and SImode.
303 Relative to reg-reg move (2). */
304 {2, 3, 2}, /* cost of storing integer registers */
305 4, /* cost of reg,reg fld/fst */
306 {6, 6, 6}, /* cost of loading fp registers
307 in SFmode, DFmode and XFmode */
308 {4, 4, 4}, /* cost of loading integer registers */
309 2, /* cost of moving MMX register */
310 {2, 2}, /* cost of loading MMX registers
311 in SImode and DImode */
312 {2, 2}, /* cost of storing MMX registers
313 in SImode and DImode */
314 2, /* cost of moving SSE register */
315 {2, 2, 8}, /* cost of loading SSE registers
316 in SImode, DImode and TImode */
317 {2, 2, 8}, /* cost of storing SSE registers
318 in SImode, DImode and TImode */
319 6, /* MMX or SSE register to integer */
320 32, /* size of prefetch block */
321 1, /* number of parallel prefetches */
322 1, /* Branch cost */
323 2, /* cost of FADD and FSUB insns. */
324 2, /* cost of FMUL instruction. */
325 56, /* cost of FDIV instruction. */
326 2, /* cost of FABS instruction. */
327 2, /* cost of FCHS instruction. */
328 56, /* cost of FSQRT instruction. */
331 static const
332 struct processor_costs athlon_cost = {
333 1, /* cost of an add instruction */
334 2, /* cost of a lea instruction */
335 1, /* variable shift costs */
336 1, /* constant shift costs */
337 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
338 0, /* cost of multiply per each bit set */
339 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
340 1, /* cost of movsx */
341 1, /* cost of movzx */
342 8, /* "large" insn */
343 9, /* MOVE_RATIO */
344 4, /* cost for loading QImode using movzbl */
345 {3, 4, 3}, /* cost of loading integer registers
346 in QImode, HImode and SImode.
347 Relative to reg-reg move (2). */
348 {3, 4, 3}, /* cost of storing integer registers */
349 4, /* cost of reg,reg fld/fst */
350 {4, 4, 12}, /* cost of loading fp registers
351 in SFmode, DFmode and XFmode */
352 {6, 6, 8}, /* cost of loading integer registers */
353 2, /* cost of moving MMX register */
354 {4, 4}, /* cost of loading MMX registers
355 in SImode and DImode */
356 {4, 4}, /* cost of storing MMX registers
357 in SImode and DImode */
358 2, /* cost of moving SSE register */
359 {4, 4, 6}, /* cost of loading SSE registers
360 in SImode, DImode and TImode */
361 {4, 4, 5}, /* cost of storing SSE registers
362 in SImode, DImode and TImode */
363 5, /* MMX or SSE register to integer */
364 64, /* size of prefetch block */
365 6, /* number of parallel prefetches */
366 5, /* Branch cost */
367 4, /* cost of FADD and FSUB insns. */
368 4, /* cost of FMUL instruction. */
369 24, /* cost of FDIV instruction. */
370 2, /* cost of FABS instruction. */
371 2, /* cost of FCHS instruction. */
372 35, /* cost of FSQRT instruction. */
375 static const
376 struct processor_costs k8_cost = {
377 1, /* cost of an add instruction */
378 2, /* cost of a lea instruction */
379 1, /* variable shift costs */
380 1, /* constant shift costs */
381 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
382 0, /* cost of multiply per each bit set */
383 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
384 1, /* cost of movsx */
385 1, /* cost of movzx */
386 8, /* "large" insn */
387 9, /* MOVE_RATIO */
388 4, /* cost for loading QImode using movzbl */
389 {3, 4, 3}, /* cost of loading integer registers
390 in QImode, HImode and SImode.
391 Relative to reg-reg move (2). */
392 {3, 4, 3}, /* cost of storing integer registers */
393 4, /* cost of reg,reg fld/fst */
394 {4, 4, 12}, /* cost of loading fp registers
395 in SFmode, DFmode and XFmode */
396 {6, 6, 8}, /* cost of loading integer registers */
397 2, /* cost of moving MMX register */
398 {3, 3}, /* cost of loading MMX registers
399 in SImode and DImode */
400 {4, 4}, /* cost of storing MMX registers
401 in SImode and DImode */
402 2, /* cost of moving SSE register */
403 {4, 3, 6}, /* cost of loading SSE registers
404 in SImode, DImode and TImode */
405 {4, 4, 5}, /* cost of storing SSE registers
406 in SImode, DImode and TImode */
407 5, /* MMX or SSE register to integer */
408 64, /* size of prefetch block */
409 6, /* number of parallel prefetches */
410 5, /* Branch cost */
411 4, /* cost of FADD and FSUB insns. */
412 4, /* cost of FMUL instruction. */
413 19, /* cost of FDIV instruction. */
414 2, /* cost of FABS instruction. */
415 2, /* cost of FCHS instruction. */
416 35, /* cost of FSQRT instruction. */
419 static const
420 struct processor_costs pentium4_cost = {
421 1, /* cost of an add instruction */
422 3, /* cost of a lea instruction */
423 4, /* variable shift costs */
424 4, /* constant shift costs */
425 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
426 0, /* cost of multiply per each bit set */
427 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
428 1, /* cost of movsx */
429 1, /* cost of movzx */
430 16, /* "large" insn */
431 6, /* MOVE_RATIO */
432 2, /* cost for loading QImode using movzbl */
433 {4, 5, 4}, /* cost of loading integer registers
434 in QImode, HImode and SImode.
435 Relative to reg-reg move (2). */
436 {2, 3, 2}, /* cost of storing integer registers */
437 2, /* cost of reg,reg fld/fst */
438 {2, 2, 6}, /* cost of loading fp registers
439 in SFmode, DFmode and XFmode */
440 {4, 4, 6}, /* cost of loading integer registers */
441 2, /* cost of moving MMX register */
442 {2, 2}, /* cost of loading MMX registers
443 in SImode and DImode */
444 {2, 2}, /* cost of storing MMX registers
445 in SImode and DImode */
446 12, /* cost of moving SSE register */
447 {12, 12, 12}, /* cost of loading SSE registers
448 in SImode, DImode and TImode */
449 {2, 2, 8}, /* cost of storing SSE registers
450 in SImode, DImode and TImode */
451 10, /* MMX or SSE register to integer */
452 64, /* size of prefetch block */
453 6, /* number of parallel prefetches */
454 2, /* Branch cost */
455 5, /* cost of FADD and FSUB insns. */
456 7, /* cost of FMUL instruction. */
457 43, /* cost of FDIV instruction. */
458 2, /* cost of FABS instruction. */
459 2, /* cost of FCHS instruction. */
460 43, /* cost of FSQRT instruction. */
463 static const
464 struct processor_costs nocona_cost = {
465 1, /* cost of an add instruction */
466 1, /* cost of a lea instruction */
467 1, /* variable shift costs */
468 1, /* constant shift costs */
469 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
470 0, /* cost of multiply per each bit set */
471 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
472 1, /* cost of movsx */
473 1, /* cost of movzx */
474 16, /* "large" insn */
475 17, /* MOVE_RATIO */
476 4, /* cost for loading QImode using movzbl */
477 {4, 4, 4}, /* cost of loading integer registers
478 in QImode, HImode and SImode.
479 Relative to reg-reg move (2). */
480 {4, 4, 4}, /* cost of storing integer registers */
481 3, /* cost of reg,reg fld/fst */
482 {12, 12, 12}, /* cost of loading fp registers
483 in SFmode, DFmode and XFmode */
484 {4, 4, 4}, /* cost of loading integer registers */
485 6, /* cost of moving MMX register */
486 {12, 12}, /* cost of loading MMX registers
487 in SImode and DImode */
488 {12, 12}, /* cost of storing MMX registers
489 in SImode and DImode */
490 6, /* cost of moving SSE register */
491 {12, 12, 12}, /* cost of loading SSE registers
492 in SImode, DImode and TImode */
493 {12, 12, 12}, /* cost of storing SSE registers
494 in SImode, DImode and TImode */
495 8, /* MMX or SSE register to integer */
496 128, /* size of prefetch block */
497 8, /* number of parallel prefetches */
498 1, /* Branch cost */
499 6, /* cost of FADD and FSUB insns. */
500 8, /* cost of FMUL instruction. */
501 40, /* cost of FDIV instruction. */
502 3, /* cost of FABS instruction. */
503 3, /* cost of FCHS instruction. */
504 44, /* cost of FSQRT instruction. */
507 const struct processor_costs *ix86_cost = &pentium_cost;
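/* Illustrative note (not part of the original source): ix86_cost starts out
   pointing at pentium_cost and is retargeted by override_options () later in
   this file, either to &size_cost when optimizing for size or to the table
   entry selected by -mtune via processor_target_table[ix86_tune].cost.  A
   hedged sketch of how a consumer such as ix86_rtx_costs might read it (the
   field name "add" is assumed from the tables above):

     *total = COSTS_N_INSNS (ix86_cost->add);
*/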
509 /* Processor feature/optimization bitmasks. */
510 #define m_386 (1<<PROCESSOR_I386)
511 #define m_486 (1<<PROCESSOR_I486)
512 #define m_PENT (1<<PROCESSOR_PENTIUM)
513 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
514 #define m_K6 (1<<PROCESSOR_K6)
515 #define m_ATHLON (1<<PROCESSOR_ATHLON)
516 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
517 #define m_K8 (1<<PROCESSOR_K8)
518 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
519 #define m_NOCONA (1<<PROCESSOR_NOCONA)
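/* Illustrative note (not part of the original source): each x86_* tuning
   variable below is a bitmask over processor_type values, so a feature is
   tested by checking the bit of the processor being tuned for or compiled
   for.  This file does exactly that later, e.g.

     if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
       target_flags &= ~MASK_NO_FANCY_MATH_387;

   i386.h presumably wraps the common cases in TARGET_* macros built the same
   way (testing a mask such as x86_use_leave against 1 << ix86_tune).  */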
521 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
522 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
523 const int x86_zero_extend_with_and = m_486 | m_PENT;
524 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
525 const int x86_double_with_add = ~m_386;
526 const int x86_use_bit_test = m_386;
527 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
528 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
529 const int x86_fisttp = m_NOCONA;
530 const int x86_3dnow_a = m_ATHLON_K8;
531 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
532 /* Branch hints were put in P4 based on simulation results. But
533 after P4 was made, no performance benefit was observed with
534 branch hints; they also increase the code size. As a result,
535 icc never generates branch hints. */
536 const int x86_branch_hints = 0;
537 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
538 const int x86_partial_reg_stall = m_PPRO;
539 const int x86_use_himode_fiop = m_386 | m_486 | m_K6;
540 const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
541 const int x86_use_mov0 = m_K6;
542 const int x86_use_cltd = ~(m_PENT | m_K6);
543 const int x86_read_modify_write = ~m_PENT;
544 const int x86_read_modify = ~(m_PENT | m_PPRO);
545 const int x86_split_long_moves = m_PPRO;
546 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
547 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
548 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
549 const int x86_qimode_math = ~(0);
550 const int x86_promote_qi_regs = 0;
551 const int x86_himode_math = ~(m_PPRO);
552 const int x86_promote_hi_regs = m_PPRO;
553 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
554 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
556 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
557 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
558 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
559 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
560 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
561 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
562 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
563 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
564 const int x86_shift1 = ~m_486;
565 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
566 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
567 /* Set for machines where the type and dependencies are resolved on SSE
568 register parts instead of whole registers, so we may maintain just the
569 lower part of scalar values in the proper format, leaving the upper part
570 undefined. */
571 const int x86_sse_split_regs = m_ATHLON_K8;
572 const int x86_sse_typeless_stores = m_ATHLON_K8;
573 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
574 const int x86_use_ffreep = m_ATHLON_K8;
575 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
577 /* ??? Allowing interunit moves makes it all too easy for the compiler to put
578 integer data in xmm registers, which results in pretty abysmal code. */
579 const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
581 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
582 /* Some CPU cores are not able to predict more than 4 branch instructions in
583 the 16 byte window. */
584 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
585 const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6 | m_PENT;
586 const int x86_use_bt = m_ATHLON_K8;
587 /* Compare and exchange was added for 80486. */
588 const int x86_cmpxchg = ~m_386;
589 /* Exchange and add was added for 80486. */
590 const int x86_xadd = ~m_386;
592 /* In case the average insn count for a single function invocation is
593 lower than this constant, emit fast (but longer) prologue and
594 epilogue code. */
595 #define FAST_PROLOGUE_INSN_COUNT 20
597 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
598 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
599 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
600 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
602 /* Array of the smallest class containing reg number REGNO, indexed by
603 REGNO. Used by REGNO_REG_CLASS in i386.h. */
605 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
607 /* ax, dx, cx, bx */
608 AREG, DREG, CREG, BREG,
609 /* si, di, bp, sp */
610 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
611 /* FP registers */
612 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
613 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
614 /* arg pointer */
615 NON_Q_REGS,
616 /* flags, fpsr, dirflag, frame */
617 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
618 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
619 SSE_REGS, SSE_REGS,
620 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
621 MMX_REGS, MMX_REGS,
622 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
623 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
624 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
625 SSE_REGS, SSE_REGS,
628 /* The "default" register map used in 32bit mode. */
630 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
632 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
633 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
634 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
635 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
636 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
637 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
638 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
641 static int const x86_64_int_parameter_registers[6] =
643 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
644 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
647 static int const x86_64_int_return_registers[4] =
649 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
652 /* The "default" register map used in 64bit mode. */
653 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
655 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
656 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
657 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
658 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
659 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
660 8,9,10,11,12,13,14,15, /* extended integer registers */
661 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
664 /* Define the register numbers to be used in Dwarf debugging information.
665 The SVR4 reference port C compiler uses the following register numbers
666 in its Dwarf output code:
667 0 for %eax (gcc regno = 0)
668 1 for %ecx (gcc regno = 2)
669 2 for %edx (gcc regno = 1)
670 3 for %ebx (gcc regno = 3)
671 4 for %esp (gcc regno = 7)
672 5 for %ebp (gcc regno = 6)
673 6 for %esi (gcc regno = 4)
674 7 for %edi (gcc regno = 5)
675 The following three DWARF register numbers are never generated by
676 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
677 believes these numbers have these meanings.
678 8 for %eip (no gcc equivalent)
679 9 for %eflags (gcc regno = 17)
680 10 for %trapno (no gcc equivalent)
681 It is not at all clear how we should number the FP stack registers
682 for the x86 architecture. If the version of SDB on x86/svr4 were
683 a bit less brain dead with respect to floating-point then we would
684 have a precedent to follow with respect to DWARF register numbers
685 for x86 FP registers, but the SDB on x86/svr4 is so completely
686 broken with respect to FP registers that it is hardly worth thinking
687 of it as something to strive for compatibility with.
688 The version of x86/svr4 SDB I have at the moment does (partially)
689 seem to believe that DWARF register number 11 is associated with
690 the x86 register %st(0), but that's about all. Higher DWARF
691 register numbers don't seem to be associated with anything in
692 particular, and even for DWARF regno 11, SDB only seems to under-
693 stand that it should say that a variable lives in %st(0) (when
694 asked via an `=' command) if we said it was in DWARF regno 11,
695 but SDB still prints garbage when asked for the value of the
696 variable in question (via a `/' command).
697 (Also note that the labels SDB prints for various FP stack regs
698 when doing an `x' command are all wrong.)
699 Note that these problems generally don't affect the native SVR4
700 C compiler because it doesn't allow the use of -O with -g and
701 because when it is *not* optimizing, it allocates a memory
702 location for each floating-point variable, and the memory
703 location is what gets described in the DWARF AT_location
704 attribute for the variable in question.
705 Regardless of the severe mental illness of the x86/svr4 SDB, we
706 do something sensible here and we use the following DWARF
707 register numbers. Note that these are all stack-top-relative
708 numbers.
709 11 for %st(0) (gcc regno = 8)
710 12 for %st(1) (gcc regno = 9)
711 13 for %st(2) (gcc regno = 10)
712 14 for %st(3) (gcc regno = 11)
713 15 for %st(4) (gcc regno = 12)
714 16 for %st(5) (gcc regno = 13)
715 17 for %st(6) (gcc regno = 14)
716 18 for %st(7) (gcc regno = 15)
718 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
720 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
721 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
722 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
723 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
724 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
725 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
726 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
729 /* Test and compare insns in i386.md store the information needed to
730 generate branch and scc insns here. */
732 rtx ix86_compare_op0 = NULL_RTX;
733 rtx ix86_compare_op1 = NULL_RTX;
734 rtx ix86_compare_emitted = NULL_RTX;
736 /* Size of the register save area. */
737 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
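/* Illustrative note (not part of the original source): with the usual x86-64
   values REGPARM_MAX == 6, SSE_REGPARM_MAX == 8 and UNITS_PER_WORD == 8 (all
   assumed here; they are defined in i386.h), the register save area works
   out to

     6 * 8 + 8 * 16 = 48 + 128 = 176 bytes

   which matches the va_arg register save area size of the x86-64 ABI.  */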
739 /* Define the structure for the machine field in struct function. */
741 struct stack_local_entry GTY(())
743 unsigned short mode;
744 unsigned short n;
745 rtx rtl;
746 struct stack_local_entry *next;
749 /* Structure describing stack frame layout.
750 Stack grows downward:
752 [arguments]
753 <- ARG_POINTER
754 saved pc
756 saved frame pointer if frame_pointer_needed
757 <- HARD_FRAME_POINTER
758 [saved regs]
760 [padding1] \
762 [va_arg registers] (
763 > to_allocate <- FRAME_POINTER
764 [frame] (
766 [padding2] /
768 struct ix86_frame
770 int nregs;
771 int padding1;
772 int va_arg_size;
773 HOST_WIDE_INT frame;
774 int padding2;
775 int outgoing_arguments_size;
776 int red_zone_size;
778 HOST_WIDE_INT to_allocate;
779 /* The offsets relative to ARG_POINTER. */
780 HOST_WIDE_INT frame_pointer_offset;
781 HOST_WIDE_INT hard_frame_pointer_offset;
782 HOST_WIDE_INT stack_pointer_offset;
784 /* When save_regs_using_mov is set, emit prologue using
785 move instead of push instructions. */
786 bool save_regs_using_mov;
789 /* Code model option. */
790 enum cmodel ix86_cmodel;
791 /* Asm dialect. */
792 enum asm_dialect ix86_asm_dialect = ASM_ATT;
793 /* TLS dialect. */
794 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
796 /* Which unit we are generating floating point math for. */
797 enum fpmath_unit ix86_fpmath;
799 /* Which CPU we are scheduling for. */
800 enum processor_type ix86_tune;
801 /* Which instruction set architecture to use. */
802 enum processor_type ix86_arch;
804 /* True if the SSE prefetch instruction is not a NOP. */
805 int x86_prefetch_sse;
807 /* ix86_regparm_string as a number */
808 static int ix86_regparm;
810 /* Preferred alignment for stack boundary in bits. */
811 unsigned int ix86_preferred_stack_boundary;
813 /* Values 1-5: see jump.c */
814 int ix86_branch_cost;
816 /* Variables which are this size or smaller are put in the data/bss
817 or ldata/lbss sections. */
819 int ix86_section_threshold = 65536;
821 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
822 char internal_label_prefix[16];
823 int internal_label_prefix_len;
825 static bool ix86_handle_option (size_t, const char *, int);
826 static void output_pic_addr_const (FILE *, rtx, int);
827 static void put_condition_code (enum rtx_code, enum machine_mode,
828 int, int, FILE *);
829 static const char *get_some_local_dynamic_name (void);
830 static int get_some_local_dynamic_name_1 (rtx *, void *);
831 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
832 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
833 rtx *);
834 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
835 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
836 enum machine_mode);
837 static rtx get_thread_pointer (int);
838 static rtx legitimize_tls_address (rtx, enum tls_model, int);
839 static void get_pc_thunk_name (char [32], unsigned int);
840 static rtx gen_push (rtx);
841 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
842 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
843 static struct machine_function * ix86_init_machine_status (void);
844 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
845 static int ix86_nsaved_regs (void);
846 static void ix86_emit_save_regs (void);
847 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
848 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
849 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
850 static HOST_WIDE_INT ix86_GOT_alias_set (void);
851 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
852 static rtx ix86_expand_aligntest (rtx, int);
853 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
854 static int ix86_issue_rate (void);
855 static int ix86_adjust_cost (rtx, rtx, rtx, int);
856 static int ia32_multipass_dfa_lookahead (void);
857 static void ix86_init_mmx_sse_builtins (void);
858 static rtx x86_this_parameter (tree);
859 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
860 HOST_WIDE_INT, tree);
861 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
862 static void x86_file_start (void);
863 static void ix86_reorg (void);
864 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
865 static tree ix86_build_builtin_va_list (void);
866 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
867 tree, int *, int);
868 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
869 static bool ix86_vector_mode_supported_p (enum machine_mode);
871 static int ix86_address_cost (rtx);
872 static bool ix86_cannot_force_const_mem (rtx);
873 static rtx ix86_delegitimize_address (rtx);
875 static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
877 struct builtin_description;
878 static rtx ix86_expand_sse_comi (const struct builtin_description *,
879 tree, rtx);
880 static rtx ix86_expand_sse_compare (const struct builtin_description *,
881 tree, rtx);
882 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
883 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
884 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
885 static rtx ix86_expand_store_builtin (enum insn_code, tree);
886 static rtx safe_vector_operand (rtx, enum machine_mode);
887 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
888 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
889 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
890 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
891 static int ix86_fp_comparison_cost (enum rtx_code code);
892 static unsigned int ix86_select_alt_pic_regnum (void);
893 static int ix86_save_reg (unsigned int, int);
894 static void ix86_compute_frame_layout (struct ix86_frame *);
895 static int ix86_comp_type_attributes (tree, tree);
896 static int ix86_function_regparm (tree, tree);
897 const struct attribute_spec ix86_attribute_table[];
898 static bool ix86_function_ok_for_sibcall (tree, tree);
899 static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
900 static int ix86_value_regno (enum machine_mode, tree, tree);
901 static bool contains_128bit_aligned_vector_p (tree);
902 static rtx ix86_struct_value_rtx (tree, int);
903 static bool ix86_ms_bitfield_layout_p (tree);
904 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
905 static int extended_reg_mentioned_1 (rtx *, void *);
906 static bool ix86_rtx_costs (rtx, int, int, int *);
907 static int min_insn_size (rtx);
908 static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
909 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
910 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
911 tree, bool);
912 static void ix86_init_builtins (void);
913 static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
914 static const char *ix86_mangle_fundamental_type (tree);
915 static tree ix86_stack_protect_fail (void);
916 static rtx ix86_internal_arg_pointer (void);
917 static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);
919 /* This function is only used on Solaris. */
920 static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
921 ATTRIBUTE_UNUSED;
923 /* Register class used for passing a given 64-bit part of the argument.
924 These represent classes as documented by the psABI, with the exception
925 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
926 uses SFmode or DFmode moves instead of DImode to avoid reformatting penalties.
928 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
929 whenever possible (i.e., when the upper half is just padding). */
931 enum x86_64_reg_class
933 X86_64_NO_CLASS,
934 X86_64_INTEGER_CLASS,
935 X86_64_INTEGERSI_CLASS,
936 X86_64_SSE_CLASS,
937 X86_64_SSESF_CLASS,
938 X86_64_SSEDF_CLASS,
939 X86_64_SSEUP_CLASS,
940 X86_64_X87_CLASS,
941 X86_64_X87UP_CLASS,
942 X86_64_COMPLEX_X87_CLASS,
943 X86_64_MEMORY_CLASS
945 static const char * const x86_64_reg_class_name[] = {
946 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
947 "sseup", "x87", "x87up", "cplx87", "no"
950 #define MAX_CLASSES 4
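/* Illustrative note (not part of the original source): a worked example of
   the classification these enumerators describe.  Under the x86-64 psABI a
   16-byte aggregate such as

     struct s { double d; int i; };

   has its first eightbyte (the double) classified as SSE -- refined here to
   X86_64_SSEDF_CLASS so a DFmode move is used -- and its second eightbyte as
   X86_64_INTEGERSI_CLASS, since only the low SImode half is meaningful and
   the rest is padding, as the comment above explains.  */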
952 /* Table of constants used by fldpi, fldln2, etc.... */
953 static REAL_VALUE_TYPE ext_80387_constants_table [5];
954 static bool ext_80387_constants_init = 0;
955 static void init_ext_80387_constants (void);
956 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
957 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
958 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
959 static void x86_64_elf_select_section (tree decl, int reloc,
960 unsigned HOST_WIDE_INT align)
961 ATTRIBUTE_UNUSED;
963 /* Initialize the GCC target structure. */
964 #undef TARGET_ATTRIBUTE_TABLE
965 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
966 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
967 # undef TARGET_MERGE_DECL_ATTRIBUTES
968 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
969 #endif
971 #undef TARGET_COMP_TYPE_ATTRIBUTES
972 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
974 #undef TARGET_INIT_BUILTINS
975 #define TARGET_INIT_BUILTINS ix86_init_builtins
976 #undef TARGET_EXPAND_BUILTIN
977 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
979 #undef TARGET_ASM_FUNCTION_EPILOGUE
980 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
982 #undef TARGET_ENCODE_SECTION_INFO
983 #ifndef SUBTARGET_ENCODE_SECTION_INFO
984 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
985 #else
986 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
987 #endif
989 #undef TARGET_ASM_OPEN_PAREN
990 #define TARGET_ASM_OPEN_PAREN ""
991 #undef TARGET_ASM_CLOSE_PAREN
992 #define TARGET_ASM_CLOSE_PAREN ""
994 #undef TARGET_ASM_ALIGNED_HI_OP
995 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
996 #undef TARGET_ASM_ALIGNED_SI_OP
997 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
998 #ifdef ASM_QUAD
999 #undef TARGET_ASM_ALIGNED_DI_OP
1000 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1001 #endif
1003 #undef TARGET_ASM_UNALIGNED_HI_OP
1004 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1005 #undef TARGET_ASM_UNALIGNED_SI_OP
1006 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1007 #undef TARGET_ASM_UNALIGNED_DI_OP
1008 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1010 #undef TARGET_SCHED_ADJUST_COST
1011 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1012 #undef TARGET_SCHED_ISSUE_RATE
1013 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1014 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1015 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1016 ia32_multipass_dfa_lookahead
1018 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1019 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1021 #ifdef HAVE_AS_TLS
1022 #undef TARGET_HAVE_TLS
1023 #define TARGET_HAVE_TLS true
1024 #endif
1025 #undef TARGET_CANNOT_FORCE_CONST_MEM
1026 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1028 #undef TARGET_DELEGITIMIZE_ADDRESS
1029 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1031 #undef TARGET_MS_BITFIELD_LAYOUT_P
1032 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1034 #if TARGET_MACHO
1035 #undef TARGET_BINDS_LOCAL_P
1036 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1037 #endif
1039 #undef TARGET_ASM_OUTPUT_MI_THUNK
1040 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1041 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1042 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1044 #undef TARGET_ASM_FILE_START
1045 #define TARGET_ASM_FILE_START x86_file_start
1047 #undef TARGET_DEFAULT_TARGET_FLAGS
1048 #define TARGET_DEFAULT_TARGET_FLAGS \
1049 (TARGET_DEFAULT \
1050 | TARGET_64BIT_DEFAULT \
1051 | TARGET_SUBTARGET_DEFAULT \
1052 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1054 #undef TARGET_HANDLE_OPTION
1055 #define TARGET_HANDLE_OPTION ix86_handle_option
1057 #undef TARGET_RTX_COSTS
1058 #define TARGET_RTX_COSTS ix86_rtx_costs
1059 #undef TARGET_ADDRESS_COST
1060 #define TARGET_ADDRESS_COST ix86_address_cost
1062 #undef TARGET_FIXED_CONDITION_CODE_REGS
1063 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1064 #undef TARGET_CC_MODES_COMPATIBLE
1065 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1067 #undef TARGET_MACHINE_DEPENDENT_REORG
1068 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1070 #undef TARGET_BUILD_BUILTIN_VA_LIST
1071 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1073 #undef TARGET_MD_ASM_CLOBBERS
1074 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1076 #undef TARGET_PROMOTE_PROTOTYPES
1077 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1078 #undef TARGET_STRUCT_VALUE_RTX
1079 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1080 #undef TARGET_SETUP_INCOMING_VARARGS
1081 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1082 #undef TARGET_MUST_PASS_IN_STACK
1083 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1084 #undef TARGET_PASS_BY_REFERENCE
1085 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1086 #undef TARGET_INTERNAL_ARG_POINTER
1087 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1088 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1089 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1091 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1092 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1094 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1095 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1097 #ifdef HAVE_AS_TLS
1098 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1099 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1100 #endif
1102 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1103 #undef TARGET_INSERT_ATTRIBUTES
1104 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1105 #endif
1107 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1108 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1110 #undef TARGET_STACK_PROTECT_FAIL
1111 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1113 #undef TARGET_FUNCTION_VALUE
1114 #define TARGET_FUNCTION_VALUE ix86_function_value
1116 struct gcc_target targetm = TARGET_INITIALIZER;
1119 /* The svr4 ABI for the i386 says that records and unions are returned
1120 in memory. */
1121 #ifndef DEFAULT_PCC_STRUCT_RETURN
1122 #define DEFAULT_PCC_STRUCT_RETURN 1
1123 #endif
1125 /* Implement TARGET_HANDLE_OPTION. */
1127 static bool
1128 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1130 switch (code)
1132 case OPT_m3dnow:
1133 if (!value)
1135 target_flags &= ~MASK_3DNOW_A;
1136 target_flags_explicit |= MASK_3DNOW_A;
1138 return true;
1140 case OPT_mmmx:
1141 if (!value)
1143 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1144 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1146 return true;
1148 case OPT_msse:
1149 if (!value)
1151 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1152 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1154 return true;
1156 case OPT_msse2:
1157 if (!value)
1159 target_flags &= ~MASK_SSE3;
1160 target_flags_explicit |= MASK_SSE3;
1162 return true;
1164 default:
1165 return true;
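/* Illustrative note (not part of the original source): the handler above
   implements the rule that disabling an ISA level also disables everything
   built on top of it.  For example, assuming the usual option spelling,

     gcc -msse2 -mno-sse ...

   should end up with MASK_SSE, MASK_SSE2 and MASK_SSE3 all clear, because the
   OPT_msse case clears the SSE2/SSE3 bits whenever -mno-sse is seen and
   records them in target_flags_explicit.  */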
1169 /* Sometimes certain combinations of command options do not make
1170 sense on a particular target machine. You can define a macro
1171 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1172 defined, is executed once just after all the command options have
1173 been parsed.
1175 Don't use this macro to turn on various extra optimizations for
1176 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1178 void
1179 override_options (void)
1181 int i;
1182 int ix86_tune_defaulted = 0;
1184 /* Comes from final.c -- no real reason to change it. */
1185 #define MAX_CODE_ALIGN 16
1187 static struct ptt
1189 const struct processor_costs *cost; /* Processor costs */
1190 const int target_enable; /* Target flags to enable. */
1191 const int target_disable; /* Target flags to disable. */
1192 const int align_loop; /* Default alignments. */
1193 const int align_loop_max_skip;
1194 const int align_jump;
1195 const int align_jump_max_skip;
1196 const int align_func;
1198 const processor_target_table[PROCESSOR_max] =
1200 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1201 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1202 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1203 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1204 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1205 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1206 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1207 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1208 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1211 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1212 static struct pta
1214 const char *const name; /* processor name or nickname. */
1215 const enum processor_type processor;
1216 const enum pta_flags
1218 PTA_SSE = 1,
1219 PTA_SSE2 = 2,
1220 PTA_SSE3 = 4,
1221 PTA_MMX = 8,
1222 PTA_PREFETCH_SSE = 16,
1223 PTA_3DNOW = 32,
1224 PTA_3DNOW_A = 64,
1225 PTA_64BIT = 128
1226 } flags;
1228 const processor_alias_table[] =
1230 {"i386", PROCESSOR_I386, 0},
1231 {"i486", PROCESSOR_I486, 0},
1232 {"i586", PROCESSOR_PENTIUM, 0},
1233 {"pentium", PROCESSOR_PENTIUM, 0},
1234 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1235 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1236 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1237 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1238 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1239 {"i686", PROCESSOR_PENTIUMPRO, 0},
1240 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1241 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1242 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1243 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1244 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1245 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1246 | PTA_MMX | PTA_PREFETCH_SSE},
1247 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1248 | PTA_MMX | PTA_PREFETCH_SSE},
1249 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1250 | PTA_MMX | PTA_PREFETCH_SSE},
1251 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1252 | PTA_MMX | PTA_PREFETCH_SSE},
1253 {"k6", PROCESSOR_K6, PTA_MMX},
1254 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1255 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1256 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1257 | PTA_3DNOW_A},
1258 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1259 | PTA_3DNOW | PTA_3DNOW_A},
1260 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1261 | PTA_3DNOW_A | PTA_SSE},
1262 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1263 | PTA_3DNOW_A | PTA_SSE},
1264 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1265 | PTA_3DNOW_A | PTA_SSE},
1266 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1267 | PTA_SSE | PTA_SSE2 },
1268 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1269 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1270 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1271 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1272 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1273 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1274 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1275 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1278 int const pta_size = ARRAY_SIZE (processor_alias_table);
1280 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1281 SUBTARGET_OVERRIDE_OPTIONS;
1282 #endif
1284 /* Set the default values for switches whose default depends on TARGET_64BIT
1285 in case they weren't overwritten by command line options. */
1286 if (TARGET_64BIT)
1288 if (flag_omit_frame_pointer == 2)
1289 flag_omit_frame_pointer = 1;
1290 if (flag_asynchronous_unwind_tables == 2)
1291 flag_asynchronous_unwind_tables = 1;
1292 if (flag_pcc_struct_return == 2)
1293 flag_pcc_struct_return = 0;
1295 else
1297 if (flag_omit_frame_pointer == 2)
1298 flag_omit_frame_pointer = 0;
1299 if (flag_asynchronous_unwind_tables == 2)
1300 flag_asynchronous_unwind_tables = 0;
1301 if (flag_pcc_struct_return == 2)
1302 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1305 if (!ix86_tune_string && ix86_arch_string)
1306 ix86_tune_string = ix86_arch_string;
1307 if (!ix86_tune_string)
1309 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1310 ix86_tune_defaulted = 1;
1312 if (!ix86_arch_string)
1313 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1315 if (ix86_cmodel_string != 0)
1317 if (!strcmp (ix86_cmodel_string, "small"))
1318 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1319 else if (!strcmp (ix86_cmodel_string, "medium"))
1320 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1321 else if (flag_pic)
1322 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1323 else if (!strcmp (ix86_cmodel_string, "32"))
1324 ix86_cmodel = CM_32;
1325 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1326 ix86_cmodel = CM_KERNEL;
1327 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1328 ix86_cmodel = CM_LARGE;
1329 else
1330 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1332 else
1334 ix86_cmodel = CM_32;
1335 if (TARGET_64BIT)
1336 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1338 if (ix86_asm_string != 0)
1340 if (!strcmp (ix86_asm_string, "intel"))
1341 ix86_asm_dialect = ASM_INTEL;
1342 else if (!strcmp (ix86_asm_string, "att"))
1343 ix86_asm_dialect = ASM_ATT;
1344 else
1345 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1347 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1348 error ("code model %qs not supported in the %s bit mode",
1349 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1350 if (ix86_cmodel == CM_LARGE)
1351 sorry ("code model %<large%> not supported yet");
1352 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1353 sorry ("%i-bit mode not compiled in",
1354 (target_flags & MASK_64BIT) ? 64 : 32);
1356 for (i = 0; i < pta_size; i++)
1357 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1359 ix86_arch = processor_alias_table[i].processor;
1360 /* Default cpu tuning to the architecture. */
1361 ix86_tune = ix86_arch;
1362 if (processor_alias_table[i].flags & PTA_MMX
1363 && !(target_flags_explicit & MASK_MMX))
1364 target_flags |= MASK_MMX;
1365 if (processor_alias_table[i].flags & PTA_3DNOW
1366 && !(target_flags_explicit & MASK_3DNOW))
1367 target_flags |= MASK_3DNOW;
1368 if (processor_alias_table[i].flags & PTA_3DNOW_A
1369 && !(target_flags_explicit & MASK_3DNOW_A))
1370 target_flags |= MASK_3DNOW_A;
1371 if (processor_alias_table[i].flags & PTA_SSE
1372 && !(target_flags_explicit & MASK_SSE))
1373 target_flags |= MASK_SSE;
1374 if (processor_alias_table[i].flags & PTA_SSE2
1375 && !(target_flags_explicit & MASK_SSE2))
1376 target_flags |= MASK_SSE2;
1377 if (processor_alias_table[i].flags & PTA_SSE3
1378 && !(target_flags_explicit & MASK_SSE3))
1379 target_flags |= MASK_SSE3;
1380 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1381 x86_prefetch_sse = true;
1382 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1383 error ("CPU you selected does not support x86-64 "
1384 "instruction set");
1385 break;
1388 if (i == pta_size)
1389 error ("bad value (%s) for -march= switch", ix86_arch_string);
1391 for (i = 0; i < pta_size; i++)
1392 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1394 ix86_tune = processor_alias_table[i].processor;
1395 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1397 if (ix86_tune_defaulted)
1399 ix86_tune_string = "x86-64";
1400 for (i = 0; i < pta_size; i++)
1401 if (! strcmp (ix86_tune_string,
1402 processor_alias_table[i].name))
1403 break;
1404 ix86_tune = processor_alias_table[i].processor;
1406 else
1407 error ("CPU you selected does not support x86-64 "
1408 "instruction set");
1410 /* Intel CPUs have always interpreted SSE prefetch instructions as
1411 NOPs; so, we can enable SSE prefetch instructions even when
1412 -mtune (rather than -march) points us to a processor that has them.
1413 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1414 higher processors. */
1415 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1416 x86_prefetch_sse = true;
1417 break;
1419 if (i == pta_size)
1420 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1422 if (optimize_size)
1423 ix86_cost = &size_cost;
1424 else
1425 ix86_cost = processor_target_table[ix86_tune].cost;
1426 target_flags |= processor_target_table[ix86_tune].target_enable;
1427 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1429 /* Arrange to set up i386_stack_locals for all functions. */
1430 init_machine_status = ix86_init_machine_status;
1432 /* Validate -mregparm= value. */
1433 if (ix86_regparm_string)
1435 i = atoi (ix86_regparm_string);
1436 if (i < 0 || i > REGPARM_MAX)
1437 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1438 else
1439 ix86_regparm = i;
1441 else
1442 if (TARGET_64BIT)
1443 ix86_regparm = REGPARM_MAX;
1445 /* If the user has provided any of the -malign-* options,
1446 warn and use that value only if -falign-* is not set.
1447 Remove this code in GCC 3.2 or later. */
1448 if (ix86_align_loops_string)
1450 warning (0, "-malign-loops is obsolete, use -falign-loops");
1451 if (align_loops == 0)
1453 i = atoi (ix86_align_loops_string);
1454 if (i < 0 || i > MAX_CODE_ALIGN)
1455 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1456 else
1457 align_loops = 1 << i;
1461 if (ix86_align_jumps_string)
1463 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
1464 if (align_jumps == 0)
1466 i = atoi (ix86_align_jumps_string);
1467 if (i < 0 || i > MAX_CODE_ALIGN)
1468 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1469 else
1470 align_jumps = 1 << i;
1474 if (ix86_align_funcs_string)
1476 warning (0, "-malign-functions is obsolete, use -falign-functions");
1477 if (align_functions == 0)
1479 i = atoi (ix86_align_funcs_string);
1480 if (i < 0 || i > MAX_CODE_ALIGN)
1481 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1482 else
1483 align_functions = 1 << i;
1487 /* Default align_* from the processor table. */
1488 if (align_loops == 0)
1490 align_loops = processor_target_table[ix86_tune].align_loop;
1491 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1493 if (align_jumps == 0)
1495 align_jumps = processor_target_table[ix86_tune].align_jump;
1496 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1498 if (align_functions == 0)
1500 align_functions = processor_target_table[ix86_tune].align_func;
1503 /* Validate -mpreferred-stack-boundary= value, or provide default.
1504 The default of 128 bits is for Pentium III's SSE __m128, but we
1505 don't want additional code to keep the stack aligned when
1506 optimizing for code size. */
1507 ix86_preferred_stack_boundary = (optimize_size
1508 ? TARGET_64BIT ? 128 : 32
1509 : 128);
1510 if (ix86_preferred_stack_boundary_string)
1512 i = atoi (ix86_preferred_stack_boundary_string);
1513 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1514 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1515 TARGET_64BIT ? 4 : 2);
1516 else
1517 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
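/* Worked example (illustrative, not part of the original source): with
   -mpreferred-stack-boundary=4 the range check above accepts i = 4 and the
   preferred boundary becomes (1 << 4) * BITS_PER_UNIT = 16 * 8 = 128 bits,
   i.e. a 16-byte aligned stack, matching the SSE __m128 default mentioned
   in the comment above.  */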
1520 /* Validate -mbranch-cost= value, or provide default. */
1521 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1522 if (ix86_branch_cost_string)
1524 i = atoi (ix86_branch_cost_string);
1525 if (i < 0 || i > 5)
1526 error ("-mbranch-cost=%d is not between 0 and 5", i);
1527 else
1528 ix86_branch_cost = i;
1530 if (ix86_section_threshold_string)
1532 i = atoi (ix86_section_threshold_string);
1533 if (i < 0)
1534 error ("-mlarge-data-threshold=%d is negative", i);
1535 else
1536 ix86_section_threshold = i;
1539 if (ix86_tls_dialect_string)
1541 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1542 ix86_tls_dialect = TLS_DIALECT_GNU;
1543 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1544 ix86_tls_dialect = TLS_DIALECT_SUN;
1545 else
1546 error ("bad value (%s) for -mtls-dialect= switch",
1547 ix86_tls_dialect_string);
1550 /* Keep nonleaf frame pointers. */
1551 if (flag_omit_frame_pointer)
1552 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
1553 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
1554 flag_omit_frame_pointer = 1;
1556 /* If we're doing fast math, we don't care about comparison order
1557 wrt NaNs. This lets us use a shorter comparison sequence. */
1558 if (flag_unsafe_math_optimizations)
1559 target_flags &= ~MASK_IEEE_FP;
1561 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1562 since the insns won't need emulation. */
1563 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1564 target_flags &= ~MASK_NO_FANCY_MATH_387;
1566 /* Likewise, if the target doesn't have a 387, or we've specified
1567 software floating point, don't use 387 inline intrinsics. */
1568 if (!TARGET_80387)
1569 target_flags |= MASK_NO_FANCY_MATH_387;
1571 /* Turn on SSE2 builtins for -msse3. */
1572 if (TARGET_SSE3)
1573 target_flags |= MASK_SSE2;
1575 /* Turn on SSE builtins for -msse2. */
1576 if (TARGET_SSE2)
1577 target_flags |= MASK_SSE;
1579 /* Turn on MMX builtins for -msse. */
1580 if (TARGET_SSE)
1582 target_flags |= MASK_MMX & ~target_flags_explicit;
1583 x86_prefetch_sse = true;
1586 /* Turn on MMX builtins for 3Dnow. */
1587 if (TARGET_3DNOW)
1588 target_flags |= MASK_MMX;
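/* Illustrative summary of the implication chain above (not part of the
   original source): -msse3 turns on SSE2, -msse2 turns on SSE, and -msse
   (like -m3dnow) turns on MMX, so plain -msse3 ends up with MASK_SSE2,
   MASK_SSE and MASK_MMX set as well (only the SSE-to-MMX step honours an
   explicit -mno-mmx via target_flags_explicit).  */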
1590 if (TARGET_64BIT)
1592 if (TARGET_ALIGN_DOUBLE)
1593 error ("-malign-double makes no sense in the 64bit mode");
1594 if (TARGET_RTD)
1595 error ("-mrtd calling convention not supported in the 64bit mode");
1597 /* Enable by default the SSE and MMX builtins. Do allow the user to
1598 explicitly disable any of these. In particular, disabling SSE and
1599 MMX for kernel code is extremely useful. */
1600 target_flags
1601 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
1602 & ~target_flags_explicit);
1604 else
1606 /* The i386 ABI does not specify a red zone. It still makes sense to use it
1607 when the programmer takes care to keep the stack from being destroyed. */
1608 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1609 target_flags |= MASK_NO_RED_ZONE;
1612 /* Accept -msseregparm only if at least SSE support is enabled. */
1613 if (TARGET_SSEREGPARM
1614 && ! TARGET_SSE)
1615 error ("-msseregparm used without SSE enabled");
1617 ix86_fpmath = TARGET_FPMATH_DEFAULT;
1619 if (ix86_fpmath_string != 0)
1621 if (! strcmp (ix86_fpmath_string, "387"))
1622 ix86_fpmath = FPMATH_387;
1623 else if (! strcmp (ix86_fpmath_string, "sse"))
1625 if (!TARGET_SSE)
1627 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1628 ix86_fpmath = FPMATH_387;
1630 else
1631 ix86_fpmath = FPMATH_SSE;
1633 else if (! strcmp (ix86_fpmath_string, "387,sse")
1634 || ! strcmp (ix86_fpmath_string, "sse,387"))
1636 if (!TARGET_SSE)
1638 warning (0, "SSE instruction set disabled, using 387 arithmetics");
1639 ix86_fpmath = FPMATH_387;
1641 else if (!TARGET_80387)
1643 warning (0, "387 instruction set disabled, using SSE arithmetics");
1644 ix86_fpmath = FPMATH_SSE;
1646 else
1647 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1649 else
1650 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1653 /* If the i387 is disabled, then do not return values in it. */
1654 if (!TARGET_80387)
1655 target_flags &= ~MASK_FLOAT_RETURNS;
1657 if ((x86_accumulate_outgoing_args & TUNEMASK)
1658 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1659 && !optimize_size)
1660 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1662 /* ??? Unwind info is not correct around the CFG unless either a frame
1663 pointer is present or M_A_O_A is set. Fixing this requires rewriting
1664 unwind info generation to be aware of the CFG and propagating states
1665 around edges. */
1666 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
1667 || flag_exceptions || flag_non_call_exceptions)
1668 && flag_omit_frame_pointer
1669 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
1671 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1672 warning (0, "unwind tables currently require either a frame pointer "
1673 "or -maccumulate-outgoing-args for correctness");
1674 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1677 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1679 char *p;
1680 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1681 p = strchr (internal_label_prefix, 'X');
1682 internal_label_prefix_len = p - internal_label_prefix;
1683 *p = '\0';
1686 /* When no scheduling description is available, disable the scheduler pass
1687 so that it won't slow down compilation and make x87 code slower. */
1688 if (!TARGET_SCHEDULE)
1689 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
1692 /* Switch to the appropriate section for output of DECL.
1693 DECL is either a `VAR_DECL' node or a constant of some sort.
1694 RELOC indicates whether forming the initial value of DECL requires
1695 link-time relocations. */
1697 static void
1698 x86_64_elf_select_section (tree decl, int reloc,
1699 unsigned HOST_WIDE_INT align)
1701 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1702 && ix86_in_large_data_p (decl))
1704 const char *sname = NULL;
1705 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1707 case SECCAT_DATA:
1708 sname = ".ldata";
1709 break;
1710 case SECCAT_DATA_REL:
1711 sname = ".ldata.rel";
1712 break;
1713 case SECCAT_DATA_REL_LOCAL:
1714 sname = ".ldata.rel.local";
1715 break;
1716 case SECCAT_DATA_REL_RO:
1717 sname = ".ldata.rel.ro";
1718 break;
1719 case SECCAT_DATA_REL_RO_LOCAL:
1720 sname = ".ldata.rel.ro.local";
1721 break;
1722 case SECCAT_BSS:
1723 sname = ".lbss";
1724 break;
1725 case SECCAT_RODATA:
1726 case SECCAT_RODATA_MERGE_STR:
1727 case SECCAT_RODATA_MERGE_STR_INIT:
1728 case SECCAT_RODATA_MERGE_CONST:
1729 sname = ".lrodata";
1730 break;
1731 case SECCAT_SRODATA:
1732 case SECCAT_SDATA:
1733 case SECCAT_SBSS:
1734 gcc_unreachable ();
1735 case SECCAT_TEXT:
1736 case SECCAT_TDATA:
1737 case SECCAT_TBSS:
1738 /* We don't split these for the medium model. Place them into
1739 default sections and hope for the best. */
1740 break;
1742 if (sname)
1744 named_section (decl, sname, reloc);
1745 return;
1748 default_elf_select_section (decl, reloc, align);
1751 /* Build up a unique section name, expressed as a
1752 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
1753 RELOC indicates whether the initial value of EXP requires
1754 link-time relocations. */
1756 static void
1757 x86_64_elf_unique_section (tree decl, int reloc)
1759 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1760 && ix86_in_large_data_p (decl))
1762 const char *prefix = NULL;
1763 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
1764 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
1766 switch (categorize_decl_for_section (decl, reloc, flag_pic))
1768 case SECCAT_DATA:
1769 case SECCAT_DATA_REL:
1770 case SECCAT_DATA_REL_LOCAL:
1771 case SECCAT_DATA_REL_RO:
1772 case SECCAT_DATA_REL_RO_LOCAL:
1773 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
1774 break;
1775 case SECCAT_BSS:
1776 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
1777 break;
1778 case SECCAT_RODATA:
1779 case SECCAT_RODATA_MERGE_STR:
1780 case SECCAT_RODATA_MERGE_STR_INIT:
1781 case SECCAT_RODATA_MERGE_CONST:
1782 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
1783 break;
1784 case SECCAT_SRODATA:
1785 case SECCAT_SDATA:
1786 case SECCAT_SBSS:
1787 gcc_unreachable ();
1788 case SECCAT_TEXT:
1789 case SECCAT_TDATA:
1790 case SECCAT_TBSS:
1791 /* We don't split these for the medium model. Place them into
1792 default sections and hope for the best. */
1793 break;
1795 if (prefix)
1797 const char *name;
1798 size_t nlen, plen;
1799 char *string;
1800 plen = strlen (prefix);
1802 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
1803 name = targetm.strip_name_encoding (name);
1804 nlen = strlen (name);
1806 string = alloca (nlen + plen + 1);
1807 memcpy (string, prefix, plen);
1808 memcpy (string + plen, name, nlen + 1);
1810 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
1811 return;
1814 default_unique_section (decl, reloc);
1817 #ifdef COMMON_ASM_OP
1818 /* This says how to output assembler code to declare an
1819 uninitialized external linkage data object.
1821 For the medium model on x86-64 we need to use the .largecomm directive for
1822 large objects. */
1823 void
1824 x86_elf_aligned_common (FILE *file,
1825 const char *name, unsigned HOST_WIDE_INT size,
1826 int align)
1828 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1829 && size > (unsigned int)ix86_section_threshold)
1830 fprintf (file, ".largecomm\t");
1831 else
1832 fprintf (file, "%s", COMMON_ASM_OP);
1833 assemble_name (file, name);
1834 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
1835 size, align / BITS_PER_UNIT);
1838 /* Utility function for targets to use in implementing
1839 ASM_OUTPUT_ALIGNED_BSS. */
1841 void
1842 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
1843 const char *name, unsigned HOST_WIDE_INT size,
1844 int align)
1846 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
1847 && size > (unsigned int)ix86_section_threshold)
1848 named_section (decl, ".lbss", 0);
1849 else
1850 bss_section ();
1851 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
1852 #ifdef ASM_DECLARE_OBJECT_NAME
1853 last_assemble_variable_decl = decl;
1854 ASM_DECLARE_OBJECT_NAME (file, name, decl);
1855 #else
1856 /* The standard thing is just to output a label for the object. */
1857 ASM_OUTPUT_LABEL (file, name);
1858 #endif /* ASM_DECLARE_OBJECT_NAME */
1859 ASM_OUTPUT_SKIP (file, size ? size : 1);
1861 #endif
1863 void
1864 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1866 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1867 make the problem with not enough registers even worse. */
1868 #ifdef INSN_SCHEDULING
1869 if (level > 1)
1870 flag_schedule_insns = 0;
1871 #endif
1873 if (TARGET_MACHO)
1874 /* The Darwin libraries never set errno, so we might as well
1875 avoid calling them when that's the only reason we would. */
1876 flag_errno_math = 0;
1878 /* The default values of these switches depend on TARGET_64BIT,
1879 which is not known at this moment. Mark these values with 2 and
1880 let the user override them. If there is no command-line option
1881 specifying them, we will set the defaults in override_options. */
1882 if (optimize >= 1)
1883 flag_omit_frame_pointer = 2;
1884 flag_pcc_struct_return = 2;
1885 flag_asynchronous_unwind_tables = 2;
1886 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
1887 SUBTARGET_OPTIMIZATION_OPTIONS;
1888 #endif
1891 /* Table of valid machine attributes. */
1892 const struct attribute_spec ix86_attribute_table[] =
1894 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1895 /* Stdcall attribute says callee is responsible for popping arguments
1896 if they are not variable. */
1897 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1898 /* Fastcall attribute says callee is responsible for popping arguments
1899 if they are not variable. */
1900 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1901 /* Cdecl attribute says the callee is a normal C declaration */
1902 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1903 /* Regparm attribute specifies how many integer arguments are to be
1904 passed in registers. */
1905 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
1906 /* Sseregparm attribute says we are using x86_64 calling conventions
1907 for FP arguments. */
1908 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
1909 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1910 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1911 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1912 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1913 #endif
1914 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1915 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1916 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1917 SUBTARGET_ATTRIBUTE_TABLE,
1918 #endif
1919 { NULL, 0, 0, false, false, false, NULL }
1922 /* Decide whether we can make a sibling call to a function. DECL is the
1923 declaration of the function being targeted by the call and EXP is the
1924 CALL_EXPR representing the call. */
1926 static bool
1927 ix86_function_ok_for_sibcall (tree decl, tree exp)
1929 tree func;
1930 rtx a, b;
1932 /* If we are generating position-independent code, we cannot sibcall
1933 optimize any indirect call, or a direct call to a global function,
1934 as the PLT requires %ebx be live. */
1935 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1936 return false;
1938 if (decl)
1939 func = decl;
1940 else
1942 func = TREE_TYPE (TREE_OPERAND (exp, 0));
1943 if (POINTER_TYPE_P (func))
1944 func = TREE_TYPE (func);
1947 /* Check that the return value locations are the same. For example,
1948 if we are returning floats on the 80387 register stack, we cannot
1949 make a sibcall from a function that doesn't return a float to a
1950 function that does or, conversely, from a function that does return
1951 a float to a function that doesn't; the necessary stack adjustment
1952 would not be executed. This is also the place we notice
1953 differences in the return value ABI. Note that it is ok for one
1954 of the functions to have void return type as long as the return
1955 value of the other is passed in a register. */
1956 a = ix86_function_value (TREE_TYPE (exp), func, false);
1957 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
1958 cfun->decl, false);
1959 if (STACK_REG_P (a) || STACK_REG_P (b))
1961 if (!rtx_equal_p (a, b))
1962 return false;
1964 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
1966 else if (!rtx_equal_p (a, b))
1967 return false;
1969 /* If this call is indirect, we'll need to be able to use a call-clobbered
1970 register for the address of the target function. Make sure that all
1971 such registers are not used for passing parameters. */
1972 if (!decl && !TARGET_64BIT)
1974 tree type;
1976 /* We're looking at the CALL_EXPR, we need the type of the function. */
1977 type = TREE_OPERAND (exp, 0); /* pointer expression */
1978 type = TREE_TYPE (type); /* pointer type */
1979 type = TREE_TYPE (type); /* function type */
1981 if (ix86_function_regparm (type, NULL) >= 3)
1983 /* ??? Need to count the actual number of registers to be used,
1984 not the possible number of registers. Fix later. */
1985 return false;
1989 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1990 /* Dllimport'd functions are also called indirectly. */
1991 if (decl && DECL_DLLIMPORT_P (decl)
1992 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
1993 return false;
1994 #endif
1996 /* If we force-aligned the stack, then sibcalling would unalign the
1997 stack, which may break the called function. */
1998 if (cfun->machine->force_align_arg_pointer)
1999 return false;
2001 /* Otherwise okay. That also includes certain types of indirect calls. */
2002 return true;
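/* Illustrative only (not part of the original source): on 32-bit PIC
   code the first check in this function rejects a sibcall to any global
   function, so for
     extern int g (int);
     int f (int x) { return g (x); }
   compiled with -fPIC -O2 the call to g keeps a normal call/return
   sequence rather than a tail jump, because calling through the PLT
   requires %ebx to stay live.  */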
2005 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2006 calling convention attributes;
2007 arguments as in struct attribute_spec.handler. */
2009 static tree
2010 ix86_handle_cconv_attribute (tree *node, tree name,
2011 tree args,
2012 int flags ATTRIBUTE_UNUSED,
2013 bool *no_add_attrs)
2015 if (TREE_CODE (*node) != FUNCTION_TYPE
2016 && TREE_CODE (*node) != METHOD_TYPE
2017 && TREE_CODE (*node) != FIELD_DECL
2018 && TREE_CODE (*node) != TYPE_DECL)
2020 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2021 IDENTIFIER_POINTER (name));
2022 *no_add_attrs = true;
2023 return NULL_TREE;
2026 /* Can combine regparm with all attributes but fastcall. */
2027 if (is_attribute_p ("regparm", name))
2029 tree cst;
2031 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2033 error ("fastcall and regparm attributes are not compatible");
2036 cst = TREE_VALUE (args);
2037 if (TREE_CODE (cst) != INTEGER_CST)
2039 warning (OPT_Wattributes,
2040 "%qs attribute requires an integer constant argument",
2041 IDENTIFIER_POINTER (name));
2042 *no_add_attrs = true;
2044 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2046 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2047 IDENTIFIER_POINTER (name), REGPARM_MAX);
2048 *no_add_attrs = true;
2051 return NULL_TREE;
2054 if (TARGET_64BIT)
2056 warning (OPT_Wattributes, "%qs attribute ignored",
2057 IDENTIFIER_POINTER (name));
2058 *no_add_attrs = true;
2059 return NULL_TREE;
2062 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2063 if (is_attribute_p ("fastcall", name))
2065 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2067 error ("fastcall and cdecl attributes are not compatible");
2069 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2071 error ("fastcall and stdcall attributes are not compatible");
2073 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2075 error ("fastcall and regparm attributes are not compatible");
2079 /* Can combine stdcall with fastcall (redundant), regparm and
2080 sseregparm. */
2081 else if (is_attribute_p ("stdcall", name))
2083 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2085 error ("stdcall and cdecl attributes are not compatible");
2087 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2089 error ("stdcall and fastcall attributes are not compatible");
2093 /* Can combine cdecl with regparm and sseregparm. */
2094 else if (is_attribute_p ("cdecl", name))
2096 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2098 error ("stdcall and cdecl attributes are not compatible");
2100 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2102 error ("fastcall and cdecl attributes are not compatible");
2106 /* Can combine sseregparm with all attributes. */
2108 return NULL_TREE;
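/* Illustrative usage only (not part of the original source): how the
   calling-convention attributes handled above appear in user code on a
   32-bit target.  Conflicting combinations such as fastcall together
   with regparm are rejected by the checks above.  */
#if 0 /* example only, never compiled */
int __attribute__((stdcall))    sum2  (int a, int b)        { return a + b; }
int __attribute__((fastcall))   sum2f (int a, int b)        { return a + b; }
int __attribute__((regparm(3))) sum3  (int a, int b, int c) { return a + b + c; }
#endif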
2111 /* Return 0 if the attributes for two types are incompatible, 1 if they
2112 are compatible, and 2 if they are nearly compatible (which causes a
2113 warning to be generated). */
2115 static int
2116 ix86_comp_type_attributes (tree type1, tree type2)
2118 /* Check for mismatch of non-default calling convention. */
2119 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2121 if (TREE_CODE (type1) != FUNCTION_TYPE)
2122 return 1;
2124 /* Check for mismatched fastcall/regparm types. */
2125 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2126 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2127 || (ix86_function_regparm (type1, NULL)
2128 != ix86_function_regparm (type2, NULL)))
2129 return 0;
2131 /* Check for mismatched sseregparm types. */
2132 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2133 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2134 return 0;
2136 /* Check for mismatched return types (cdecl vs stdcall). */
2137 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2138 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2139 return 0;
2141 return 1;
2144 /* Return the regparm value for a function with the indicated TYPE and DECL.
2145 DECL may be NULL when calling function indirectly
2146 or considering a libcall. */
2148 static int
2149 ix86_function_regparm (tree type, tree decl)
2151 tree attr;
2152 int regparm = ix86_regparm;
2153 bool user_convention = false;
2155 if (!TARGET_64BIT)
2157 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2158 if (attr)
2160 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2161 user_convention = true;
2164 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2166 regparm = 2;
2167 user_convention = true;
2170 /* Use register calling convention for local functions when possible. */
2171 if (!TARGET_64BIT && !user_convention && decl
2172 && flag_unit_at_a_time && !profile_flag)
2174 struct cgraph_local_info *i = cgraph_local_info (decl);
2175 if (i && i->local)
2177 int local_regparm, globals = 0, regno;
2179 /* Make sure no regparm register is taken by a global register
2180 variable. */
2181 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2182 if (global_regs[local_regparm])
2183 break;
2184 /* We can't use regparm(3) for nested functions as these use
2185 static chain pointer in third argument. */
2186 if (local_regparm == 3
2187 && decl_function_context (decl)
2188 && !DECL_NO_STATIC_CHAIN (decl))
2189 local_regparm = 2;
2190 /* Each global register variable increases register pressure,
2191 so the more global register variables there are, the less the
2192 regparm optimization is used, unless the user requests it explicitly. */
2193 for (regno = 0; regno < 6; regno++)
2194 if (global_regs[regno])
2195 globals++;
2196 local_regparm
2197 = globals < local_regparm ? local_regparm - globals : 0;
2199 if (local_regparm > regparm)
2200 regparm = local_regparm;
2204 return regparm;
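/* Illustrative only (not part of the original source): a file-local
   function compiled with -funit-at-a-time and without profiling, e.g.
     static int add3 (int a, int b, int c) { return a + b + c; }
   may be promoted to regparm(3) by the code above when the cgraph marks
   it local, so its arguments arrive in EAX, EDX and ECX instead of on
   the stack.  */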
2207 /* Return 1 or 2, if we can pass up to 8 SFmode (1) and DFmode (2) arguments
2208 in SSE registers for a function with the indicated TYPE and DECL.
2209 DECL may be NULL when calling function indirectly
2210 or considering a libcall. Otherwise return 0. */
2212 static int
2213 ix86_function_sseregparm (tree type, tree decl)
2215 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2216 by the sseregparm attribute. */
2217 if (TARGET_SSEREGPARM
2218 || (type
2219 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2221 if (!TARGET_SSE)
2223 if (decl)
2224 error ("Calling %qD with attribute sseregparm without "
2225 "SSE/SSE2 enabled", decl);
2226 else
2227 error ("Calling %qT with attribute sseregparm without "
2228 "SSE/SSE2 enabled", type);
2229 return 0;
2232 return 2;
2235 /* For local functions, pass SFmode (and DFmode for SSE2) arguments
2236 in SSE registers even for 32-bit mode and not just 3, but up to
2237 8 SSE arguments in registers. */
2238 if (!TARGET_64BIT && decl
2239 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2241 struct cgraph_local_info *i = cgraph_local_info (decl);
2242 if (i && i->local)
2243 return TARGET_SSE2 ? 2 : 1;
2246 return 0;
2249 /* Return true if EAX is live at the start of the function. Used by
2250 ix86_expand_prologue to determine if we need special help before
2251 calling allocate_stack_worker. */
2253 static bool
2254 ix86_eax_live_at_start_p (void)
2256 /* Cheat. Don't bother working forward from ix86_function_regparm
2257 to the function type to whether an actual argument is located in
2258 eax. Instead just look at cfg info, which is still close enough
2259 to correct at this point. This gives false positives for broken
2260 functions that might use uninitialized data that happens to be
2261 allocated in eax, but who cares? */
2262 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2265 /* Value is the number of bytes of arguments automatically
2266 popped when returning from a subroutine call.
2267 FUNDECL is the declaration node of the function (as a tree),
2268 FUNTYPE is the data type of the function (as a tree),
2269 or for a library call it is an identifier node for the subroutine name.
2270 SIZE is the number of bytes of arguments passed on the stack.
2272 On the 80386, the RTD insn may be used to pop them if the number
2273 of args is fixed, but if the number is variable then the caller
2274 must pop them all. RTD can't be used for library calls now
2275 because the library is compiled with the Unix compiler.
2276 Use of RTD is a selectable option, since it is incompatible with
2277 standard Unix calling sequences. If the option is not selected,
2278 the caller must always pop the args.
2280 The attribute stdcall is equivalent to RTD on a per module basis. */
2283 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2285 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2287 /* Cdecl functions override -mrtd, and never pop the stack. */
2288 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2290 /* Stdcall and fastcall functions will pop the stack if not
2291 variable args. */
2292 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2293 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2294 rtd = 1;
2296 if (rtd
2297 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2298 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2299 == void_type_node)))
2300 return size;
2303 /* Lose any fake structure return argument if it is passed on the stack. */
2304 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2305 && !TARGET_64BIT
2306 && !KEEP_AGGREGATE_RETURN_POINTER)
2308 int nregs = ix86_function_regparm (funtype, fundecl);
2310 if (!nregs)
2311 return GET_MODE_SIZE (Pmode);
2314 return 0;
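/* Illustrative only (not part of the original source): for a 32-bit
   stdcall function with a fixed argument list, e.g.
     int __attribute__((stdcall)) f (int a, int b);
   the routine above returns 8, so the callee pops its two 4-byte
   arguments (a "ret 8"), whereas a cdecl callee returns 0 and leaves
   the cleanup to the caller.  */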
2317 /* Argument support functions. */
2319 /* Return true when register may be used to pass function parameters. */
2320 bool
2321 ix86_function_arg_regno_p (int regno)
2323 int i;
2324 if (!TARGET_64BIT)
2325 return (regno < REGPARM_MAX
2326 || (TARGET_MMX && MMX_REGNO_P (regno)
2327 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2328 || (TARGET_SSE && SSE_REGNO_P (regno)
2329 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2331 if (TARGET_SSE && SSE_REGNO_P (regno)
2332 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2333 return true;
2334 /* RAX is used as hidden argument to va_arg functions. */
2335 if (!regno)
2336 return true;
2337 for (i = 0; i < REGPARM_MAX; i++)
2338 if (regno == x86_64_int_parameter_registers[i])
2339 return true;
2340 return false;
2343 /* Return if we do not know how to pass TYPE solely in registers. */
2345 static bool
2346 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2348 if (must_pass_in_stack_var_size_or_pad (mode, type))
2349 return true;
2351 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2352 The layout_type routine is crafty and tries to trick us into passing
2353 currently unsupported vector types on the stack by using TImode. */
2354 return (!TARGET_64BIT && mode == TImode
2355 && type && TREE_CODE (type) != VECTOR_TYPE);
2358 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2359 for a call to a function whose data type is FNTYPE.
2360 For a library call, FNTYPE is 0. */
2362 void
2363 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2364 tree fntype, /* tree ptr for function decl */
2365 rtx libname, /* SYMBOL_REF of library name or 0 */
2366 tree fndecl)
2368 static CUMULATIVE_ARGS zero_cum;
2369 tree param, next_param;
2371 if (TARGET_DEBUG_ARG)
2373 fprintf (stderr, "\ninit_cumulative_args (");
2374 if (fntype)
2375 fprintf (stderr, "fntype code = %s, ret code = %s",
2376 tree_code_name[(int) TREE_CODE (fntype)],
2377 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2378 else
2379 fprintf (stderr, "no fntype");
2381 if (libname)
2382 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2385 *cum = zero_cum;
2387 /* Set up the number of registers to use for passing arguments. */
2388 cum->nregs = ix86_regparm;
2389 if (TARGET_SSE)
2390 cum->sse_nregs = SSE_REGPARM_MAX;
2391 if (TARGET_MMX)
2392 cum->mmx_nregs = MMX_REGPARM_MAX;
2393 cum->warn_sse = true;
2394 cum->warn_mmx = true;
2395 cum->maybe_vaarg = false;
2397 /* Use ecx and edx registers if function has fastcall attribute,
2398 else look for regparm information. */
2399 if (fntype && !TARGET_64BIT)
2401 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2403 cum->nregs = 2;
2404 cum->fastcall = 1;
2406 else
2407 cum->nregs = ix86_function_regparm (fntype, fndecl);
2410 /* Set up the number of SSE registers used for passing SFmode
2411 and DFmode arguments. Warn for mismatching ABI. */
2412 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2414 /* Determine if this function has variable arguments. This is
2415 indicated by the last argument being 'void_type_node' if there
2416 are no variable arguments. If there are variable arguments, then
2417 we won't pass anything in registers in 32-bit mode. */
2419 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2421 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2422 param != 0; param = next_param)
2424 next_param = TREE_CHAIN (param);
2425 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
2427 if (!TARGET_64BIT)
2429 cum->nregs = 0;
2430 cum->sse_nregs = 0;
2431 cum->mmx_nregs = 0;
2432 cum->warn_sse = 0;
2433 cum->warn_mmx = 0;
2434 cum->fastcall = 0;
2435 cum->float_in_sse = 0;
2437 cum->maybe_vaarg = true;
2441 if ((!fntype && !libname)
2442 || (fntype && !TYPE_ARG_TYPES (fntype)))
2443 cum->maybe_vaarg = true;
2445 if (TARGET_DEBUG_ARG)
2446 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2448 return;
2451 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
2452 But in the case of vector types, it is some vector mode.
2454 When we have only some of our vector isa extensions enabled, then there
2455 are some modes for which vector_mode_supported_p is false. For these
2456 modes, the generic vector support in gcc will choose some non-vector mode
2457 in order to implement the type. By computing the natural mode, we'll
2458 select the proper ABI location for the operand and not depend on whatever
2459 the middle-end decides to do with these vector types. */
2461 static enum machine_mode
2462 type_natural_mode (tree type)
2464 enum machine_mode mode = TYPE_MODE (type);
2466 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
2468 HOST_WIDE_INT size = int_size_in_bytes (type);
2469 if ((size == 8 || size == 16)
2470 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
2471 && TYPE_VECTOR_SUBPARTS (type) > 1)
2473 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2475 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
2476 mode = MIN_MODE_VECTOR_FLOAT;
2477 else
2478 mode = MIN_MODE_VECTOR_INT;
2480 /* Get the mode which has this inner mode and number of units. */
2481 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
2482 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
2483 && GET_MODE_INNER (mode) == innermode)
2484 return mode;
2486 gcc_unreachable ();
2490 return mode;
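/* Illustrative only (not part of the original source): a user vector
   type such as
     typedef float v4sf __attribute__((vector_size (16)));
   is given its natural vector mode (V4SFmode) here even when -msse is
   not enabled and the middle end has therefore chosen some non-vector
   mode for it, so the ABI location of such an argument does not depend
   on the enabled ISA.  */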
2493 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
2494 this may not agree with the mode that the type system has chosen for the
2495 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
2496 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
2498 static rtx
2499 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
2500 unsigned int regno)
2502 rtx tmp;
2504 if (orig_mode != BLKmode)
2505 tmp = gen_rtx_REG (orig_mode, regno);
2506 else
2508 tmp = gen_rtx_REG (mode, regno);
2509 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
2510 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
2513 return tmp;
2516 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
2517 of this code is to classify each 8 bytes of the incoming argument by register
2518 class and assign registers accordingly. */
2520 /* Return the union class of CLASS1 and CLASS2.
2521 See the x86-64 PS ABI for details. */
2523 static enum x86_64_reg_class
2524 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2526 /* Rule #1: If both classes are equal, this is the resulting class. */
2527 if (class1 == class2)
2528 return class1;
2530 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2531 the other class. */
2532 if (class1 == X86_64_NO_CLASS)
2533 return class2;
2534 if (class2 == X86_64_NO_CLASS)
2535 return class1;
2537 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2538 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2539 return X86_64_MEMORY_CLASS;
2541 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2542 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2543 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2544 return X86_64_INTEGERSI_CLASS;
2545 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2546 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2547 return X86_64_INTEGER_CLASS;
2549 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
2550 MEMORY is used. */
2551 if (class1 == X86_64_X87_CLASS
2552 || class1 == X86_64_X87UP_CLASS
2553 || class1 == X86_64_COMPLEX_X87_CLASS
2554 || class2 == X86_64_X87_CLASS
2555 || class2 == X86_64_X87UP_CLASS
2556 || class2 == X86_64_COMPLEX_X87_CLASS)
2557 return X86_64_MEMORY_CLASS;
2559 /* Rule #6: Otherwise class SSE is used. */
2560 return X86_64_SSE_CLASS;
2563 /* Classify the argument of type TYPE and mode MODE.
2564 CLASSES will be filled by the register class used to pass each word
2565 of the operand. The number of words is returned. In case the parameter
2566 should be passed in memory, 0 is returned. As a special case for zero
2567 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2569 BIT_OFFSET is used internally for handling records and specifies the
2570 offset in bits modulo 256 to avoid overflow cases.
2572 See the x86-64 PS ABI for details.
2575 static int
2576 classify_argument (enum machine_mode mode, tree type,
2577 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2579 HOST_WIDE_INT bytes =
2580 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2581 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2583 /* Variable sized entities are always passed/returned in memory. */
2584 if (bytes < 0)
2585 return 0;
2587 if (mode != VOIDmode
2588 && targetm.calls.must_pass_in_stack (mode, type))
2589 return 0;
2591 if (type && AGGREGATE_TYPE_P (type))
2593 int i;
2594 tree field;
2595 enum x86_64_reg_class subclasses[MAX_CLASSES];
2597 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2598 if (bytes > 16)
2599 return 0;
2601 for (i = 0; i < words; i++)
2602 classes[i] = X86_64_NO_CLASS;
2604 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2605 signal the memory class, so handle it as a special case. */
2606 if (!words)
2608 classes[0] = X86_64_NO_CLASS;
2609 return 1;
2612 /* Classify each field of record and merge classes. */
2613 switch (TREE_CODE (type))
2615 case RECORD_TYPE:
2616 /* For classes first merge in the field of the subclasses. */
2617 if (TYPE_BINFO (type))
2619 tree binfo, base_binfo;
2620 int basenum;
2622 for (binfo = TYPE_BINFO (type), basenum = 0;
2623 BINFO_BASE_ITERATE (binfo, basenum, base_binfo); basenum++)
2625 int num;
2626 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2627 tree type = BINFO_TYPE (base_binfo);
2629 num = classify_argument (TYPE_MODE (type),
2630 type, subclasses,
2631 (offset + bit_offset) % 256);
2632 if (!num)
2633 return 0;
2634 for (i = 0; i < num; i++)
2636 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2637 classes[i + pos] =
2638 merge_classes (subclasses[i], classes[i + pos]);
2642 /* And now merge the fields of structure. */
2643 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2645 if (TREE_CODE (field) == FIELD_DECL)
2647 int num;
2649 /* Bitfields are always classified as integer. Handle them
2650 early, since later code would consider them to be
2651 misaligned integers. */
2652 if (DECL_BIT_FIELD (field))
2654 for (i = int_bit_position (field) / 8 / 8;
2655 i < (int_bit_position (field)
2656 + tree_low_cst (DECL_SIZE (field), 0)
2657 + 63) / 8 / 8; i++)
2658 classes[i] =
2659 merge_classes (X86_64_INTEGER_CLASS,
2660 classes[i]);
2662 else
2664 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2665 TREE_TYPE (field), subclasses,
2666 (int_bit_position (field)
2667 + bit_offset) % 256);
2668 if (!num)
2669 return 0;
2670 for (i = 0; i < num; i++)
2672 int pos =
2673 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2674 classes[i + pos] =
2675 merge_classes (subclasses[i], classes[i + pos]);
2680 break;
2682 case ARRAY_TYPE:
2683 /* Arrays are handled as small records. */
2685 int num;
2686 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2687 TREE_TYPE (type), subclasses, bit_offset);
2688 if (!num)
2689 return 0;
2691 /* The partial classes are now full classes. */
2692 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2693 subclasses[0] = X86_64_SSE_CLASS;
2694 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2695 subclasses[0] = X86_64_INTEGER_CLASS;
2697 for (i = 0; i < words; i++)
2698 classes[i] = subclasses[i % num];
2700 break;
2702 case UNION_TYPE:
2703 case QUAL_UNION_TYPE:
2704 /* Unions are similar to RECORD_TYPE but the offset is always 0. */
2707 /* Unions are not derived. */
2708 gcc_assert (!TYPE_BINFO (type)
2709 || !BINFO_N_BASE_BINFOS (TYPE_BINFO (type)));
2710 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2712 if (TREE_CODE (field) == FIELD_DECL)
2714 int num;
2715 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2716 TREE_TYPE (field), subclasses,
2717 bit_offset);
2718 if (!num)
2719 return 0;
2720 for (i = 0; i < num; i++)
2721 classes[i] = merge_classes (subclasses[i], classes[i]);
2724 break;
2726 default:
2727 gcc_unreachable ();
2730 /* Final merger cleanup. */
2731 for (i = 0; i < words; i++)
2733 /* If one class is MEMORY, everything should be passed in
2734 memory. */
2735 if (classes[i] == X86_64_MEMORY_CLASS)
2736 return 0;
2738 /* The X86_64_SSEUP_CLASS should be always preceded by
2739 X86_64_SSE_CLASS. */
2740 if (classes[i] == X86_64_SSEUP_CLASS
2741 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2742 classes[i] = X86_64_SSE_CLASS;
2744 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2745 if (classes[i] == X86_64_X87UP_CLASS
2746 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2747 classes[i] = X86_64_SSE_CLASS;
2749 return words;
2752 /* Compute alignment needed. We align all types to natural boundaries with
2753 the exception of XFmode, which is aligned to 64 bits. */
2754 if (mode != VOIDmode && mode != BLKmode)
2756 int mode_alignment = GET_MODE_BITSIZE (mode);
2758 if (mode == XFmode)
2759 mode_alignment = 128;
2760 else if (mode == XCmode)
2761 mode_alignment = 256;
2762 if (COMPLEX_MODE_P (mode))
2763 mode_alignment /= 2;
2764 /* Misaligned fields are always returned in memory. */
2765 if (bit_offset % mode_alignment)
2766 return 0;
2769 /* For V1xx modes, just use the base mode. */
2770 if (VECTOR_MODE_P (mode)
2771 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2772 mode = GET_MODE_INNER (mode);
2774 /* Classification of atomic types. */
2775 switch (mode)
2777 case DImode:
2778 case SImode:
2779 case HImode:
2780 case QImode:
2781 case CSImode:
2782 case CHImode:
2783 case CQImode:
2784 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2785 classes[0] = X86_64_INTEGERSI_CLASS;
2786 else
2787 classes[0] = X86_64_INTEGER_CLASS;
2788 return 1;
2789 case CDImode:
2790 case TImode:
2791 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2792 return 2;
2793 case CTImode:
2794 return 0;
2795 case SFmode:
2796 if (!(bit_offset % 64))
2797 classes[0] = X86_64_SSESF_CLASS;
2798 else
2799 classes[0] = X86_64_SSE_CLASS;
2800 return 1;
2801 case DFmode:
2802 classes[0] = X86_64_SSEDF_CLASS;
2803 return 1;
2804 case XFmode:
2805 classes[0] = X86_64_X87_CLASS;
2806 classes[1] = X86_64_X87UP_CLASS;
2807 return 2;
2808 case TFmode:
2809 classes[0] = X86_64_SSE_CLASS;
2810 classes[1] = X86_64_SSEUP_CLASS;
2811 return 2;
2812 case SCmode:
2813 classes[0] = X86_64_SSE_CLASS;
2814 return 1;
2815 case DCmode:
2816 classes[0] = X86_64_SSEDF_CLASS;
2817 classes[1] = X86_64_SSEDF_CLASS;
2818 return 2;
2819 case XCmode:
2820 classes[0] = X86_64_COMPLEX_X87_CLASS;
2821 return 1;
2822 case TCmode:
2823 /* This mode is larger than 16 bytes. */
2824 return 0;
2825 case V4SFmode:
2826 case V4SImode:
2827 case V16QImode:
2828 case V8HImode:
2829 case V2DFmode:
2830 case V2DImode:
2831 classes[0] = X86_64_SSE_CLASS;
2832 classes[1] = X86_64_SSEUP_CLASS;
2833 return 2;
2834 case V2SFmode:
2835 case V2SImode:
2836 case V4HImode:
2837 case V8QImode:
2838 classes[0] = X86_64_SSE_CLASS;
2839 return 1;
2840 case BLKmode:
2841 case VOIDmode:
2842 return 0;
2843 default:
2844 gcc_assert (VECTOR_MODE_P (mode));
2846 if (bytes > 16)
2847 return 0;
2849 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
2851 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2852 classes[0] = X86_64_INTEGERSI_CLASS;
2853 else
2854 classes[0] = X86_64_INTEGER_CLASS;
2855 classes[1] = X86_64_INTEGER_CLASS;
2856 return 1 + (bytes > 8);
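/* Worked example (illustrative, not part of the original source): for
     struct s { long l; double d; };
   the code above classifies the two 8-byte halves as
   { X86_64_INTEGER_CLASS, X86_64_SSEDF_CLASS }, so the struct is passed
   in one integer register and one SSE register (RDI and XMM0 when it is
   the first such argument), as the x86-64 psABI requires.  */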
2860 /* Examine the argument and return set number of register required in each
2861 class. Return 0 iff parameter should be passed in memory. */
2862 static int
2863 examine_argument (enum machine_mode mode, tree type, int in_return,
2864 int *int_nregs, int *sse_nregs)
2866 enum x86_64_reg_class class[MAX_CLASSES];
2867 int n = classify_argument (mode, type, class, 0);
2869 *int_nregs = 0;
2870 *sse_nregs = 0;
2871 if (!n)
2872 return 0;
2873 for (n--; n >= 0; n--)
2874 switch (class[n])
2876 case X86_64_INTEGER_CLASS:
2877 case X86_64_INTEGERSI_CLASS:
2878 (*int_nregs)++;
2879 break;
2880 case X86_64_SSE_CLASS:
2881 case X86_64_SSESF_CLASS:
2882 case X86_64_SSEDF_CLASS:
2883 (*sse_nregs)++;
2884 break;
2885 case X86_64_NO_CLASS:
2886 case X86_64_SSEUP_CLASS:
2887 break;
2888 case X86_64_X87_CLASS:
2889 case X86_64_X87UP_CLASS:
2890 if (!in_return)
2891 return 0;
2892 break;
2893 case X86_64_COMPLEX_X87_CLASS:
2894 return in_return ? 2 : 0;
2895 case X86_64_MEMORY_CLASS:
2896 gcc_unreachable ();
2898 return 1;
2901 /* Construct container for the argument used by GCC interface. See
2902 FUNCTION_ARG for the detailed description. */
2904 static rtx
2905 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
2906 tree type, int in_return, int nintregs, int nsseregs,
2907 const int *intreg, int sse_regno)
2909 enum machine_mode tmpmode;
2910 int bytes =
2911 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2912 enum x86_64_reg_class class[MAX_CLASSES];
2913 int n;
2914 int i;
2915 int nexps = 0;
2916 int needed_sseregs, needed_intregs;
2917 rtx exp[MAX_CLASSES];
2918 rtx ret;
2920 n = classify_argument (mode, type, class, 0);
2921 if (TARGET_DEBUG_ARG)
2923 if (!n)
2924 fprintf (stderr, "Memory class\n");
2925 else
2927 fprintf (stderr, "Classes:");
2928 for (i = 0; i < n; i++)
2930 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2932 fprintf (stderr, "\n");
2935 if (!n)
2936 return NULL;
2937 if (!examine_argument (mode, type, in_return, &needed_intregs,
2938 &needed_sseregs))
2939 return NULL;
2940 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2941 return NULL;
2943 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
2944 some less clueful developer tries to use floating-point anyway. */
2945 if (needed_sseregs && !TARGET_SSE)
2947 static bool issued_error;
2948 if (!issued_error)
2950 issued_error = true;
2951 if (in_return)
2952 error ("SSE register return with SSE disabled");
2953 else
2954 error ("SSE register argument with SSE disabled");
2956 return NULL;
2959 /* First construct simple cases. Avoid SCmode, since we want to use
2960 a single register to pass this type. */
2961 if (n == 1 && mode != SCmode)
2962 switch (class[0])
2964 case X86_64_INTEGER_CLASS:
2965 case X86_64_INTEGERSI_CLASS:
2966 return gen_rtx_REG (mode, intreg[0]);
2967 case X86_64_SSE_CLASS:
2968 case X86_64_SSESF_CLASS:
2969 case X86_64_SSEDF_CLASS:
2970 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
2971 case X86_64_X87_CLASS:
2972 case X86_64_COMPLEX_X87_CLASS:
2973 return gen_rtx_REG (mode, FIRST_STACK_REG);
2974 case X86_64_NO_CLASS:
2975 /* Zero sized array, struct or class. */
2976 return NULL;
2977 default:
2978 gcc_unreachable ();
2980 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2981 && mode != BLKmode)
2982 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2983 if (n == 2
2984 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2985 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2986 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2987 && class[1] == X86_64_INTEGER_CLASS
2988 && (mode == CDImode || mode == TImode || mode == TFmode)
2989 && intreg[0] + 1 == intreg[1])
2990 return gen_rtx_REG (mode, intreg[0]);
2992 /* Otherwise figure out the entries of the PARALLEL. */
2993 for (i = 0; i < n; i++)
2995 switch (class[i])
2997 case X86_64_NO_CLASS:
2998 break;
2999 case X86_64_INTEGER_CLASS:
3000 case X86_64_INTEGERSI_CLASS:
3001 /* Merge TImodes on aligned occasions here too. */
3002 if (i * 8 + 8 > bytes)
3003 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3004 else if (class[i] == X86_64_INTEGERSI_CLASS)
3005 tmpmode = SImode;
3006 else
3007 tmpmode = DImode;
3008 /* We've requested 24 bytes we don't have mode for. Use DImode. */
3009 if (tmpmode == BLKmode)
3010 tmpmode = DImode;
3011 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3012 gen_rtx_REG (tmpmode, *intreg),
3013 GEN_INT (i*8));
3014 intreg++;
3015 break;
3016 case X86_64_SSESF_CLASS:
3017 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3018 gen_rtx_REG (SFmode,
3019 SSE_REGNO (sse_regno)),
3020 GEN_INT (i*8));
3021 sse_regno++;
3022 break;
3023 case X86_64_SSEDF_CLASS:
3024 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3025 gen_rtx_REG (DFmode,
3026 SSE_REGNO (sse_regno)),
3027 GEN_INT (i*8));
3028 sse_regno++;
3029 break;
3030 case X86_64_SSE_CLASS:
3031 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3032 tmpmode = TImode;
3033 else
3034 tmpmode = DImode;
3035 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3036 gen_rtx_REG (tmpmode,
3037 SSE_REGNO (sse_regno)),
3038 GEN_INT (i*8));
3039 if (tmpmode == TImode)
3040 i++;
3041 sse_regno++;
3042 break;
3043 default:
3044 gcc_unreachable ();
3048 /* Empty aligned struct, union or class. */
3049 if (nexps == 0)
3050 return NULL;
3052 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3053 for (i = 0; i < nexps; i++)
3054 XVECEXP (ret, 0, i) = exp [i];
3055 return ret;
3058 /* Update the data in CUM to advance over an argument
3059 of mode MODE and data type TYPE.
3060 (TYPE is null for libcalls where that information may not be available.) */
3062 void
3063 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3064 tree type, int named)
3066 int bytes =
3067 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3068 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3070 if (type)
3071 mode = type_natural_mode (type);
3073 if (TARGET_DEBUG_ARG)
3074 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3075 "mode=%s, named=%d)\n\n",
3076 words, cum->words, cum->nregs, cum->sse_nregs,
3077 GET_MODE_NAME (mode), named);
3079 if (TARGET_64BIT)
3081 int int_nregs, sse_nregs;
3082 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3083 cum->words += words;
3084 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3086 cum->nregs -= int_nregs;
3087 cum->sse_nregs -= sse_nregs;
3088 cum->regno += int_nregs;
3089 cum->sse_regno += sse_nregs;
3091 else
3092 cum->words += words;
3094 else
3096 switch (mode)
3098 default:
3099 break;
3101 case BLKmode:
3102 if (bytes < 0)
3103 break;
3104 /* FALLTHRU */
3106 case DImode:
3107 case SImode:
3108 case HImode:
3109 case QImode:
3110 cum->words += words;
3111 cum->nregs -= words;
3112 cum->regno += words;
3114 if (cum->nregs <= 0)
3116 cum->nregs = 0;
3117 cum->regno = 0;
3119 break;
3121 case DFmode:
3122 if (cum->float_in_sse < 2)
3123 break;
3124 case SFmode:
3125 if (cum->float_in_sse < 1)
3126 break;
3127 /* FALLTHRU */
3129 case TImode:
3130 case V16QImode:
3131 case V8HImode:
3132 case V4SImode:
3133 case V2DImode:
3134 case V4SFmode:
3135 case V2DFmode:
3136 if (!type || !AGGREGATE_TYPE_P (type))
3138 cum->sse_words += words;
3139 cum->sse_nregs -= 1;
3140 cum->sse_regno += 1;
3141 if (cum->sse_nregs <= 0)
3143 cum->sse_nregs = 0;
3144 cum->sse_regno = 0;
3147 break;
3149 case V8QImode:
3150 case V4HImode:
3151 case V2SImode:
3152 case V2SFmode:
3153 if (!type || !AGGREGATE_TYPE_P (type))
3155 cum->mmx_words += words;
3156 cum->mmx_nregs -= 1;
3157 cum->mmx_regno += 1;
3158 if (cum->mmx_nregs <= 0)
3160 cum->mmx_nregs = 0;
3161 cum->mmx_regno = 0;
3164 break;
3169 /* Define where to put the arguments to a function.
3170 Value is zero to push the argument on the stack,
3171 or a hard register in which to store the argument.
3173 MODE is the argument's machine mode.
3174 TYPE is the data type of the argument (as a tree).
3175 This is null for libcalls where that information may
3176 not be available.
3177 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3178 the preceding args and about the function being called.
3179 NAMED is nonzero if this argument is a named parameter
3180 (otherwise it is an extra parameter matching an ellipsis). */
3183 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3184 tree type, int named)
3186 enum machine_mode mode = orig_mode;
3187 rtx ret = NULL_RTX;
3188 int bytes =
3189 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3190 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3191 static bool warnedsse, warnedmmx;
3193 /* To simplify the code below, represent vector types with a vector mode
3194 even if MMX/SSE are not active. */
3195 if (type && TREE_CODE (type) == VECTOR_TYPE)
3196 mode = type_natural_mode (type);
3198 /* Handle a hidden AL argument containing number of registers for varargs
3199 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
3200 any AL settings. */
3201 if (mode == VOIDmode)
3203 if (TARGET_64BIT)
3204 return GEN_INT (cum->maybe_vaarg
3205 ? (cum->sse_nregs < 0
3206 ? SSE_REGPARM_MAX
3207 : cum->sse_regno)
3208 : -1);
3209 else
3210 return constm1_rtx;
3212 if (TARGET_64BIT)
3213 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3214 cum->sse_nregs,
3215 &x86_64_int_parameter_registers [cum->regno],
3216 cum->sse_regno);
3217 else
3218 switch (mode)
3220 /* For now, pass fp/complex values on the stack. */
3221 default:
3222 break;
3224 case BLKmode:
3225 if (bytes < 0)
3226 break;
3227 /* FALLTHRU */
3228 case DImode:
3229 case SImode:
3230 case HImode:
3231 case QImode:
3232 if (words <= cum->nregs)
3234 int regno = cum->regno;
3236 /* Fastcall allocates the first two DWORD (SImode) or
3237 smaller arguments to ECX and EDX. */
3238 if (cum->fastcall)
3240 if (mode == BLKmode || mode == DImode)
3241 break;
3243 /* ECX not EAX is the first allocated register. */
3244 if (regno == 0)
3245 regno = 2;
3247 ret = gen_rtx_REG (mode, regno);
3249 break;
3250 case DFmode:
3251 if (cum->float_in_sse < 2)
3252 break;
3253 case SFmode:
3254 if (cum->float_in_sse < 1)
3255 break;
3256 /* FALLTHRU */
3257 case TImode:
3258 case V16QImode:
3259 case V8HImode:
3260 case V4SImode:
3261 case V2DImode:
3262 case V4SFmode:
3263 case V2DFmode:
3264 if (!type || !AGGREGATE_TYPE_P (type))
3266 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3268 warnedsse = true;
3269 warning (0, "SSE vector argument without SSE enabled "
3270 "changes the ABI");
3272 if (cum->sse_nregs)
3273 ret = gen_reg_or_parallel (mode, orig_mode,
3274 cum->sse_regno + FIRST_SSE_REG);
3276 break;
3277 case V8QImode:
3278 case V4HImode:
3279 case V2SImode:
3280 case V2SFmode:
3281 if (!type || !AGGREGATE_TYPE_P (type))
3283 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3285 warnedmmx = true;
3286 warning (0, "MMX vector argument without MMX enabled "
3287 "changes the ABI");
3289 if (cum->mmx_nregs)
3290 ret = gen_reg_or_parallel (mode, orig_mode,
3291 cum->mmx_regno + FIRST_MMX_REG);
3293 break;
3296 if (TARGET_DEBUG_ARG)
3298 fprintf (stderr,
3299 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3300 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3302 if (ret)
3303 print_simple_rtl (stderr, ret);
3304 else
3305 fprintf (stderr, ", stack");
3307 fprintf (stderr, " )\n");
3310 return ret;
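/* Illustrative only (not part of the original source): with the 32-bit
   fastcall handling above,
     void __attribute__((fastcall)) f (int a, int b, int c);
   receives A in ECX and B in EDX (register number 0 is remapped to ECX),
   while C is still passed on the stack once the two register slots are
   used.  */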
3313 /* A C expression that indicates when an argument must be passed by
3314 reference. If nonzero for an argument, a copy of that argument is
3315 made in memory and a pointer to the argument is passed instead of
3316 the argument itself. The pointer is passed in whatever way is
3317 appropriate for passing a pointer to that type. */
3319 static bool
3320 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3321 enum machine_mode mode ATTRIBUTE_UNUSED,
3322 tree type, bool named ATTRIBUTE_UNUSED)
3324 if (!TARGET_64BIT)
3325 return 0;
3327 if (type && int_size_in_bytes (type) == -1)
3329 if (TARGET_DEBUG_ARG)
3330 fprintf (stderr, "function_arg_pass_by_reference\n");
3331 return 1;
3334 return 0;
3337 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3338 ABI. Only called if TARGET_SSE. */
3339 static bool
3340 contains_128bit_aligned_vector_p (tree type)
3342 enum machine_mode mode = TYPE_MODE (type);
3343 if (SSE_REG_MODE_P (mode)
3344 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3345 return true;
3346 if (TYPE_ALIGN (type) < 128)
3347 return false;
3349 if (AGGREGATE_TYPE_P (type))
3351 /* Walk the aggregates recursively. */
3352 switch (TREE_CODE (type))
3354 case RECORD_TYPE:
3355 case UNION_TYPE:
3356 case QUAL_UNION_TYPE:
3358 tree field;
3360 if (TYPE_BINFO (type))
3362 tree binfo, base_binfo;
3363 int i;
3365 for (binfo = TYPE_BINFO (type), i = 0;
3366 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
3367 if (contains_128bit_aligned_vector_p
3368 (BINFO_TYPE (base_binfo)))
3369 return true;
3371 /* And now merge the fields of structure. */
3372 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3374 if (TREE_CODE (field) == FIELD_DECL
3375 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3376 return true;
3378 break;
3381 case ARRAY_TYPE:
3382 /* Just for use if some languages pass arrays by value. */
3383 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3384 return true;
3385 break;
3387 default:
3388 gcc_unreachable ();
3391 return false;
3394 /* Gives the alignment boundary, in bits, of an argument with the
3395 specified mode and type. */
3398 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3400 int align;
3401 if (type)
3402 align = TYPE_ALIGN (type);
3403 else
3404 align = GET_MODE_ALIGNMENT (mode);
3405 if (align < PARM_BOUNDARY)
3406 align = PARM_BOUNDARY;
3407 if (!TARGET_64BIT)
3409 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3410 make an exception for SSE modes since these require 128bit
3411 alignment.
3413 The handling here differs from field_alignment. ICC aligns MMX
3414 arguments to 4 byte boundaries, while structure fields are aligned
3415 to 8 byte boundaries. */
3416 if (!TARGET_SSE)
3417 align = PARM_BOUNDARY;
3418 else if (!type)
3420 if (!SSE_REG_MODE_P (mode))
3421 align = PARM_BOUNDARY;
3423 else
3425 if (!contains_128bit_aligned_vector_p (type))
3426 align = PARM_BOUNDARY;
3429 if (align > 128)
3430 align = 128;
3431 return align;
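/* Worked example (illustrative, not part of the original source): on a
   32-bit target with -msse, an __m128 argument has a 128-bit TYPE_ALIGN,
   so the code above keeps the 128-bit boundary; a plain int stays at
   PARM_BOUNDARY (32 bits); and nothing is ever aligned beyond 128 bits
   here.  */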
3434 /* Return true if N is a possible register number of function value. */
3435 bool
3436 ix86_function_value_regno_p (int regno)
3438 if (regno == 0
3439 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
3440 || (regno == FIRST_SSE_REG && TARGET_SSE))
3441 return true;
3443 if (!TARGET_64BIT
3444 && (regno == FIRST_MMX_REG && TARGET_MMX))
3445 return true;
3447 return false;
3450 /* Define how to find the value returned by a function.
3451 VALTYPE is the data type of the value (as a tree).
3452 If the precise function being called is known, FUNC is its FUNCTION_DECL;
3453 otherwise, FUNC is 0. */
3455 ix86_function_value (tree valtype, tree fntype_or_decl,
3456 bool outgoing ATTRIBUTE_UNUSED)
3458 enum machine_mode natmode = type_natural_mode (valtype);
3460 if (TARGET_64BIT)
3462 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
3463 1, REGPARM_MAX, SSE_REGPARM_MAX,
3464 x86_64_int_return_registers, 0);
3465 /* For zero sized structures, construct_container returns NULL, but we
3466 need to keep the rest of the compiler happy by returning a meaningful value.  */
3467 if (!ret)
3468 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
3469 return ret;
3471 else
3473 tree fn = NULL_TREE, fntype;
3474 if (fntype_or_decl
3475 && DECL_P (fntype_or_decl))
3476 fn = fntype_or_decl;
3477 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
3478 return gen_rtx_REG (TYPE_MODE (valtype),
3479 ix86_value_regno (natmode, fn, fntype));
3483 /* Return true iff TYPE is returned in memory.  */
3485 ix86_return_in_memory (tree type)
3487 int needed_intregs, needed_sseregs, size;
3488 enum machine_mode mode = type_natural_mode (type);
3490 if (TARGET_64BIT)
3491 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
3493 if (mode == BLKmode)
3494 return 1;
3496 size = int_size_in_bytes (type);
3498 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
3499 return 0;
3501 if (VECTOR_MODE_P (mode) || mode == TImode)
3503 /* User-created vectors small enough to fit in EAX. */
3504 if (size < 8)
3505 return 0;
3507 /* MMX/3dNow values are returned in MM0,
3508 except when it doesn't exist.  */
3509 if (size == 8)
3510 return (TARGET_MMX ? 0 : 1);
3512 /* SSE values are returned in XMM0, except when it doesn't exist. */
3513 if (size == 16)
3514 return (TARGET_SSE ? 0 : 1);
3517 if (mode == XFmode)
3518 return 0;
3520 if (size > 12)
3521 return 1;
3522 return 0;
3525 /* When returning SSE vector types, we have a choice of either
3526 (1) being ABI incompatible with a -march switch, or
3527 (2) generating an error.
3528 Given no good solution, I think the safest thing is one warning.
3529 The user won't be able to use -Werror, but....
3531 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
3532 called in response to actually generating a caller or callee that
3533 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3534 via aggregate_value_p for general type probing from tree-ssa. */
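/* As an illustrative example (hypothetical command line): compiling a
   function that returns a __m128 value with -mno-sse triggers the
   "SSE vector return without SSE enabled changes the ABI" warning below,
   once per translation unit; an 8-byte vector return without MMX enabled
   triggers the MMX variant in the same way.  */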
3536 static rtx
3537 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3539 static bool warnedsse, warnedmmx;
3541 if (type)
3543 /* Look at the return type of the function, not the function type. */
3544 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3546 if (!TARGET_SSE && !warnedsse)
3548 if (mode == TImode
3549 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3551 warnedsse = true;
3552 warning (0, "SSE vector return without SSE enabled "
3553 "changes the ABI");
3557 if (!TARGET_MMX && !warnedmmx)
3559 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
3561 warnedmmx = true;
3562 warning (0, "MMX vector return without MMX enabled "
3563 "changes the ABI");
3568 return NULL;
3571 /* Define how to find the value returned by a library function
3572 assuming the value has mode MODE. */
3574 ix86_libcall_value (enum machine_mode mode)
3576 if (TARGET_64BIT)
3578 switch (mode)
3580 case SFmode:
3581 case SCmode:
3582 case DFmode:
3583 case DCmode:
3584 case TFmode:
3585 return gen_rtx_REG (mode, FIRST_SSE_REG);
3586 case XFmode:
3587 case XCmode:
3588 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3589 case TCmode:
3590 return NULL;
3591 default:
3592 return gen_rtx_REG (mode, 0);
3595 else
3596 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
3599 /* Given a mode, return the register to use for a return value. */
3601 static int
3602 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
3604 gcc_assert (!TARGET_64BIT);
3606 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
3607 we prevent this case when mmx is not available. */
3608 if ((VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8))
3609 return FIRST_MMX_REG;
3611 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3612 we prevent this case when sse is not available. */
3613 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3614 return FIRST_SSE_REG;
3616 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
3617 if (GET_MODE_CLASS (mode) != MODE_FLOAT || !TARGET_FLOAT_RETURNS_IN_80387)
3618 return 0;
3620 /* Floating point return values in %st(0), except for local functions when
3621 SSE math is enabled or for functions with sseregparm attribute. */
3622 if ((func || fntype)
3623 && (mode == SFmode || mode == DFmode))
3625 int sse_level = ix86_function_sseregparm (fntype, func);
3626 if ((sse_level >= 1 && mode == SFmode)
3627 || (sse_level == 2 && mode == DFmode))
3628 return FIRST_SSE_REG;
3631 return FIRST_FLOAT_REG;
3634 /* Create the va_list data type. */
3636 static tree
3637 ix86_build_builtin_va_list (void)
3639 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3641 /* For i386 we use plain pointer to argument area. */
3642 if (!TARGET_64BIT)
3643 return build_pointer_type (char_type_node);
3645 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3646 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3648 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3649 unsigned_type_node);
3650 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3651 unsigned_type_node);
3652 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3653 ptr_type_node);
3654 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3655 ptr_type_node);
3657 va_list_gpr_counter_field = f_gpr;
3658 va_list_fpr_counter_field = f_fpr;
3660 DECL_FIELD_CONTEXT (f_gpr) = record;
3661 DECL_FIELD_CONTEXT (f_fpr) = record;
3662 DECL_FIELD_CONTEXT (f_ovf) = record;
3663 DECL_FIELD_CONTEXT (f_sav) = record;
3665 TREE_CHAIN (record) = type_decl;
3666 TYPE_NAME (record) = type_decl;
3667 TYPE_FIELDS (record) = f_gpr;
3668 TREE_CHAIN (f_gpr) = f_fpr;
3669 TREE_CHAIN (f_fpr) = f_ovf;
3670 TREE_CHAIN (f_ovf) = f_sav;
3672 layout_type (record);
3674 /* The correct type is an array type of one element. */
3675 return build_array_type (record, build_index_type (size_zero_node));
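/* The record built above corresponds roughly to the x86-64 psABI
   declaration (sketch for reference only):
       typedef struct {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } __builtin_va_list[1];  */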
3678 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3680 static void
3681 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3682 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3683 int no_rtl)
3685 CUMULATIVE_ARGS next_cum;
3686 rtx save_area = NULL_RTX, mem;
3687 rtx label;
3688 rtx label_ref;
3689 rtx tmp_reg;
3690 rtx nsse_reg;
3691 int set;
3692 tree fntype;
3693 int stdarg_p;
3694 int i;
3696 if (!TARGET_64BIT)
3697 return;
3699 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
3700 return;
3702 /* Indicate that we need to allocate space on the stack for the varargs save area.  */
3703 ix86_save_varrargs_registers = 1;
3705 cfun->stack_alignment_needed = 128;
3707 fntype = TREE_TYPE (current_function_decl);
3708 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3709 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3710 != void_type_node));
3712 /* For varargs, we do not want to skip the dummy va_dcl argument.
3713 For stdargs, we do want to skip the last named argument. */
3714 next_cum = *cum;
3715 if (stdarg_p)
3716 function_arg_advance (&next_cum, mode, type, 1);
3718 if (!no_rtl)
3719 save_area = frame_pointer_rtx;
3721 set = get_varargs_alias_set ();
3723 for (i = next_cum.regno;
3724 i < ix86_regparm
3725 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
3726 i++)
3728 mem = gen_rtx_MEM (Pmode,
3729 plus_constant (save_area, i * UNITS_PER_WORD));
3730 MEM_NOTRAP_P (mem) = 1;
3731 set_mem_alias_set (mem, set);
3732 emit_move_insn (mem, gen_rtx_REG (Pmode,
3733 x86_64_int_parameter_registers[i]));
3736 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
3738 /* Now emit code to save SSE registers.  The AX parameter contains the
3739 number of SSE parameter registers used to call this function.  We use
3740 the sse_prologue_save insn template, which produces a computed jump across
3741 the SSE saves.  We need some preparation work to get this working.  */
3743 label = gen_label_rtx ();
3744 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3746 /* Compute the address to jump to:
3747 label - eax*4 + nnamed_sse_arguments*4.  */
3748 tmp_reg = gen_reg_rtx (Pmode);
3749 nsse_reg = gen_reg_rtx (Pmode);
3750 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3751 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3752 gen_rtx_MULT (Pmode, nsse_reg,
3753 GEN_INT (4))));
3754 if (next_cum.sse_regno)
3755 emit_move_insn
3756 (nsse_reg,
3757 gen_rtx_CONST (DImode,
3758 gen_rtx_PLUS (DImode,
3759 label_ref,
3760 GEN_INT (next_cum.sse_regno * 4))));
3761 else
3762 emit_move_insn (nsse_reg, label_ref);
3763 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3765 /* Compute the address of the memory block we save into.  We always use a
3766 pointer pointing 127 bytes after the first byte to store - this is needed
3767 to keep instruction sizes limited to 4 bytes (the offsets fit in a signed
8-bit displacement).  */
3768 tmp_reg = gen_reg_rtx (Pmode);
3769 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3770 plus_constant (save_area,
3771 8 * REGPARM_MAX + 127)));
3772 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3773 MEM_NOTRAP_P (mem) = 1;
3774 set_mem_alias_set (mem, set);
3775 set_mem_align (mem, BITS_PER_WORD);
3777 /* And finally do the dirty job! */
3778 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3779 GEN_INT (next_cum.sse_regno), label));
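/* A sketch of the register save area laid out above (assuming the usual
   64-bit values REGPARM_MAX == 6 and SSE_REGPARM_MAX == 8):
       bytes   0 ..  47 : %rdi, %rsi, %rdx, %rcx, %r8, %r9   (8 bytes each)
       bytes  48 .. 175 : %xmm0 .. %xmm7                     (16 bytes each)
   The gp_offset and fp_offset fields of the va_list index into this block.  */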
3784 /* Implement va_start. */
3786 void
3787 ix86_va_start (tree valist, rtx nextarg)
3789 HOST_WIDE_INT words, n_gpr, n_fpr;
3790 tree f_gpr, f_fpr, f_ovf, f_sav;
3791 tree gpr, fpr, ovf, sav, t;
3793 /* Only 64bit target needs something special. */
3794 if (!TARGET_64BIT)
3796 std_expand_builtin_va_start (valist, nextarg);
3797 return;
3800 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3801 f_fpr = TREE_CHAIN (f_gpr);
3802 f_ovf = TREE_CHAIN (f_fpr);
3803 f_sav = TREE_CHAIN (f_ovf);
3805 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3806 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3807 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3808 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3809 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3811 /* Count number of gp and fp argument registers used. */
3812 words = current_function_args_info.words;
3813 n_gpr = current_function_args_info.regno;
3814 n_fpr = current_function_args_info.sse_regno;
3816 if (TARGET_DEBUG_ARG)
3817 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3818 (int) words, (int) n_gpr, (int) n_fpr);
3820 if (cfun->va_list_gpr_size)
3822 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3823 build_int_cst (NULL_TREE, n_gpr * 8));
3824 TREE_SIDE_EFFECTS (t) = 1;
3825 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3828 if (cfun->va_list_fpr_size)
3830 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3831 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3832 TREE_SIDE_EFFECTS (t) = 1;
3833 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3836 /* Find the overflow area. */
3837 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3838 if (words != 0)
3839 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3840 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3841 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3842 TREE_SIDE_EFFECTS (t) = 1;
3843 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3845 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
3847 /* Find the register save area.
3848 The prologue of the function saves it right above the stack frame.  */
3849 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3850 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3851 TREE_SIDE_EFFECTS (t) = 1;
3852 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
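/* Illustrative example: for "int f (int x, ...)" the single named argument
   consumes one GP register, so the code above sets gp_offset = 8 and
   fp_offset = 8 * REGPARM_MAX (i.e. 48), points overflow_arg_area at the
   first stack-passed argument, and points reg_save_area at the block the
   prologue saved.  */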
3856 /* Implement va_arg. */
3858 tree
3859 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3861 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3862 tree f_gpr, f_fpr, f_ovf, f_sav;
3863 tree gpr, fpr, ovf, sav, t;
3864 int size, rsize;
3865 tree lab_false, lab_over = NULL_TREE;
3866 tree addr, t2;
3867 rtx container;
3868 int indirect_p = 0;
3869 tree ptrtype;
3870 enum machine_mode nat_mode;
3872 /* Only 64bit target needs something special. */
3873 if (!TARGET_64BIT)
3874 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3876 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3877 f_fpr = TREE_CHAIN (f_gpr);
3878 f_ovf = TREE_CHAIN (f_fpr);
3879 f_sav = TREE_CHAIN (f_ovf);
3881 valist = build_va_arg_indirect_ref (valist);
3882 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3883 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3884 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3885 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3887 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3888 if (indirect_p)
3889 type = build_pointer_type (type);
3890 size = int_size_in_bytes (type);
3891 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3893 nat_mode = type_natural_mode (type);
3894 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
3895 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3897 /* Pull the value out of the saved registers. */
3899 addr = create_tmp_var (ptr_type_node, "addr");
3900 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3902 if (container)
3904 int needed_intregs, needed_sseregs;
3905 bool need_temp;
3906 tree int_addr, sse_addr;
3908 lab_false = create_artificial_label ();
3909 lab_over = create_artificial_label ();
3911 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
3913 need_temp = (!REG_P (container)
3914 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3915 || TYPE_ALIGN (type) > 128));
3917 /* In case we are passing a structure, verify that it is a consecutive block
3918 in the register save area.  If not, we need to do moves.  */
3919 if (!need_temp && !REG_P (container))
3921 /* Verify that all registers are strictly consecutive */
3922 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3924 int i;
3926 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3928 rtx slot = XVECEXP (container, 0, i);
3929 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3930 || INTVAL (XEXP (slot, 1)) != i * 16)
3931 need_temp = 1;
3934 else
3936 int i;
3938 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3940 rtx slot = XVECEXP (container, 0, i);
3941 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3942 || INTVAL (XEXP (slot, 1)) != i * 8)
3943 need_temp = 1;
3947 if (!need_temp)
3949 int_addr = addr;
3950 sse_addr = addr;
3952 else
3954 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3955 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3956 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3957 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3960 /* First ensure that we fit completely in registers. */
3961 if (needed_intregs)
3963 t = build_int_cst (TREE_TYPE (gpr),
3964 (REGPARM_MAX - needed_intregs + 1) * 8);
3965 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3966 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3967 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3968 gimplify_and_add (t, pre_p);
3970 if (needed_sseregs)
3972 t = build_int_cst (TREE_TYPE (fpr),
3973 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3974 + REGPARM_MAX * 8);
3975 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3976 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3977 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3978 gimplify_and_add (t, pre_p);
3981 /* Compute index to start of area used for integer regs. */
3982 if (needed_intregs)
3984 /* int_addr = gpr + sav; */
3985 t = fold_convert (ptr_type_node, gpr);
3986 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3987 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3988 gimplify_and_add (t, pre_p);
3990 if (needed_sseregs)
3992 /* sse_addr = fpr + sav; */
3993 t = fold_convert (ptr_type_node, fpr);
3994 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
3995 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3996 gimplify_and_add (t, pre_p);
3998 if (need_temp)
4000 int i;
4001 tree temp = create_tmp_var (type, "va_arg_tmp");
4003 /* addr = &temp; */
4004 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4005 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
4006 gimplify_and_add (t, pre_p);
4008 for (i = 0; i < XVECLEN (container, 0); i++)
4010 rtx slot = XVECEXP (container, 0, i);
4011 rtx reg = XEXP (slot, 0);
4012 enum machine_mode mode = GET_MODE (reg);
4013 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4014 tree addr_type = build_pointer_type (piece_type);
4015 tree src_addr, src;
4016 int src_offset;
4017 tree dest_addr, dest;
4019 if (SSE_REGNO_P (REGNO (reg)))
4021 src_addr = sse_addr;
4022 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4024 else
4026 src_addr = int_addr;
4027 src_offset = REGNO (reg) * 8;
4029 src_addr = fold_convert (addr_type, src_addr);
4030 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4031 size_int (src_offset)));
4032 src = build_va_arg_indirect_ref (src_addr);
4034 dest_addr = fold_convert (addr_type, addr);
4035 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4036 size_int (INTVAL (XEXP (slot, 1)))));
4037 dest = build_va_arg_indirect_ref (dest_addr);
4039 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
4040 gimplify_and_add (t, pre_p);
4044 if (needed_intregs)
4046 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4047 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4048 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
4049 gimplify_and_add (t, pre_p);
4051 if (needed_sseregs)
4053 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4054 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4055 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
4056 gimplify_and_add (t, pre_p);
4059 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4060 gimplify_and_add (t, pre_p);
4062 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4063 append_to_statement_list (t, pre_p);
4066 /* ... otherwise out of the overflow area. */
4068 /* Care for on-stack alignment if needed. */
4069 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
4070 t = ovf;
4071 else
4073 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4074 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4075 build_int_cst (TREE_TYPE (ovf), align - 1));
4076 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
4077 build_int_cst (TREE_TYPE (t), -align));
4079 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4081 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
4082 gimplify_and_add (t2, pre_p);
4084 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4085 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4086 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
4087 gimplify_and_add (t, pre_p);
4089 if (container)
4091 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4092 append_to_statement_list (t, pre_p);
4095 ptrtype = build_pointer_type (type);
4096 addr = fold_convert (ptrtype, addr);
4098 if (indirect_p)
4099 addr = build_va_arg_indirect_ref (addr);
4100 return build_va_arg_indirect_ref (addr);
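/* For a plain integer argument the GIMPLE built above is roughly
   equivalent to this pseudo-C sketch (REGPARM_MAX assumed to be 6):

       if (ap->gp_offset >= 6 * 8)
         goto overflow;
       addr = ap->reg_save_area + ap->gp_offset;
       ap->gp_offset += 8;
       goto done;
     overflow:
       addr = ap->overflow_arg_area;     (plus any needed alignment)
       ap->overflow_arg_area += 8;
     done:
       result = *(int *) addr;  */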
4103 /* Return nonzero if OPNUM's MEM should be matched
4104 in movabs* patterns. */
4107 ix86_check_movabs (rtx insn, int opnum)
4109 rtx set, mem;
4111 set = PATTERN (insn);
4112 if (GET_CODE (set) == PARALLEL)
4113 set = XVECEXP (set, 0, 0);
4114 gcc_assert (GET_CODE (set) == SET);
4115 mem = XEXP (set, opnum);
4116 while (GET_CODE (mem) == SUBREG)
4117 mem = SUBREG_REG (mem);
4118 gcc_assert (GET_CODE (mem) == MEM);
4119 return (volatile_ok || !MEM_VOLATILE_P (mem));
4122 /* Initialize the table of extra 80387 mathematical constants. */
4124 static void
4125 init_ext_80387_constants (void)
4127 static const char * cst[5] =
4129 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4130 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4131 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4132 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4133 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4135 int i;
4137 for (i = 0; i < 5; i++)
4139 real_from_string (&ext_80387_constants_table[i], cst[i]);
4140 /* Ensure each constant is rounded to XFmode precision. */
4141 real_convert (&ext_80387_constants_table[i],
4142 XFmode, &ext_80387_constants_table[i]);
4145 ext_80387_constants_init = 1;
4148 /* Return true if the constant is something that can be loaded with
4149 a special instruction. */
4152 standard_80387_constant_p (rtx x)
4154 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4155 return -1;
4157 if (x == CONST0_RTX (GET_MODE (x)))
4158 return 1;
4159 if (x == CONST1_RTX (GET_MODE (x)))
4160 return 2;
4162 /* For XFmode constants, try to find a special 80387 instruction when
4163 optimizing for size or on those CPUs that benefit from them. */
4164 if (GET_MODE (x) == XFmode
4165 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4167 REAL_VALUE_TYPE r;
4168 int i;
4170 if (! ext_80387_constants_init)
4171 init_ext_80387_constants ();
4173 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4174 for (i = 0; i < 5; i++)
4175 if (real_identical (&r, &ext_80387_constants_table[i]))
4176 return i + 3;
4179 return 0;
4182 /* Return the opcode of the special instruction to be used to load
4183 the constant X. */
4185 const char *
4186 standard_80387_constant_opcode (rtx x)
4188 switch (standard_80387_constant_p (x))
4190 case 1:
4191 return "fldz";
4192 case 2:
4193 return "fld1";
4194 case 3:
4195 return "fldlg2";
4196 case 4:
4197 return "fldln2";
4198 case 5:
4199 return "fldl2e";
4200 case 6:
4201 return "fldl2t";
4202 case 7:
4203 return "fldpi";
4204 default:
4205 gcc_unreachable ();
4209 /* Return the CONST_DOUBLE representing the 80387 constant that is
4210 loaded by the specified special instruction. The argument IDX
4211 matches the return value from standard_80387_constant_p. */
4214 standard_80387_constant_rtx (int idx)
4216 int i;
4218 if (! ext_80387_constants_init)
4219 init_ext_80387_constants ();
4221 switch (idx)
4223 case 3:
4224 case 4:
4225 case 5:
4226 case 6:
4227 case 7:
4228 i = idx - 3;
4229 break;
4231 default:
4232 gcc_unreachable ();
4235 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4236 XFmode);
4239 /* Return 1 if X is an FP constant that we can load into an SSE register without using memory.
4242 standard_sse_constant_p (rtx x)
4244 if (x == const0_rtx)
4245 return 1;
4246 return (x == CONST0_RTX (GET_MODE (x)));
4249 /* Returns 1 if OP contains a symbol reference */
4252 symbolic_reference_mentioned_p (rtx op)
4254 const char *fmt;
4255 int i;
4257 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4258 return 1;
4260 fmt = GET_RTX_FORMAT (GET_CODE (op));
4261 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4263 if (fmt[i] == 'E')
4265 int j;
4267 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4268 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4269 return 1;
4272 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4273 return 1;
4276 return 0;
4279 /* Return 1 if it is appropriate to emit `ret' instructions in the
4280 body of a function. Do this only if the epilogue is simple, needing a
4281 couple of insns. Prior to reloading, we can't tell how many registers
4282 must be saved, so return 0 then. Return 0 if there is no frame
4283 marker to de-allocate. */
4286 ix86_can_use_return_insn_p (void)
4288 struct ix86_frame frame;
4290 if (! reload_completed || frame_pointer_needed)
4291 return 0;
4293 /* Don't allow more than 32K bytes of pop, since that's all we can do
4294 with one instruction.  */
4295 if (current_function_pops_args
4296 && current_function_args_size >= 32768)
4297 return 0;
4299 ix86_compute_frame_layout (&frame);
4300 return frame.to_allocate == 0 && frame.nregs == 0;
4303 /* Value should be nonzero if functions must have frame pointers.
4304 Zero means the frame pointer need not be set up (and parms may
4305 be accessed via the stack pointer) in functions that seem suitable. */
4308 ix86_frame_pointer_required (void)
4310 /* If we accessed previous frames, then the generated code expects
4311 to be able to access the saved ebp value in our frame. */
4312 if (cfun->machine->accesses_prev_frame)
4313 return 1;
4315 /* Several x86 OSes need a frame pointer for other reasons,
4316 usually pertaining to setjmp.  */
4317 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4318 return 1;
4320 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4321 the frame pointer by default. Turn it back on now if we've not
4322 got a leaf function. */
4323 if (TARGET_OMIT_LEAF_FRAME_POINTER
4324 && (!current_function_is_leaf))
4325 return 1;
4327 if (current_function_profile)
4328 return 1;
4330 return 0;
4333 /* Record that the current function accesses previous call frames. */
4335 void
4336 ix86_setup_frame_addresses (void)
4338 cfun->machine->accesses_prev_frame = 1;
4341 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
4342 # define USE_HIDDEN_LINKONCE 1
4343 #else
4344 # define USE_HIDDEN_LINKONCE 0
4345 #endif
4347 static int pic_labels_used;
4349 /* Fills in the label name that should be used for a pc thunk for
4350 the given register. */
4352 static void
4353 get_pc_thunk_name (char name[32], unsigned int regno)
4355 if (USE_HIDDEN_LINKONCE)
4356 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4357 else
4358 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
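/* E.g. for %ebx this produces the well-known "__i686.get_pc_thunk.bx"
   symbol when hidden linkonce sections are available, and an internal
   LPR-style label otherwise.  (Illustrative note.)  */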
4362 /* This function emits the out-of-line pc thunks used for -fpic; each thunk
4363 loads its register with the return address of the caller and then returns.  */
4365 void
4366 ix86_file_end (void)
4368 rtx xops[2];
4369 int regno;
4371 for (regno = 0; regno < 8; ++regno)
4373 char name[32];
4375 if (! ((pic_labels_used >> regno) & 1))
4376 continue;
4378 get_pc_thunk_name (name, regno);
4380 if (USE_HIDDEN_LINKONCE)
4382 tree decl;
4384 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4385 error_mark_node);
4386 TREE_PUBLIC (decl) = 1;
4387 TREE_STATIC (decl) = 1;
4388 DECL_ONE_ONLY (decl) = 1;
4390 (*targetm.asm_out.unique_section) (decl, 0);
4391 named_section (decl, NULL, 0);
4393 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4394 fputs ("\t.hidden\t", asm_out_file);
4395 assemble_name (asm_out_file, name);
4396 fputc ('\n', asm_out_file);
4397 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4399 else
4401 text_section ();
4402 ASM_OUTPUT_LABEL (asm_out_file, name);
4405 xops[0] = gen_rtx_REG (SImode, regno);
4406 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4407 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4408 output_asm_insn ("ret", xops);
4411 if (NEED_INDICATE_EXEC_STACK)
4412 file_end_indicate_exec_stack ();
4415 /* Emit code for the SET_GOT patterns. */
4417 const char *
4418 output_set_got (rtx dest)
4420 rtx xops[3];
4422 xops[0] = dest;
4423 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4425 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4427 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4429 if (!flag_pic)
4430 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4431 else
4432 output_asm_insn ("call\t%a2", xops);
4434 #if TARGET_MACHO
4435 /* Output the "canonical" label name ("Lxx$pb") here too. This
4436 is what will be referred to by the Mach-O PIC subsystem. */
4437 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4438 #endif
4439 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4440 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4442 if (flag_pic)
4443 output_asm_insn ("pop{l}\t%0", xops);
4445 else
4447 char name[32];
4448 get_pc_thunk_name (name, REGNO (dest));
4449 pic_labels_used |= 1 << REGNO (dest);
4451 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4452 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4453 output_asm_insn ("call\t%X2", xops);
4456 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4457 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4458 else if (!TARGET_MACHO)
4459 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
4461 return "";
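/* With deep branch prediction and -fpic the emitted sequence typically
   looks like (illustrative; exact output depends on target and flags):
       call   __i686.get_pc_thunk.bx
       addl   $_GLOBAL_OFFSET_TABLE_, %ebx
   The non-thunk variant instead calls a local label and pops the return
   address into the destination register.  */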
4464 /* Generate a "push" pattern for input ARG.  */
4466 static rtx
4467 gen_push (rtx arg)
4469 return gen_rtx_SET (VOIDmode,
4470 gen_rtx_MEM (Pmode,
4471 gen_rtx_PRE_DEC (Pmode,
4472 stack_pointer_rtx)),
4473 arg);
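/* The RTL built above is simply "*--sp = ARG", which the backend's move
   patterns match as an ordinary push instruction.  (Descriptive note.)  */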
4476 /* Return >= 0 if there is an unused call-clobbered register available
4477 for the entire function. */
4479 static unsigned int
4480 ix86_select_alt_pic_regnum (void)
4482 if (current_function_is_leaf && !current_function_profile)
4484 int i;
4485 for (i = 2; i >= 0; --i)
4486 if (!regs_ever_live[i])
4487 return i;
4490 return INVALID_REGNUM;
4493 /* Return 1 if we need to save REGNO. */
4494 static int
4495 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4497 if (pic_offset_table_rtx
4498 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4499 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4500 || current_function_profile
4501 || current_function_calls_eh_return
4502 || current_function_uses_const_pool))
4504 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4505 return 0;
4506 return 1;
4509 if (current_function_calls_eh_return && maybe_eh_return)
4511 unsigned i;
4512 for (i = 0; ; i++)
4514 unsigned test = EH_RETURN_DATA_REGNO (i);
4515 if (test == INVALID_REGNUM)
4516 break;
4517 if (test == regno)
4518 return 1;
4522 if (cfun->machine->force_align_arg_pointer
4523 && regno == REGNO (cfun->machine->force_align_arg_pointer))
4524 return 1;
4526 return (regs_ever_live[regno]
4527 && !call_used_regs[regno]
4528 && !fixed_regs[regno]
4529 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4532 /* Return number of registers to be saved on the stack. */
4534 static int
4535 ix86_nsaved_regs (void)
4537 int nregs = 0;
4538 int regno;
4540 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4541 if (ix86_save_reg (regno, true))
4542 nregs++;
4543 return nregs;
4546 /* Return the offset between two registers, one to be eliminated, and the other
4547 its replacement, at the start of a routine. */
4549 HOST_WIDE_INT
4550 ix86_initial_elimination_offset (int from, int to)
4552 struct ix86_frame frame;
4553 ix86_compute_frame_layout (&frame);
4555 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4556 return frame.hard_frame_pointer_offset;
4557 else if (from == FRAME_POINTER_REGNUM
4558 && to == HARD_FRAME_POINTER_REGNUM)
4559 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4560 else
4562 gcc_assert (to == STACK_POINTER_REGNUM);
4564 if (from == ARG_POINTER_REGNUM)
4565 return frame.stack_pointer_offset;
4567 gcc_assert (from == FRAME_POINTER_REGNUM);
4568 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4572 /* Fill the structure ix86_frame describing the frame of the currently compiled function.  */
4574 static void
4575 ix86_compute_frame_layout (struct ix86_frame *frame)
4577 HOST_WIDE_INT total_size;
4578 unsigned int stack_alignment_needed;
4579 HOST_WIDE_INT offset;
4580 unsigned int preferred_alignment;
4581 HOST_WIDE_INT size = get_frame_size ();
4583 frame->nregs = ix86_nsaved_regs ();
4584 total_size = size;
4586 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4587 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4589 /* During the reload iteration the number of registers saved can change.
4590 Recompute the value as needed.  Do not recompute when the number of registers
4591 didn't change, as reload does multiple calls to the function and does not
4592 expect the decision to change within a single iteration.  */
4593 if (!optimize_size
4594 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4596 int count = frame->nregs;
4598 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4599 /* The fast prologue uses move instead of push to save registers. This
4600 is significantly longer, but also executes faster as modern hardware
4601 can execute the moves in parallel, but can't do that for push/pop.
4603 Be careful about choosing which prologue to emit: when the function takes
4604 many instructions to execute, we may use the slow version, as we may when
4605 the function is known to be outside a hot spot (the latter is known with
4606 feedback only).  Weight the size of the function by the number of registers
4607 to save, as it is cheap to use one or two push instructions but very
4608 slow to use many of them.  */
4609 if (count)
4610 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4611 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4612 || (flag_branch_probabilities
4613 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4614 cfun->machine->use_fast_prologue_epilogue = false;
4615 else
4616 cfun->machine->use_fast_prologue_epilogue
4617 = !expensive_function_p (count);
4619 if (TARGET_PROLOGUE_USING_MOVE
4620 && cfun->machine->use_fast_prologue_epilogue)
4621 frame->save_regs_using_mov = true;
4622 else
4623 frame->save_regs_using_mov = false;
4626 /* Skip return address and saved base pointer. */
4627 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4629 frame->hard_frame_pointer_offset = offset;
4631 /* Do some sanity checking of stack_alignment_needed and
4632 preferred_alignment, since the i386 port is the only one using these
4633 features, and they may break easily.  */
4635 gcc_assert (!size || stack_alignment_needed);
4636 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
4637 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4638 gcc_assert (stack_alignment_needed
4639 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
4641 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4642 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4644 /* Register save area */
4645 offset += frame->nregs * UNITS_PER_WORD;
4647 /* Va-arg area */
4648 if (ix86_save_varrargs_registers)
4650 offset += X86_64_VARARGS_SIZE;
4651 frame->va_arg_size = X86_64_VARARGS_SIZE;
4653 else
4654 frame->va_arg_size = 0;
4656 /* Align start of frame for local function. */
4657 frame->padding1 = ((offset + stack_alignment_needed - 1)
4658 & -stack_alignment_needed) - offset;
4660 offset += frame->padding1;
4662 /* Frame pointer points here. */
4663 frame->frame_pointer_offset = offset;
4665 offset += size;
4667 /* Add the outgoing arguments area.  It can be skipped if we eliminated
4668 all the function calls as dead code.
4669 Skipping is however impossible when the function calls alloca: the alloca
4670 expander assumes that the last current_function_outgoing_args_size bytes
4671 of the stack frame are unused.  */
4672 if (ACCUMULATE_OUTGOING_ARGS
4673 && (!current_function_is_leaf || current_function_calls_alloca))
4675 offset += current_function_outgoing_args_size;
4676 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4678 else
4679 frame->outgoing_arguments_size = 0;
4681 /* Align stack boundary. Only needed if we're calling another function
4682 or using alloca. */
4683 if (!current_function_is_leaf || current_function_calls_alloca)
4684 frame->padding2 = ((offset + preferred_alignment - 1)
4685 & -preferred_alignment) - offset;
4686 else
4687 frame->padding2 = 0;
4689 offset += frame->padding2;
4691 /* We've reached end of stack frame. */
4692 frame->stack_pointer_offset = offset;
4694 /* Size prologue needs to allocate. */
4695 frame->to_allocate =
4696 (size + frame->padding1 + frame->padding2
4697 + frame->outgoing_arguments_size + frame->va_arg_size);
4699 if ((!frame->to_allocate && frame->nregs <= 1)
4700 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4701 frame->save_regs_using_mov = false;
4703 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4704 && current_function_is_leaf)
4706 frame->red_zone_size = frame->to_allocate;
4707 if (frame->save_regs_using_mov)
4708 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4709 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4710 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4712 else
4713 frame->red_zone_size = 0;
4714 frame->to_allocate -= frame->red_zone_size;
4715 frame->stack_pointer_offset -= frame->red_zone_size;
4716 #if 0
4717 fprintf (stderr, "nregs: %i\n", frame->nregs);
4718 fprintf (stderr, "size: %i\n", size);
4719 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4720 fprintf (stderr, "padding1: %i\n", frame->padding1);
4721 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4722 fprintf (stderr, "padding2: %i\n", frame->padding2);
4723 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4724 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4725 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4726 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4727 frame->hard_frame_pointer_offset);
4728 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4729 #endif
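/* A sketch of the frame layout computed above, from higher to lower
   addresses (illustrative; any of the areas may be empty):
       return address
       saved frame pointer          <- hard_frame_pointer_offset
       saved registers (nregs words)
       va-arg register save area
       padding1
       local variables              <- frame_pointer_offset
       outgoing arguments
       padding2                     <- stack_pointer_offset
   When the red zone is usable, red_zone_size is subtracted from
   to_allocate and stack_pointer_offset at the end.  */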
4732 /* Emit code to save registers in the prologue. */
4734 static void
4735 ix86_emit_save_regs (void)
4737 unsigned int regno;
4738 rtx insn;
4740 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
4741 if (ix86_save_reg (regno, true))
4743 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4744 RTX_FRAME_RELATED_P (insn) = 1;
4748 /* Emit code to save registers using MOV insns.  The first register
4749 is stored at POINTER + OFFSET.  */
4750 static void
4751 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4753 unsigned int regno;
4754 rtx insn;
4756 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4757 if (ix86_save_reg (regno, true))
4759 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4760 Pmode, offset),
4761 gen_rtx_REG (Pmode, regno));
4762 RTX_FRAME_RELATED_P (insn) = 1;
4763 offset += UNITS_PER_WORD;
4767 /* Expand a prologue or epilogue stack adjustment.
4768 The pattern exists to put a dependency on all ebp-based memory accesses.
4769 STYLE should be negative if instructions should be marked as frame related,
4770 zero if the %r11 register is live and cannot be freely used, and positive
4771 otherwise.  */
4773 static void
4774 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4776 rtx insn;
4778 if (! TARGET_64BIT)
4779 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4780 else if (x86_64_immediate_operand (offset, DImode))
4781 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4782 else
4784 rtx r11;
4785 /* r11 is used by indirect sibcall return as well, set before the
4786 epilogue and used after the epilogue. ATM indirect sibcall
4787 shouldn't be used together with huge frame sizes in one
4788 function because of the frame_size check in sibcall.c. */
4789 gcc_assert (style);
4790 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4791 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4792 if (style < 0)
4793 RTX_FRAME_RELATED_P (insn) = 1;
4794 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4795 offset));
4797 if (style < 0)
4798 RTX_FRAME_RELATED_P (insn) = 1;
4801 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
4803 static rtx
4804 ix86_internal_arg_pointer (void)
4806 if (FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
4807 && DECL_NAME (current_function_decl)
4808 && MAIN_NAME_P (DECL_NAME (current_function_decl))
4809 && DECL_FILE_SCOPE_P (current_function_decl))
4811 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
4812 return copy_to_reg (cfun->machine->force_align_arg_pointer);
4814 else
4815 return virtual_incoming_args_rtx;
4818 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
4819 This is called from dwarf2out.c to emit call frame instructions
4820 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
4821 static void
4822 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
4824 rtx unspec = SET_SRC (pattern);
4825 gcc_assert (GET_CODE (unspec) == UNSPEC);
4827 switch (index)
4829 case UNSPEC_REG_SAVE:
4830 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
4831 SET_DEST (pattern));
4832 break;
4833 case UNSPEC_DEF_CFA:
4834 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
4835 INTVAL (XVECEXP (unspec, 0, 0)));
4836 break;
4837 default:
4838 gcc_unreachable ();
4842 /* Expand the prologue into a bunch of separate insns. */
4844 void
4845 ix86_expand_prologue (void)
4847 rtx insn;
4848 bool pic_reg_used;
4849 struct ix86_frame frame;
4850 HOST_WIDE_INT allocate;
4852 ix86_compute_frame_layout (&frame);
4854 if (cfun->machine->force_align_arg_pointer)
4856 rtx x, y;
4858 /* Grab the argument pointer. */
4859 x = plus_constant (stack_pointer_rtx, 4);
4860 y = cfun->machine->force_align_arg_pointer;
4861 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
4862 RTX_FRAME_RELATED_P (insn) = 1;
4864 /* The unwind info consists of two parts: install the fafp as the cfa,
4865 and record the fafp as the "save register" of the stack pointer.
4866 The latter is there so that the unwinder can see where it
4867 should restore the stack pointer across the and insn.  */
4868 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
4869 x = gen_rtx_SET (VOIDmode, y, x);
4870 RTX_FRAME_RELATED_P (x) = 1;
4871 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
4872 UNSPEC_REG_SAVE);
4873 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
4874 RTX_FRAME_RELATED_P (y) = 1;
4875 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
4876 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
4877 REG_NOTES (insn) = x;
4879 /* Align the stack. */
4880 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
4881 GEN_INT (-16)));
4883 /* And here we cheat like madmen with the unwind info. We force the
4884 cfa register back to sp+4, which is exactly what it was at the
4885 start of the function. Re-pushing the return address results in
4886 the return at the same spot relative to the cfa, and thus is
4887 correct wrt the unwind info. */
4888 x = cfun->machine->force_align_arg_pointer;
4889 x = gen_frame_mem (Pmode, plus_constant (x, -4));
4890 insn = emit_insn (gen_push (x));
4891 RTX_FRAME_RELATED_P (insn) = 1;
4893 x = GEN_INT (4);
4894 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
4895 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
4896 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
4897 REG_NOTES (insn) = x;
4900 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4901 slower on all targets. Also sdb doesn't like it. */
4903 if (frame_pointer_needed)
4905 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4906 RTX_FRAME_RELATED_P (insn) = 1;
4908 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4909 RTX_FRAME_RELATED_P (insn) = 1;
4912 allocate = frame.to_allocate;
4914 if (!frame.save_regs_using_mov)
4915 ix86_emit_save_regs ();
4916 else
4917 allocate += frame.nregs * UNITS_PER_WORD;
4919 /* When using the red zone we may start saving registers before allocating
4920 the stack frame, saving one cycle of the prologue.  */
4921 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4922 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4923 : stack_pointer_rtx,
4924 -frame.nregs * UNITS_PER_WORD);
4926 if (allocate == 0)
4928 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4929 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4930 GEN_INT (-allocate), -1);
4931 else
4933 /* Only valid for Win32. */
4934 rtx eax = gen_rtx_REG (SImode, 0);
4935 bool eax_live = ix86_eax_live_at_start_p ();
4936 rtx t;
4938 gcc_assert (!TARGET_64BIT);
4940 if (eax_live)
4942 emit_insn (gen_push (eax));
4943 allocate -= 4;
4946 emit_move_insn (eax, GEN_INT (allocate));
4948 insn = emit_insn (gen_allocate_stack_worker (eax));
4949 RTX_FRAME_RELATED_P (insn) = 1;
4950 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
4951 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
4952 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
4953 t, REG_NOTES (insn));
4955 if (eax_live)
4957 if (frame_pointer_needed)
4958 t = plus_constant (hard_frame_pointer_rtx,
4959 allocate
4960 - frame.to_allocate
4961 - frame.nregs * UNITS_PER_WORD);
4962 else
4963 t = plus_constant (stack_pointer_rtx, allocate);
4964 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4968 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4970 if (!frame_pointer_needed || !frame.to_allocate)
4971 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4972 else
4973 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4974 -frame.nregs * UNITS_PER_WORD);
4977 pic_reg_used = false;
4978 if (pic_offset_table_rtx
4979 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4980 || current_function_profile))
4982 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4984 if (alt_pic_reg_used != INVALID_REGNUM)
4985 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4987 pic_reg_used = true;
4990 if (pic_reg_used)
4992 if (TARGET_64BIT)
4993 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
4994 else
4995 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4997 /* Even with accurate pre-reload life analysis, we can wind up
4998 deleting all references to the pic register after reload.
4999 Consider if cross-jumping unifies two sides of a branch
5000 controlled by a comparison vs the only read from a global.
5001 In which case, allow the set_got to be deleted, though we're
5002 too late to do anything about the ebx save in the prologue. */
5003 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5006 /* Prevent function calls from being scheduled before the call to mcount.
5007 In the pic_reg_used case, make sure that the got load isn't deleted.  */
5008 if (current_function_profile)
5009 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
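/* For a typical ia32 function with a frame pointer the insns emitted above
   correspond to the familiar sequence (sketch only; the fast prologue may
   use moves instead of pushes and fold the register saves into the frame
   allocation):
       pushl  %ebp
       movl   %esp, %ebp
       pushl  <callee-saved registers>
       subl   $to_allocate, %esp  */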
5012 /* Emit code to restore saved registers using MOV insns. First register
5013 is restored from POINTER + OFFSET. */
5014 static void
5015 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5016 int maybe_eh_return)
5018 int regno;
5019 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5021 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5022 if (ix86_save_reg (regno, maybe_eh_return))
5024 /* Ensure that adjust_address won't be forced to produce a pointer
5025 out of the range allowed by the x86-64 instruction set.  */
5026 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5028 rtx r11;
5030 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
5031 emit_move_insn (r11, GEN_INT (offset));
5032 emit_insn (gen_adddi3 (r11, r11, pointer));
5033 base_address = gen_rtx_MEM (Pmode, r11);
5034 offset = 0;
5036 emit_move_insn (gen_rtx_REG (Pmode, regno),
5037 adjust_address (base_address, Pmode, offset));
5038 offset += UNITS_PER_WORD;
5042 /* Restore function stack, frame, and registers. */
5044 void
5045 ix86_expand_epilogue (int style)
5047 int regno;
5048 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5049 struct ix86_frame frame;
5050 HOST_WIDE_INT offset;
5052 ix86_compute_frame_layout (&frame);
5054 /* Calculate start of saved registers relative to ebp. Special care
5055 must be taken for the normal return case of a function using
5056 eh_return: the eax and edx registers are marked as saved, but not
5057 restored along this path. */
5058 offset = frame.nregs;
5059 if (current_function_calls_eh_return && style != 2)
5060 offset -= 2;
5061 offset *= -UNITS_PER_WORD;
5063 /* If we're only restoring one register and sp is not valid, then
5064 use a move instruction to restore the register, since it's
5065 less work than reloading sp and popping the register.
5067 The default code results in a stack adjustment using an add/lea instruction,
5068 while this code results in a LEAVE instruction (or its discrete equivalent),
5069 so it is profitable in some other cases as well, especially when there
5070 are no registers to restore.  We also use this code when TARGET_USE_LEAVE
5071 is set and there is exactly one register to pop.  This heuristic may need
5072 some tuning in the future.  */
5073 if ((!sp_valid && frame.nregs <= 1)
5074 || (TARGET_EPILOGUE_USING_MOVE
5075 && cfun->machine->use_fast_prologue_epilogue
5076 && (frame.nregs > 1 || frame.to_allocate))
5077 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5078 || (frame_pointer_needed && TARGET_USE_LEAVE
5079 && cfun->machine->use_fast_prologue_epilogue
5080 && frame.nregs == 1)
5081 || current_function_calls_eh_return)
5083 /* Restore registers.  We can use ebp or esp to address the memory
5084 locations.  If both are available, default to ebp, since offsets
5085 are known to be small.  The only exception is esp pointing directly to
5086 the end of the block of saved registers, where we may simplify the
5087 addressing mode.  */
5089 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5090 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5091 frame.to_allocate, style == 2);
5092 else
5093 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5094 offset, style == 2);
5096 /* eh_return epilogues need %ecx added to the stack pointer. */
5097 if (style == 2)
5099 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5101 if (frame_pointer_needed)
5103 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5104 tmp = plus_constant (tmp, UNITS_PER_WORD);
5105 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5107 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5108 emit_move_insn (hard_frame_pointer_rtx, tmp);
5110 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5111 const0_rtx, style);
5113 else
5115 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5116 tmp = plus_constant (tmp, (frame.to_allocate
5117 + frame.nregs * UNITS_PER_WORD));
5118 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5121 else if (!frame_pointer_needed)
5122 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5123 GEN_INT (frame.to_allocate
5124 + frame.nregs * UNITS_PER_WORD),
5125 style);
5126 /* If not an i386, mov & pop is faster than "leave". */
5127 else if (TARGET_USE_LEAVE || optimize_size
5128 || !cfun->machine->use_fast_prologue_epilogue)
5129 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5130 else
5132 pro_epilogue_adjust_stack (stack_pointer_rtx,
5133 hard_frame_pointer_rtx,
5134 const0_rtx, style);
5135 if (TARGET_64BIT)
5136 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5137 else
5138 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5141 else
5143 /* First step is to deallocate the stack frame so that we can
5144 pop the registers. */
5145 if (!sp_valid)
5147 gcc_assert (frame_pointer_needed);
5148 pro_epilogue_adjust_stack (stack_pointer_rtx,
5149 hard_frame_pointer_rtx,
5150 GEN_INT (offset), style);
5152 else if (frame.to_allocate)
5153 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5154 GEN_INT (frame.to_allocate), style);
5156 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5157 if (ix86_save_reg (regno, false))
5159 if (TARGET_64BIT)
5160 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5161 else
5162 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5164 if (frame_pointer_needed)
5166 /* Leave results in shorter dependency chains on CPUs that are
5167 able to grok it fast. */
5168 if (TARGET_USE_LEAVE)
5169 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5170 else if (TARGET_64BIT)
5171 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5172 else
5173 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5177 if (cfun->machine->force_align_arg_pointer)
5179 emit_insn (gen_addsi3 (stack_pointer_rtx,
5180 cfun->machine->force_align_arg_pointer,
5181 GEN_INT (-4)));
5184 /* Sibcall epilogues don't want a return instruction. */
5185 if (style == 0)
5186 return;
5188 if (current_function_pops_args && current_function_args_size)
5190 rtx popc = GEN_INT (current_function_pops_args);
5192 /* i386 can only pop 64K bytes.  If asked to pop more, pop the
5193 return address, do an explicit add, and jump indirectly to the
5194 caller.  */
5196 if (current_function_pops_args >= 65536)
5198 rtx ecx = gen_rtx_REG (SImode, 2);
5200 /* There is no "pascal" calling convention in 64bit ABI. */
5201 gcc_assert (!TARGET_64BIT);
5203 emit_insn (gen_popsi1 (ecx));
5204 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5205 emit_jump_insn (gen_return_indirect_internal (ecx));
5207 else
5208 emit_jump_insn (gen_return_pop_internal (popc));
5210 else
5211 emit_jump_insn (gen_return_internal ());
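/* Conversely, the epilogue emitted above usually boils down to one of
   (sketch; the choice depends on TARGET_USE_LEAVE and the frame shape):
       movl  %ebp, %esp ; popl %ebp ; ret        or
       leave ; ret                               or
       addl  $N, %esp ; popl <saved regs> ; ret  */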
5214 /* Reset from the function's potential modifications. */
5216 static void
5217 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5218 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5220 if (pic_offset_table_rtx)
5221 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5224 /* Extract the parts of an RTL expression that is a valid memory address
5225 for an instruction.  Return 0 if the structure of the address is
5226 grossly off.  Return -1 if the address contains ASHIFT, so it is not
5227 strictly valid, but it is still used for computing the length of the lea instruction.  */
5230 ix86_decompose_address (rtx addr, struct ix86_address *out)
5232 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5233 rtx base_reg, index_reg;
5234 HOST_WIDE_INT scale = 1;
5235 rtx scale_rtx = NULL_RTX;
5236 int retval = 1;
5237 enum ix86_address_seg seg = SEG_DEFAULT;
5239 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5240 base = addr;
5241 else if (GET_CODE (addr) == PLUS)
5243 rtx addends[4], op;
5244 int n = 0, i;
5246 op = addr;
5249 if (n >= 4)
5250 return 0;
5251 addends[n++] = XEXP (op, 1);
5252 op = XEXP (op, 0);
5254 while (GET_CODE (op) == PLUS);
5255 if (n >= 4)
5256 return 0;
5257 addends[n] = op;
5259 for (i = n; i >= 0; --i)
5261 op = addends[i];
5262 switch (GET_CODE (op))
5264 case MULT:
5265 if (index)
5266 return 0;
5267 index = XEXP (op, 0);
5268 scale_rtx = XEXP (op, 1);
5269 break;
5271 case UNSPEC:
5272 if (XINT (op, 1) == UNSPEC_TP
5273 && TARGET_TLS_DIRECT_SEG_REFS
5274 && seg == SEG_DEFAULT)
5275 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5276 else
5277 return 0;
5278 break;
5280 case REG:
5281 case SUBREG:
5282 if (!base)
5283 base = op;
5284 else if (!index)
5285 index = op;
5286 else
5287 return 0;
5288 break;
5290 case CONST:
5291 case CONST_INT:
5292 case SYMBOL_REF:
5293 case LABEL_REF:
5294 if (disp)
5295 return 0;
5296 disp = op;
5297 break;
5299 default:
5300 return 0;
5304 else if (GET_CODE (addr) == MULT)
5306 index = XEXP (addr, 0); /* index*scale */
5307 scale_rtx = XEXP (addr, 1);
5309 else if (GET_CODE (addr) == ASHIFT)
5311 rtx tmp;
5313 /* We're called for lea too, which implements ashift on occasion. */
5314 index = XEXP (addr, 0);
5315 tmp = XEXP (addr, 1);
5316 if (GET_CODE (tmp) != CONST_INT)
5317 return 0;
5318 scale = INTVAL (tmp);
5319 if ((unsigned HOST_WIDE_INT) scale > 3)
5320 return 0;
5321 scale = 1 << scale;
5322 retval = -1;
5324 else
5325 disp = addr; /* displacement */
5327 /* Extract the integral value of scale. */
5328 if (scale_rtx)
5330 if (GET_CODE (scale_rtx) != CONST_INT)
5331 return 0;
5332 scale = INTVAL (scale_rtx);
5335 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
5336 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
5338 /* Allow the arg pointer and stack pointer as an index if there is no scaling.  */
5339 if (base_reg && index_reg && scale == 1
5340 && (index_reg == arg_pointer_rtx
5341 || index_reg == frame_pointer_rtx
5342 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
5344 rtx tmp;
5345 tmp = base, base = index, index = tmp;
5346 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
5349 /* Special case: %ebp cannot be encoded as a base without a displacement. */
5350 if ((base_reg == hard_frame_pointer_rtx
5351 || base_reg == frame_pointer_rtx
5352 || base_reg == arg_pointer_rtx) && !disp)
5353 disp = const0_rtx;
5355 /* Special case: on K6, [%esi] makes the instruction vector decoded.
5356 Avoid this by transforming to [%esi+0]. */
5357 if (ix86_tune == PROCESSOR_K6 && !optimize_size
5358 && base_reg && !index_reg && !disp
5359 && REG_P (base_reg)
5360 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
5361 disp = const0_rtx;
5363 /* Special case: encode reg+reg instead of reg*2. */
5364 if (!base && index && scale && scale == 2)
5365 base = index, base_reg = index_reg, scale = 1;
5367 /* Special case: scaling cannot be encoded without base or displacement. */
5368 if (!base && !disp && index && scale != 1)
5369 disp = const0_rtx;
5371 out->base = base;
5372 out->index = index;
5373 out->disp = disp;
5374 out->scale = scale;
5375 out->seg = seg;
5377 return retval;
5380 /* Return cost of the memory address x.
5381 For i386, it is better to use a complex address than let gcc copy
5382 the address into a reg and make a new pseudo. But not if the address
5383 requires two regs - that would mean more pseudos with longer
5384 lifetimes. */
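/* Worked example of the heuristic below (illustrative, and leaving aside
   the TARGET_K6 penalty): "4(%ebx)" decomposes to a hard base register
   plus a displacement and gets cost 0, while an address built from two
   distinct pseudo registers and no displacement gets cost 3.  */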
5385 static int
5386 ix86_address_cost (rtx x)
5388 struct ix86_address parts;
5389 int cost = 1;
5390 int ok = ix86_decompose_address (x, &parts);
5392 gcc_assert (ok);
5394 if (parts.base && GET_CODE (parts.base) == SUBREG)
5395 parts.base = SUBREG_REG (parts.base);
5396 if (parts.index && GET_CODE (parts.index) == SUBREG)
5397 parts.index = SUBREG_REG (parts.index);
5399 /* More complex memory references are better. */
5400 if (parts.disp && parts.disp != const0_rtx)
5401 cost--;
5402 if (parts.seg != SEG_DEFAULT)
5403 cost--;
5405 /* Attempt to minimize number of registers in the address. */
5406 if ((parts.base
5407 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5408 || (parts.index
5409 && (!REG_P (parts.index)
5410 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5411 cost++;
5413 if (parts.base
5414 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5415 && parts.index
5416 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5417 && parts.base != parts.index)
5418 cost++;
5420 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
5421 since its predecode logic can't detect the length of instructions
5422 and it falls back to vector decoding. Increase the cost of such
5423 addresses here. The penalty is at least 2 cycles. It may be worthwhile
5424 to split such addresses or even refuse such addresses at all.
5426 The following addressing modes are affected:
5427 [base+scale*index]
5428 [scale*index+disp]
5429 [base+index]
5431 The first and last case may be avoidable by explicitly coding the zero in
5432 the memory address, but I don't have an AMD-K6 machine handy to check this
5433 theory. */
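/* Illustrative example of the "explicit zero" idea above (AT&T syntax):
   rewriting "(%ebx,%esi)" as "0(%ebx,%esi)" forces a displacement byte,
   so the ModR/M byte no longer matches the 00_xxx_100b pattern.  */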
5435 if (TARGET_K6
5436 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5437 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5438 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5439 cost += 10;
5441 return cost;
5444 /* If X is a machine specific address (i.e. a symbol or label being
5445 referenced as a displacement from the GOT implemented using an
5446 UNSPEC), then return the base term. Otherwise return X. */
5449 ix86_find_base_term (rtx x)
5451 rtx term;
5453 if (TARGET_64BIT)
5455 if (GET_CODE (x) != CONST)
5456 return x;
5457 term = XEXP (x, 0);
5458 if (GET_CODE (term) == PLUS
5459 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5460 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5461 term = XEXP (term, 0);
5462 if (GET_CODE (term) != UNSPEC
5463 || XINT (term, 1) != UNSPEC_GOTPCREL)
5464 return x;
5466 term = XVECEXP (term, 0, 0);
5468 if (GET_CODE (term) != SYMBOL_REF
5469 && GET_CODE (term) != LABEL_REF)
5470 return x;
5472 return term;
5475 term = ix86_delegitimize_address (x);
5477 if (GET_CODE (term) != SYMBOL_REF
5478 && GET_CODE (term) != LABEL_REF)
5479 return x;
5481 return term;
5484 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
5485 this is used to form addresses to local data when -fPIC is in
5486 use. */
5488 static bool
5489 darwin_local_data_pic (rtx disp)
5491 if (GET_CODE (disp) == MINUS)
5493 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5494 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5495 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5497 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5498 if (! strcmp (sym_name, "<pic base>"))
5499 return true;
5503 return false;
5506 /* Determine if a given RTX is a valid constant. We already know this
5507 satisfies CONSTANT_P. */
5509 bool
5510 legitimate_constant_p (rtx x)
5512 switch (GET_CODE (x))
5514 case CONST:
5515 x = XEXP (x, 0);
5517 if (GET_CODE (x) == PLUS)
5519 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5520 return false;
5521 x = XEXP (x, 0);
5524 if (TARGET_MACHO && darwin_local_data_pic (x))
5525 return true;
5527 /* Only some unspecs are valid as "constants". */
5528 if (GET_CODE (x) == UNSPEC)
5529 switch (XINT (x, 1))
5531 case UNSPEC_GOTOFF:
5532 return TARGET_64BIT;
5533 case UNSPEC_TPOFF:
5534 case UNSPEC_NTPOFF:
5535 x = XVECEXP (x, 0, 0);
5536 return (GET_CODE (x) == SYMBOL_REF
5537 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5538 case UNSPEC_DTPOFF:
5539 x = XVECEXP (x, 0, 0);
5540 return (GET_CODE (x) == SYMBOL_REF
5541 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
5542 default:
5543 return false;
5546 /* We must have drilled down to a symbol. */
5547 if (GET_CODE (x) == LABEL_REF)
5548 return true;
5549 if (GET_CODE (x) != SYMBOL_REF)
5550 return false;
5551 /* FALLTHRU */
5553 case SYMBOL_REF:
5554 /* TLS symbols are never valid. */
5555 if (SYMBOL_REF_TLS_MODEL (x))
5556 return false;
5557 break;
5559 default:
5560 break;
5563 /* Otherwise we handle everything else in the move patterns. */
5564 return true;
5567 /* Determine if it's legal to put X into the constant pool. This
5568 is not possible for the address of thread-local symbols, which
5569 is checked above. */
5571 static bool
5572 ix86_cannot_force_const_mem (rtx x)
5574 return !legitimate_constant_p (x);
5577 /* Determine if a given RTX is a valid constant address. */
5579 bool
5580 constant_address_p (rtx x)
5582 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5585 /* Nonzero if the constant value X is a legitimate general operand
5586 when generating PIC code. It is given that flag_pic is on and
5587 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5589 bool
5590 legitimate_pic_operand_p (rtx x)
5592 rtx inner;
5594 switch (GET_CODE (x))
5596 case CONST:
5597 inner = XEXP (x, 0);
5598 if (GET_CODE (inner) == PLUS
5599 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
5600 inner = XEXP (inner, 0);
5602 /* Only some unspecs are valid as "constants". */
5603 if (GET_CODE (inner) == UNSPEC)
5604 switch (XINT (inner, 1))
5606 case UNSPEC_GOTOFF:
5607 return TARGET_64BIT;
5608 case UNSPEC_TPOFF:
5609 x = XVECEXP (inner, 0, 0);
5610 return (GET_CODE (x) == SYMBOL_REF
5611 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
5612 default:
5613 return false;
5615 /* FALLTHRU */
5617 case SYMBOL_REF:
5618 case LABEL_REF:
5619 return legitimate_pic_address_disp_p (x);
5621 default:
5622 return true;
5626 /* Determine if a given CONST RTX is a valid memory displacement
5627 in PIC mode. */
5630 legitimate_pic_address_disp_p (rtx disp)
5632 bool saw_plus;
5634 /* In 64bit mode we can allow direct addresses of symbols and labels
5635 when they are not dynamic symbols. */
5636 if (TARGET_64BIT)
5638 rtx op0 = disp, op1;
5640 switch (GET_CODE (disp))
5642 case LABEL_REF:
5643 return true;
5645 case CONST:
5646 if (GET_CODE (XEXP (disp, 0)) != PLUS)
5647 break;
5648 op0 = XEXP (XEXP (disp, 0), 0);
5649 op1 = XEXP (XEXP (disp, 0), 1);
5650 if (GET_CODE (op1) != CONST_INT
5651 || INTVAL (op1) >= 16*1024*1024
5652 || INTVAL (op1) < -16*1024*1024)
5653 break;
5654 if (GET_CODE (op0) == LABEL_REF)
5655 return true;
5656 if (GET_CODE (op0) != SYMBOL_REF)
5657 break;
5658 /* FALLTHRU */
5660 case SYMBOL_REF:
5661 /* TLS references should always be enclosed in UNSPEC. */
5662 if (SYMBOL_REF_TLS_MODEL (op0))
5663 return false;
5664 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
5665 return true;
5666 break;
5668 default:
5669 break;
5672 if (GET_CODE (disp) != CONST)
5673 return 0;
5674 disp = XEXP (disp, 0);
5676 if (TARGET_64BIT)
5678 /* It is unsafe to allow PLUS expressions. This limits the allowed distance
5679 of GOT tables. We should not need these anyway. */
5680 if (GET_CODE (disp) != UNSPEC
5681 || (XINT (disp, 1) != UNSPEC_GOTPCREL
5682 && XINT (disp, 1) != UNSPEC_GOTOFF))
5683 return 0;
5685 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5686 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5687 return 0;
5688 return 1;
5691 saw_plus = false;
5692 if (GET_CODE (disp) == PLUS)
5694 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5695 return 0;
5696 disp = XEXP (disp, 0);
5697 saw_plus = true;
5700 if (TARGET_MACHO && darwin_local_data_pic (disp))
5701 return 1;
5703 if (GET_CODE (disp) != UNSPEC)
5704 return 0;
5706 switch (XINT (disp, 1))
5708 case UNSPEC_GOT:
5709 if (saw_plus)
5710 return false;
5711 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5712 case UNSPEC_GOTOFF:
5713 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
5714 While the ABI also specifies a 32bit relocation, we don't produce it in
5715 the small PIC model at all. */
5716 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5717 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5718 && !TARGET_64BIT)
5719 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5720 return false;
5721 case UNSPEC_GOTTPOFF:
5722 case UNSPEC_GOTNTPOFF:
5723 case UNSPEC_INDNTPOFF:
5724 if (saw_plus)
5725 return false;
5726 disp = XVECEXP (disp, 0, 0);
5727 return (GET_CODE (disp) == SYMBOL_REF
5728 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
5729 case UNSPEC_NTPOFF:
5730 disp = XVECEXP (disp, 0, 0);
5731 return (GET_CODE (disp) == SYMBOL_REF
5732 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
5733 case UNSPEC_DTPOFF:
5734 disp = XVECEXP (disp, 0, 0);
5735 return (GET_CODE (disp) == SYMBOL_REF
5736 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
5739 return 0;
5742 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5743 memory address for an instruction. The MODE argument is the machine mode
5744 for the MEM expression that wants to use this address.
5746 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5747 convert common non-canonical forms to canonical form so that they will
5748 be recognized. */
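/* Illustrative example of a canonical address: "16(%ebx,%esi,4)" is
   represented as

	(plus (plus (mult (reg) (const_int 4)) (reg)) (const_int 16))

   i.e. the MULT comes first inside the inner PLUS, matching the
   canonicalization done by legitimize_address below.  */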
5751 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5753 struct ix86_address parts;
5754 rtx base, index, disp;
5755 HOST_WIDE_INT scale;
5756 const char *reason = NULL;
5757 rtx reason_rtx = NULL_RTX;
5759 if (TARGET_DEBUG_ADDR)
5761 fprintf (stderr,
5762 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5763 GET_MODE_NAME (mode), strict);
5764 debug_rtx (addr);
5767 if (ix86_decompose_address (addr, &parts) <= 0)
5769 reason = "decomposition failed";
5770 goto report_error;
5773 base = parts.base;
5774 index = parts.index;
5775 disp = parts.disp;
5776 scale = parts.scale;
5778 /* Validate base register.
5780 Don't allow SUBREG's that span more than a word here. It can lead to spill
5781 failures when the base is one word out of a two word structure, which is
5782 represented internally as a DImode int. */
5784 if (base)
5786 rtx reg;
5787 reason_rtx = base;
5789 if (REG_P (base))
5790 reg = base;
5791 else if (GET_CODE (base) == SUBREG
5792 && REG_P (SUBREG_REG (base))
5793 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
5794 <= UNITS_PER_WORD)
5795 reg = SUBREG_REG (base);
5796 else
5798 reason = "base is not a register";
5799 goto report_error;
5802 if (GET_MODE (base) != Pmode)
5804 reason = "base is not in Pmode";
5805 goto report_error;
5808 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5809 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
5811 reason = "base is not valid";
5812 goto report_error;
5816 /* Validate index register.
5818 Don't allow SUBREG's that span more than a word here -- same as above. */
5820 if (index)
5822 rtx reg;
5823 reason_rtx = index;
5825 if (REG_P (index))
5826 reg = index;
5827 else if (GET_CODE (index) == SUBREG
5828 && REG_P (SUBREG_REG (index))
5829 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
5830 <= UNITS_PER_WORD)
5831 reg = SUBREG_REG (index);
5832 else
5834 reason = "index is not a register";
5835 goto report_error;
5838 if (GET_MODE (index) != Pmode)
5840 reason = "index is not in Pmode";
5841 goto report_error;
5844 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
5845 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
5847 reason = "index is not valid";
5848 goto report_error;
5852 /* Validate scale factor. */
5853 if (scale != 1)
5855 reason_rtx = GEN_INT (scale);
5856 if (!index)
5858 reason = "scale without index";
5859 goto report_error;
5862 if (scale != 2 && scale != 4 && scale != 8)
5864 reason = "scale is not a valid multiplier";
5865 goto report_error;
5869 /* Validate displacement. */
5870 if (disp)
5872 reason_rtx = disp;
5874 if (GET_CODE (disp) == CONST
5875 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5876 switch (XINT (XEXP (disp, 0), 1))
5878 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
5879 used. While the ABI also specifies 32bit relocations, we don't produce
5880 them at all and use IP-relative addressing instead. */
5881 case UNSPEC_GOT:
5882 case UNSPEC_GOTOFF:
5883 gcc_assert (flag_pic);
5884 if (!TARGET_64BIT)
5885 goto is_legitimate_pic;
5886 reason = "64bit address unspec";
5887 goto report_error;
5889 case UNSPEC_GOTPCREL:
5890 gcc_assert (flag_pic);
5891 goto is_legitimate_pic;
5893 case UNSPEC_GOTTPOFF:
5894 case UNSPEC_GOTNTPOFF:
5895 case UNSPEC_INDNTPOFF:
5896 case UNSPEC_NTPOFF:
5897 case UNSPEC_DTPOFF:
5898 break;
5900 default:
5901 reason = "invalid address unspec";
5902 goto report_error;
5905 else if (flag_pic && (SYMBOLIC_CONST (disp)
5906 #if TARGET_MACHO
5907 && !machopic_operand_p (disp)
5908 #endif
5911 is_legitimate_pic:
5912 if (TARGET_64BIT && (index || base))
5914 /* foo@dtpoff(%rX) is ok. */
5915 if (GET_CODE (disp) != CONST
5916 || GET_CODE (XEXP (disp, 0)) != PLUS
5917 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5918 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5919 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5920 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5922 reason = "non-constant pic memory reference";
5923 goto report_error;
5926 else if (! legitimate_pic_address_disp_p (disp))
5928 reason = "displacement is an invalid pic construct";
5929 goto report_error;
5932 /* This code used to verify that a symbolic pic displacement
5933 includes the pic_offset_table_rtx register.
5935 While this is a good idea, unfortunately these constructs may
5936 be created by the "adds using lea" optimization for incorrect
5937 code like:
5939 int a;
5940 int foo(int i)
5942 return *(&a+i);
5945 This code is nonsensical, but it results in addressing the
5946 GOT table with a pic_offset_table_rtx base. We can't
5947 just refuse it easily, since it gets matched by the
5948 "addsi3" pattern, which later gets split to lea when the
5949 output register differs from the input. While this
5950 could be handled by a separate addsi pattern for this case
5951 that never results in lea, disabling this test seems to be the
5952 easier and correct fix for the crash. */
5954 else if (GET_CODE (disp) != LABEL_REF
5955 && GET_CODE (disp) != CONST_INT
5956 && (GET_CODE (disp) != CONST
5957 || !legitimate_constant_p (disp))
5958 && (GET_CODE (disp) != SYMBOL_REF
5959 || !legitimate_constant_p (disp)))
5961 reason = "displacement is not constant";
5962 goto report_error;
5964 else if (TARGET_64BIT
5965 && !x86_64_immediate_operand (disp, VOIDmode))
5967 reason = "displacement is out of range";
5968 goto report_error;
5972 /* Everything looks valid. */
5973 if (TARGET_DEBUG_ADDR)
5974 fprintf (stderr, "Success.\n");
5975 return TRUE;
5977 report_error:
5978 if (TARGET_DEBUG_ADDR)
5980 fprintf (stderr, "Error: %s\n", reason);
5981 debug_rtx (reason_rtx);
5983 return FALSE;
5986 /* Return a unique alias set for the GOT. */
5988 static HOST_WIDE_INT
5989 ix86_GOT_alias_set (void)
5991 static HOST_WIDE_INT set = -1;
5992 if (set == -1)
5993 set = new_alias_set ();
5994 return set;
5997 /* Return a legitimate reference for ORIG (an address) using the
5998 register REG. If REG is 0, a new pseudo is generated.
6000 There are two types of references that must be handled:
6002 1. Global data references must load the address from the GOT, via
6003 the PIC reg. An insn is emitted to do this load, and the reg is
6004 returned.
6006 2. Static data references, constant pool addresses, and code labels
6007 compute the address as an offset from the GOT, whose base is in
6008 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6009 differentiate them from global data objects. The returned
6010 address is the PIC reg + an unspec constant.
6012 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6013 reg also appears in the address. */
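/* Rough sketch of the two resulting forms (illustrative only):

     global data:  (mem (plus pic_reg (const (unspec [sym] UNSPEC_GOT))))
		   -- a load from the symbol's GOT slot;
     local data:   (plus pic_reg (const (unspec [sym] UNSPEC_GOTOFF)))
		   -- the PIC register plus a @GOTOFF offset.  */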
6015 static rtx
6016 legitimize_pic_address (rtx orig, rtx reg)
6018 rtx addr = orig;
6019 rtx new = orig;
6020 rtx base;
6022 #if TARGET_MACHO
6023 if (reg == 0)
6024 reg = gen_reg_rtx (Pmode);
6025 /* Use the generic Mach-O PIC machinery. */
6026 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6027 #endif
6029 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6030 new = addr;
6031 else if (TARGET_64BIT
6032 && ix86_cmodel != CM_SMALL_PIC
6033 && local_symbolic_operand (addr, Pmode))
6035 rtx tmpreg;
6036 /* This symbol may be referenced via a displacement from the PIC
6037 base address (@GOTOFF). */
6039 if (reload_in_progress)
6040 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6041 if (GET_CODE (addr) == CONST)
6042 addr = XEXP (addr, 0);
6043 if (GET_CODE (addr) == PLUS)
6045 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6046 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6048 else
6049 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6050 new = gen_rtx_CONST (Pmode, new);
6051 if (!reg)
6052 tmpreg = gen_reg_rtx (Pmode);
6053 else
6054 tmpreg = reg;
6055 emit_move_insn (tmpreg, new);
6057 if (reg != 0)
6059 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6060 tmpreg, 1, OPTAB_DIRECT);
6061 new = reg;
6063 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6065 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6067 /* This symbol may be referenced via a displacement from the PIC
6068 base address (@GOTOFF). */
6070 if (reload_in_progress)
6071 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6072 if (GET_CODE (addr) == CONST)
6073 addr = XEXP (addr, 0);
6074 if (GET_CODE (addr) == PLUS)
6076 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6077 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6079 else
6080 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6081 new = gen_rtx_CONST (Pmode, new);
6082 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6084 if (reg != 0)
6086 emit_move_insn (reg, new);
6087 new = reg;
6090 else if (GET_CODE (addr) == SYMBOL_REF)
6092 if (TARGET_64BIT)
6094 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6095 new = gen_rtx_CONST (Pmode, new);
6096 new = gen_const_mem (Pmode, new);
6097 set_mem_alias_set (new, ix86_GOT_alias_set ());
6099 if (reg == 0)
6100 reg = gen_reg_rtx (Pmode);
6101 /* Use gen_movsi directly, otherwise the address is loaded
6102 into a register for CSE. We don't want to CSE these addresses;
6103 instead we CSE addresses from the GOT table, so skip this. */
6104 emit_insn (gen_movsi (reg, new));
6105 new = reg;
6107 else
6109 /* This symbol must be referenced via a load from the
6110 Global Offset Table (@GOT). */
6112 if (reload_in_progress)
6113 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6114 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6115 new = gen_rtx_CONST (Pmode, new);
6116 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6117 new = gen_const_mem (Pmode, new);
6118 set_mem_alias_set (new, ix86_GOT_alias_set ());
6120 if (reg == 0)
6121 reg = gen_reg_rtx (Pmode);
6122 emit_move_insn (reg, new);
6123 new = reg;
6126 else
6128 if (GET_CODE (addr) == CONST_INT
6129 && !x86_64_immediate_operand (addr, VOIDmode))
6131 if (reg)
6133 emit_move_insn (reg, addr);
6134 new = reg;
6136 else
6137 new = force_reg (Pmode, addr);
6139 else if (GET_CODE (addr) == CONST)
6141 addr = XEXP (addr, 0);
6143 /* We must match stuff we generate before. Assume the only
6144 unspecs that can get here are ours. Not that we could do
6145 anything with them anyway.... */
6146 if (GET_CODE (addr) == UNSPEC
6147 || (GET_CODE (addr) == PLUS
6148 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6149 return orig;
6150 gcc_assert (GET_CODE (addr) == PLUS);
6152 if (GET_CODE (addr) == PLUS)
6154 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6156 /* Check first to see if this is a constant offset from a @GOTOFF
6157 symbol reference. */
6158 if (local_symbolic_operand (op0, Pmode)
6159 && GET_CODE (op1) == CONST_INT)
6161 if (!TARGET_64BIT)
6163 if (reload_in_progress)
6164 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6165 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6166 UNSPEC_GOTOFF);
6167 new = gen_rtx_PLUS (Pmode, new, op1);
6168 new = gen_rtx_CONST (Pmode, new);
6169 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6171 if (reg != 0)
6173 emit_move_insn (reg, new);
6174 new = reg;
6177 else
6179 if (INTVAL (op1) < -16*1024*1024
6180 || INTVAL (op1) >= 16*1024*1024)
6181 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6184 else
6186 base = legitimize_pic_address (XEXP (addr, 0), reg);
6187 new = legitimize_pic_address (XEXP (addr, 1),
6188 base == reg ? NULL_RTX : reg);
6190 if (GET_CODE (new) == CONST_INT)
6191 new = plus_constant (base, INTVAL (new));
6192 else
6194 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6196 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6197 new = XEXP (new, 1);
6199 new = gen_rtx_PLUS (Pmode, base, new);
6204 return new;
6207 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6209 static rtx
6210 get_thread_pointer (int to_reg)
6212 rtx tp, reg, insn;
6214 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6215 if (!to_reg)
6216 return tp;
6218 reg = gen_reg_rtx (Pmode);
6219 insn = gen_rtx_SET (VOIDmode, reg, tp);
6220 insn = emit_insn (insn);
6222 return reg;
6225 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6226 false if we expect this to be used for a memory address and true if
6227 we expect to load the address into a register. */
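/* Rough guide to the cases below: GLOBAL_DYNAMIC and LOCAL_DYNAMIC emit a
   call sequence (the tls_global_dynamic / tls_local_dynamic_base patterns),
   the latter followed by an UNSPEC_DTPOFF offset; INITIAL_EXEC loads the
   thread-pointer offset from the GOT (UNSPEC_GOTTPOFF, UNSPEC_GOTNTPOFF or
   UNSPEC_INDNTPOFF); LOCAL_EXEC uses a link-time constant offset from the
   thread pointer (UNSPEC_NTPOFF or UNSPEC_TPOFF).  */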
6229 static rtx
6230 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6232 rtx dest, base, off, pic;
6233 int type;
6235 switch (model)
6237 case TLS_MODEL_GLOBAL_DYNAMIC:
6238 dest = gen_reg_rtx (Pmode);
6239 if (TARGET_64BIT)
6241 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6243 start_sequence ();
6244 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
6245 insns = get_insns ();
6246 end_sequence ();
6248 emit_libcall_block (insns, dest, rax, x);
6250 else
6251 emit_insn (gen_tls_global_dynamic_32 (dest, x));
6252 break;
6254 case TLS_MODEL_LOCAL_DYNAMIC:
6255 base = gen_reg_rtx (Pmode);
6256 if (TARGET_64BIT)
6258 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
6260 start_sequence ();
6261 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
6262 insns = get_insns ();
6263 end_sequence ();
6265 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
6266 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
6267 emit_libcall_block (insns, base, rax, note);
6269 else
6270 emit_insn (gen_tls_local_dynamic_base_32 (base));
6272 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
6273 off = gen_rtx_CONST (Pmode, off);
6275 return gen_rtx_PLUS (Pmode, base, off);
6277 case TLS_MODEL_INITIAL_EXEC:
6278 if (TARGET_64BIT)
6280 pic = NULL;
6281 type = UNSPEC_GOTNTPOFF;
6283 else if (flag_pic)
6285 if (reload_in_progress)
6286 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6287 pic = pic_offset_table_rtx;
6288 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
6290 else if (!TARGET_GNU_TLS)
6292 pic = gen_reg_rtx (Pmode);
6293 emit_insn (gen_set_got (pic));
6294 type = UNSPEC_GOTTPOFF;
6296 else
6298 pic = NULL;
6299 type = UNSPEC_INDNTPOFF;
6302 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
6303 off = gen_rtx_CONST (Pmode, off);
6304 if (pic)
6305 off = gen_rtx_PLUS (Pmode, pic, off);
6306 off = gen_const_mem (Pmode, off);
6307 set_mem_alias_set (off, ix86_GOT_alias_set ());
6309 if (TARGET_64BIT || TARGET_GNU_TLS)
6311 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6312 off = force_reg (Pmode, off);
6313 return gen_rtx_PLUS (Pmode, base, off);
6315 else
6317 base = get_thread_pointer (true);
6318 dest = gen_reg_rtx (Pmode);
6319 emit_insn (gen_subsi3 (dest, base, off));
6321 break;
6323 case TLS_MODEL_LOCAL_EXEC:
6324 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
6325 (TARGET_64BIT || TARGET_GNU_TLS)
6326 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
6327 off = gen_rtx_CONST (Pmode, off);
6329 if (TARGET_64BIT || TARGET_GNU_TLS)
6331 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6332 return gen_rtx_PLUS (Pmode, base, off);
6334 else
6336 base = get_thread_pointer (true);
6337 dest = gen_reg_rtx (Pmode);
6338 emit_insn (gen_subsi3 (dest, base, off));
6340 break;
6342 default:
6343 gcc_unreachable ();
6346 return dest;
6349 /* Try machine-dependent ways of modifying an illegitimate address
6350 to be legitimate. If we find one, return the new, valid address.
6351 This macro is used in only one place: `memory_address' in explow.c.
6353 OLDX is the address as it was before break_out_memory_refs was called.
6354 In some cases it is useful to look at this to decide what needs to be done.
6356 MODE and WIN are passed so that this macro can use
6357 GO_IF_LEGITIMATE_ADDRESS.
6359 It is always safe for this macro to do nothing. It exists to recognize
6360 opportunities to optimize the output.
6362 For the 80386, we handle X+REG by loading X into a register R and
6363 using R+REG. R will go in a general reg and indexing will be used.
6364 However, if REG is a broken-out memory address or multiplication,
6365 nothing needs to be done because REG can certainly go in a general reg.
6367 When -fpic is used, special handling is needed for symbolic references.
6368 See comments by legitimize_pic_address in i386.c for details. */
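/* For instance (illustrative), an incoming (ashift (reg) (const_int 2))
   is rewritten below as (mult (reg) (const_int 4)), which the hardware
   addressing modes can encode as a scale of 4.  */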
6371 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
6373 int changed = 0;
6374 unsigned log;
6376 if (TARGET_DEBUG_ADDR)
6378 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
6379 GET_MODE_NAME (mode));
6380 debug_rtx (x);
6383 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
6384 if (log)
6385 return legitimize_tls_address (x, log, false);
6386 if (GET_CODE (x) == CONST
6387 && GET_CODE (XEXP (x, 0)) == PLUS
6388 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6389 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
6391 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
6392 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
6395 if (flag_pic && SYMBOLIC_CONST (x))
6396 return legitimize_pic_address (x, 0);
6398 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
6399 if (GET_CODE (x) == ASHIFT
6400 && GET_CODE (XEXP (x, 1)) == CONST_INT
6401 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
6403 changed = 1;
6404 log = INTVAL (XEXP (x, 1));
6405 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
6406 GEN_INT (1 << log));
6409 if (GET_CODE (x) == PLUS)
6411 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
6413 if (GET_CODE (XEXP (x, 0)) == ASHIFT
6414 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6415 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
6417 changed = 1;
6418 log = INTVAL (XEXP (XEXP (x, 0), 1));
6419 XEXP (x, 0) = gen_rtx_MULT (Pmode,
6420 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
6421 GEN_INT (1 << log));
6424 if (GET_CODE (XEXP (x, 1)) == ASHIFT
6425 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
6426 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
6428 changed = 1;
6429 log = INTVAL (XEXP (XEXP (x, 1), 1));
6430 XEXP (x, 1) = gen_rtx_MULT (Pmode,
6431 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
6432 GEN_INT (1 << log));
6435 /* Put multiply first if it isn't already. */
6436 if (GET_CODE (XEXP (x, 1)) == MULT)
6438 rtx tmp = XEXP (x, 0);
6439 XEXP (x, 0) = XEXP (x, 1);
6440 XEXP (x, 1) = tmp;
6441 changed = 1;
6444 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
6445 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
6446 created by virtual register instantiation, register elimination, and
6447 similar optimizations. */
6448 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
6450 changed = 1;
6451 x = gen_rtx_PLUS (Pmode,
6452 gen_rtx_PLUS (Pmode, XEXP (x, 0),
6453 XEXP (XEXP (x, 1), 0)),
6454 XEXP (XEXP (x, 1), 1));
6457 /* Canonicalize
6458 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
6459 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
6460 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
6461 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6462 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
6463 && CONSTANT_P (XEXP (x, 1)))
6465 rtx constant;
6466 rtx other = NULL_RTX;
6468 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6470 constant = XEXP (x, 1);
6471 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6473 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6475 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6476 other = XEXP (x, 1);
6478 else
6479 constant = 0;
6481 if (constant)
6483 changed = 1;
6484 x = gen_rtx_PLUS (Pmode,
6485 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6486 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6487 plus_constant (other, INTVAL (constant)));
6491 if (changed && legitimate_address_p (mode, x, FALSE))
6492 return x;
6494 if (GET_CODE (XEXP (x, 0)) == MULT)
6496 changed = 1;
6497 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6500 if (GET_CODE (XEXP (x, 1)) == MULT)
6502 changed = 1;
6503 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6506 if (changed
6507 && GET_CODE (XEXP (x, 1)) == REG
6508 && GET_CODE (XEXP (x, 0)) == REG)
6509 return x;
6511 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6513 changed = 1;
6514 x = legitimize_pic_address (x, 0);
6517 if (changed && legitimate_address_p (mode, x, FALSE))
6518 return x;
6520 if (GET_CODE (XEXP (x, 0)) == REG)
6522 rtx temp = gen_reg_rtx (Pmode);
6523 rtx val = force_operand (XEXP (x, 1), temp);
6524 if (val != temp)
6525 emit_move_insn (temp, val);
6527 XEXP (x, 1) = temp;
6528 return x;
6531 else if (GET_CODE (XEXP (x, 1)) == REG)
6533 rtx temp = gen_reg_rtx (Pmode);
6534 rtx val = force_operand (XEXP (x, 0), temp);
6535 if (val != temp)
6536 emit_move_insn (temp, val);
6538 XEXP (x, 0) = temp;
6539 return x;
6543 return x;
6546 /* Print an integer constant expression in assembler syntax. Addition
6547 and subtraction are the only arithmetic that may appear in these
6548 expressions. FILE is the stdio stream to write to, X is the rtx, and
6549 CODE is the operand print code from the output string. */
6551 static void
6552 output_pic_addr_const (FILE *file, rtx x, int code)
6554 char buf[256];
6556 switch (GET_CODE (x))
6558 case PC:
6559 gcc_assert (flag_pic);
6560 putc ('.', file);
6561 break;
6563 case SYMBOL_REF:
6564 assemble_name (file, XSTR (x, 0));
6565 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6566 fputs ("@PLT", file);
6567 break;
6569 case LABEL_REF:
6570 x = XEXP (x, 0);
6571 /* FALLTHRU */
6572 case CODE_LABEL:
6573 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6574 assemble_name (asm_out_file, buf);
6575 break;
6577 case CONST_INT:
6578 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6579 break;
6581 case CONST:
6582 /* This used to output parentheses around the expression,
6583 but that does not work on the 386 (either ATT or BSD assembler). */
6584 output_pic_addr_const (file, XEXP (x, 0), code);
6585 break;
6587 case CONST_DOUBLE:
6588 if (GET_MODE (x) == VOIDmode)
6590 /* We can use %d if the number is <32 bits and positive. */
6591 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6592 fprintf (file, "0x%lx%08lx",
6593 (unsigned long) CONST_DOUBLE_HIGH (x),
6594 (unsigned long) CONST_DOUBLE_LOW (x));
6595 else
6596 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6598 else
6599 /* We can't handle floating point constants;
6600 PRINT_OPERAND must handle them. */
6601 output_operand_lossage ("floating constant misused");
6602 break;
6604 case PLUS:
6605 /* Some assemblers need integer constants to appear first. */
6606 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6608 output_pic_addr_const (file, XEXP (x, 0), code);
6609 putc ('+', file);
6610 output_pic_addr_const (file, XEXP (x, 1), code);
6612 else
6614 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
6615 output_pic_addr_const (file, XEXP (x, 1), code);
6616 putc ('+', file);
6617 output_pic_addr_const (file, XEXP (x, 0), code);
6619 break;
6621 case MINUS:
6622 if (!TARGET_MACHO)
6623 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6624 output_pic_addr_const (file, XEXP (x, 0), code);
6625 putc ('-', file);
6626 output_pic_addr_const (file, XEXP (x, 1), code);
6627 if (!TARGET_MACHO)
6628 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6629 break;
6631 case UNSPEC:
6632 gcc_assert (XVECLEN (x, 0) == 1);
6633 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6634 switch (XINT (x, 1))
6636 case UNSPEC_GOT:
6637 fputs ("@GOT", file);
6638 break;
6639 case UNSPEC_GOTOFF:
6640 fputs ("@GOTOFF", file);
6641 break;
6642 case UNSPEC_GOTPCREL:
6643 fputs ("@GOTPCREL(%rip)", file);
6644 break;
6645 case UNSPEC_GOTTPOFF:
6646 /* FIXME: This might be @TPOFF in Sun ld too. */
6647 fputs ("@GOTTPOFF", file);
6648 break;
6649 case UNSPEC_TPOFF:
6650 fputs ("@TPOFF", file);
6651 break;
6652 case UNSPEC_NTPOFF:
6653 if (TARGET_64BIT)
6654 fputs ("@TPOFF", file);
6655 else
6656 fputs ("@NTPOFF", file);
6657 break;
6658 case UNSPEC_DTPOFF:
6659 fputs ("@DTPOFF", file);
6660 break;
6661 case UNSPEC_GOTNTPOFF:
6662 if (TARGET_64BIT)
6663 fputs ("@GOTTPOFF(%rip)", file);
6664 else
6665 fputs ("@GOTNTPOFF", file);
6666 break;
6667 case UNSPEC_INDNTPOFF:
6668 fputs ("@INDNTPOFF", file);
6669 break;
6670 default:
6671 output_operand_lossage ("invalid UNSPEC as operand");
6672 break;
6674 break;
6676 default:
6677 output_operand_lossage ("invalid expression as operand");
6681 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6682 We need to emit DTP-relative relocations. */
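/* Illustrative output, assuming ASM_LONG expands to ".long": a 4-byte
   entry emits ".long foo@DTPOFF", while an 8-byte entry appends ", 0"
   for the upper half.  */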
6684 static void
6685 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6687 fputs (ASM_LONG, file);
6688 output_addr_const (file, x);
6689 fputs ("@DTPOFF", file);
6690 switch (size)
6692 case 4:
6693 break;
6694 case 8:
6695 fputs (", 0", file);
6696 break;
6697 default:
6698 gcc_unreachable ();
6702 /* In the name of slightly smaller debug output, and to cater to
6703 general assembler lossage, recognize PIC+GOTOFF and turn it back
6704 into a direct symbol reference. */
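/* Illustrative example: (plus pic_reg (const (unspec [sym] UNSPEC_GOTOFF)))
   appearing outside a MEM is turned back into the bare "sym" below.  */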
6706 static rtx
6707 ix86_delegitimize_address (rtx orig_x)
6709 rtx x = orig_x, y;
6711 if (GET_CODE (x) == MEM)
6712 x = XEXP (x, 0);
6714 if (TARGET_64BIT)
6716 if (GET_CODE (x) != CONST
6717 || GET_CODE (XEXP (x, 0)) != UNSPEC
6718 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6719 || GET_CODE (orig_x) != MEM)
6720 return orig_x;
6721 return XVECEXP (XEXP (x, 0), 0, 0);
6724 if (GET_CODE (x) != PLUS
6725 || GET_CODE (XEXP (x, 1)) != CONST)
6726 return orig_x;
6728 if (GET_CODE (XEXP (x, 0)) == REG
6729 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6730 /* %ebx + GOT/GOTOFF */
6731 y = NULL;
6732 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6734 /* %ebx + %reg * scale + GOT/GOTOFF */
6735 y = XEXP (x, 0);
6736 if (GET_CODE (XEXP (y, 0)) == REG
6737 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6738 y = XEXP (y, 1);
6739 else if (GET_CODE (XEXP (y, 1)) == REG
6740 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6741 y = XEXP (y, 0);
6742 else
6743 return orig_x;
6744 if (GET_CODE (y) != REG
6745 && GET_CODE (y) != MULT
6746 && GET_CODE (y) != ASHIFT)
6747 return orig_x;
6749 else
6750 return orig_x;
6752 x = XEXP (XEXP (x, 1), 0);
6753 if (GET_CODE (x) == UNSPEC
6754 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6755 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6757 if (y)
6758 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6759 return XVECEXP (x, 0, 0);
6762 if (GET_CODE (x) == PLUS
6763 && GET_CODE (XEXP (x, 0)) == UNSPEC
6764 && GET_CODE (XEXP (x, 1)) == CONST_INT
6765 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6766 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6767 && GET_CODE (orig_x) != MEM)))
6769 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6770 if (y)
6771 return gen_rtx_PLUS (Pmode, y, x);
6772 return x;
6775 return orig_x;
6778 static void
6779 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6780 int fp, FILE *file)
6782 const char *suffix;
6784 if (mode == CCFPmode || mode == CCFPUmode)
6786 enum rtx_code second_code, bypass_code;
6787 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6788 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
6789 code = ix86_fp_compare_code_to_integer (code);
6790 mode = CCmode;
6792 if (reverse)
6793 code = reverse_condition (code);
6795 switch (code)
6797 case EQ:
6798 suffix = "e";
6799 break;
6800 case NE:
6801 suffix = "ne";
6802 break;
6803 case GT:
6804 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
6805 suffix = "g";
6806 break;
6807 case GTU:
6808 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6809 Those same assemblers have the same but opposite lossage on cmov. */
6810 gcc_assert (mode == CCmode);
6811 suffix = fp ? "nbe" : "a";
6812 break;
6813 case LT:
6814 switch (mode)
6816 case CCNOmode:
6817 case CCGOCmode:
6818 suffix = "s";
6819 break;
6821 case CCmode:
6822 case CCGCmode:
6823 suffix = "l";
6824 break;
6826 default:
6827 gcc_unreachable ();
6829 break;
6830 case LTU:
6831 gcc_assert (mode == CCmode);
6832 suffix = "b";
6833 break;
6834 case GE:
6835 switch (mode)
6837 case CCNOmode:
6838 case CCGOCmode:
6839 suffix = "ns";
6840 break;
6842 case CCmode:
6843 case CCGCmode:
6844 suffix = "ge";
6845 break;
6847 default:
6848 gcc_unreachable ();
6850 break;
6851 case GEU:
6852 /* ??? As above. */
6853 gcc_assert (mode == CCmode);
6854 suffix = fp ? "nb" : "ae";
6855 break;
6856 case LE:
6857 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
6858 suffix = "le";
6859 break;
6860 case LEU:
6861 gcc_assert (mode == CCmode);
6862 suffix = "be";
6863 break;
6864 case UNORDERED:
6865 suffix = fp ? "u" : "p";
6866 break;
6867 case ORDERED:
6868 suffix = fp ? "nu" : "np";
6869 break;
6870 default:
6871 gcc_unreachable ();
6873 fputs (suffix, file);
6876 /* Print the name of register X to FILE based on its machine mode and number.
6877 If CODE is 'w', pretend the mode is HImode.
6878 If CODE is 'b', pretend the mode is QImode.
6879 If CODE is 'k', pretend the mode is SImode.
6880 If CODE is 'q', pretend the mode is DImode.
6881 If CODE is 'h', pretend the reg is the 'high' byte register.
6882 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack op. */
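/* For example (illustrative): with operand register 0, code 'b' prints
   "%al", 'w' prints "%ax", 'k' prints "%eax", 'h' prints "%ah" and, on a
   64-bit target, 'q' prints "%rax".  */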
6884 void
6885 print_reg (rtx x, int code, FILE *file)
6887 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
6888 && REGNO (x) != FRAME_POINTER_REGNUM
6889 && REGNO (x) != FLAGS_REG
6890 && REGNO (x) != FPSR_REG);
6892 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6893 putc ('%', file);
6895 if (code == 'w' || MMX_REG_P (x))
6896 code = 2;
6897 else if (code == 'b')
6898 code = 1;
6899 else if (code == 'k')
6900 code = 4;
6901 else if (code == 'q')
6902 code = 8;
6903 else if (code == 'y')
6904 code = 3;
6905 else if (code == 'h')
6906 code = 0;
6907 else
6908 code = GET_MODE_SIZE (GET_MODE (x));
6910 /* Irritatingly, the AMD extended registers use a different naming convention
6911 from the normal registers. */
6912 if (REX_INT_REG_P (x))
6914 gcc_assert (TARGET_64BIT);
6915 switch (code)
6917 case 0:
6918 error ("extended registers have no high halves");
6919 break;
6920 case 1:
6921 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6922 break;
6923 case 2:
6924 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6925 break;
6926 case 4:
6927 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6928 break;
6929 case 8:
6930 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6931 break;
6932 default:
6933 error ("unsupported operand size for extended register");
6934 break;
6936 return;
6938 switch (code)
6940 case 3:
6941 if (STACK_TOP_P (x))
6943 fputs ("st(0)", file);
6944 break;
6946 /* FALLTHRU */
6947 case 8:
6948 case 4:
6949 case 12:
6950 if (! ANY_FP_REG_P (x))
6951 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6952 /* FALLTHRU */
6953 case 16:
6954 case 2:
6955 normal:
6956 fputs (hi_reg_name[REGNO (x)], file);
6957 break;
6958 case 1:
6959 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6960 goto normal;
6961 fputs (qi_reg_name[REGNO (x)], file);
6962 break;
6963 case 0:
6964 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6965 goto normal;
6966 fputs (qi_high_reg_name[REGNO (x)], file);
6967 break;
6968 default:
6969 gcc_unreachable ();
6973 /* Locate some local-dynamic symbol still in use by this function
6974 so that we can print its name in some tls_local_dynamic_base
6975 pattern. */
6977 static const char *
6978 get_some_local_dynamic_name (void)
6980 rtx insn;
6982 if (cfun->machine->some_ld_name)
6983 return cfun->machine->some_ld_name;
6985 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6986 if (INSN_P (insn)
6987 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6988 return cfun->machine->some_ld_name;
6990 gcc_unreachable ();
6993 static int
6994 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6996 rtx x = *px;
6998 if (GET_CODE (x) == SYMBOL_REF
6999 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7001 cfun->machine->some_ld_name = XSTR (x, 0);
7002 return 1;
7005 return 0;
7008 /* Meaning of CODE:
7009 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7010 C -- print opcode suffix for set/cmov insn.
7011 c -- like C, but print reversed condition
7012 F,f -- likewise, but for floating-point.
7013 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7014 otherwise nothing
7015 R -- print the prefix for register names.
7016 z -- print the opcode suffix for the size of the current operand.
7017 * -- print a star (in certain assembler syntax)
7018 A -- print an absolute memory reference.
7019 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7020 s -- print a shift double count, followed by the assembler's argument
7021 delimiter.
7022 b -- print the QImode name of the register for the indicated operand.
7023 %b0 would print %al if operands[0] is reg 0.
7024 w -- likewise, print the HImode name of the register.
7025 k -- likewise, print the SImode name of the register.
7026 q -- likewise, print the DImode name of the register.
7027 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7028 y -- print "st(0)" instead of "st" as a register.
7029 D -- print condition for SSE cmp instruction.
7030 P -- if PIC, print an @PLT suffix.
7031 X -- don't print any sort of PIC '@' suffix for a symbol.
7032 & -- print some in-use local-dynamic symbol name.
7033 H -- print a memory address offset by 8; used for sse high-parts
7036 void
7037 print_operand (FILE *file, rtx x, int code)
7039 if (code)
7041 switch (code)
7043 case '*':
7044 if (ASSEMBLER_DIALECT == ASM_ATT)
7045 putc ('*', file);
7046 return;
7048 case '&':
7049 assemble_name (file, get_some_local_dynamic_name ());
7050 return;
7052 case 'A':
7053 switch (ASSEMBLER_DIALECT)
7055 case ASM_ATT:
7056 putc ('*', file);
7057 break;
7059 case ASM_INTEL:
7060 /* Intel syntax. For absolute addresses, registers should not
7061 be surrounded by brackets. */
7062 if (GET_CODE (x) != REG)
7064 putc ('[', file);
7065 PRINT_OPERAND (file, x, 0);
7066 putc (']', file);
7067 return;
7069 break;
7071 default:
7072 gcc_unreachable ();
7075 PRINT_OPERAND (file, x, 0);
7076 return;
7079 case 'L':
7080 if (ASSEMBLER_DIALECT == ASM_ATT)
7081 putc ('l', file);
7082 return;
7084 case 'W':
7085 if (ASSEMBLER_DIALECT == ASM_ATT)
7086 putc ('w', file);
7087 return;
7089 case 'B':
7090 if (ASSEMBLER_DIALECT == ASM_ATT)
7091 putc ('b', file);
7092 return;
7094 case 'Q':
7095 if (ASSEMBLER_DIALECT == ASM_ATT)
7096 putc ('l', file);
7097 return;
7099 case 'S':
7100 if (ASSEMBLER_DIALECT == ASM_ATT)
7101 putc ('s', file);
7102 return;
7104 case 'T':
7105 if (ASSEMBLER_DIALECT == ASM_ATT)
7106 putc ('t', file);
7107 return;
7109 case 'z':
7110 /* 387 opcodes don't get size suffixes if the operands are
7111 registers. */
7112 if (STACK_REG_P (x))
7113 return;
7115 /* Likewise if using Intel opcodes. */
7116 if (ASSEMBLER_DIALECT == ASM_INTEL)
7117 return;
7119 /* Derive the opcode size suffix from the size of the operand. */
7120 switch (GET_MODE_SIZE (GET_MODE (x)))
7122 case 2:
7123 #ifdef HAVE_GAS_FILDS_FISTS
7124 putc ('s', file);
7125 #endif
7126 return;
7128 case 4:
7129 if (GET_MODE (x) == SFmode)
7131 putc ('s', file);
7132 return;
7134 else
7135 putc ('l', file);
7136 return;
7138 case 12:
7139 case 16:
7140 putc ('t', file);
7141 return;
7143 case 8:
7144 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7146 #ifdef GAS_MNEMONICS
7147 putc ('q', file);
7148 #else
7149 putc ('l', file);
7150 putc ('l', file);
7151 #endif
7153 else
7154 putc ('l', file);
7155 return;
7157 default:
7158 gcc_unreachable ();
7161 case 'b':
7162 case 'w':
7163 case 'k':
7164 case 'q':
7165 case 'h':
7166 case 'y':
7167 case 'X':
7168 case 'P':
7169 break;
7171 case 's':
7172 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7174 PRINT_OPERAND (file, x, 0);
7175 putc (',', file);
7177 return;
7179 case 'D':
7180 /* A little bit of brain damage here. The SSE compare instructions
7181 use completely different names for the comparisons than the
7182 fp conditional moves do. */
7183 switch (GET_CODE (x))
7185 case EQ:
7186 case UNEQ:
7187 fputs ("eq", file);
7188 break;
7189 case LT:
7190 case UNLT:
7191 fputs ("lt", file);
7192 break;
7193 case LE:
7194 case UNLE:
7195 fputs ("le", file);
7196 break;
7197 case UNORDERED:
7198 fputs ("unord", file);
7199 break;
7200 case NE:
7201 case LTGT:
7202 fputs ("neq", file);
7203 break;
7204 case UNGE:
7205 case GE:
7206 fputs ("nlt", file);
7207 break;
7208 case UNGT:
7209 case GT:
7210 fputs ("nle", file);
7211 break;
7212 case ORDERED:
7213 fputs ("ord", file);
7214 break;
7215 default:
7216 gcc_unreachable ();
7218 return;
7219 case 'O':
7220 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7221 if (ASSEMBLER_DIALECT == ASM_ATT)
7223 switch (GET_MODE (x))
7225 case HImode: putc ('w', file); break;
7226 case SImode:
7227 case SFmode: putc ('l', file); break;
7228 case DImode:
7229 case DFmode: putc ('q', file); break;
7230 default: gcc_unreachable ();
7232 putc ('.', file);
7234 #endif
7235 return;
7236 case 'C':
7237 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
7238 return;
7239 case 'F':
7240 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7241 if (ASSEMBLER_DIALECT == ASM_ATT)
7242 putc ('.', file);
7243 #endif
7244 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
7245 return;
7247 /* Like above, but reverse condition */
7248 case 'c':
7249 /* Check to see if argument to %c is really a constant
7250 and not a condition code which needs to be reversed. */
7251 if (!COMPARISON_P (x))
7253 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
7254 return;
7256 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
7257 return;
7258 case 'f':
7259 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7260 if (ASSEMBLER_DIALECT == ASM_ATT)
7261 putc ('.', file);
7262 #endif
7263 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
7264 return;
7266 case 'H':
7267 /* It doesn't actually matter what mode we use here, as we're
7268 only going to use this for printing. */
7269 x = adjust_address_nv (x, DImode, 8);
7270 break;
7272 case '+':
7274 rtx x;
7276 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
7277 return;
7279 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
7280 if (x)
7282 int pred_val = INTVAL (XEXP (x, 0));
7284 if (pred_val < REG_BR_PROB_BASE * 45 / 100
7285 || pred_val > REG_BR_PROB_BASE * 55 / 100)
7287 int taken = pred_val > REG_BR_PROB_BASE / 2;
7288 int cputaken = final_forward_branch_p (current_output_insn) == 0;
7290 /* Emit hints only in the case where the default branch prediction
7291 heuristics would fail. */
7292 if (taken != cputaken)
7294 /* We use 3e (DS) prefix for taken branches and
7295 2e (CS) prefix for not taken branches. */
7296 if (taken)
7297 fputs ("ds ; ", file);
7298 else
7299 fputs ("cs ; ", file);
7303 return;
7305 default:
7306 output_operand_lossage ("invalid operand code '%c'", code);
7310 if (GET_CODE (x) == REG)
7311 print_reg (x, code, file);
7313 else if (GET_CODE (x) == MEM)
7315 /* No `byte ptr' prefix for call instructions. */
7316 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
7318 const char * size;
7319 switch (GET_MODE_SIZE (GET_MODE (x)))
7321 case 1: size = "BYTE"; break;
7322 case 2: size = "WORD"; break;
7323 case 4: size = "DWORD"; break;
7324 case 8: size = "QWORD"; break;
7325 case 12: size = "XWORD"; break;
7326 case 16: size = "XMMWORD"; break;
7327 default:
7328 gcc_unreachable ();
7331 /* Check for explicit size override (codes 'b', 'w' and 'k') */
7332 if (code == 'b')
7333 size = "BYTE";
7334 else if (code == 'w')
7335 size = "WORD";
7336 else if (code == 'k')
7337 size = "DWORD";
7339 fputs (size, file);
7340 fputs (" PTR ", file);
7343 x = XEXP (x, 0);
7344 /* Avoid (%rip) for call operands. */
7345 if (CONSTANT_ADDRESS_P (x) && code == 'P'
7346 && GET_CODE (x) != CONST_INT)
7347 output_addr_const (file, x);
7348 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
7349 output_operand_lossage ("invalid constraints for operand");
7350 else
7351 output_address (x);
7354 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
7356 REAL_VALUE_TYPE r;
7357 long l;
7359 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7360 REAL_VALUE_TO_TARGET_SINGLE (r, l);
7362 if (ASSEMBLER_DIALECT == ASM_ATT)
7363 putc ('$', file);
7364 fprintf (file, "0x%08lx", l);
7367 /* These float cases don't actually occur as immediate operands. */
7368 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
7370 char dstr[30];
7372 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7373 fprintf (file, "%s", dstr);
7376 else if (GET_CODE (x) == CONST_DOUBLE
7377 && GET_MODE (x) == XFmode)
7379 char dstr[30];
7381 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7382 fprintf (file, "%s", dstr);
7385 else
7387 /* We have patterns that allow zero sets of memory, for instance.
7388 In 64-bit mode, we should probably support all 8-byte vectors,
7389 since we can in fact encode that into an immediate. */
7390 if (GET_CODE (x) == CONST_VECTOR)
7392 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
7393 x = const0_rtx;
7396 if (code != 'P')
7398 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
7400 if (ASSEMBLER_DIALECT == ASM_ATT)
7401 putc ('$', file);
7403 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
7404 || GET_CODE (x) == LABEL_REF)
7406 if (ASSEMBLER_DIALECT == ASM_ATT)
7407 putc ('$', file);
7408 else
7409 fputs ("OFFSET FLAT:", file);
7412 if (GET_CODE (x) == CONST_INT)
7413 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7414 else if (flag_pic)
7415 output_pic_addr_const (file, x, code);
7416 else
7417 output_addr_const (file, x);
7421 /* Print a memory operand whose address is ADDR. */
7423 void
7424 print_operand_address (FILE *file, rtx addr)
7426 struct ix86_address parts;
7427 rtx base, index, disp;
7428 int scale;
7429 int ok = ix86_decompose_address (addr, &parts);
7431 gcc_assert (ok);
7433 base = parts.base;
7434 index = parts.index;
7435 disp = parts.disp;
7436 scale = parts.scale;
7438 switch (parts.seg)
7440 case SEG_DEFAULT:
7441 break;
7442 case SEG_FS:
7443 case SEG_GS:
7444 if (USER_LABEL_PREFIX[0] == 0)
7445 putc ('%', file);
7446 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
7447 break;
7448 default:
7449 gcc_unreachable ();
7452 if (!base && !index)
7454 /* A displacement-only address requires special attention. */
7456 if (GET_CODE (disp) == CONST_INT)
7458 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
7460 if (USER_LABEL_PREFIX[0] == 0)
7461 putc ('%', file);
7462 fputs ("ds:", file);
7464 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
7466 else if (flag_pic)
7467 output_pic_addr_const (file, disp, 0);
7468 else
7469 output_addr_const (file, disp);
7471 /* Use the one byte shorter RIP-relative addressing for 64bit mode. */
7472 if (TARGET_64BIT)
7474 if (GET_CODE (disp) == CONST
7475 && GET_CODE (XEXP (disp, 0)) == PLUS
7476 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7477 disp = XEXP (XEXP (disp, 0), 0);
7478 if (GET_CODE (disp) == LABEL_REF
7479 || (GET_CODE (disp) == SYMBOL_REF
7480 && SYMBOL_REF_TLS_MODEL (disp) == 0))
7481 fputs ("(%rip)", file);
7484 else
7486 if (ASSEMBLER_DIALECT == ASM_ATT)
7488 if (disp)
7490 if (flag_pic)
7491 output_pic_addr_const (file, disp, 0);
7492 else if (GET_CODE (disp) == LABEL_REF)
7493 output_asm_label (disp);
7494 else
7495 output_addr_const (file, disp);
7498 putc ('(', file);
7499 if (base)
7500 print_reg (base, 0, file);
7501 if (index)
7503 putc (',', file);
7504 print_reg (index, 0, file);
7505 if (scale != 1)
7506 fprintf (file, ",%d", scale);
7508 putc (')', file);
7510 else
7512 rtx offset = NULL_RTX;
7514 if (disp)
7516 /* Pull out the offset of a symbol; print any symbol itself. */
7517 if (GET_CODE (disp) == CONST
7518 && GET_CODE (XEXP (disp, 0)) == PLUS
7519 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7521 offset = XEXP (XEXP (disp, 0), 1);
7522 disp = gen_rtx_CONST (VOIDmode,
7523 XEXP (XEXP (disp, 0), 0));
7526 if (flag_pic)
7527 output_pic_addr_const (file, disp, 0);
7528 else if (GET_CODE (disp) == LABEL_REF)
7529 output_asm_label (disp);
7530 else if (GET_CODE (disp) == CONST_INT)
7531 offset = disp;
7532 else
7533 output_addr_const (file, disp);
7536 putc ('[', file);
7537 if (base)
7539 print_reg (base, 0, file);
7540 if (offset)
7542 if (INTVAL (offset) >= 0)
7543 putc ('+', file);
7544 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7547 else if (offset)
7548 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7549 else
7550 putc ('0', file);
7552 if (index)
7554 putc ('+', file);
7555 print_reg (index, 0, file);
7556 if (scale != 1)
7557 fprintf (file, "*%d", scale);
7559 putc (']', file);
7564 bool
7565 output_addr_const_extra (FILE *file, rtx x)
7567 rtx op;
7569 if (GET_CODE (x) != UNSPEC)
7570 return false;
7572 op = XVECEXP (x, 0, 0);
7573 switch (XINT (x, 1))
7575 case UNSPEC_GOTTPOFF:
7576 output_addr_const (file, op);
7577 /* FIXME: This might be @TPOFF in Sun ld. */
7578 fputs ("@GOTTPOFF", file);
7579 break;
7580 case UNSPEC_TPOFF:
7581 output_addr_const (file, op);
7582 fputs ("@TPOFF", file);
7583 break;
7584 case UNSPEC_NTPOFF:
7585 output_addr_const (file, op);
7586 if (TARGET_64BIT)
7587 fputs ("@TPOFF", file);
7588 else
7589 fputs ("@NTPOFF", file);
7590 break;
7591 case UNSPEC_DTPOFF:
7592 output_addr_const (file, op);
7593 fputs ("@DTPOFF", file);
7594 break;
7595 case UNSPEC_GOTNTPOFF:
7596 output_addr_const (file, op);
7597 if (TARGET_64BIT)
7598 fputs ("@GOTTPOFF(%rip)", file);
7599 else
7600 fputs ("@GOTNTPOFF", file);
7601 break;
7602 case UNSPEC_INDNTPOFF:
7603 output_addr_const (file, op);
7604 fputs ("@INDNTPOFF", file);
7605 break;
7607 default:
7608 return false;
7611 return true;
7614 /* Split one or more DImode RTL references into pairs of SImode
7615 references. The RTL can be REG, offsettable MEM, integer constant, or
7616 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7617 split and "num" is its length. lo_half and hi_half are output arrays
7618 that parallel "operands". */
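/* Minimal usage sketch (hypothetical caller):

	rtx lo[1], hi[1];
	split_di (operands, 1, lo, hi);

   after which lo[0] and hi[0] hold the low and high SImode halves of
   operands[0].  */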
7620 void
7621 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7623 while (num--)
7625 rtx op = operands[num];
7627 /* simplify_subreg refuses to split volatile memory addresses,
7628 but we still have to handle them. */
7629 if (GET_CODE (op) == MEM)
7631 lo_half[num] = adjust_address (op, SImode, 0);
7632 hi_half[num] = adjust_address (op, SImode, 4);
7634 else
7636 lo_half[num] = simplify_gen_subreg (SImode, op,
7637 GET_MODE (op) == VOIDmode
7638 ? DImode : GET_MODE (op), 0);
7639 hi_half[num] = simplify_gen_subreg (SImode, op,
7640 GET_MODE (op) == VOIDmode
7641 ? DImode : GET_MODE (op), 4);
7645 /* Split one or more TImode RTL references into pairs of DImode
7646 references. The RTL can be REG, offsettable MEM, integer constant, or
7647 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
7648 split and "num" is its length. lo_half and hi_half are output arrays
7649 that parallel "operands". */
7651 void
7652 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7654 while (num--)
7656 rtx op = operands[num];
7658 /* simplify_subreg refuses to split volatile memory addresses, but we
7659 still have to handle them. */
7660 if (GET_CODE (op) == MEM)
7662 lo_half[num] = adjust_address (op, DImode, 0);
7663 hi_half[num] = adjust_address (op, DImode, 8);
7665 else
7667 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7668 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
7673 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7674 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7675 is the expression of the binary operation. The output may either be
7676 emitted here, or returned to the caller, like all output_* functions.
7678 There is no guarantee that the operands are the same mode, as they
7679 might be within FLOAT or FLOAT_EXTEND expressions. */
7681 #ifndef SYSV386_COMPAT
7682 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7683 wants to fix the assemblers because that causes incompatibility
7684 with gcc. No-one wants to fix gcc because that causes
7685 incompatibility with assemblers... You can use the option of
7686 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7687 #define SYSV386_COMPAT 1
7688 #endif
7690 const char *
7691 output_387_binary_op (rtx insn, rtx *operands)
7693 static char buf[30];
7694 const char *p;
7695 const char *ssep;
7696 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
7698 #ifdef ENABLE_CHECKING
7699 /* Even if we do not want to check the inputs, this documents input
7700 constraints. Which helps in understanding the following code. */
7701 if (STACK_REG_P (operands[0])
7702 && ((REG_P (operands[1])
7703 && REGNO (operands[0]) == REGNO (operands[1])
7704 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7705 || (REG_P (operands[2])
7706 && REGNO (operands[0]) == REGNO (operands[2])
7707 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7708 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7709 ; /* ok */
7710 else
7711 gcc_assert (is_sse);
7712 #endif
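/* In the output templates below, %z<n> expands to the size suffix implied
   by operand <n>'s mode, and the {...|...} braces select between the AT&T
   and Intel assembler dialect variants (here, their operand orders).  */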
7714 switch (GET_CODE (operands[3]))
7716 case PLUS:
7717 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7718 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7719 p = "fiadd";
7720 else
7721 p = "fadd";
7722 ssep = "add";
7723 break;
7725 case MINUS:
7726 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7727 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7728 p = "fisub";
7729 else
7730 p = "fsub";
7731 ssep = "sub";
7732 break;
7734 case MULT:
7735 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7736 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7737 p = "fimul";
7738 else
7739 p = "fmul";
7740 ssep = "mul";
7741 break;
7743 case DIV:
7744 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7745 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7746 p = "fidiv";
7747 else
7748 p = "fdiv";
7749 ssep = "div";
7750 break;
7752 default:
7753 gcc_unreachable ();
7756 if (is_sse)
7758 strcpy (buf, ssep);
7759 if (GET_MODE (operands[0]) == SFmode)
7760 strcat (buf, "ss\t{%2, %0|%0, %2}");
7761 else
7762 strcat (buf, "sd\t{%2, %0|%0, %2}");
7763 return buf;
7765 strcpy (buf, p);
7767 switch (GET_CODE (operands[3]))
7769 case MULT:
7770 case PLUS:
7771 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7773 rtx temp = operands[2];
7774 operands[2] = operands[1];
7775 operands[1] = temp;
7778 /* We know operands[0] == operands[1]. */
7780 if (GET_CODE (operands[2]) == MEM)
7782 p = "%z2\t%2";
7783 break;
7786 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7788 if (STACK_TOP_P (operands[0]))
7789 /* How is it that we are storing to a dead operand[2]?
7790 Well, presumably operands[1] is dead too. We can't
7791 store the result to st(0) as st(0) gets popped on this
7792 instruction. Instead store to operands[2] (which I
7793 think has to be st(1)). st(1) will be popped later.
7794 gcc <= 2.8.1 didn't have this check and generated
7795 assembly code that the Unixware assembler rejected. */
7796 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7797 else
7798 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7799 break;
7802 if (STACK_TOP_P (operands[0]))
7803 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7804 else
7805 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7806 break;
7808 case MINUS:
7809 case DIV:
7810 if (GET_CODE (operands[1]) == MEM)
7812 p = "r%z1\t%1";
7813 break;
7816 if (GET_CODE (operands[2]) == MEM)
7818 p = "%z2\t%2";
7819 break;
7822 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7824 #if SYSV386_COMPAT
7825 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7826 derived assemblers, confusingly reverse the direction of
7827 the operation for fsub{r} and fdiv{r} when the
7828 destination register is not st(0). The Intel assembler
7829 doesn't have this brain damage. Read !SYSV386_COMPAT to
7830 figure out what the hardware really does. */
7831 if (STACK_TOP_P (operands[0]))
7832 p = "{p\t%0, %2|rp\t%2, %0}";
7833 else
7834 p = "{rp\t%2, %0|p\t%0, %2}";
7835 #else
7836 if (STACK_TOP_P (operands[0]))
7837 /* As above for fmul/fadd, we can't store to st(0). */
7838 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7839 else
7840 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7841 #endif
7842 break;
7845 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7847 #if SYSV386_COMPAT
7848 if (STACK_TOP_P (operands[0]))
7849 p = "{rp\t%0, %1|p\t%1, %0}";
7850 else
7851 p = "{p\t%1, %0|rp\t%0, %1}";
7852 #else
7853 if (STACK_TOP_P (operands[0]))
7854 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7855 else
7856 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7857 #endif
7858 break;
7861 if (STACK_TOP_P (operands[0]))
7863 if (STACK_TOP_P (operands[1]))
7864 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7865 else
7866 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7867 break;
7869 else if (STACK_TOP_P (operands[1]))
7871 #if SYSV386_COMPAT
7872 p = "{\t%1, %0|r\t%0, %1}";
7873 #else
7874 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7875 #endif
7877 else
7879 #if SYSV386_COMPAT
7880 p = "{r\t%2, %0|\t%0, %2}";
7881 #else
7882 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7883 #endif
7885 break;
7887 default:
7888 gcc_unreachable ();
7891 strcat (buf, p);
7892 return buf;
7895 /* Return needed mode for entity in optimize_mode_switching pass. */
7898 ix86_mode_needed (int entity, rtx insn)
7900 enum attr_i387_cw mode;
7902 /* The mode UNINITIALIZED is used to store the control word after a
7903 function call or ASM pattern. The mode ANY specifies that the function
7904 has no requirements on the control word and makes no changes in the
7905 bits we are interested in. */
7907 if (CALL_P (insn)
7908 || (NONJUMP_INSN_P (insn)
7909 && (asm_noperands (PATTERN (insn)) >= 0
7910 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
7911 return I387_CW_UNINITIALIZED;
7913 if (recog_memoized (insn) < 0)
7914 return I387_CW_ANY;
7916 mode = get_attr_i387_cw (insn);
7918 switch (entity)
7920 case I387_TRUNC:
7921 if (mode == I387_CW_TRUNC)
7922 return mode;
7923 break;
7925 case I387_FLOOR:
7926 if (mode == I387_CW_FLOOR)
7927 return mode;
7928 break;
7930 case I387_CEIL:
7931 if (mode == I387_CW_CEIL)
7932 return mode;
7933 break;
7935 case I387_MASK_PM:
7936 if (mode == I387_CW_MASK_PM)
7937 return mode;
7938 break;
7940 default:
7941 gcc_unreachable ();
7944 return I387_CW_ANY;
7947 /* Output code to initialize the control word copies used by the trunc?f?i
7948 and rounding patterns. MODE selects the i387 control word to prepare:
7949 the current word is saved and a modified copy is set up in a stack slot. */
7951 void
7952 emit_i387_cw_initialization (int mode)
7954 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
7955 rtx new_mode;
7957 int slot;
7959 rtx reg = gen_reg_rtx (HImode);
7961 emit_insn (gen_x86_fnstcw_1 (stored_mode));
7962 emit_move_insn (reg, stored_mode);
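/* Bits 10-11 of the x87 control word form the rounding-control field:
   00 = nearest, 01 = down, 10 = up, 11 = truncate (chop).  The masks
   used below (0x0400, 0x0800, 0x0c00) select those modes, and 0x0020
   sets the precision-exception mask bit (PM) used for nearbyint.  */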
7964 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
7966 switch (mode)
7968 case I387_CW_TRUNC:
7969 /* round toward zero (truncate) */
7970 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7971 slot = SLOT_CW_TRUNC;
7972 break;
7974 case I387_CW_FLOOR:
7975 /* round down toward -oo */
7976 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7977 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7978 slot = SLOT_CW_FLOOR;
7979 break;
7981 case I387_CW_CEIL:
7982 /* round up toward +oo */
7983 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7984 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7985 slot = SLOT_CW_CEIL;
7986 break;
7988 case I387_CW_MASK_PM:
7989 /* mask precision exception for nearbyint() */
7990 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7991 slot = SLOT_CW_MASK_PM;
7992 break;
7994 default:
7995 gcc_unreachable ();
7998 else
8000 switch (mode)
8002 case I387_CW_TRUNC:
8003 /* round toward zero (truncate) */
8004 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8005 slot = SLOT_CW_TRUNC;
8006 break;
8008 case I387_CW_FLOOR:
8009 /* round down toward -oo */
8010 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8011 slot = SLOT_CW_FLOOR;
8012 break;
8014 case I387_CW_CEIL:
8015 /* round up toward +oo */
8016 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8017 slot = SLOT_CW_CEIL;
8018 break;
8020 case I387_CW_MASK_PM:
8021 /* mask precision exception for nearbyint() */
8022 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8023 slot = SLOT_CW_MASK_PM;
8024 break;
8026 default:
8027 gcc_unreachable ();
8031 gcc_assert (slot < MAX_386_STACK_LOCALS);
8033 new_mode = assign_386_stack_local (HImode, slot);
8034 emit_move_insn (new_mode, reg);
8037 /* Output code for INSN to convert a float to a signed int. OPERANDS
8038 are the insn operands. The output may be [HSD]Imode and the input
8039 operand may be [SDX]Fmode. */
8041 const char *
8042 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8044 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8045 int dimode_p = GET_MODE (operands[0]) == DImode;
8046 int round_mode = get_attr_i387_cw (insn);
8048 /* Jump through a hoop or two for DImode, since the hardware has no
8049 non-popping instruction. We used to do this a different way, but
8050 that was somewhat fragile and broke with post-reload splitters. */
8051 if ((dimode_p || fisttp) && !stack_top_dies)
8052 output_asm_insn ("fld\t%y1", operands);
8054 gcc_assert (STACK_TOP_P (operands[1]));
8055 gcc_assert (GET_CODE (operands[0]) == MEM);
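/* fisttp (from SSE3) always truncates regardless of the rounding-control
   field, so it needs no control word switch; the plain fist/fistp path
   below has to wrap the store in a pair of fldcw instructions instead.  */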
8057 if (fisttp)
8058 output_asm_insn ("fisttp%z0\t%0", operands);
8059 else
8061 if (round_mode != I387_CW_ANY)
8062 output_asm_insn ("fldcw\t%3", operands);
8063 if (stack_top_dies || dimode_p)
8064 output_asm_insn ("fistp%z0\t%0", operands);
8065 else
8066 output_asm_insn ("fist%z0\t%0", operands);
8067 if (round_mode != I387_CW_ANY)
8068 output_asm_insn ("fldcw\t%2", operands);
8071 return "";
8074 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8075 should be used. UNORDERED_P is true when fucom should be used. */
8077 const char *
8078 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8080 int stack_top_dies;
8081 rtx cmp_op0, cmp_op1;
8082 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8084 if (eflags_p)
8086 cmp_op0 = operands[0];
8087 cmp_op1 = operands[1];
8089 else
8091 cmp_op0 = operands[1];
8092 cmp_op1 = operands[2];
8095 if (is_sse)
8097 if (GET_MODE (operands[0]) == SFmode)
8098 if (unordered_p)
8099 return "ucomiss\t{%1, %0|%0, %1}";
8100 else
8101 return "comiss\t{%1, %0|%0, %1}";
8102 else
8103 if (unordered_p)
8104 return "ucomisd\t{%1, %0|%0, %1}";
8105 else
8106 return "comisd\t{%1, %0|%0, %1}";
8109 gcc_assert (STACK_TOP_P (cmp_op0));
8111 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8113 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8115 if (stack_top_dies)
8117 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8118 return TARGET_USE_FFREEP ? "ffreep\t%y1" : "fstp\t%y1";
8120 else
8121 return "ftst\n\tfnstsw\t%0";
8124 if (STACK_REG_P (cmp_op1)
8125 && stack_top_dies
8126 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8127 && REGNO (cmp_op1) != FIRST_STACK_REG)
8129 /* If the top of the 387 stack dies, and the other operand
8130 is also a stack register that dies, then this must be a
8131 `fcompp' float compare. */
8133 if (eflags_p)
8135 /* There is no double popping fcomi variant. Fortunately,
8136 eflags is immune from the fstp's cc clobbering. */
8137 if (unordered_p)
8138 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8139 else
8140 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8141 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
8143 else
8145 if (unordered_p)
8146 return "fucompp\n\tfnstsw\t%0";
8147 else
8148 return "fcompp\n\tfnstsw\t%0";
8151 else
8153 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8155 static const char * const alt[16] =
8157 "fcom%z2\t%y2\n\tfnstsw\t%0",
8158 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8159 "fucom%z2\t%y2\n\tfnstsw\t%0",
8160 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8162 "ficom%z2\t%y2\n\tfnstsw\t%0",
8163 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8164 NULL,
8165 NULL,
8167 "fcomi\t{%y1, %0|%0, %y1}",
8168 "fcomip\t{%y1, %0|%0, %y1}",
8169 "fucomi\t{%y1, %0|%0, %y1}",
8170 "fucomip\t{%y1, %0|%0, %y1}",
8172 NULL,
8173 NULL,
8174 NULL,
8175 NULL
8178 int mask;
8179 const char *ret;
8181 mask = eflags_p << 3;
8182 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
8183 mask |= unordered_p << 1;
8184 mask |= stack_top_dies;
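/* For example, eflags_p = 1, a non-integer operand, an ordered compare and
   a dying top-of-stack give mask = 8 + 0 + 0 + 1 = 9, selecting
   "fcomip\t{%y1, %0|%0, %y1}" from the table above.  */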
8186 gcc_assert (mask < 16);
8187 ret = alt[mask];
8188 gcc_assert (ret);
8190 return ret;
8194 void
8195 ix86_output_addr_vec_elt (FILE *file, int value)
8197 const char *directive = ASM_LONG;
8199 #ifdef ASM_QUAD
8200 if (TARGET_64BIT)
8201 directive = ASM_QUAD;
8202 #else
8203 gcc_assert (!TARGET_64BIT);
8204 #endif
8206 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
8209 void
8210 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
8212 if (TARGET_64BIT)
8213 fprintf (file, "%s%s%d-%s%d\n",
8214 ASM_LONG, LPREFIX, value, LPREFIX, rel);
8215 else if (HAVE_AS_GOTOFF_IN_DATA)
8216 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
8217 #if TARGET_MACHO
8218 else if (TARGET_MACHO)
8220 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
8221 machopic_output_function_base_name (file);
8222 fprintf(file, "\n");
8224 #endif
8225 else
8226 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
8227 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
8230 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
8231 for the target. */
8233 void
8234 ix86_expand_clear (rtx dest)
8236 rtx tmp;
8238 /* We play register width games, which are only valid after reload. */
8239 gcc_assert (reload_completed);
8241 /* Avoid HImode and its attendant prefix byte. */
8242 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
8243 dest = gen_rtx_REG (SImode, REGNO (dest));
8245 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
8247 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
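/* The xor form clobbers the flags register, hence the CLOBBER added in
   parallel below; "mov $0, reg" leaves the flags untouched but is larger.  */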
8248 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
8250 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
8251 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
8254 emit_insn (tmp);
8257 /* X is an unchanging MEM. If it is a constant pool reference, return
8258 the constant pool rtx, else NULL. */
8261 maybe_get_pool_constant (rtx x)
8263 x = ix86_delegitimize_address (XEXP (x, 0));
8265 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
8266 return get_pool_constant (x);
8268 return NULL_RTX;
8271 void
8272 ix86_expand_move (enum machine_mode mode, rtx operands[])
8274 int strict = (reload_in_progress || reload_completed);
8275 rtx op0, op1;
8276 enum tls_model model;
8278 op0 = operands[0];
8279 op1 = operands[1];
8281 if (GET_CODE (op1) == SYMBOL_REF)
8283 model = SYMBOL_REF_TLS_MODEL (op1);
8284 if (model)
8286 op1 = legitimize_tls_address (op1, model, true);
8287 op1 = force_operand (op1, op0);
8288 if (op1 == op0)
8289 return;
8292 else if (GET_CODE (op1) == CONST
8293 && GET_CODE (XEXP (op1, 0)) == PLUS
8294 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
8296 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
8297 if (model)
8299 rtx addend = XEXP (XEXP (op1, 0), 1);
8300 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
8301 op1 = force_operand (op1, NULL);
8302 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
8303 op0, 1, OPTAB_DIRECT);
8304 if (op1 == op0)
8305 return;
8309 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
8311 #if TARGET_MACHO
8312 if (MACHOPIC_PURE)
8314 rtx temp = ((reload_in_progress
8315 || ((op0 && GET_CODE (op0) == REG)
8316 && mode == Pmode))
8317 ? op0 : gen_reg_rtx (Pmode));
8318 op1 = machopic_indirect_data_reference (op1, temp);
8319 op1 = machopic_legitimize_pic_address (op1, mode,
8320 temp == op1 ? 0 : temp);
8322 else if (MACHOPIC_INDIRECT)
8323 op1 = machopic_indirect_data_reference (op1, 0);
8324 if (op0 == op1)
8325 return;
8326 #else
8327 if (GET_CODE (op0) == MEM)
8328 op1 = force_reg (Pmode, op1);
8329 else
8330 op1 = legitimize_address (op1, op1, Pmode);
8331 #endif /* TARGET_MACHO */
8333 else
8335 if (GET_CODE (op0) == MEM
8336 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
8337 || !push_operand (op0, mode))
8338 && GET_CODE (op1) == MEM)
8339 op1 = force_reg (mode, op1);
8341 if (push_operand (op0, mode)
8342 && ! general_no_elim_operand (op1, mode))
8343 op1 = copy_to_mode_reg (mode, op1);
8345 /* Force large constants in 64bit compilation into register
8346 to get them CSEed. */
8347 if (TARGET_64BIT && mode == DImode
8348 && immediate_operand (op1, mode)
8349 && !x86_64_zext_immediate_operand (op1, VOIDmode)
8350 && !register_operand (op0, mode)
8351 && optimize && !reload_completed && !reload_in_progress)
8352 op1 = copy_to_mode_reg (mode, op1);
8354 if (FLOAT_MODE_P (mode))
8356 /* If we are loading a floating point constant to a register,
8357 force the value to memory now, since we'll get better code
8358 out the back end. */
8360 if (strict)
8362 else if (GET_CODE (op1) == CONST_DOUBLE)
8364 op1 = validize_mem (force_const_mem (mode, op1));
8365 if (!register_operand (op0, mode))
8367 rtx temp = gen_reg_rtx (mode);
8368 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
8369 emit_move_insn (op0, temp);
8370 return;
8376 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8379 void
8380 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
8382 rtx op0 = operands[0], op1 = operands[1];
8384 /* Force constants other than zero into memory. We do not know how
8385 the instructions used to build constants modify the upper 64 bits
8386 of the register; once we have that information we may be able
8387 to handle some of them more efficiently. */
8388 if ((reload_in_progress | reload_completed) == 0
8389 && register_operand (op0, mode)
8390 && CONSTANT_P (op1) && op1 != CONST0_RTX (mode))
8391 op1 = validize_mem (force_const_mem (mode, op1));
8393 /* Make operand1 a register if it isn't already. */
8394 if (!no_new_pseudos
8395 && !register_operand (op0, mode)
8396 && !register_operand (op1, mode))
8398 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
8399 return;
8402 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8405 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
8406 straight to ix86_expand_vector_move. */
8408 void
8409 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
8411 rtx op0, op1, m;
8413 op0 = operands[0];
8414 op1 = operands[1];
8416 if (MEM_P (op1))
8418 /* If we're optimizing for size, movups is the smallest. */
8419 if (optimize_size)
8421 op0 = gen_lowpart (V4SFmode, op0);
8422 op1 = gen_lowpart (V4SFmode, op1);
8423 emit_insn (gen_sse_movups (op0, op1));
8424 return;
8427 /* ??? If we have typed data, then it would appear that using
8428 movdqu is the only way to get unaligned data loaded with
8429 integer type. */
8430 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8432 op0 = gen_lowpart (V16QImode, op0);
8433 op1 = gen_lowpart (V16QImode, op1);
8434 emit_insn (gen_sse2_movdqu (op0, op1));
8435 return;
8438 if (TARGET_SSE2 && mode == V2DFmode)
8440 rtx zero;
8442 /* When SSE registers are split into halves, we can avoid
8443 writing to the top half twice. */
8444 if (TARGET_SSE_SPLIT_REGS)
8446 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8447 zero = op0;
8449 else
8451 /* ??? Not sure about the best option for the Intel chips.
8452 The following would seem to satisfy; the register is
8453 entirely cleared, breaking the dependency chain. We
8454 then store to the upper half, with a dependency depth
8455 of one. A rumor has it that Intel recommends two movsd
8456 followed by an unpacklpd, but this is unconfirmed. And
8457 given that the dependency depth of the unpacklpd would
8458 still be one, I'm not sure why this would be better. */
8459 zero = CONST0_RTX (V2DFmode);
8462 m = adjust_address (op1, DFmode, 0);
8463 emit_insn (gen_sse2_loadlpd (op0, zero, m));
8464 m = adjust_address (op1, DFmode, 8);
8465 emit_insn (gen_sse2_loadhpd (op0, op0, m));
8467 else
8469 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
8470 emit_move_insn (op0, CONST0_RTX (mode));
8471 else
8472 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
8474 if (mode != V4SFmode)
8475 op0 = gen_lowpart (V4SFmode, op0);
8476 m = adjust_address (op1, V2SFmode, 0);
8477 emit_insn (gen_sse_loadlps (op0, op0, m));
8478 m = adjust_address (op1, V2SFmode, 8);
8479 emit_insn (gen_sse_loadhps (op0, op0, m));
8482 else if (MEM_P (op0))
8484 /* If we're optimizing for size, movups is the smallest. */
8485 if (optimize_size)
8487 op0 = gen_lowpart (V4SFmode, op0);
8488 op1 = gen_lowpart (V4SFmode, op1);
8489 emit_insn (gen_sse_movups (op0, op1));
8490 return;
8493 /* ??? Similar to above, only less clear because of quote
8494 typeless stores unquote. */
8495 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
8496 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
8498 op0 = gen_lowpart (V16QImode, op0);
8499 op1 = gen_lowpart (V16QImode, op1);
8500 emit_insn (gen_sse2_movdqu (op0, op1));
8501 return;
8504 if (TARGET_SSE2 && mode == V2DFmode)
8506 m = adjust_address (op0, DFmode, 0);
8507 emit_insn (gen_sse2_storelpd (m, op1));
8508 m = adjust_address (op0, DFmode, 8);
8509 emit_insn (gen_sse2_storehpd (m, op1));
8511 else
8513 if (mode != V4SFmode)
8514 op1 = gen_lowpart (V4SFmode, op1);
8515 m = adjust_address (op0, V2SFmode, 0);
8516 emit_insn (gen_sse_storelps (m, op1));
8517 m = adjust_address (op0, V2SFmode, 8);
8518 emit_insn (gen_sse_storehps (m, op1));
8521 else
8522 gcc_unreachable ();
8525 /* Expand a push in MODE. This is some mode for which we do not support
8526 proper push instructions, at least from the registers that we expect
8527 the value to live in. */
8529 void
8530 ix86_expand_push (enum machine_mode mode, rtx x)
8532 rtx tmp;
8534 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
8535 GEN_INT (-GET_MODE_SIZE (mode)),
8536 stack_pointer_rtx, 1, OPTAB_DIRECT);
8537 if (tmp != stack_pointer_rtx)
8538 emit_move_insn (stack_pointer_rtx, tmp);
8540 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
8541 emit_move_insn (tmp, x);
8544 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
8545 destination to use for the operation. If different from the true
8546 destination in operands[0], a copy operation will be required. */
8549 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
8550 rtx operands[])
8552 int matching_memory;
8553 rtx src1, src2, dst;
8555 dst = operands[0];
8556 src1 = operands[1];
8557 src2 = operands[2];
8559 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8560 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8561 && (rtx_equal_p (dst, src2)
8562 || immediate_operand (src1, mode)))
8564 rtx temp = src1;
8565 src1 = src2;
8566 src2 = temp;
8569 /* If the destination is memory, and we do not have matching source
8570 operands, do things in registers. */
8571 matching_memory = 0;
8572 if (GET_CODE (dst) == MEM)
8574 if (rtx_equal_p (dst, src1))
8575 matching_memory = 1;
8576 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8577 && rtx_equal_p (dst, src2))
8578 matching_memory = 2;
8579 else
8580 dst = gen_reg_rtx (mode);
8583 /* Both source operands cannot be in memory. */
8584 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8586 if (matching_memory != 2)
8587 src2 = force_reg (mode, src2);
8588 else
8589 src1 = force_reg (mode, src1);
8592 /* If the operation is not commutative, source 1 cannot be a constant
8593 or non-matching memory. */
8594 if ((CONSTANT_P (src1)
8595 || (!matching_memory && GET_CODE (src1) == MEM))
8596 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8597 src1 = force_reg (mode, src1);
8599 src1 = operands[1] = src1;
8600 src2 = operands[2] = src2;
8601 return dst;
8604 /* Similarly, but assume that the destination has already been
8605 set up properly. */
8607 void
8608 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
8609 enum machine_mode mode, rtx operands[])
8611 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
8612 gcc_assert (dst == operands[0]);
8615 /* Attempt to expand a binary operator. Make the expansion closer to the
8616 actual machine than just general_operand, which would allow 3 separate
8617 memory references (one output, two input) in a single insn. */
8619 void
8620 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8621 rtx operands[])
8623 rtx src1, src2, dst, op, clob;
8625 dst = ix86_fixup_binary_operands (code, mode, operands);
8626 src1 = operands[1];
8627 src2 = operands[2];
8629 /* Emit the instruction. */
8631 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8632 if (reload_in_progress)
8634 /* Reload doesn't know about the flags register, and doesn't know that
8635 it doesn't want to clobber it. We can only do this with PLUS. */
8636 gcc_assert (code == PLUS);
8637 emit_insn (op);
8639 else
8641 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8642 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8645 /* Fix up the destination if needed. */
8646 if (dst != operands[0])
8647 emit_move_insn (operands[0], dst);
8650 /* Return TRUE or FALSE depending on whether the binary operator meets the
8651 appropriate constraints. */
8654 ix86_binary_operator_ok (enum rtx_code code,
8655 enum machine_mode mode ATTRIBUTE_UNUSED,
8656 rtx operands[3])
8658 /* Both source operands cannot be in memory. */
8659 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8660 return 0;
8661 /* If the operation is not commutative, source 1 cannot be a constant. */
8662 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
8663 return 0;
8664 /* If the destination is memory, we must have a matching source operand. */
8665 if (GET_CODE (operands[0]) == MEM
8666 && ! (rtx_equal_p (operands[0], operands[1])
8667 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
8668 && rtx_equal_p (operands[0], operands[2]))))
8669 return 0;
8670 /* If the operation is not commutative and source 1 is memory, we must
8671 have a matching destination. */
8672 if (GET_CODE (operands[1]) == MEM
8673 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
8674 && ! rtx_equal_p (operands[0], operands[1]))
8675 return 0;
8676 return 1;
8679 /* Attempt to expand a unary operator. Make the expansion closer to the
8680 actual machine than just general_operand, which would allow 2 separate
8681 memory references (one output, one input) in a single insn. */
8683 void
8684 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8685 rtx operands[])
8687 int matching_memory;
8688 rtx src, dst, op, clob;
8690 dst = operands[0];
8691 src = operands[1];
8693 /* If the destination is memory, and we do not have matching source
8694 operands, do things in registers. */
8695 matching_memory = 0;
8696 if (MEM_P (dst))
8698 if (rtx_equal_p (dst, src))
8699 matching_memory = 1;
8700 else
8701 dst = gen_reg_rtx (mode);
8704 /* When source operand is memory, destination must match. */
8705 if (MEM_P (src) && !matching_memory)
8706 src = force_reg (mode, src);
8708 /* Emit the instruction. */
8710 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8711 if (reload_in_progress || code == NOT)
8713 /* Reload doesn't know about the flags register, and doesn't know that
8714 it doesn't want to clobber it. */
8715 gcc_assert (code == NOT);
8716 emit_insn (op);
8718 else
8720 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8721 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8724 /* Fix up the destination if needed. */
8725 if (dst != operands[0])
8726 emit_move_insn (operands[0], dst);
8729 /* Return TRUE or FALSE depending on whether the unary operator meets the
8730 appropriate constraints. */
8733 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8734 enum machine_mode mode ATTRIBUTE_UNUSED,
8735 rtx operands[2] ATTRIBUTE_UNUSED)
8737 /* If one of operands is memory, source and destination must match. */
8738 if ((GET_CODE (operands[0]) == MEM
8739 || GET_CODE (operands[1]) == MEM)
8740 && ! rtx_equal_p (operands[0], operands[1]))
8741 return FALSE;
8742 return TRUE;
8745 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
8746 Create a mask for the sign bit in MODE for an SSE register. If VECT is
8747 true, then replicate the mask for all elements of the vector register.
8748 If INVERT is true, then create a mask excluding the sign bit. */
8751 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
8753 enum machine_mode vec_mode;
8754 HOST_WIDE_INT hi, lo;
8755 int shift = 63;
8756 rtvec v;
8757 rtx mask;
8759 /* Find the sign bit, sign extended to 2*HWI. */
8760 if (mode == SFmode)
8761 lo = 0x80000000, hi = lo < 0;
8762 else if (HOST_BITS_PER_WIDE_INT >= 64)
8763 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
8764 else
8765 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
8767 if (invert)
8768 lo = ~lo, hi = ~hi;
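/* The resulting constant is 0x80000000 for SFmode and has only bit 63 set
   for DFmode (split across LO and HI on narrow hosts); with INVERT it is
   the complement, i.e. every bit except the sign bit.  */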
8770 /* Force this value into the low part of a fp vector constant. */
8771 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
8772 mask = gen_lowpart (mode, mask);
8774 if (mode == SFmode)
8776 if (vect)
8777 v = gen_rtvec (4, mask, mask, mask, mask);
8778 else
8779 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
8780 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8781 vec_mode = V4SFmode;
8783 else
8785 if (vect)
8786 v = gen_rtvec (2, mask, mask);
8787 else
8788 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
8789 vec_mode = V2DFmode;
8792 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
8795 /* Generate code for floating point ABS or NEG. */
8797 void
8798 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
8799 rtx operands[])
8801 rtx mask, set, use, clob, dst, src;
8802 bool matching_memory;
8803 bool use_sse = false;
8804 bool vector_mode = VECTOR_MODE_P (mode);
8805 enum machine_mode elt_mode = mode;
8807 if (vector_mode)
8809 elt_mode = GET_MODE_INNER (mode);
8810 use_sse = true;
8812 else if (TARGET_SSE_MATH)
8813 use_sse = SSE_FLOAT_MODE_P (mode);
8815 /* NEG and ABS performed with SSE use bitwise mask operations.
8816 Create the appropriate mask now. */
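/* With IEEE sign-magnitude encoding, NEG is just (x XOR signmask) and
   ABS is (x AND ~signmask), so a single per-element constant mask works
   for both the scalar and the vector cases.  */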
8817 if (use_sse)
8818 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
8819 else
8821 /* When not using SSE, we don't use the mask, but prefer to keep the
8822 same general form of the insn pattern to reduce duplication when
8823 it comes time to split. */
8824 mask = const0_rtx;
8827 dst = operands[0];
8828 src = operands[1];
8830 /* If the destination is memory, and we don't have matching source
8831 operands, do things in registers. */
8832 matching_memory = false;
8833 if (MEM_P (dst))
8835 if (rtx_equal_p (dst, src))
8836 matching_memory = true;
8837 else
8838 dst = gen_reg_rtx (mode);
8840 if (MEM_P (src) && !matching_memory)
8841 src = force_reg (mode, src);
8843 if (vector_mode)
8845 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
8846 set = gen_rtx_SET (VOIDmode, dst, set);
8847 emit_insn (set);
8849 else
8851 set = gen_rtx_fmt_e (code, mode, src);
8852 set = gen_rtx_SET (VOIDmode, dst, set);
8853 use = gen_rtx_USE (VOIDmode, mask);
8854 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8855 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (3, set, use, clob)));
8858 if (dst != operands[0])
8859 emit_move_insn (operands[0], dst);
8862 /* Expand a copysign operation. Special case operand 0 being a constant. */
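/* Conceptually, copysign (x, y) = (x & ~SIGNMASK) | (y & SIGNMASK),
   i.e. the magnitude of x combined with the sign bit of y.  */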
8864 void
8865 ix86_expand_copysign (rtx operands[])
8867 enum machine_mode mode, vmode;
8868 rtx dest, op0, op1, mask, nmask;
8870 dest = operands[0];
8871 op0 = operands[1];
8872 op1 = operands[2];
8874 mode = GET_MODE (dest);
8875 vmode = mode == SFmode ? V4SFmode : V2DFmode;
8877 if (GET_CODE (op0) == CONST_DOUBLE)
8879 rtvec v;
8881 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
8882 op0 = simplify_unary_operation (ABS, mode, op0, mode);
8884 if (op0 == CONST0_RTX (mode))
8885 op0 = CONST0_RTX (vmode);
8886 else
8888 if (mode == SFmode)
8889 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
8890 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
8891 else
8892 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
8893 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
8896 mask = ix86_build_signbit_mask (mode, 0, 0);
8898 if (mode == SFmode)
8899 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
8900 else
8901 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
8903 else
8905 nmask = ix86_build_signbit_mask (mode, 0, 1);
8906 mask = ix86_build_signbit_mask (mode, 0, 0);
8908 if (mode == SFmode)
8909 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
8910 else
8911 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
8915 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
8916 be a constant, and so has already been expanded into a vector constant. */
8918 void
8919 ix86_split_copysign_const (rtx operands[])
8921 enum machine_mode mode, vmode;
8922 rtx dest, op0, op1, mask, x;
8924 dest = operands[0];
8925 op0 = operands[1];
8926 op1 = operands[2];
8927 mask = operands[3];
8929 mode = GET_MODE (dest);
8930 vmode = GET_MODE (mask);
8932 dest = simplify_gen_subreg (vmode, dest, mode, 0);
8933 x = gen_rtx_AND (vmode, dest, mask);
8934 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8936 if (op0 != CONST0_RTX (vmode))
8938 x = gen_rtx_IOR (vmode, dest, op0);
8939 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8943 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
8944 so we have to do two masks. */
8946 void
8947 ix86_split_copysign_var (rtx operands[])
8949 enum machine_mode mode, vmode;
8950 rtx dest, scratch, op0, op1, mask, nmask, x;
8952 dest = operands[0];
8953 scratch = operands[1];
8954 op0 = operands[2];
8955 op1 = operands[3];
8956 nmask = operands[4];
8957 mask = operands[5];
8959 mode = GET_MODE (dest);
8960 vmode = GET_MODE (mask);
8962 if (rtx_equal_p (op0, op1))
8964 /* Shouldn't happen often (it's useless, obviously), but when it does
8965 we'd generate incorrect code if we continue below. */
8966 emit_move_insn (dest, op0);
8967 return;
8970 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
8972 gcc_assert (REGNO (op1) == REGNO (scratch));
8974 x = gen_rtx_AND (vmode, scratch, mask);
8975 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8977 dest = mask;
8978 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
8979 x = gen_rtx_NOT (vmode, dest);
8980 x = gen_rtx_AND (vmode, x, op0);
8981 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
8983 else
8985 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
8987 x = gen_rtx_AND (vmode, scratch, mask);
8989 else /* alternative 2,4 */
8991 gcc_assert (REGNO (mask) == REGNO (scratch));
8992 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
8993 x = gen_rtx_AND (vmode, scratch, op1);
8995 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
8997 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
8999 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9000 x = gen_rtx_AND (vmode, dest, nmask);
9002 else /* alternative 3,4 */
9004 gcc_assert (REGNO (nmask) == REGNO (dest));
9005 dest = nmask;
9006 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9007 x = gen_rtx_AND (vmode, dest, op0);
9009 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9012 x = gen_rtx_IOR (vmode, dest, scratch);
9013 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9016 /* Return TRUE or FALSE depending on whether the first SET in INSN
9017 has source and destination with matching CC modes, and that the
9018 CC mode is at least as constrained as REQ_MODE. */
9021 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9023 rtx set;
9024 enum machine_mode set_mode;
9026 set = PATTERN (insn);
9027 if (GET_CODE (set) == PARALLEL)
9028 set = XVECEXP (set, 0, 0);
9029 gcc_assert (GET_CODE (set) == SET);
9030 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9032 set_mode = GET_MODE (SET_DEST (set));
9033 switch (set_mode)
9035 case CCNOmode:
9036 if (req_mode != CCNOmode
9037 && (req_mode != CCmode
9038 || XEXP (SET_SRC (set), 1) != const0_rtx))
9039 return 0;
9040 break;
9041 case CCmode:
9042 if (req_mode == CCGCmode)
9043 return 0;
9044 /* FALLTHRU */
9045 case CCGCmode:
9046 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9047 return 0;
9048 /* FALLTHRU */
9049 case CCGOCmode:
9050 if (req_mode == CCZmode)
9051 return 0;
9052 /* FALLTHRU */
9053 case CCZmode:
9054 break;
9056 default:
9057 gcc_unreachable ();
9060 return (GET_MODE (SET_SRC (set)) == set_mode);
9063 /* Generate insn patterns to do an integer compare of OPERANDS. */
9065 static rtx
9066 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9068 enum machine_mode cmpmode;
9069 rtx tmp, flags;
9071 cmpmode = SELECT_CC_MODE (code, op0, op1);
9072 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9074 /* This is very simple, but making the interface the same as in the
9075 FP case makes the rest of the code easier. */
9076 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9077 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9079 /* Return the test that should be put into the flags user, i.e.
9080 the bcc, scc, or cmov instruction. */
9081 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9084 /* Figure out whether to use ordered or unordered fp comparisons.
9085 Return the appropriate mode to use. */
9087 enum machine_mode
9088 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9090 /* ??? In order to make all comparisons reversible, we do all comparisons
9091 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9092 all forms of trapping and nontrapping comparisons, we can make inequality
9093 comparisons trapping again, since it results in better code when using
9094 FCOM based compares. */
9095 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9098 enum machine_mode
9099 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9101 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9102 return ix86_fp_compare_mode (code);
9103 switch (code)
9105 /* Only zero flag is needed. */
9106 case EQ: /* ZF=0 */
9107 case NE: /* ZF!=0 */
9108 return CCZmode;
9109 /* Codes needing carry flag. */
9110 case GEU: /* CF=0 */
9111 case GTU: /* CF=0 & ZF=0 */
9112 case LTU: /* CF=1 */
9113 case LEU: /* CF=1 | ZF=1 */
9114 return CCmode;
9115 /* Codes possibly doable only with sign flag when
9116 comparing against zero. */
9117 case GE: /* SF=OF or SF=0 */
9118 case LT: /* SF<>OF or SF=1 */
9119 if (op1 == const0_rtx)
9120 return CCGOCmode;
9121 else
9122 /* For other cases Carry flag is not required. */
9123 return CCGCmode;
9124 /* Codes doable only with the sign flag when comparing
9125 against zero, but we lack a jump instruction for it,
9126 so we need to use relational tests against the overflow
9127 flag, which thus needs to be zero. */
9128 case GT: /* ZF=0 & SF=OF */
9129 case LE: /* ZF=1 | SF<>OF */
9130 if (op1 == const0_rtx)
9131 return CCNOmode;
9132 else
9133 return CCGCmode;
9134 /* The strcmp pattern does (use flags), and combine may ask us for the
9135 proper mode. */
9136 case USE:
9137 return CCmode;
9138 default:
9139 gcc_unreachable ();
9143 /* Return the fixed registers used for condition codes. */
9145 static bool
9146 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9148 *p1 = FLAGS_REG;
9149 *p2 = FPSR_REG;
9150 return true;
9153 /* If two condition code modes are compatible, return a condition code
9154 mode which is compatible with both. Otherwise, return
9155 VOIDmode. */
9157 static enum machine_mode
9158 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
9160 if (m1 == m2)
9161 return m1;
9163 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
9164 return VOIDmode;
9166 if ((m1 == CCGCmode && m2 == CCGOCmode)
9167 || (m1 == CCGOCmode && m2 == CCGCmode))
9168 return CCGCmode;
9170 switch (m1)
9172 default:
9173 gcc_unreachable ();
9175 case CCmode:
9176 case CCGCmode:
9177 case CCGOCmode:
9178 case CCNOmode:
9179 case CCZmode:
9180 switch (m2)
9182 default:
9183 return VOIDmode;
9185 case CCmode:
9186 case CCGCmode:
9187 case CCGOCmode:
9188 case CCNOmode:
9189 case CCZmode:
9190 return CCmode;
9193 case CCFPmode:
9194 case CCFPUmode:
9195 /* These are only compatible with themselves, which we already
9196 checked above. */
9197 return VOIDmode;
9201 /* Return true if we should use an FCOMI instruction for this fp comparison. */
9204 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
9206 enum rtx_code swapped_code = swap_condition (code);
9207 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
9208 || (ix86_fp_comparison_cost (swapped_code)
9209 == ix86_fp_comparison_fcomi_cost (swapped_code)));
9212 /* Swap, force into registers, or otherwise massage the two operands
9213 to a fp comparison. The operands are updated in place; the new
9214 comparison code is returned. */
9216 static enum rtx_code
9217 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
9219 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
9220 rtx op0 = *pop0, op1 = *pop1;
9221 enum machine_mode op_mode = GET_MODE (op0);
9222 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
9224 /* All of the unordered compare instructions only work on registers.
9225 The same is true of the fcomi compare instructions. The XFmode
9226 compare instructions require registers except when comparing
9227 against zero or when converting operand 1 from fixed point to
9228 floating point. */
9230 if (!is_sse
9231 && (fpcmp_mode == CCFPUmode
9232 || (op_mode == XFmode
9233 && ! (standard_80387_constant_p (op0) == 1
9234 || standard_80387_constant_p (op1) == 1)
9235 && GET_CODE (op1) != FLOAT)
9236 || ix86_use_fcomi_compare (code)))
9238 op0 = force_reg (op_mode, op0);
9239 op1 = force_reg (op_mode, op1);
9241 else
9243 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
9244 things around if they appear profitable, otherwise force op0
9245 into a register. */
9247 if (standard_80387_constant_p (op0) == 0
9248 || (GET_CODE (op0) == MEM
9249 && ! (standard_80387_constant_p (op1) == 0
9250 || GET_CODE (op1) == MEM)))
9252 rtx tmp;
9253 tmp = op0, op0 = op1, op1 = tmp;
9254 code = swap_condition (code);
9257 if (GET_CODE (op0) != REG)
9258 op0 = force_reg (op_mode, op0);
9260 if (CONSTANT_P (op1))
9262 int tmp = standard_80387_constant_p (op1);
9263 if (tmp == 0)
9264 op1 = validize_mem (force_const_mem (op_mode, op1));
9265 else if (tmp == 1)
9267 if (TARGET_CMOVE)
9268 op1 = force_reg (op_mode, op1);
9270 else
9271 op1 = force_reg (op_mode, op1);
9275 /* Try to rearrange the comparison to make it cheaper. */
9276 if (ix86_fp_comparison_cost (code)
9277 > ix86_fp_comparison_cost (swap_condition (code))
9278 && (GET_CODE (op1) == REG || !no_new_pseudos))
9280 rtx tmp;
9281 tmp = op0, op0 = op1, op1 = tmp;
9282 code = swap_condition (code);
9283 if (GET_CODE (op0) != REG)
9284 op0 = force_reg (op_mode, op0);
9287 *pop0 = op0;
9288 *pop1 = op1;
9289 return code;
9292 /* Convert a comparison code we use to represent an FP comparison to the
9293 integer code that will result in a proper branch. Return UNKNOWN if no such code
9294 is available. */
9296 enum rtx_code
9297 ix86_fp_compare_code_to_integer (enum rtx_code code)
9299 switch (code)
9301 case GT:
9302 return GTU;
9303 case GE:
9304 return GEU;
9305 case ORDERED:
9306 case UNORDERED:
9307 return code;
9308 break;
9309 case UNEQ:
9310 return EQ;
9311 break;
9312 case UNLT:
9313 return LTU;
9314 break;
9315 case UNLE:
9316 return LEU;
9317 break;
9318 case LTGT:
9319 return NE;
9320 break;
9321 default:
9322 return UNKNOWN;
9326 /* Split comparison code CODE into comparisons we can do using branch
9327 instructions. BYPASS_CODE is the comparison code for the branch that will
9328 branch around FIRST_CODE and SECOND_CODE. If one of the branches
9329 is not required, its value is set to UNKNOWN.
9330 We never require more than two branches. */
9332 void
9333 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
9334 enum rtx_code *first_code,
9335 enum rtx_code *second_code)
9337 *first_code = code;
9338 *bypass_code = UNKNOWN;
9339 *second_code = UNKNOWN;
9341 /* The fcomi comparison sets flags as follows:
9343 cmp ZF PF CF
9344 > 0 0 0
9345 < 0 0 1
9346 = 1 0 0
9347 un 1 1 1 */
9349 switch (code)
9351 case GT: /* GTU - CF=0 & ZF=0 */
9352 case GE: /* GEU - CF=0 */
9353 case ORDERED: /* PF=0 */
9354 case UNORDERED: /* PF=1 */
9355 case UNEQ: /* EQ - ZF=1 */
9356 case UNLT: /* LTU - CF=1 */
9357 case UNLE: /* LEU - CF=1 | ZF=1 */
9358 case LTGT: /* EQ - ZF=0 */
9359 break;
9360 case LT: /* LTU - CF=1 - fails on unordered */
9361 *first_code = UNLT;
9362 *bypass_code = UNORDERED;
9363 break;
9364 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
9365 *first_code = UNLE;
9366 *bypass_code = UNORDERED;
9367 break;
9368 case EQ: /* EQ - ZF=1 - fails on unordered */
9369 *first_code = UNEQ;
9370 *bypass_code = UNORDERED;
9371 break;
9372 case NE: /* NE - ZF=0 - fails on unordered */
9373 *first_code = LTGT;
9374 *second_code = UNORDERED;
9375 break;
9376 case UNGE: /* GEU - CF=0 - fails on unordered */
9377 *first_code = GE;
9378 *second_code = UNORDERED;
9379 break;
9380 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
9381 *first_code = GT;
9382 *second_code = UNORDERED;
9383 break;
9384 default:
9385 gcc_unreachable ();
9387 if (!TARGET_IEEE_FP)
9389 *second_code = UNKNOWN;
9390 *bypass_code = UNKNOWN;
9394 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
9395 All following functions use the number of instructions as a cost metric.
9396 In the future this should be tweaked to compute bytes for optimize_size and
9397 take into account the performance of various instructions on various CPUs. */
9398 static int
9399 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
9401 if (!TARGET_IEEE_FP)
9402 return 4;
9403 /* The cost of code output by ix86_expand_fp_compare. */
9404 switch (code)
9406 case UNLE:
9407 case UNLT:
9408 case LTGT:
9409 case GT:
9410 case GE:
9411 case UNORDERED:
9412 case ORDERED:
9413 case UNEQ:
9414 return 4;
9415 break;
9416 case LT:
9417 case NE:
9418 case EQ:
9419 case UNGE:
9420 return 5;
9421 break;
9422 case LE:
9423 case UNGT:
9424 return 6;
9425 break;
9426 default:
9427 gcc_unreachable ();
9431 /* Return cost of comparison done using fcomi operation.
9432 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9433 static int
9434 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
9436 enum rtx_code bypass_code, first_code, second_code;
9437 /* Return an arbitrarily high cost when the instruction is not supported - this
9438 prevents gcc from using it. */
9439 if (!TARGET_CMOVE)
9440 return 1024;
9441 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9442 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
9445 /* Return cost of comparison done using sahf operation.
9446 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9447 static int
9448 ix86_fp_comparison_sahf_cost (enum rtx_code code)
9450 enum rtx_code bypass_code, first_code, second_code;
9451 /* Return an arbitrarily high cost when the instruction is not preferred - this
9452 keeps gcc from using it. */
9453 if (!TARGET_USE_SAHF && !optimize_size)
9454 return 1024;
9455 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9456 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
9459 /* Compute cost of the comparison done using any method.
9460 See ix86_fp_comparison_arithmetics_cost for the metrics. */
9461 static int
9462 ix86_fp_comparison_cost (enum rtx_code code)
9464 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
9465 int min;
9467 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
9468 sahf_cost = ix86_fp_comparison_sahf_cost (code);
9470 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
9471 if (min > sahf_cost)
9472 min = sahf_cost;
9473 if (min > fcomi_cost)
9474 min = fcomi_cost;
9475 return min;
9478 /* Generate insn patterns to do a floating point compare of OPERANDS. */
9480 static rtx
9481 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
9482 rtx *second_test, rtx *bypass_test)
9484 enum machine_mode fpcmp_mode, intcmp_mode;
9485 rtx tmp, tmp2;
9486 int cost = ix86_fp_comparison_cost (code);
9487 enum rtx_code bypass_code, first_code, second_code;
9489 fpcmp_mode = ix86_fp_compare_mode (code);
9490 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
9492 if (second_test)
9493 *second_test = NULL_RTX;
9494 if (bypass_test)
9495 *bypass_test = NULL_RTX;
9497 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9499 /* Do fcomi/sahf based test when profitable. */
9500 if ((bypass_code == UNKNOWN || bypass_test)
9501 && (second_code == UNKNOWN || second_test)
9502 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9504 if (TARGET_CMOVE)
9506 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9507 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9508 tmp);
9509 emit_insn (tmp);
9511 else
9513 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9514 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9515 if (!scratch)
9516 scratch = gen_reg_rtx (HImode);
9517 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9518 emit_insn (gen_x86_sahf_1 (scratch));
9521 /* The FP codes work out to act like unsigned. */
9522 intcmp_mode = fpcmp_mode;
9523 code = first_code;
9524 if (bypass_code != UNKNOWN)
9525 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9526 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9527 const0_rtx);
9528 if (second_code != UNKNOWN)
9529 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9530 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9531 const0_rtx);
9533 else
9535 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9536 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9537 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9538 if (!scratch)
9539 scratch = gen_reg_rtx (HImode);
9540 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9542 /* In the unordered case, we have to check C2 for NaN's, which
9543 doesn't happen to work out to anything nice combination-wise.
9544 So do some bit twiddling on the value we've got in AH to come
9545 up with an appropriate set of condition codes. */
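/* After fnstsw the relevant x87 condition-code bits land in AH as
   C0 = 0x01, C2 = 0x04 and C3 = 0x40, so 0x45 tests C0|C2|C3 and 0x05
   tests C0|C2; an unordered result sets C0, C2 and C3 simultaneously.  */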
9547 intcmp_mode = CCNOmode;
9548 switch (code)
9550 case GT:
9551 case UNGT:
9552 if (code == GT || !TARGET_IEEE_FP)
9554 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9555 code = EQ;
9557 else
9559 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9560 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9561 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9562 intcmp_mode = CCmode;
9563 code = GEU;
9565 break;
9566 case LT:
9567 case UNLT:
9568 if (code == LT && TARGET_IEEE_FP)
9570 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9571 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9572 intcmp_mode = CCmode;
9573 code = EQ;
9575 else
9577 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9578 code = NE;
9580 break;
9581 case GE:
9582 case UNGE:
9583 if (code == GE || !TARGET_IEEE_FP)
9585 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9586 code = EQ;
9588 else
9590 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9591 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9592 GEN_INT (0x01)));
9593 code = NE;
9595 break;
9596 case LE:
9597 case UNLE:
9598 if (code == LE && TARGET_IEEE_FP)
9600 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9601 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9602 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9603 intcmp_mode = CCmode;
9604 code = LTU;
9606 else
9608 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9609 code = NE;
9611 break;
9612 case EQ:
9613 case UNEQ:
9614 if (code == EQ && TARGET_IEEE_FP)
9616 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9617 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9618 intcmp_mode = CCmode;
9619 code = EQ;
9621 else
9623 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9624 code = NE;
9625 break;
9627 break;
9628 case NE:
9629 case LTGT:
9630 if (code == NE && TARGET_IEEE_FP)
9632 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9633 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9634 GEN_INT (0x40)));
9635 code = NE;
9637 else
9639 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9640 code = EQ;
9642 break;
9644 case UNORDERED:
9645 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9646 code = NE;
9647 break;
9648 case ORDERED:
9649 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9650 code = EQ;
9651 break;
9653 default:
9654 gcc_unreachable ();
9658 /* Return the test that should be put into the flags user, i.e.
9659 the bcc, scc, or cmov instruction. */
9660 return gen_rtx_fmt_ee (code, VOIDmode,
9661 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9662 const0_rtx);
9666 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9668 rtx op0, op1, ret;
9669 op0 = ix86_compare_op0;
9670 op1 = ix86_compare_op1;
9672 if (second_test)
9673 *second_test = NULL_RTX;
9674 if (bypass_test)
9675 *bypass_test = NULL_RTX;
9677 if (ix86_compare_emitted)
9679 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
9680 ix86_compare_emitted = NULL_RTX;
9682 else if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9683 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9684 second_test, bypass_test);
9685 else
9686 ret = ix86_expand_int_compare (code, op0, op1);
9688 return ret;
9691 /* Return true if the CODE will result in nontrivial jump sequence. */
9692 bool
9693 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9695 enum rtx_code bypass_code, first_code, second_code;
9696 if (!TARGET_CMOVE)
9697 return true;
9698 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9699 return bypass_code != UNKNOWN || second_code != UNKNOWN;
9702 void
9703 ix86_expand_branch (enum rtx_code code, rtx label)
9705 rtx tmp;
9707 switch (GET_MODE (ix86_compare_op0))
9709 case QImode:
9710 case HImode:
9711 case SImode:
9712 simple:
9713 tmp = ix86_expand_compare (code, NULL, NULL);
9714 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9715 gen_rtx_LABEL_REF (VOIDmode, label),
9716 pc_rtx);
9717 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9718 return;
9720 case SFmode:
9721 case DFmode:
9722 case XFmode:
9724 rtvec vec;
9725 int use_fcomi;
9726 enum rtx_code bypass_code, first_code, second_code;
9728 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9729 &ix86_compare_op1);
9731 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9733 /* Check whether we will use the natural sequence with one jump. If
9734 so, we can expand the jump early. Otherwise delay expansion by
9735 creating a compound insn so as not to confuse the optimizers. */
9736 if (bypass_code == UNKNOWN && second_code == UNKNOWN
9737 && TARGET_CMOVE)
9739 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9740 gen_rtx_LABEL_REF (VOIDmode, label),
9741 pc_rtx, NULL_RTX, NULL_RTX);
9743 else
9745 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9746 ix86_compare_op0, ix86_compare_op1);
9747 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9748 gen_rtx_LABEL_REF (VOIDmode, label),
9749 pc_rtx);
9750 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9752 use_fcomi = ix86_use_fcomi_compare (code);
9753 vec = rtvec_alloc (3 + !use_fcomi);
9754 RTVEC_ELT (vec, 0) = tmp;
9755 RTVEC_ELT (vec, 1)
9756 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9757 RTVEC_ELT (vec, 2)
9758 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9759 if (! use_fcomi)
9760 RTVEC_ELT (vec, 3)
9761 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9763 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9765 return;
9768 case DImode:
9769 if (TARGET_64BIT)
9770 goto simple;
9771 case TImode:
9772 /* Expand DImode branch into multiple compare+branch. */
9774 rtx lo[2], hi[2], label2;
9775 enum rtx_code code1, code2, code3;
9776 enum machine_mode submode;
9778 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9780 tmp = ix86_compare_op0;
9781 ix86_compare_op0 = ix86_compare_op1;
9782 ix86_compare_op1 = tmp;
9783 code = swap_condition (code);
9785 if (GET_MODE (ix86_compare_op0) == DImode)
9787 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9788 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9789 submode = SImode;
9791 else
9793 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
9794 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
9795 submode = DImode;
9798 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9799 avoid two branches. This costs one extra insn, so disable when
9800 optimizing for size. */
9802 if ((code == EQ || code == NE)
9803 && (!optimize_size
9804 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9806 rtx xor0, xor1;
9808 xor1 = hi[0];
9809 if (hi[1] != const0_rtx)
9810 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
9811 NULL_RTX, 0, OPTAB_WIDEN);
9813 xor0 = lo[0];
9814 if (lo[1] != const0_rtx)
9815 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
9816 NULL_RTX, 0, OPTAB_WIDEN);
9818 tmp = expand_binop (submode, ior_optab, xor1, xor0,
9819 NULL_RTX, 0, OPTAB_WIDEN);
9821 ix86_compare_op0 = tmp;
9822 ix86_compare_op1 = const0_rtx;
9823 ix86_expand_branch (code, label);
9824 return;
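/* For example, on a 32-bit target a DImode a == b taken through the branch
   above becomes roughly (register names chosen only for illustration)

	movl	a_hi, %eax
	xorl	b_hi, %eax
	movl	a_lo, %edx
	xorl	b_lo, %edx
	orl	%edx, %eax
	je	<target>

   i.e. one branch instead of the two or three produced by the generic
   high/low splitting further below.  */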
9827 /* Otherwise, if we are doing a less-than or greater-or-equal comparison,
9828 op1 is a constant, and the low word is zero, then we can just
9829 examine the high word. */
9831 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9832 switch (code)
9834 case LT: case LTU: case GE: case GEU:
9835 ix86_compare_op0 = hi[0];
9836 ix86_compare_op1 = hi[1];
9837 ix86_expand_branch (code, label);
9838 return;
9839 default:
9840 break;
9843 /* Otherwise, we need two or three jumps. */
9845 label2 = gen_label_rtx ();
9847 code1 = code;
9848 code2 = swap_condition (code);
9849 code3 = unsigned_condition (code);
9851 switch (code)
9853 case LT: case GT: case LTU: case GTU:
9854 break;
9856 case LE: code1 = LT; code2 = GT; break;
9857 case GE: code1 = GT; code2 = LT; break;
9858 case LEU: code1 = LTU; code2 = GTU; break;
9859 case GEU: code1 = GTU; code2 = LTU; break;
9861 case EQ: code1 = UNKNOWN; code2 = NE; break;
9862 case NE: code2 = UNKNOWN; break;
9864 default:
9865 gcc_unreachable ();
9869 * a < b =>
9870 * if (hi(a) < hi(b)) goto true;
9871 * if (hi(a) > hi(b)) goto false;
9872 * if (lo(a) < lo(b)) goto true;
9873 * false:
9876 ix86_compare_op0 = hi[0];
9877 ix86_compare_op1 = hi[1];
9879 if (code1 != UNKNOWN)
9880 ix86_expand_branch (code1, label);
9881 if (code2 != UNKNOWN)
9882 ix86_expand_branch (code2, label2);
9884 ix86_compare_op0 = lo[0];
9885 ix86_compare_op1 = lo[1];
9886 ix86_expand_branch (code3, label);
9888 if (code2 != UNKNOWN)
9889 emit_label (label2);
9890 return;
9893 default:
9894 gcc_unreachable ();
9898 /* Split branch based on floating point condition. */
9899 void
9900 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9901 rtx target1, rtx target2, rtx tmp, rtx pushed)
9903 rtx second, bypass;
9904 rtx label = NULL_RTX;
9905 rtx condition;
9906 int bypass_probability = -1, second_probability = -1, probability = -1;
9907 rtx i;
9909 if (target2 != pc_rtx)
9911 rtx tmp = target2;
9912 code = reverse_condition_maybe_unordered (code);
9913 target2 = target1;
9914 target1 = tmp;
9917 condition = ix86_expand_fp_compare (code, op1, op2,
9918 tmp, &second, &bypass);
9920 /* Remove pushed operand from stack. */
9921 if (pushed)
9922 ix86_free_from_memory (GET_MODE (pushed));
9924 if (split_branch_probability >= 0)
9926 /* Distribute the probabilities across the jumps.
9927 Assume that the BYPASS and SECOND jumps always test
9928 for UNORDERED. */
9929 probability = split_branch_probability;
9931 /* A value of 1 is low enough that the probability does not need
9932 to be updated. Later we may run some experiments and see
9933 whether unordered values are more frequent in practice. */
9934 if (bypass)
9935 bypass_probability = 1;
9936 if (second)
9937 second_probability = 1;
9939 if (bypass != NULL_RTX)
9941 label = gen_label_rtx ();
9942 i = emit_jump_insn (gen_rtx_SET
9943 (VOIDmode, pc_rtx,
9944 gen_rtx_IF_THEN_ELSE (VOIDmode,
9945 bypass,
9946 gen_rtx_LABEL_REF (VOIDmode,
9947 label),
9948 pc_rtx)));
9949 if (bypass_probability >= 0)
9950 REG_NOTES (i)
9951 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9952 GEN_INT (bypass_probability),
9953 REG_NOTES (i));
9955 i = emit_jump_insn (gen_rtx_SET
9956 (VOIDmode, pc_rtx,
9957 gen_rtx_IF_THEN_ELSE (VOIDmode,
9958 condition, target1, target2)));
9959 if (probability >= 0)
9960 REG_NOTES (i)
9961 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9962 GEN_INT (probability),
9963 REG_NOTES (i));
9964 if (second != NULL_RTX)
9966 i = emit_jump_insn (gen_rtx_SET
9967 (VOIDmode, pc_rtx,
9968 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9969 target2)));
9970 if (second_probability >= 0)
9971 REG_NOTES (i)
9972 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9973 GEN_INT (second_probability),
9974 REG_NOTES (i));
9976 if (label != NULL_RTX)
9977 emit_label (label);
9981 ix86_expand_setcc (enum rtx_code code, rtx dest)
9983 rtx ret, tmp, tmpreg, equiv;
9984 rtx second_test, bypass_test;
9986 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
9987 return 0; /* FAIL */
9989 gcc_assert (GET_MODE (dest) == QImode);
9991 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9992 PUT_MODE (ret, QImode);
9994 tmp = dest;
9995 tmpreg = dest;
9997 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9998 if (bypass_test || second_test)
10000 rtx test = second_test;
10001 int bypass = 0;
10002 rtx tmp2 = gen_reg_rtx (QImode);
10003 if (bypass_test)
10005 gcc_assert (!second_test);
10006 test = bypass_test;
10007 bypass = 1;
10008 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10010 PUT_MODE (test, QImode);
10011 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10013 if (bypass)
10014 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10015 else
10016 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10019 /* Attach a REG_EQUAL note describing the comparison result. */
10020 if (ix86_compare_op0 && ix86_compare_op1)
10022 equiv = simplify_gen_relational (code, QImode,
10023 GET_MODE (ix86_compare_op0),
10024 ix86_compare_op0, ix86_compare_op1);
10025 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10028 return 1; /* DONE */
10031 /* Expand a comparison setting or clearing the carry flag. Return true when
10032 successful, and set *POP to the comparison operation. */
10033 static bool
10034 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10036 enum machine_mode mode =
10037 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10039 /* Do not handle DImode compares that go through a special path. FP compares
10040 are handled below only insofar as they reduce to a carry-flag test. */
10041 if (mode == (TARGET_64BIT ? TImode : DImode))
10042 return false;
10043 if (FLOAT_MODE_P (mode))
10045 rtx second_test = NULL, bypass_test = NULL;
10046 rtx compare_op, compare_seq;
10048 /* Shortcut: the following common codes never translate into carry-flag compares. */
10049 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10050 || code == ORDERED || code == UNORDERED)
10051 return false;
10053 /* These comparisons require zero flag; swap operands so they won't. */
10054 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10055 && !TARGET_IEEE_FP)
10057 rtx tmp = op0;
10058 op0 = op1;
10059 op1 = tmp;
10060 code = swap_condition (code);
10063 /* Try to expand the comparison and verify that we end up with a carry-flag
10064 based comparison. This fails to be true only when we decide to expand the
10065 comparison using arithmetic, which is not a common scenario. */
10066 start_sequence ();
10067 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10068 &second_test, &bypass_test);
10069 compare_seq = get_insns ();
10070 end_sequence ();
10072 if (second_test || bypass_test)
10073 return false;
10074 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10075 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10076 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10077 else
10078 code = GET_CODE (compare_op);
10079 if (code != LTU && code != GEU)
10080 return false;
10081 emit_insn (compare_seq);
10082 *pop = compare_op;
10083 return true;
10085 if (!INTEGRAL_MODE_P (mode))
10086 return false;
10087 switch (code)
10089 case LTU:
10090 case GEU:
10091 break;
10093 /* Convert a==0 into (unsigned)a<1. */
10094 case EQ:
10095 case NE:
10096 if (op1 != const0_rtx)
10097 return false;
10098 op1 = const1_rtx;
10099 code = (code == EQ ? LTU : GEU);
10100 break;
10102 /* Convert a>b into b<a or a>=b+1. */
10103 case GTU:
10104 case LEU:
10105 if (GET_CODE (op1) == CONST_INT)
10107 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10108 /* Bail out on overflow. We could still swap the operands, but that
10109 would force loading the constant into a register. */
10110 if (op1 == const0_rtx
10111 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10112 return false;
10113 code = (code == GTU ? GEU : LTU);
10115 else
10117 rtx tmp = op1;
10118 op1 = op0;
10119 op0 = tmp;
10120 code = (code == GTU ? LTU : GEU);
10122 break;
10124 /* Convert a>=0 into (unsigned)a<0x80000000. */
10125 case LT:
10126 case GE:
10127 if (mode == DImode || op1 != const0_rtx)
10128 return false;
10129 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10130 code = (code == LT ? GEU : LTU);
10131 break;
10132 case LE:
10133 case GT:
10134 if (mode == DImode || op1 != constm1_rtx)
10135 return false;
10136 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10137 code = (code == LE ? GEU : LTU);
10138 break;
10140 default:
10141 return false;
10143 /* Swapping operands may cause a constant to appear as the first operand. */
10144 if (!nonimmediate_operand (op0, VOIDmode))
10146 if (no_new_pseudos)
10147 return false;
10148 op0 = force_reg (mode, op0);
10150 ix86_compare_op0 = op0;
10151 ix86_compare_op1 = op1;
10152 *pop = ix86_expand_compare (code, NULL, NULL);
10153 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
10154 return true;
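/* A small example of the conversions above: x == 0 is rewritten as
   (unsigned) x < 1, so it can be expanded roughly as

	cmpl	$1, x		# CF is set exactly when x == 0

   and the caller can then consume the carry flag directly with sbb/adc,
   which is what ix86_expand_int_movcc and ix86_expand_int_addcc do.  */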
10158 ix86_expand_int_movcc (rtx operands[])
10160 enum rtx_code code = GET_CODE (operands[1]), compare_code;
10161 rtx compare_seq, compare_op;
10162 rtx second_test, bypass_test;
10163 enum machine_mode mode = GET_MODE (operands[0]);
10164 bool sign_bit_compare_p = false;
10166 start_sequence ();
10167 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10168 compare_seq = get_insns ();
10169 end_sequence ();
10171 compare_code = GET_CODE (compare_op);
10173 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
10174 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
10175 sign_bit_compare_p = true;
10177 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
10178 HImode insns, we'd be swallowed in word prefix ops. */
10180 if ((mode != HImode || TARGET_FAST_PREFIX)
10181 && (mode != (TARGET_64BIT ? TImode : DImode))
10182 && GET_CODE (operands[2]) == CONST_INT
10183 && GET_CODE (operands[3]) == CONST_INT)
10185 rtx out = operands[0];
10186 HOST_WIDE_INT ct = INTVAL (operands[2]);
10187 HOST_WIDE_INT cf = INTVAL (operands[3]);
10188 HOST_WIDE_INT diff;
10190 diff = ct - cf;
10191 /* Sign bit compares are better done using shifts than by using
10192 sbb. */
10193 if (sign_bit_compare_p
10194 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10195 ix86_compare_op1, &compare_op))
10197 /* Detect overlap between destination and compare sources. */
10198 rtx tmp = out;
10200 if (!sign_bit_compare_p)
10202 bool fpcmp = false;
10204 compare_code = GET_CODE (compare_op);
10206 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10207 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10209 fpcmp = true;
10210 compare_code = ix86_fp_compare_code_to_integer (compare_code);
10213 /* To simplify the rest of the code, restrict to the GEU case. */
10214 if (compare_code == LTU)
10216 HOST_WIDE_INT tmp = ct;
10217 ct = cf;
10218 cf = tmp;
10219 compare_code = reverse_condition (compare_code);
10220 code = reverse_condition (code);
10222 else
10224 if (fpcmp)
10225 PUT_CODE (compare_op,
10226 reverse_condition_maybe_unordered
10227 (GET_CODE (compare_op)));
10228 else
10229 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10231 diff = ct - cf;
10233 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
10234 || reg_overlap_mentioned_p (out, ix86_compare_op1))
10235 tmp = gen_reg_rtx (mode);
10237 if (mode == DImode)
10238 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
10239 else
10240 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
10242 else
10244 if (code == GT || code == GE)
10245 code = reverse_condition (code);
10246 else
10248 HOST_WIDE_INT tmp = ct;
10249 ct = cf;
10250 cf = tmp;
10251 diff = ct - cf;
10253 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
10254 ix86_compare_op1, VOIDmode, 0, -1);
10257 if (diff == 1)
10260 * cmpl op0,op1
10261 * sbbl dest,dest
10262 * [addl dest, ct]
10264 * Size 5 - 8.
10266 if (ct)
10267 tmp = expand_simple_binop (mode, PLUS,
10268 tmp, GEN_INT (ct),
10269 copy_rtx (tmp), 1, OPTAB_DIRECT);
10271 else if (cf == -1)
10274 * cmpl op0,op1
10275 * sbbl dest,dest
10276 * orl $ct, dest
10278 * Size 8.
10280 tmp = expand_simple_binop (mode, IOR,
10281 tmp, GEN_INT (ct),
10282 copy_rtx (tmp), 1, OPTAB_DIRECT);
10284 else if (diff == -1 && ct)
10287 * cmpl op0,op1
10288 * sbbl dest,dest
10289 * notl dest
10290 * [addl dest, cf]
10292 * Size 8 - 11.
10294 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10295 if (cf)
10296 tmp = expand_simple_binop (mode, PLUS,
10297 copy_rtx (tmp), GEN_INT (cf),
10298 copy_rtx (tmp), 1, OPTAB_DIRECT);
10300 else
10303 * cmpl op0,op1
10304 * sbbl dest,dest
10305 * [notl dest]
10306 * andl cf - ct, dest
10307 * [addl dest, ct]
10309 * Size 8 - 11.
10312 if (cf == 0)
10314 cf = ct;
10315 ct = 0;
10316 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
10319 tmp = expand_simple_binop (mode, AND,
10320 copy_rtx (tmp),
10321 gen_int_mode (cf - ct, mode),
10322 copy_rtx (tmp), 1, OPTAB_DIRECT);
10323 if (ct)
10324 tmp = expand_simple_binop (mode, PLUS,
10325 copy_rtx (tmp), GEN_INT (ct),
10326 copy_rtx (tmp), 1, OPTAB_DIRECT);
10329 if (!rtx_equal_p (tmp, out))
10330 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
10332 return 1; /* DONE */
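/* To spell out the arithmetic behind the cmp/sbb sequences above: sbb of a
   register with itself yields -1 when the carry flag is set and 0 otherwise,
   and

	(-1 & (cf - ct)) + ct == cf,	(0 & (cf - ct)) + ct == ct,

   so the masked add selects between the two constants without a branch; the
   diff == 1, cf == -1 and diff == -1 special cases above are just forms in
   which the and or the add can be dropped.  */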
10335 if (diff < 0)
10337 HOST_WIDE_INT tmp;
10338 tmp = ct, ct = cf, cf = tmp;
10339 diff = -diff;
10340 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10342 /* We may be reversing an unordered compare to a normal compare, which
10343 is not valid in general (we may convert a non-trapping condition
10344 to a trapping one); however, on i386 we currently emit all
10345 comparisons unordered. */
10346 compare_code = reverse_condition_maybe_unordered (compare_code);
10347 code = reverse_condition_maybe_unordered (code);
10349 else
10351 compare_code = reverse_condition (compare_code);
10352 code = reverse_condition (code);
10356 compare_code = UNKNOWN;
10357 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
10358 && GET_CODE (ix86_compare_op1) == CONST_INT)
10360 if (ix86_compare_op1 == const0_rtx
10361 && (code == LT || code == GE))
10362 compare_code = code;
10363 else if (ix86_compare_op1 == constm1_rtx)
10365 if (code == LE)
10366 compare_code = LT;
10367 else if (code == GT)
10368 compare_code = GE;
10372 /* Optimize dest = (op0 < 0) ? -1 : cf. */
10373 if (compare_code != UNKNOWN
10374 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
10375 && (cf == -1 || ct == -1))
10377 /* If lea code below could be used, only optimize
10378 if it results in a 2 insn sequence. */
10380 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
10381 || diff == 3 || diff == 5 || diff == 9)
10382 || (compare_code == LT && ct == -1)
10383 || (compare_code == GE && cf == -1))
10386 * notl op1 (if necessary)
10387 * sarl $31, op1
10388 * orl cf, op1
10390 if (ct != -1)
10392 cf = ct;
10393 ct = -1;
10394 code = reverse_condition (code);
10397 out = emit_store_flag (out, code, ix86_compare_op0,
10398 ix86_compare_op1, VOIDmode, 0, -1);
10400 out = expand_simple_binop (mode, IOR,
10401 out, GEN_INT (cf),
10402 out, 1, OPTAB_DIRECT);
10403 if (out != operands[0])
10404 emit_move_insn (operands[0], out);
10406 return 1; /* DONE */
10411 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
10412 || diff == 3 || diff == 5 || diff == 9)
10413 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
10414 && (mode != DImode
10415 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
10418 * xorl dest,dest
10419 * cmpl op1,op2
10420 * setcc dest
10421 * lea cf(dest*(ct-cf)),dest
10423 * Size 14.
10425 * This also catches the degenerate setcc-only case.
10428 rtx tmp;
10429 int nops;
10431 out = emit_store_flag (out, code, ix86_compare_op0,
10432 ix86_compare_op1, VOIDmode, 0, 1);
10434 nops = 0;
10435 /* On x86_64 the lea instruction operates on Pmode, so we need
10436 to get the arithmetic done in the proper mode to match. */
10437 if (diff == 1)
10438 tmp = copy_rtx (out);
10439 else
10441 rtx out1;
10442 out1 = copy_rtx (out);
10443 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
10444 nops++;
10445 if (diff & 1)
10447 tmp = gen_rtx_PLUS (mode, tmp, out1);
10448 nops++;
10451 if (cf != 0)
10453 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
10454 nops++;
10456 if (!rtx_equal_p (tmp, out))
10458 if (nops == 1)
10459 out = force_operand (tmp, copy_rtx (out));
10460 else
10461 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
10463 if (!rtx_equal_p (out, operands[0]))
10464 emit_move_insn (operands[0], copy_rtx (out));
10466 return 1; /* DONE */
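/* A concrete instance of the lea form above (values picked only for
   illustration): for dest = cond ? 7 : 3 the difference is 4, so the
   expansion is roughly

	xorl	%eax, %eax
	cmpl	op2, op1
	setcc	%al
	leal	3(,%eax,4), %eax

   i.e. 3 + 0*4 or 3 + 1*4 depending on the setcc result.  */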
10470 * General case: Jumpful:
10471 * xorl dest,dest cmpl op1, op2
10472 * cmpl op1, op2 movl ct, dest
10473 * setcc dest jcc 1f
10474 * decl dest movl cf, dest
10475 * andl (cf-ct),dest 1:
10476 * addl ct,dest
10478 * Size 20. Size 14.
10480 * This is reasonably steep, but branch mispredict costs are
10481 * high on modern cpus, so consider failing only if optimizing
10482 * for space.
10485 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10486 && BRANCH_COST >= 2)
10488 if (cf == 0)
10490 cf = ct;
10491 ct = 0;
10492 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
10493 /* We may be reversing an unordered compare to a normal compare,
10494 which is not valid in general (we may convert a non-trapping
10495 condition to a trapping one); however, on i386 we currently
10496 emit all comparisons unordered. */
10497 code = reverse_condition_maybe_unordered (code);
10498 else
10500 code = reverse_condition (code);
10501 if (compare_code != UNKNOWN)
10502 compare_code = reverse_condition (compare_code);
10506 if (compare_code != UNKNOWN)
10508 /* notl op1 (if needed)
10509 sarl $31, op1
10510 andl (cf-ct), op1
10511 addl ct, op1
10513 For x < 0 (resp. x <= -1) there will be no notl,
10514 so if possible swap the constants to get rid of the
10515 complement.
10516 True/false will be -1/0 while code below (store flag
10517 followed by decrement) is 0/-1, so the constants need
10518 to be exchanged once more. */
10520 if (compare_code == GE || !cf)
10522 code = reverse_condition (code);
10523 compare_code = LT;
10525 else
10527 HOST_WIDE_INT tmp = cf;
10528 cf = ct;
10529 ct = tmp;
10532 out = emit_store_flag (out, code, ix86_compare_op0,
10533 ix86_compare_op1, VOIDmode, 0, -1);
10535 else
10537 out = emit_store_flag (out, code, ix86_compare_op0,
10538 ix86_compare_op1, VOIDmode, 0, 1);
10540 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10541 copy_rtx (out), 1, OPTAB_DIRECT);
10544 out = expand_simple_binop (mode, AND, copy_rtx (out),
10545 gen_int_mode (cf - ct, mode),
10546 copy_rtx (out), 1, OPTAB_DIRECT);
10547 if (ct)
10548 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10549 copy_rtx (out), 1, OPTAB_DIRECT);
10550 if (!rtx_equal_p (out, operands[0]))
10551 emit_move_insn (operands[0], copy_rtx (out));
10553 return 1; /* DONE */
10557 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10559 /* Try a few things more with specific constants and a variable. */
10561 optab op;
10562 rtx var, orig_out, out, tmp;
10564 if (BRANCH_COST <= 2)
10565 return 0; /* FAIL */
10567 /* If one of the two operands is an interesting constant, load a
10568 constant with the above and mask it in with a logical operation. */
10570 if (GET_CODE (operands[2]) == CONST_INT)
10572 var = operands[3];
10573 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10574 operands[3] = constm1_rtx, op = and_optab;
10575 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10576 operands[3] = const0_rtx, op = ior_optab;
10577 else
10578 return 0; /* FAIL */
10580 else if (GET_CODE (operands[3]) == CONST_INT)
10582 var = operands[2];
10583 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10584 operands[2] = constm1_rtx, op = and_optab;
10585 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
10586 operands[2] = const0_rtx, op = ior_optab;
10587 else
10588 return 0; /* FAIL */
10590 else
10591 return 0; /* FAIL */
10593 orig_out = operands[0];
10594 tmp = gen_reg_rtx (mode);
10595 operands[0] = tmp;
10597 /* Recurse to get the constant loaded. */
10598 if (ix86_expand_int_movcc (operands) == 0)
10599 return 0; /* FAIL */
10601 /* Mask in the interesting variable. */
10602 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10603 OPTAB_WIDEN);
10604 if (!rtx_equal_p (out, orig_out))
10605 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10607 return 1; /* DONE */
10611 * For comparison with above,
10613 * movl cf,dest
10614 * movl ct,tmp
10615 * cmpl op1,op2
10616 * cmovcc tmp,dest
10618 * Size 15.
10621 if (! nonimmediate_operand (operands[2], mode))
10622 operands[2] = force_reg (mode, operands[2]);
10623 if (! nonimmediate_operand (operands[3], mode))
10624 operands[3] = force_reg (mode, operands[3]);
10626 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10628 rtx tmp = gen_reg_rtx (mode);
10629 emit_move_insn (tmp, operands[3]);
10630 operands[3] = tmp;
10632 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10634 rtx tmp = gen_reg_rtx (mode);
10635 emit_move_insn (tmp, operands[2]);
10636 operands[2] = tmp;
10639 if (! register_operand (operands[2], VOIDmode)
10640 && (mode == QImode
10641 || ! register_operand (operands[3], VOIDmode)))
10642 operands[2] = force_reg (mode, operands[2]);
10644 if (mode == QImode
10645 && ! register_operand (operands[3], VOIDmode))
10646 operands[3] = force_reg (mode, operands[3]);
10648 emit_insn (compare_seq);
10649 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10650 gen_rtx_IF_THEN_ELSE (mode,
10651 compare_op, operands[2],
10652 operands[3])));
10653 if (bypass_test)
10654 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10655 gen_rtx_IF_THEN_ELSE (mode,
10656 bypass_test,
10657 copy_rtx (operands[3]),
10658 copy_rtx (operands[0]))));
10659 if (second_test)
10660 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10661 gen_rtx_IF_THEN_ELSE (mode,
10662 second_test,
10663 copy_rtx (operands[2]),
10664 copy_rtx (operands[0]))));
10666 return 1; /* DONE */
10669 /* Swap, force into registers, or otherwise massage the two operands
10670 to an sse comparison with a mask result. Thus we differ a bit from
10671 ix86_prepare_fp_compare_args which expects to produce a flags result.
10673 The DEST operand exists to help determine whether to commute commutative
10674 operators. The POP0/POP1 operands are updated in place. The new
10675 comparison code is returned, or UNKNOWN if not implementable. */
10677 static enum rtx_code
10678 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
10679 rtx *pop0, rtx *pop1)
10681 rtx tmp;
10683 switch (code)
10685 case LTGT:
10686 case UNEQ:
10687 /* We have no LTGT as an operator. We could implement it with
10688 NE & ORDERED, but this requires an extra temporary. It's
10689 not clear that it's worth it. */
10690 return UNKNOWN;
10692 case LT:
10693 case LE:
10694 case UNGT:
10695 case UNGE:
10696 /* These are supported directly. */
10697 break;
10699 case EQ:
10700 case NE:
10701 case UNORDERED:
10702 case ORDERED:
10703 /* For commutative operators, try to canonicalize the destination
10704 operand to be first in the comparison - this helps reload to
10705 avoid extra moves. */
10706 if (!dest || !rtx_equal_p (dest, *pop1))
10707 break;
10708 /* FALLTHRU */
10710 case GE:
10711 case GT:
10712 case UNLE:
10713 case UNLT:
10714 /* These are not supported directly. Swap the comparison operands
10715 to transform into something that is supported. */
10716 tmp = *pop0;
10717 *pop0 = *pop1;
10718 *pop1 = tmp;
10719 code = swap_condition (code);
10720 break;
10722 default:
10723 gcc_unreachable ();
10726 return code;
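/* For example, a > b has no direct SSE compare predicate, so it is rewritten
   here as b < a and later expanded through the LT form (cmpltss and friends),
   while the UNGT/UNGE codes that are kept unswapped map onto the negated
   predicates (cmpnless/cmpnltss).  This is only a sketch of the intended
   mapping; the actual patterns are selected by the machine description.  */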
10729 /* Detect conditional moves that exactly match min/max operational
10730 semantics. Note that this is IEEE safe, as long as we don't
10731 interchange the operands.
10733 Returns FALSE if this conditional move doesn't match a MIN/MAX,
10734 and TRUE if the operation is successful and instructions are emitted. */
10736 static bool
10737 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
10738 rtx cmp_op1, rtx if_true, rtx if_false)
10740 enum machine_mode mode;
10741 bool is_min;
10742 rtx tmp;
10744 if (code == LT)
10746 else if (code == UNGE)
10748 tmp = if_true;
10749 if_true = if_false;
10750 if_false = tmp;
10752 else
10753 return false;
10755 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
10756 is_min = true;
10757 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
10758 is_min = false;
10759 else
10760 return false;
10762 mode = GET_MODE (dest);
10764 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
10765 but MODE may be a vector mode and thus not appropriate. */
10766 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
10768 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
10769 rtvec v;
10771 if_true = force_reg (mode, if_true);
10772 v = gen_rtvec (2, if_true, if_false);
10773 tmp = gen_rtx_UNSPEC (mode, v, u);
10775 else
10777 code = is_min ? SMIN : SMAX;
10778 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
10781 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
10782 return true;
10785 /* Expand an SSE vector comparison. Return the register with the result. */
10787 static rtx
10788 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
10789 rtx op_true, rtx op_false)
10791 enum machine_mode mode = GET_MODE (dest);
10792 rtx x;
10794 cmp_op0 = force_reg (mode, cmp_op0);
10795 if (!nonimmediate_operand (cmp_op1, mode))
10796 cmp_op1 = force_reg (mode, cmp_op1);
10798 if (optimize
10799 || reg_overlap_mentioned_p (dest, op_true)
10800 || reg_overlap_mentioned_p (dest, op_false))
10801 dest = gen_reg_rtx (mode);
10803 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
10804 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10806 return dest;
10809 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
10810 operations. This is used for both scalar and vector conditional moves. */
10812 static void
10813 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
10815 enum machine_mode mode = GET_MODE (dest);
10816 rtx t2, t3, x;
10818 if (op_false == CONST0_RTX (mode))
10820 op_true = force_reg (mode, op_true);
10821 x = gen_rtx_AND (mode, cmp, op_true);
10822 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10824 else if (op_true == CONST0_RTX (mode))
10826 op_false = force_reg (mode, op_false);
10827 x = gen_rtx_NOT (mode, cmp);
10828 x = gen_rtx_AND (mode, x, op_false);
10829 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
10831 else
10833 op_true = force_reg (mode, op_true);
10834 op_false = force_reg (mode, op_false);
10836 t2 = gen_reg_rtx (mode);
10837 if (optimize)
10838 t3 = gen_reg_rtx (mode);
10839 else
10840 t3 = dest;
10842 x = gen_rtx_AND (mode, op_true, cmp);
10843 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
10845 x = gen_rtx_NOT (mode, cmp);
10846 x = gen_rtx_AND (mode, x, op_false);
10847 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
10849 x = gen_rtx_IOR (mode, t3, t2);
10850 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
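/* In other words, with the comparison result CMP being an all-ones /
   all-zeros mask, the general case computes

	dest = (cmp & op_true) | (~cmp & op_false);

   which comes out as an and/andnot/or triple (andps/andnps/orps or their
   pand/pandn/por integer counterparts), with the shortcuts above used when
   one of the arms is zero.  */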
10854 /* Expand a floating-point conditional move. Return true if successful. */
10857 ix86_expand_fp_movcc (rtx operands[])
10859 enum machine_mode mode = GET_MODE (operands[0]);
10860 enum rtx_code code = GET_CODE (operands[1]);
10861 rtx tmp, compare_op, second_test, bypass_test;
10863 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
10865 enum machine_mode cmode;
10867 /* Since we have no cmove for SSE registers, don't force bad register
10868 allocation just to gain access to it. Deny the movcc when the
10869 comparison mode doesn't match the move mode. */
10870 cmode = GET_MODE (ix86_compare_op0);
10871 if (cmode == VOIDmode)
10872 cmode = GET_MODE (ix86_compare_op1);
10873 if (cmode != mode)
10874 return 0;
10876 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10877 &ix86_compare_op0,
10878 &ix86_compare_op1);
10879 if (code == UNKNOWN)
10880 return 0;
10882 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
10883 ix86_compare_op1, operands[2],
10884 operands[3]))
10885 return 1;
10887 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
10888 ix86_compare_op1, operands[2], operands[3]);
10889 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
10890 return 1;
10893 /* The floating point conditional move instructions don't directly
10894 support conditions resulting from a signed integer comparison. */
10896 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10901 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10903 gcc_assert (!second_test && !bypass_test);
10904 tmp = gen_reg_rtx (QImode);
10905 ix86_expand_setcc (code, tmp);
10906 code = NE;
10907 ix86_compare_op0 = tmp;
10908 ix86_compare_op1 = const0_rtx;
10909 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10911 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10913 tmp = gen_reg_rtx (mode);
10914 emit_move_insn (tmp, operands[3]);
10915 operands[3] = tmp;
10917 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10919 tmp = gen_reg_rtx (mode);
10920 emit_move_insn (tmp, operands[2]);
10921 operands[2] = tmp;
10924 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10925 gen_rtx_IF_THEN_ELSE (mode, compare_op,
10926 operands[2], operands[3])));
10927 if (bypass_test)
10928 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10929 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
10930 operands[3], operands[0])));
10931 if (second_test)
10932 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10933 gen_rtx_IF_THEN_ELSE (mode, second_test,
10934 operands[2], operands[0])));
10936 return 1;
10939 /* Expand a floating-point vector conditional move; a vcond operation
10940 rather than a movcc operation. */
10942 bool
10943 ix86_expand_fp_vcond (rtx operands[])
10945 enum rtx_code code = GET_CODE (operands[3]);
10946 rtx cmp;
10948 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
10949 &operands[4], &operands[5]);
10950 if (code == UNKNOWN)
10951 return false;
10953 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
10954 operands[5], operands[1], operands[2]))
10955 return true;
10957 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
10958 operands[1], operands[2]);
10959 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
10960 return true;
10963 /* Expand a signed integral vector conditional move. */
10965 bool
10966 ix86_expand_int_vcond (rtx operands[])
10968 enum machine_mode mode = GET_MODE (operands[0]);
10969 enum rtx_code code = GET_CODE (operands[3]);
10970 bool negate = false;
10971 rtx x, cop0, cop1;
10973 cop0 = operands[4];
10974 cop1 = operands[5];
10976 /* Canonicalize the comparison to EQ, GT, GTU. */
10977 switch (code)
10979 case EQ:
10980 case GT:
10981 case GTU:
10982 break;
10984 case NE:
10985 case LE:
10986 case LEU:
10987 code = reverse_condition (code);
10988 negate = true;
10989 break;
10991 case GE:
10992 case GEU:
10993 code = reverse_condition (code);
10994 negate = true;
10995 /* FALLTHRU */
10997 case LT:
10998 case LTU:
10999 code = swap_condition (code);
11000 x = cop0, cop0 = cop1, cop1 = x;
11001 break;
11003 default:
11004 gcc_unreachable ();
11007 /* Unsigned parallel compare is not supported by the hardware. Play some
11008 tricks to turn this into a signed comparison against 0. */
11009 if (code == GTU)
11011 switch (mode)
11013 case V4SImode:
11015 rtx t1, t2, mask;
11017 /* Perform a parallel modulo subtraction. */
11018 t1 = gen_reg_rtx (mode);
11019 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11021 /* Extract the original sign bit of op0. */
11022 mask = GEN_INT (-0x80000000);
11023 mask = gen_rtx_CONST_VECTOR (mode,
11024 gen_rtvec (4, mask, mask, mask, mask));
11025 mask = force_reg (mode, mask);
11026 t2 = gen_reg_rtx (mode);
11027 emit_insn (gen_andv4si3 (t2, cop0, mask));
11029 /* XOR it back into the result of the subtraction. This results
11030 in the sign bit set iff we saw unsigned underflow. */
11031 x = gen_reg_rtx (mode);
11032 emit_insn (gen_xorv4si3 (x, t1, t2));
11034 code = GT;
11036 break;
11038 case V16QImode:
11039 case V8HImode:
11040 /* Perform a parallel unsigned saturating subtraction. */
11041 x = gen_reg_rtx (mode);
11042 emit_insn (gen_rtx_SET (VOIDmode, x,
11043 gen_rtx_US_MINUS (mode, cop0, cop1)));
11045 code = EQ;
11046 negate = !negate;
11047 break;
11049 default:
11050 gcc_unreachable ();
11053 cop0 = x;
11054 cop1 = CONST0_RTX (mode);
11057 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11058 operands[1+negate], operands[2-negate]);
11060 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11061 operands[2-negate]);
11062 return true;
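/* Example of the GTU trick in the byte/word case: there is no unsigned
   pcmpgt, but with a saturating subtraction (psubusb/psubusw)

	x = a -us b,	a >u b  <==>  x != 0,

   so the code above compares x against zero for equality and flips NEGATE,
   selecting the opposite arm of the vcond to obtain the != sense.  */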
11065 /* Expand conditional increment or decrement using adc/sbb instructions.
11066 The default case using setcc followed by the conditional move can be
11067 done by generic code. */
11069 ix86_expand_int_addcc (rtx operands[])
11071 enum rtx_code code = GET_CODE (operands[1]);
11072 rtx compare_op;
11073 rtx val = const0_rtx;
11074 bool fpcmp = false;
11075 enum machine_mode mode = GET_MODE (operands[0]);
11077 if (operands[3] != const1_rtx
11078 && operands[3] != constm1_rtx)
11079 return 0;
11080 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11081 ix86_compare_op1, &compare_op))
11082 return 0;
11083 code = GET_CODE (compare_op);
11085 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11086 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11088 fpcmp = true;
11089 code = ix86_fp_compare_code_to_integer (code);
11092 if (code != LTU)
11094 val = constm1_rtx;
11095 if (fpcmp)
11096 PUT_CODE (compare_op,
11097 reverse_condition_maybe_unordered
11098 (GET_CODE (compare_op)));
11099 else
11100 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11102 PUT_MODE (compare_op, mode);
11104 /* Construct either adc or sbb insn. */
11105 if ((code == LTU) == (operands[3] == constm1_rtx))
11107 switch (GET_MODE (operands[0]))
11109 case QImode:
11110 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
11111 break;
11112 case HImode:
11113 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
11114 break;
11115 case SImode:
11116 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
11117 break;
11118 case DImode:
11119 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11120 break;
11121 default:
11122 gcc_unreachable ();
11125 else
11127 switch (GET_MODE (operands[0]))
11129 case QImode:
11130 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
11131 break;
11132 case HImode:
11133 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
11134 break;
11135 case SImode:
11136 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
11137 break;
11138 case DImode:
11139 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
11140 break;
11141 default:
11142 gcc_unreachable ();
11145 return 1; /* DONE */
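/* A typical use of this expander: for unsigned operands,

	if (a < b) x++;

   goes through ix86_expand_carry_flag_compare and comes out roughly as

	cmpl	b, a
	adcl	$0, x

   with the mirrored sbb form handling the decrement and reversed-carry
   cases.  */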
11149 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
11150 works for floating point parameters and non-offsettable memories.
11151 For pushes, it returns just stack offsets; the values will be saved
11152 in the right order. At most three parts are generated. */
11154 static int
11155 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
11157 int size;
11159 if (!TARGET_64BIT)
11160 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
11161 else
11162 size = (GET_MODE_SIZE (mode) + 4) / 8;
11164 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
11165 gcc_assert (size >= 2 && size <= 3);
11167 /* Optimize constant pool reference to immediates. This is used by fp
11168 moves, that force all constants to memory to allow combining. */
11169 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
11171 rtx tmp = maybe_get_pool_constant (operand);
11172 if (tmp)
11173 operand = tmp;
11176 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
11178 /* The only non-offsettable memories we handle are pushes. */
11179 int ok = push_operand (operand, VOIDmode);
11181 gcc_assert (ok);
11183 operand = copy_rtx (operand);
11184 PUT_MODE (operand, Pmode);
11185 parts[0] = parts[1] = parts[2] = operand;
11186 return size;
11189 if (GET_CODE (operand) == CONST_VECTOR)
11191 enum machine_mode imode = int_mode_for_mode (mode);
11192 /* Caution: if we looked through a constant pool memory above,
11193 the operand may actually have a different mode now. That's
11194 ok, since we want to pun this all the way back to an integer. */
11195 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
11196 gcc_assert (operand != NULL);
11197 mode = imode;
11200 if (!TARGET_64BIT)
11202 if (mode == DImode)
11203 split_di (&operand, 1, &parts[0], &parts[1]);
11204 else
11206 if (REG_P (operand))
11208 gcc_assert (reload_completed);
11209 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
11210 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
11211 if (size == 3)
11212 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
11214 else if (offsettable_memref_p (operand))
11216 operand = adjust_address (operand, SImode, 0);
11217 parts[0] = operand;
11218 parts[1] = adjust_address (operand, SImode, 4);
11219 if (size == 3)
11220 parts[2] = adjust_address (operand, SImode, 8);
11222 else if (GET_CODE (operand) == CONST_DOUBLE)
11224 REAL_VALUE_TYPE r;
11225 long l[4];
11227 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11228 switch (mode)
11230 case XFmode:
11231 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
11232 parts[2] = gen_int_mode (l[2], SImode);
11233 break;
11234 case DFmode:
11235 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
11236 break;
11237 default:
11238 gcc_unreachable ();
11240 parts[1] = gen_int_mode (l[1], SImode);
11241 parts[0] = gen_int_mode (l[0], SImode);
11243 else
11244 gcc_unreachable ();
11247 else
11249 if (mode == TImode)
11250 split_ti (&operand, 1, &parts[0], &parts[1]);
11251 if (mode == XFmode || mode == TFmode)
11253 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
11254 if (REG_P (operand))
11256 gcc_assert (reload_completed);
11257 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
11258 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
11260 else if (offsettable_memref_p (operand))
11262 operand = adjust_address (operand, DImode, 0);
11263 parts[0] = operand;
11264 parts[1] = adjust_address (operand, upper_mode, 8);
11266 else if (GET_CODE (operand) == CONST_DOUBLE)
11268 REAL_VALUE_TYPE r;
11269 long l[4];
11271 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
11272 real_to_target (l, &r, mode);
11274 /* Do not use shift by 32 to avoid warning on 32bit systems. */
11275 if (HOST_BITS_PER_WIDE_INT >= 64)
11276 parts[0]
11277 = gen_int_mode
11278 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
11279 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
11280 DImode);
11281 else
11282 parts[0] = immed_double_const (l[0], l[1], DImode);
11284 if (upper_mode == SImode)
11285 parts[1] = gen_int_mode (l[2], SImode);
11286 else if (HOST_BITS_PER_WIDE_INT >= 64)
11287 parts[1]
11288 = gen_int_mode
11289 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
11290 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
11291 DImode);
11292 else
11293 parts[1] = immed_double_const (l[2], l[3], DImode);
11295 else
11296 gcc_unreachable ();
11300 return size;
11303 /* Emit insns to perform a move or push of DI, DF, and XF values.
11304 All required insns are emitted here. Operands 2-4 receive the
11305 destination parts in the correct order; operands 5-7 receive the
11306 corresponding source parts. */
11308 void
11309 ix86_split_long_move (rtx operands[])
11311 rtx part[2][3];
11312 int nparts;
11313 int push = 0;
11314 int collisions = 0;
11315 enum machine_mode mode = GET_MODE (operands[0]);
11317 /* The DFmode expanders may ask us to move a double.
11318 For a 64-bit target this is a single move. By hiding that fact
11319 here we simplify the i386.md splitters. */
11320 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
11322 /* Optimize constant pool reference to immediates. This is used by
11323 fp moves, that force all constants to memory to allow combining. */
11325 if (GET_CODE (operands[1]) == MEM
11326 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
11327 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
11328 operands[1] = get_pool_constant (XEXP (operands[1], 0));
11329 if (push_operand (operands[0], VOIDmode))
11331 operands[0] = copy_rtx (operands[0]);
11332 PUT_MODE (operands[0], Pmode);
11334 else
11335 operands[0] = gen_lowpart (DImode, operands[0]);
11336 operands[1] = gen_lowpart (DImode, operands[1]);
11337 emit_move_insn (operands[0], operands[1]);
11338 return;
11341 /* The only non-offsettable memory we handle is push. */
11342 if (push_operand (operands[0], VOIDmode))
11343 push = 1;
11344 else
11345 gcc_assert (GET_CODE (operands[0]) != MEM
11346 || offsettable_memref_p (operands[0]));
11348 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
11349 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
11351 /* When emitting push, take care for source operands on the stack. */
11352 if (push && GET_CODE (operands[1]) == MEM
11353 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
11355 if (nparts == 3)
11356 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
11357 XEXP (part[1][2], 0));
11358 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
11359 XEXP (part[1][1], 0));
11362 /* We need to do the copy in the right order in case an address register
11363 of the source overlaps the destination. */
11364 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
11366 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
11367 collisions++;
11368 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11369 collisions++;
11370 if (nparts == 3
11371 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
11372 collisions++;
11374 /* Collision in the middle part can be handled by reordering. */
11375 if (collisions == 1 && nparts == 3
11376 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
11378 rtx tmp;
11379 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
11380 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
11383 /* If there are more collisions, we can't handle it by reordering.
11384 Do an lea to the last part and use only one colliding move. */
11385 else if (collisions > 1)
11387 rtx base;
11389 collisions = 1;
11391 base = part[0][nparts - 1];
11393 /* Handle the case when the last part isn't valid for lea.
11394 Happens in 64-bit mode storing the 12-byte XFmode. */
11395 if (GET_MODE (base) != Pmode)
11396 base = gen_rtx_REG (Pmode, REGNO (base));
11398 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
11399 part[1][0] = replace_equiv_address (part[1][0], base);
11400 part[1][1] = replace_equiv_address (part[1][1],
11401 plus_constant (base, UNITS_PER_WORD));
11402 if (nparts == 3)
11403 part[1][2] = replace_equiv_address (part[1][2],
11404 plus_constant (base, 8));
11408 if (push)
11410 if (!TARGET_64BIT)
11412 if (nparts == 3)
11414 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
11415 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
11416 emit_move_insn (part[0][2], part[1][2]);
11419 else
11421 /* In 64-bit mode we don't have a 32-bit push available. In case this is
11422 a register, it is OK - we will just use the larger counterpart. We also
11423 retype memory - these come from the attempt to avoid a REX prefix on
11424 moving the second half of a TFmode value. */
11425 if (GET_MODE (part[1][1]) == SImode)
11427 switch (GET_CODE (part[1][1]))
11429 case MEM:
11430 part[1][1] = adjust_address (part[1][1], DImode, 0);
11431 break;
11433 case REG:
11434 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
11435 break;
11437 default:
11438 gcc_unreachable ();
11441 if (GET_MODE (part[1][0]) == SImode)
11442 part[1][0] = part[1][1];
11445 emit_move_insn (part[0][1], part[1][1]);
11446 emit_move_insn (part[0][0], part[1][0]);
11447 return;
11450 /* Choose the correct order so we do not overwrite the source before it is copied. */
11451 if ((REG_P (part[0][0])
11452 && REG_P (part[1][1])
11453 && (REGNO (part[0][0]) == REGNO (part[1][1])
11454 || (nparts == 3
11455 && REGNO (part[0][0]) == REGNO (part[1][2]))))
11456 || (collisions > 0
11457 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
11459 if (nparts == 3)
11461 operands[2] = part[0][2];
11462 operands[3] = part[0][1];
11463 operands[4] = part[0][0];
11464 operands[5] = part[1][2];
11465 operands[6] = part[1][1];
11466 operands[7] = part[1][0];
11468 else
11470 operands[2] = part[0][1];
11471 operands[3] = part[0][0];
11472 operands[5] = part[1][1];
11473 operands[6] = part[1][0];
11476 else
11478 if (nparts == 3)
11480 operands[2] = part[0][0];
11481 operands[3] = part[0][1];
11482 operands[4] = part[0][2];
11483 operands[5] = part[1][0];
11484 operands[6] = part[1][1];
11485 operands[7] = part[1][2];
11487 else
11489 operands[2] = part[0][0];
11490 operands[3] = part[0][1];
11491 operands[5] = part[1][0];
11492 operands[6] = part[1][1];
11496 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
11497 if (optimize_size)
11499 if (GET_CODE (operands[5]) == CONST_INT
11500 && operands[5] != const0_rtx
11501 && REG_P (operands[2]))
11503 if (GET_CODE (operands[6]) == CONST_INT
11504 && INTVAL (operands[6]) == INTVAL (operands[5]))
11505 operands[6] = operands[2];
11507 if (nparts == 3
11508 && GET_CODE (operands[7]) == CONST_INT
11509 && INTVAL (operands[7]) == INTVAL (operands[5]))
11510 operands[7] = operands[2];
11513 if (nparts == 3
11514 && GET_CODE (operands[6]) == CONST_INT
11515 && operands[6] != const0_rtx
11516 && REG_P (operands[3])
11517 && GET_CODE (operands[7]) == CONST_INT
11518 && INTVAL (operands[7]) == INTVAL (operands[6]))
11519 operands[7] = operands[3];
11522 emit_move_insn (operands[2], operands[5]);
11523 emit_move_insn (operands[3], operands[6]);
11524 if (nparts == 3)
11525 emit_move_insn (operands[4], operands[7]);
11527 return;
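/* A short example of why the ordering above matters: on a 32-bit target,
   loading a DImode value from memory addressed by %eax into the %edx:%eax
   pair must copy the half that does not clobber the address register first,
   roughly

	movl	4(%eax), %edx
	movl	(%eax), %eax

   which is what the collision handling (and, for more collisions, the lea
   fallback) above arranges.  */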
11530 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
11531 left shift by a constant, either using a single shift or
11532 a sequence of add instructions. */
11534 static void
11535 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
11537 if (count == 1)
11539 emit_insn ((mode == DImode
11540 ? gen_addsi3
11541 : gen_adddi3) (operand, operand, operand));
11543 else if (!optimize_size
11544 && count * ix86_cost->add <= ix86_cost->shift_const)
11546 int i;
11547 for (i=0; i<count; i++)
11549 emit_insn ((mode == DImode
11550 ? gen_addsi3
11551 : gen_adddi3) (operand, operand, operand));
11554 else
11555 emit_insn ((mode == DImode
11556 ? gen_ashlsi3
11557 : gen_ashldi3) (operand, operand, GEN_INT (count)));
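/* For illustration: a constant shift left by one of a single word is
   emitted as

	addl	%eax, %eax

   (addq on 64-bit), and larger constant counts fall back to a plain
   sall/salq unless, per the cost tables, repeating the add is cheaper and
   we are not optimizing for size.  */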
11560 void
11561 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
11563 rtx low[2], high[2];
11564 int count;
11565 const int single_width = mode == DImode ? 32 : 64;
11567 if (GET_CODE (operands[2]) == CONST_INT)
11569 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11570 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11572 if (count >= single_width)
11574 emit_move_insn (high[0], low[1]);
11575 emit_move_insn (low[0], const0_rtx);
11577 if (count > single_width)
11578 ix86_expand_ashl_const (high[0], count - single_width, mode);
11580 else
11582 if (!rtx_equal_p (operands[0], operands[1]))
11583 emit_move_insn (operands[0], operands[1]);
11584 emit_insn ((mode == DImode
11585 ? gen_x86_shld_1
11586 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
11587 ix86_expand_ashl_const (low[0], count, mode);
11589 return;
11592 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11594 if (operands[1] == const1_rtx)
11596 /* Assuming we've chosen QImode-capable registers, then 1 << N
11597 can be done with two 32/64-bit shifts, no branches, no cmoves. */
11598 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
11600 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
11602 ix86_expand_clear (low[0]);
11603 ix86_expand_clear (high[0]);
11604 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
11606 d = gen_lowpart (QImode, low[0]);
11607 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11608 s = gen_rtx_EQ (QImode, flags, const0_rtx);
11609 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11611 d = gen_lowpart (QImode, high[0]);
11612 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
11613 s = gen_rtx_NE (QImode, flags, const0_rtx);
11614 emit_insn (gen_rtx_SET (VOIDmode, d, s));
11617 /* Otherwise, we can get the same results by manually performing
11618 a bit extract operation on bit 5/6, and then performing the two
11619 shifts. The two methods of getting 0/1 into low/high are exactly
11620 the same size. Avoiding the shift in the bit extract case helps
11621 pentium4 a bit; no one else seems to care much either way. */
11622 else
11624 rtx x;
11626 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
11627 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
11628 else
11629 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
11630 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
11632 emit_insn ((mode == DImode
11633 ? gen_lshrsi3
11634 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
11635 emit_insn ((mode == DImode
11636 ? gen_andsi3
11637 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
11638 emit_move_insn (low[0], high[0]);
11639 emit_insn ((mode == DImode
11640 ? gen_xorsi3
11641 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
11644 emit_insn ((mode == DImode
11645 ? gen_ashlsi3
11646 : gen_ashldi3) (low[0], low[0], operands[2]));
11647 emit_insn ((mode == DImode
11648 ? gen_ashlsi3
11649 : gen_ashldi3) (high[0], high[0], operands[2]));
11650 return;
11653 if (operands[1] == constm1_rtx)
11655 /* For -1 << N, we can avoid the shld instruction, because we
11656 know that we're shifting 0...31/63 ones into a -1. */
11657 emit_move_insn (low[0], constm1_rtx);
11658 if (optimize_size)
11659 emit_move_insn (high[0], low[0]);
11660 else
11661 emit_move_insn (high[0], constm1_rtx);
11663 else
11665 if (!rtx_equal_p (operands[0], operands[1]))
11666 emit_move_insn (operands[0], operands[1]);
11668 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11669 emit_insn ((mode == DImode
11670 ? gen_x86_shld_1
11671 : gen_x86_64_shld) (high[0], low[0], operands[2]));
11674 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
11676 if (TARGET_CMOVE && scratch)
11678 ix86_expand_clear (scratch);
11679 emit_insn ((mode == DImode
11680 ? gen_x86_shift_adj_1
11681 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
11683 else
11684 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
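/* Putting the non-constant case together, a variable DImode shift left on a
   32-bit target is roughly

	shldl	%cl, %eax, %edx		# fill the high word from the low word
	sall	%cl, %eax

   followed by an adjustment, because the hardware masks the count to five
   bits: when bit 5 of the count is set, the low word is moved into the high
   word and the low word cleared, either with cmov (when a scratch register
   is available) or with a short branch.  */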
11687 void
11688 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
11690 rtx low[2], high[2];
11691 int count;
11692 const int single_width = mode == DImode ? 32 : 64;
11694 if (GET_CODE (operands[2]) == CONST_INT)
11696 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11697 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11699 if (count == single_width * 2 - 1)
11701 emit_move_insn (high[0], high[1]);
11702 emit_insn ((mode == DImode
11703 ? gen_ashrsi3
11704 : gen_ashrdi3) (high[0], high[0],
11705 GEN_INT (single_width - 1)));
11706 emit_move_insn (low[0], high[0]);
11709 else if (count >= single_width)
11711 emit_move_insn (low[0], high[1]);
11712 emit_move_insn (high[0], low[0]);
11713 emit_insn ((mode == DImode
11714 ? gen_ashrsi3
11715 : gen_ashrdi3) (high[0], high[0],
11716 GEN_INT (single_width - 1)));
11717 if (count > single_width)
11718 emit_insn ((mode == DImode
11719 ? gen_ashrsi3
11720 : gen_ashrdi3) (low[0], low[0],
11721 GEN_INT (count - single_width)));
11723 else
11725 if (!rtx_equal_p (operands[0], operands[1]))
11726 emit_move_insn (operands[0], operands[1]);
11727 emit_insn ((mode == DImode
11728 ? gen_x86_shrd_1
11729 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11730 emit_insn ((mode == DImode
11731 ? gen_ashrsi3
11732 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
11735 else
11737 if (!rtx_equal_p (operands[0], operands[1]))
11738 emit_move_insn (operands[0], operands[1]);
11740 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11742 emit_insn ((mode == DImode
11743 ? gen_x86_shrd_1
11744 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11745 emit_insn ((mode == DImode
11746 ? gen_ashrsi3
11747 : gen_ashrdi3) (high[0], high[0], operands[2]));
11749 if (TARGET_CMOVE && scratch)
11751 emit_move_insn (scratch, high[0]);
11752 emit_insn ((mode == DImode
11753 ? gen_ashrsi3
11754 : gen_ashrdi3) (scratch, scratch,
11755 GEN_INT (single_width - 1)));
11756 emit_insn ((mode == DImode
11757 ? gen_x86_shift_adj_1
11758 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11759 scratch));
11761 else
11762 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
11766 void
11767 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
11769 rtx low[2], high[2];
11770 int count;
11771 const int single_width = mode == DImode ? 32 : 64;
11773 if (GET_CODE (operands[2]) == CONST_INT)
11775 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
11776 count = INTVAL (operands[2]) & (single_width * 2 - 1);
11778 if (count >= single_width)
11780 emit_move_insn (low[0], high[1]);
11781 ix86_expand_clear (high[0]);
11783 if (count > single_width)
11784 emit_insn ((mode == DImode
11785 ? gen_lshrsi3
11786 : gen_lshrdi3) (low[0], low[0],
11787 GEN_INT (count - single_width)));
11789 else
11791 if (!rtx_equal_p (operands[0], operands[1]))
11792 emit_move_insn (operands[0], operands[1]);
11793 emit_insn ((mode == DImode
11794 ? gen_x86_shrd_1
11795 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
11796 emit_insn ((mode == DImode
11797 ? gen_lshrsi3
11798 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
11801 else
11803 if (!rtx_equal_p (operands[0], operands[1]))
11804 emit_move_insn (operands[0], operands[1]);
11806 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
11808 emit_insn ((mode == DImode
11809 ? gen_x86_shrd_1
11810 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
11811 emit_insn ((mode == DImode
11812 ? gen_lshrsi3
11813 : gen_lshrdi3) (high[0], high[0], operands[2]));
11815 /* Heh. By reversing the arguments, we can reuse this pattern. */
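/* (The x86 shift_adj patterns in i386.md, written for left shifts, copy their
   second operand into the first and the cleared SCRATCH into the second when
   the count has its single-word bit (32, resp. 64) set; with LOW and HIGH
   swapped that is exactly the fix-up a logical right shift needs.)  */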
11816 if (TARGET_CMOVE && scratch)
11818 ix86_expand_clear (scratch);
11819 emit_insn ((mode == DImode
11820 ? gen_x86_shift_adj_1
11821 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
11822 scratch));
11824 else
11825 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
11829 /* Helper function for the string operations below. Test whether the bits of
11830 VARIABLE selected by VALUE are zero; if so, jump to the returned label. */
11831 static rtx
11832 ix86_expand_aligntest (rtx variable, int value)
11834 rtx label = gen_label_rtx ();
11835 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
11836 if (GET_MODE (variable) == DImode)
11837 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
11838 else
11839 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
11840 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
11841 1, label);
11842 return label;
11845 /* Adjust COUNTER by the VALUE. */
11846 static void
11847 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
11849 if (GET_MODE (countreg) == DImode)
11850 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
11851 else
11852 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
11855 /* Zero extend possibly SImode EXP to Pmode register. */
11857 ix86_zero_extend_to_Pmode (rtx exp)
11859 rtx r;
11860 if (GET_MODE (exp) == VOIDmode)
11861 return force_reg (Pmode, exp);
11862 if (GET_MODE (exp) == Pmode)
11863 return copy_to_mode_reg (Pmode, exp);
11864 r = gen_reg_rtx (Pmode);
11865 emit_insn (gen_zero_extendsidi2 (r, exp));
11866 return r;
11869 /* Expand string move (memcpy) operation. Use i386 string operations when
11870 profitable. expand_clrmem contains similar code. */
11872 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
11874 rtx srcreg, destreg, countreg, srcexp, destexp;
11875 enum machine_mode counter_mode;
11876 HOST_WIDE_INT align = 0;
11877 unsigned HOST_WIDE_INT count = 0;
11879 if (GET_CODE (align_exp) == CONST_INT)
11880 align = INTVAL (align_exp);
11882 /* Can't use any of this if the user has appropriated esi or edi. */
11883 if (global_regs[4] || global_regs[5])
11884 return 0;
11886 /* This simple hack avoids all inlining code and simplifies code below. */
11887 if (!TARGET_ALIGN_STRINGOPS)
11888 align = 64;
11890 if (GET_CODE (count_exp) == CONST_INT)
11892 count = INTVAL (count_exp);
11893 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11894 return 0;
11897 /* Figure out the proper mode for the counter. For 32-bit targets it is always
11898 SImode; for 64-bit targets use SImode when possible, otherwise DImode.
11899 Set count to number of bytes copied when known at compile time. */
11900 if (!TARGET_64BIT
11901 || GET_MODE (count_exp) == SImode
11902 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
11903 counter_mode = SImode;
11904 else
11905 counter_mode = DImode;
11907 gcc_assert (counter_mode == SImode || counter_mode == DImode);
11909 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
11910 if (destreg != XEXP (dst, 0))
11911 dst = replace_equiv_address_nv (dst, destreg);
11912 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11913 if (srcreg != XEXP (src, 0))
11914 src = replace_equiv_address_nv (src, srcreg);
11916 /* When optimizing for size emit simple rep ; movsb instruction for
11917 counts not divisible by 4, except when (movsl;)*(movsw;)?(movsb;)?
11918 sequence is shorter than mov{b,l} $count, %{ecx,cl}; rep; movsb.
11919 Size of the (movsl;)*(movsw;)?(movsb;)? sequence is
11920 count / 4 + (count & 3) bytes; the other sequence is either 4 or 7 bytes,
11921 depending on whether the upper 24 (resp. 56) bits of %ecx are known
11922 to be zero. The rep; movsb sequence causes higher
11923 register pressure though, so take that into account. */
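/* A worked instance of the heuristic below: with -Os and count == 22,
   (count & 3) + count / 4 == 2 + 5 == 7 > 6 (and count > 20), so the
   rep ; movsb form is emitted here; with count == 11 the unrolled form
   is only 3 + 2 == 5 bytes, so control falls through to the next branch.  */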
11925 if ((!optimize || optimize_size)
11926 && (count == 0
11927 || ((count & 0x03)
11928 && (!optimize_size
11929 || count > 5 * 4
11930 || (count & 3) + count / 4 > 6))))
11932 emit_insn (gen_cld ());
11933 countreg = ix86_zero_extend_to_Pmode (count_exp);
11934 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
11935 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
11936 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
11937 destexp, srcexp));
11940 /* For constant aligned (or small unaligned) copies use rep movsl
11941 followed by code copying the rest. For PentiumPro ensure 8 byte
11942 alignment to allow rep movsl acceleration. */
11944 else if (count != 0
11945 && (align >= 8
11946 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11947 || optimize_size || count < (unsigned int) 64))
11949 unsigned HOST_WIDE_INT offset = 0;
11950 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11951 rtx srcmem, dstmem;
11953 emit_insn (gen_cld ());
11954 if (count & ~(size - 1))
11956 if ((TARGET_SINGLE_STRINGOP || optimize_size) && count < 5 * 4)
11958 enum machine_mode movs_mode = size == 4 ? SImode : DImode;
11960 while (offset < (count & ~(size - 1)))
11962 srcmem = adjust_automodify_address_nv (src, movs_mode,
11963 srcreg, offset);
11964 dstmem = adjust_automodify_address_nv (dst, movs_mode,
11965 destreg, offset);
11966 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11967 offset += size;
11970 else
11972 countreg = GEN_INT ((count >> (size == 4 ? 2 : 3))
11973 & (TARGET_64BIT ? -1 : 0x3fffffff));
11974 countreg = copy_to_mode_reg (counter_mode, countreg);
11975 countreg = ix86_zero_extend_to_Pmode (countreg);
11977 destexp = gen_rtx_ASHIFT (Pmode, countreg,
11978 GEN_INT (size == 4 ? 2 : 3));
11979 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
11980 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
11982 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
11983 countreg, destexp, srcexp));
11984 offset = count & ~(size - 1);
11987 if (size == 8 && (count & 0x04))
11989 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
11990 offset);
11991 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
11992 offset);
11993 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
11994 offset += 4;
11996 if (count & 0x02)
11998 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
11999 offset);
12000 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
12001 offset);
12002 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12003 offset += 2;
12005 if (count & 0x01)
12007 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
12008 offset);
12009 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
12010 offset);
12011 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12014 /* The generic code based on the glibc implementation:
12015 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
12016 allowing accelerated copying there)
12017 - copy the data using rep movsl
12018 - copy the rest. */
12019 else
12021 rtx countreg2;
12022 rtx label = NULL;
12023 rtx srcmem, dstmem;
12024 int desired_alignment = (TARGET_PENTIUMPRO
12025 && (count == 0 || count >= (unsigned int) 260)
12026 ? 8 : UNITS_PER_WORD);
12027 /* Get rid of MEM_OFFSETs, they won't be accurate. */
12028 dst = change_address (dst, BLKmode, destreg);
12029 src = change_address (src, BLKmode, srcreg);
12031 /* In case we don't know anything about the alignment, default to
12032 library version, since it is usually equally fast and results in
12033 shorter code.
12035 Also emit call when we know that the count is large and call overhead
12036 will not be important. */
12037 if (!TARGET_INLINE_ALL_STRINGOPS
12038 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12039 return 0;
12041 if (TARGET_SINGLE_STRINGOP)
12042 emit_insn (gen_cld ());
12044 countreg2 = gen_reg_rtx (Pmode);
12045 countreg = copy_to_mode_reg (counter_mode, count_exp);
12047 /* We don't use loops to align destination and to copy parts smaller
12048 than 4 bytes, because gcc is able to optimize such code better (in
12049 the case the destination or the count really is aligned, gcc is often
12050 able to predict the branches) and also it is friendlier to the
12051 hardware branch prediction.
12053 Using loops is beneficial for generic case, because we can
12054 handle small counts using the loops. Many CPUs (such as Athlon)
12055 have large REP prefix setup costs.
12057 This is quite costly. Maybe we can revisit this decision later or
12058 add some customizability to this code. */
12060 if (count == 0 && align < desired_alignment)
12062 label = gen_label_rtx ();
12063 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12064 LEU, 0, counter_mode, 1, label);
12066 if (align <= 1)
12068 rtx label = ix86_expand_aligntest (destreg, 1);
12069 srcmem = change_address (src, QImode, srcreg);
12070 dstmem = change_address (dst, QImode, destreg);
12071 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12072 ix86_adjust_counter (countreg, 1);
12073 emit_label (label);
12074 LABEL_NUSES (label) = 1;
12076 if (align <= 2)
12078 rtx label = ix86_expand_aligntest (destreg, 2);
12079 srcmem = change_address (src, HImode, srcreg);
12080 dstmem = change_address (dst, HImode, destreg);
12081 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12082 ix86_adjust_counter (countreg, 2);
12083 emit_label (label);
12084 LABEL_NUSES (label) = 1;
12086 if (align <= 4 && desired_alignment > 4)
12088 rtx label = ix86_expand_aligntest (destreg, 4);
12089 srcmem = change_address (src, SImode, srcreg);
12090 dstmem = change_address (dst, SImode, destreg);
12091 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12092 ix86_adjust_counter (countreg, 4);
12093 emit_label (label);
12094 LABEL_NUSES (label) = 1;
12097 if (label && desired_alignment > 4 && !TARGET_64BIT)
12099 emit_label (label);
12100 LABEL_NUSES (label) = 1;
12101 label = NULL_RTX;
12103 if (!TARGET_SINGLE_STRINGOP)
12104 emit_insn (gen_cld ());
12105 if (TARGET_64BIT)
12107 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12108 GEN_INT (3)));
12109 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12111 else
12113 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12114 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12116 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
12117 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12118 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
12119 countreg2, destexp, srcexp));
12121 if (label)
12123 emit_label (label);
12124 LABEL_NUSES (label) = 1;
12126 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12128 srcmem = change_address (src, SImode, srcreg);
12129 dstmem = change_address (dst, SImode, destreg);
12130 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12132 if ((align <= 4 || count == 0) && TARGET_64BIT)
12134 rtx label = ix86_expand_aligntest (countreg, 4);
12135 srcmem = change_address (src, SImode, srcreg);
12136 dstmem = change_address (dst, SImode, destreg);
12137 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12138 emit_label (label);
12139 LABEL_NUSES (label) = 1;
12141 if (align > 2 && count != 0 && (count & 2))
12143 srcmem = change_address (src, HImode, srcreg);
12144 dstmem = change_address (dst, HImode, destreg);
12145 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12147 if (align <= 2 || count == 0)
12149 rtx label = ix86_expand_aligntest (countreg, 2);
12150 srcmem = change_address (src, HImode, srcreg);
12151 dstmem = change_address (dst, HImode, destreg);
12152 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12153 emit_label (label);
12154 LABEL_NUSES (label) = 1;
12156 if (align > 1 && count != 0 && (count & 1))
12158 srcmem = change_address (src, QImode, srcreg);
12159 dstmem = change_address (dst, QImode, destreg);
12160 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12162 if (align <= 1 || count == 0)
12164 rtx label = ix86_expand_aligntest (countreg, 1);
12165 srcmem = change_address (src, QImode, srcreg);
12166 dstmem = change_address (dst, QImode, destreg);
12167 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
12168 emit_label (label);
12169 LABEL_NUSES (label) = 1;
12173 return 1;
12176 /* Expand string clear operation (bzero). Use i386 string operations when
12177 profitable. expand_movmem contains similar code. */
12179 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
12181 rtx destreg, zeroreg, countreg, destexp;
12182 enum machine_mode counter_mode;
12183 HOST_WIDE_INT align = 0;
12184 unsigned HOST_WIDE_INT count = 0;
12186 if (GET_CODE (align_exp) == CONST_INT)
12187 align = INTVAL (align_exp);
12189 /* Can't use any of this if the user has appropriated esi. */
12190 if (global_regs[4])
12191 return 0;
12193 /* This simple hack avoids all inlining code and simplifies code below. */
12194 if (!TARGET_ALIGN_STRINGOPS)
12195 align = 32;
12197 if (GET_CODE (count_exp) == CONST_INT)
12199 count = INTVAL (count_exp);
12200 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
12201 return 0;
12203 /* Figure out the proper mode for the counter. For 32-bit targets it is always
12204 SImode; for 64-bit targets use SImode when possible, otherwise DImode.
12205 Set count to the number of bytes cleared when known at compile time. */
12206 if (!TARGET_64BIT
12207 || GET_MODE (count_exp) == SImode
12208 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
12209 counter_mode = SImode;
12210 else
12211 counter_mode = DImode;
12213 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
12214 if (destreg != XEXP (dst, 0))
12215 dst = replace_equiv_address_nv (dst, destreg);
12218 /* When optimizing for size emit a simple rep ; stosb instruction for
12219 counts not divisible by 4. The movl $N, %ecx; rep; stosb
12220 sequence is 7 bytes long, so if optimizing for size and count is
12221 small enough that some stosl, stosw and stosb instructions without
12222 rep are shorter, fall back into the next if. */
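/* For example, with -Os and count == 35, (count & 3) + (count >> 2)
   == 3 + 8 == 11 > 7, so rep ; stosb is emitted here; with count == 13
   it is only 1 + 3 == 4, so the stosl/stosw/stosb path below is used instead.  */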
12224 if ((!optimize || optimize_size)
12225 && (count == 0
12226 || ((count & 0x03)
12227 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
12229 emit_insn (gen_cld ());
12231 countreg = ix86_zero_extend_to_Pmode (count_exp);
12232 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
12233 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
12234 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
12236 else if (count != 0
12237 && (align >= 8
12238 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
12239 || optimize_size || count < (unsigned int) 64))
12241 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
12242 unsigned HOST_WIDE_INT offset = 0;
12244 emit_insn (gen_cld ());
12246 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
12247 if (count & ~(size - 1))
12249 unsigned HOST_WIDE_INT repcount;
12250 unsigned int max_nonrep;
12252 repcount = count >> (size == 4 ? 2 : 3);
12253 if (!TARGET_64BIT)
12254 repcount &= 0x3fffffff;
12256 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
12257 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
12258 bytes. In both cases the latter seems to be faster for small
12259 values of N. */
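/* Hence the defaults below: up to 7 inline stosl (7 bytes) never exceed the
   7-byte rep form, and up to 4 inline stosq (8 bytes) roughly match the
   8-byte rep form; Pentium 4 / Nocona switch to the rep form even earlier.  */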
12260 max_nonrep = size == 4 ? 7 : 4;
12261 if (!optimize_size)
12262 switch (ix86_tune)
12264 case PROCESSOR_PENTIUM4:
12265 case PROCESSOR_NOCONA:
12266 max_nonrep = 3;
12267 break;
12268 default:
12269 break;
12272 if (repcount <= max_nonrep)
12273 while (repcount-- > 0)
12275 rtx mem = adjust_automodify_address_nv (dst,
12276 GET_MODE (zeroreg),
12277 destreg, offset);
12278 emit_insn (gen_strset (destreg, mem, zeroreg));
12279 offset += size;
12281 else
12283 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
12284 countreg = ix86_zero_extend_to_Pmode (countreg);
12285 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12286 GEN_INT (size == 4 ? 2 : 3));
12287 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12288 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
12289 destexp));
12290 offset = count & ~(size - 1);
12293 if (size == 8 && (count & 0x04))
12295 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
12296 offset);
12297 emit_insn (gen_strset (destreg, mem,
12298 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12299 offset += 4;
12301 if (count & 0x02)
12303 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
12304 offset);
12305 emit_insn (gen_strset (destreg, mem,
12306 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12307 offset += 2;
12309 if (count & 0x01)
12311 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
12312 offset);
12313 emit_insn (gen_strset (destreg, mem,
12314 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12317 else
12319 rtx countreg2;
12320 rtx label = NULL;
12321 /* Compute desired alignment of the string operation. */
12322 int desired_alignment = (TARGET_PENTIUMPRO
12323 && (count == 0 || count >= (unsigned int) 260)
12324 ? 8 : UNITS_PER_WORD);
12326 /* In case we don't know anything about the alignment, default to
12327 library version, since it is usually equally fast and results in
12328 shorter code.
12330 Also emit call when we know that the count is large and call overhead
12331 will not be important. */
12332 if (!TARGET_INLINE_ALL_STRINGOPS
12333 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
12334 return 0;
12336 if (TARGET_SINGLE_STRINGOP)
12337 emit_insn (gen_cld ());
12339 countreg2 = gen_reg_rtx (Pmode);
12340 countreg = copy_to_mode_reg (counter_mode, count_exp);
12341 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
12342 /* Get rid of MEM_OFFSET, it won't be accurate. */
12343 dst = change_address (dst, BLKmode, destreg);
12345 if (count == 0 && align < desired_alignment)
12347 label = gen_label_rtx ();
12348 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
12349 LEU, 0, counter_mode, 1, label);
12351 if (align <= 1)
12353 rtx label = ix86_expand_aligntest (destreg, 1);
12354 emit_insn (gen_strset (destreg, dst,
12355 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12356 ix86_adjust_counter (countreg, 1);
12357 emit_label (label);
12358 LABEL_NUSES (label) = 1;
12360 if (align <= 2)
12362 rtx label = ix86_expand_aligntest (destreg, 2);
12363 emit_insn (gen_strset (destreg, dst,
12364 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12365 ix86_adjust_counter (countreg, 2);
12366 emit_label (label);
12367 LABEL_NUSES (label) = 1;
12369 if (align <= 4 && desired_alignment > 4)
12371 rtx label = ix86_expand_aligntest (destreg, 4);
12372 emit_insn (gen_strset (destreg, dst,
12373 (TARGET_64BIT
12374 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
12375 : zeroreg)));
12376 ix86_adjust_counter (countreg, 4);
12377 emit_label (label);
12378 LABEL_NUSES (label) = 1;
12381 if (label && desired_alignment > 4 && !TARGET_64BIT)
12383 emit_label (label);
12384 LABEL_NUSES (label) = 1;
12385 label = NULL_RTX;
12388 if (!TARGET_SINGLE_STRINGOP)
12389 emit_insn (gen_cld ());
12390 if (TARGET_64BIT)
12392 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
12393 GEN_INT (3)));
12394 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
12396 else
12398 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
12399 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
12401 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
12402 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
12404 if (label)
12406 emit_label (label);
12407 LABEL_NUSES (label) = 1;
12410 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
12411 emit_insn (gen_strset (destreg, dst,
12412 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12413 if (TARGET_64BIT && (align <= 4 || count == 0))
12415 rtx label = ix86_expand_aligntest (countreg, 4);
12416 emit_insn (gen_strset (destreg, dst,
12417 gen_rtx_SUBREG (SImode, zeroreg, 0)));
12418 emit_label (label);
12419 LABEL_NUSES (label) = 1;
12421 if (align > 2 && count != 0 && (count & 2))
12422 emit_insn (gen_strset (destreg, dst,
12423 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12424 if (align <= 2 || count == 0)
12426 rtx label = ix86_expand_aligntest (countreg, 2);
12427 emit_insn (gen_strset (destreg, dst,
12428 gen_rtx_SUBREG (HImode, zeroreg, 0)));
12429 emit_label (label);
12430 LABEL_NUSES (label) = 1;
12432 if (align > 1 && count != 0 && (count & 1))
12433 emit_insn (gen_strset (destreg, dst,
12434 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12435 if (align <= 1 || count == 0)
12437 rtx label = ix86_expand_aligntest (countreg, 1);
12438 emit_insn (gen_strset (destreg, dst,
12439 gen_rtx_SUBREG (QImode, zeroreg, 0)));
12440 emit_label (label);
12441 LABEL_NUSES (label) = 1;
12444 return 1;
12447 /* Expand strlen. */
12449 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
12451 rtx addr, scratch1, scratch2, scratch3, scratch4;
12453 /* The generic case of the strlen expander is long. Avoid expanding it
12454 unless TARGET_INLINE_ALL_STRINGOPS. */
12456 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12457 && !TARGET_INLINE_ALL_STRINGOPS
12458 && !optimize_size
12459 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
12460 return 0;
12462 addr = force_reg (Pmode, XEXP (src, 0));
12463 scratch1 = gen_reg_rtx (Pmode);
12465 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
12466 && !optimize_size)
12468 /* It seems that some optimizers do not combine a call like
12469 foo (strlen (bar), strlen (bar));
12470 when the move and the subtraction are done here, even though the length
12471 is calculated just once when these instructions are emitted inside
12472 output_strlen_unroll(). But since &bar[strlen(bar)] is often used and
12473 this uses one fewer register for the lifetime of
12474 output_strlen_unroll(), it still seems better to do it here. */
12476 emit_move_insn (out, addr);
12478 ix86_expand_strlensi_unroll_1 (out, src, align);
12480 /* strlensi_unroll_1 returns the address of the zero at the end of
12481 the string, like memchr(), so compute the length by subtracting
12482 the start address. */
12483 if (TARGET_64BIT)
12484 emit_insn (gen_subdi3 (out, out, addr));
12485 else
12486 emit_insn (gen_subsi3 (out, out, addr));
12488 else
12490 rtx unspec;
12491 scratch2 = gen_reg_rtx (Pmode);
12492 scratch3 = gen_reg_rtx (Pmode);
12493 scratch4 = force_reg (Pmode, constm1_rtx);
12495 emit_move_insn (scratch3, addr);
12496 eoschar = force_reg (QImode, eoschar);
12498 emit_insn (gen_cld ());
12499 src = replace_equiv_address_nv (src, scratch3);
12501 /* If .md starts supporting :P, this can be done in .md. */
12502 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
12503 scratch4), UNSPEC_SCAS);
12504 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
12505 if (TARGET_64BIT)
12507 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
12508 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
12510 else
12512 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
12513 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
12516 return 1;
12519 /* Expand the appropriate insns for doing strlen if not just doing
12520 repnz; scasb
12522 out = result, initialized with the start address
12523 align_rtx = alignment of the address.
12524 scratch = scratch register, initialized with the start address when
12525 not aligned, otherwise undefined
12527 This is just the body. It needs the initializations mentioned above and
12528 some address computing at the end. These things are done in i386.md. */
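/* Roughly: the code below first tests up to three unaligned leading bytes one
   at a time, then scans the string a word (4 bytes) at a time using the
   zero-byte trick documented at the loop, and finally steps OUT back onto the
   terminating zero byte.  */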
12530 static void
12531 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
12533 int align;
12534 rtx tmp;
12535 rtx align_2_label = NULL_RTX;
12536 rtx align_3_label = NULL_RTX;
12537 rtx align_4_label = gen_label_rtx ();
12538 rtx end_0_label = gen_label_rtx ();
12539 rtx mem;
12540 rtx tmpreg = gen_reg_rtx (SImode);
12541 rtx scratch = gen_reg_rtx (SImode);
12542 rtx cmp;
12544 align = 0;
12545 if (GET_CODE (align_rtx) == CONST_INT)
12546 align = INTVAL (align_rtx);
12548 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
12550 /* Is there a known alignment and is it less than 4? */
12551 if (align < 4)
12553 rtx scratch1 = gen_reg_rtx (Pmode);
12554 emit_move_insn (scratch1, out);
12555 /* Is there a known alignment and is it not 2? */
12556 if (align != 2)
12558 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
12559 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
12561 /* Leave just the 3 lower bits. */
12562 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
12563 NULL_RTX, 0, OPTAB_WIDEN);
12565 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12566 Pmode, 1, align_4_label);
12567 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
12568 Pmode, 1, align_2_label);
12569 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
12570 Pmode, 1, align_3_label);
12572 else
12574 /* Since the alignment is 2, we have to check 2 or 0 bytes;
12575 check whether it is aligned to a 4-byte boundary. */
12577 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
12578 NULL_RTX, 0, OPTAB_WIDEN);
12580 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
12581 Pmode, 1, align_4_label);
12584 mem = change_address (src, QImode, out);
12586 /* Now compare the bytes. */
12588 /* Compare the first n unaligned byte on a byte per byte basis. */
12589 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
12590 QImode, 1, end_0_label);
12592 /* Increment the address. */
12593 if (TARGET_64BIT)
12594 emit_insn (gen_adddi3 (out, out, const1_rtx));
12595 else
12596 emit_insn (gen_addsi3 (out, out, const1_rtx));
12598 /* Not needed with an alignment of 2 */
12599 if (align != 2)
12601 emit_label (align_2_label);
12603 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12604 end_0_label);
12606 if (TARGET_64BIT)
12607 emit_insn (gen_adddi3 (out, out, const1_rtx));
12608 else
12609 emit_insn (gen_addsi3 (out, out, const1_rtx));
12611 emit_label (align_3_label);
12614 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
12615 end_0_label);
12617 if (TARGET_64BIT)
12618 emit_insn (gen_adddi3 (out, out, const1_rtx));
12619 else
12620 emit_insn (gen_addsi3 (out, out, const1_rtx));
12623 /* Generate loop to check 4 bytes at a time. It is not a good idea to
12624 align this loop; it only makes the program larger without making it
12625 any faster. */
12626 emit_label (align_4_label);
12628 mem = change_address (src, SImode, out);
12629 emit_move_insn (scratch, mem);
12630 if (TARGET_64BIT)
12631 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
12632 else
12633 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
12635 /* This formula yields a nonzero result iff one of the bytes is zero.
12636 This saves three branches inside the loop and many cycles. */
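/* In C terms the loop computes, for the loaded word w,
   (w - 0x01010101) & ~w & 0x80808080:
   subtracting 1 from a zero byte borrows into its top bit, and the ~w
   term masks off bytes whose top bit was already set, so the result is
   nonzero exactly when some byte of w is zero.  */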
12638 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
12639 emit_insn (gen_one_cmplsi2 (scratch, scratch));
12640 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
12641 emit_insn (gen_andsi3 (tmpreg, tmpreg,
12642 gen_int_mode (0x80808080, SImode)));
12643 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
12644 align_4_label);
12646 if (TARGET_CMOVE)
12648 rtx reg = gen_reg_rtx (SImode);
12649 rtx reg2 = gen_reg_rtx (Pmode);
12650 emit_move_insn (reg, tmpreg);
12651 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
12653 /* If zero is not in the first two bytes, move two bytes forward. */
12654 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12655 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12656 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12657 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
12658 gen_rtx_IF_THEN_ELSE (SImode, tmp,
12659 reg,
12660 tmpreg)));
12661 /* Emit lea manually to avoid clobbering of flags. */
12662 emit_insn (gen_rtx_SET (SImode, reg2,
12663 gen_rtx_PLUS (Pmode, out, const2_rtx)));
12665 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12666 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
12667 emit_insn (gen_rtx_SET (VOIDmode, out,
12668 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
12669 reg2,
12670 out)));
12673 else
12675 rtx end_2_label = gen_label_rtx ();
12676 /* Is zero in the first two bytes? */
12678 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
12679 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
12680 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
12681 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
12682 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
12683 pc_rtx);
12684 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
12685 JUMP_LABEL (tmp) = end_2_label;
12687 /* Not in the first two. Move two bytes forward. */
12688 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
12689 if (TARGET_64BIT)
12690 emit_insn (gen_adddi3 (out, out, const2_rtx));
12691 else
12692 emit_insn (gen_addsi3 (out, out, const2_rtx));
12694 emit_label (end_2_label);
12698 /* Avoid branch in fixing the byte. */
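/* At this point OUT is four bytes past the first byte of the 16-bit half
   that holds the terminating zero, and bit 7 of the low byte of TMPREG is
   set exactly when that zero is the first byte of the half. Adding the byte
   to itself moves that bit into the carry flag, so the subtract-with-borrow
   of 3 below lands OUT on the zero byte (subtracting 4 when the carry is
   set, 3 otherwise) without a branch.  */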
12699 tmpreg = gen_lowpart (QImode, tmpreg);
12700 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
12701 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
12702 if (TARGET_64BIT)
12703 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
12704 else
12705 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
12707 emit_label (end_0_label);
12710 void
12711 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
12712 rtx callarg2 ATTRIBUTE_UNUSED,
12713 rtx pop, int sibcall)
12715 rtx use = NULL, call;
12717 if (pop == const0_rtx)
12718 pop = NULL;
12719 gcc_assert (!TARGET_64BIT || !pop);
12721 #if TARGET_MACHO
12722 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
12723 fnaddr = machopic_indirect_call_target (fnaddr);
12724 #else
12725 /* Static functions and indirect calls don't need the pic register. */
12726 if (! TARGET_64BIT && flag_pic
12727 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
12728 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
12729 use_reg (&use, pic_offset_table_rtx);
12731 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
12733 rtx al = gen_rtx_REG (QImode, 0);
12734 emit_move_insn (al, callarg2);
12735 use_reg (&use, al);
12737 #endif /* TARGET_MACHO */
12739 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
12741 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12742 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12744 if (sibcall && TARGET_64BIT
12745 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
12747 rtx addr;
12748 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
12749 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
12750 emit_move_insn (fnaddr, addr);
12751 fnaddr = gen_rtx_MEM (QImode, fnaddr);
12754 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
12755 if (retval)
12756 call = gen_rtx_SET (VOIDmode, retval, call);
12757 if (pop)
12759 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
12760 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
12761 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
12764 call = emit_call_insn (call);
12765 if (use)
12766 CALL_INSN_FUNCTION_USAGE (call) = use;
12770 /* Clear stack slot assignments remembered from previous functions.
12771 This is called from INIT_EXPANDERS once before RTL is emitted for each
12772 function. */
12774 static struct machine_function *
12775 ix86_init_machine_status (void)
12777 struct machine_function *f;
12779 f = ggc_alloc_cleared (sizeof (struct machine_function));
12780 f->use_fast_prologue_epilogue_nregs = -1;
12782 return f;
12785 /* Return a MEM corresponding to a stack slot with mode MODE.
12786 Allocate a new slot if necessary.
12788 The RTL for a function can have several slots available: N is
12789 which slot to use. */
12792 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
12794 struct stack_local_entry *s;
12796 gcc_assert (n < MAX_386_STACK_LOCALS);
12798 for (s = ix86_stack_locals; s; s = s->next)
12799 if (s->mode == mode && s->n == n)
12800 return s->rtl;
12802 s = (struct stack_local_entry *)
12803 ggc_alloc (sizeof (struct stack_local_entry));
12804 s->n = n;
12805 s->mode = mode;
12806 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
12808 s->next = ix86_stack_locals;
12809 ix86_stack_locals = s;
12810 return s->rtl;
12813 /* Construct the SYMBOL_REF for the tls_get_addr function. */
12815 static GTY(()) rtx ix86_tls_symbol;
12817 ix86_tls_get_addr (void)
12820 if (!ix86_tls_symbol)
12822 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
12823 (TARGET_GNU_TLS && !TARGET_64BIT)
12824 ? "___tls_get_addr"
12825 : "__tls_get_addr");
12828 return ix86_tls_symbol;
12831 /* Calculate the length of the memory address in the instruction
12832 encoding. Does not include the one-byte modrm, opcode, or prefix. */
12835 memory_address_length (rtx addr)
12837 struct ix86_address parts;
12838 rtx base, index, disp;
12839 int len;
12840 int ok;
12842 if (GET_CODE (addr) == PRE_DEC
12843 || GET_CODE (addr) == POST_INC
12844 || GET_CODE (addr) == PRE_MODIFY
12845 || GET_CODE (addr) == POST_MODIFY)
12846 return 0;
12848 ok = ix86_decompose_address (addr, &parts);
12849 gcc_assert (ok);
12851 if (parts.base && GET_CODE (parts.base) == SUBREG)
12852 parts.base = SUBREG_REG (parts.base);
12853 if (parts.index && GET_CODE (parts.index) == SUBREG)
12854 parts.index = SUBREG_REG (parts.index);
12856 base = parts.base;
12857 index = parts.index;
12858 disp = parts.disp;
12859 len = 0;
12861 /* Rule of thumb:
12862 - esp as the base always wants an index,
12863 - ebp as the base always wants a displacement. */
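/* e.g. (%esp) cannot be encoded without a SIB byte, and mod=00 with
   (%ebp) means disp32, so plain (%ebp) must be emitted as 0(%ebp) with a
   one-byte displacement; either way costs one extra byte.  */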
12865 /* Register Indirect. */
12866 if (base && !index && !disp)
12868 /* esp (for its index) and ebp (for its displacement) need
12869 the two-byte modrm form. */
12870 if (addr == stack_pointer_rtx
12871 || addr == arg_pointer_rtx
12872 || addr == frame_pointer_rtx
12873 || addr == hard_frame_pointer_rtx)
12874 len = 1;
12877 /* Direct Addressing. */
12878 else if (disp && !base && !index)
12879 len = 4;
12881 else
12883 /* Find the length of the displacement constant. */
12884 if (disp)
12886 if (GET_CODE (disp) == CONST_INT
12887 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
12888 && base)
12889 len = 1;
12890 else
12891 len = 4;
12893 /* ebp always wants a displacement. */
12894 else if (base == hard_frame_pointer_rtx)
12895 len = 1;
12897 /* An index requires the two-byte modrm form.... */
12898 if (index
12899 /* ...like esp, which always wants an index. */
12900 || base == stack_pointer_rtx
12901 || base == arg_pointer_rtx
12902 || base == frame_pointer_rtx)
12903 len += 1;
12906 return len;
12909 /* Compute default value for "length_immediate" attribute. When SHORTFORM
12910 is set, expect that the insn has an 8-bit immediate alternative. */
12912 ix86_attr_length_immediate_default (rtx insn, int shortform)
12914 int len = 0;
12915 int i;
12916 extract_insn_cached (insn);
12917 for (i = recog_data.n_operands - 1; i >= 0; --i)
12918 if (CONSTANT_P (recog_data.operand[i]))
12920 gcc_assert (!len);
12921 if (shortform
12922 && GET_CODE (recog_data.operand[i]) == CONST_INT
12923 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
12924 len = 1;
12925 else
12927 switch (get_attr_mode (insn))
12929 case MODE_QI:
12930 len+=1;
12931 break;
12932 case MODE_HI:
12933 len+=2;
12934 break;
12935 case MODE_SI:
12936 len+=4;
12937 break;
12938 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
12939 case MODE_DI:
12940 len+=4;
12941 break;
12942 default:
12943 fatal_insn ("unknown insn mode", insn);
12947 return len;
12949 /* Compute default value for "length_address" attribute. */
12951 ix86_attr_length_address_default (rtx insn)
12953 int i;
12955 if (get_attr_type (insn) == TYPE_LEA)
12957 rtx set = PATTERN (insn);
12959 if (GET_CODE (set) == PARALLEL)
12960 set = XVECEXP (set, 0, 0);
12962 gcc_assert (GET_CODE (set) == SET);
12964 return memory_address_length (SET_SRC (set));
12967 extract_insn_cached (insn);
12968 for (i = recog_data.n_operands - 1; i >= 0; --i)
12969 if (GET_CODE (recog_data.operand[i]) == MEM)
12971 return memory_address_length (XEXP (recog_data.operand[i], 0));
12972 break;
12974 return 0;
12977 /* Return the maximum number of instructions a cpu can issue. */
12979 static int
12980 ix86_issue_rate (void)
12982 switch (ix86_tune)
12984 case PROCESSOR_PENTIUM:
12985 case PROCESSOR_K6:
12986 return 2;
12988 case PROCESSOR_PENTIUMPRO:
12989 case PROCESSOR_PENTIUM4:
12990 case PROCESSOR_ATHLON:
12991 case PROCESSOR_K8:
12992 case PROCESSOR_NOCONA:
12993 return 3;
12995 default:
12996 return 1;
13000 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
13001 by DEP_INSN and nothing else set by DEP_INSN. */
13003 static int
13004 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13006 rtx set, set2;
13008 /* Simplify the test for uninteresting insns. */
13009 if (insn_type != TYPE_SETCC
13010 && insn_type != TYPE_ICMOV
13011 && insn_type != TYPE_FCMOV
13012 && insn_type != TYPE_IBR)
13013 return 0;
13015 if ((set = single_set (dep_insn)) != 0)
13017 set = SET_DEST (set);
13018 set2 = NULL_RTX;
13020 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
13021 && XVECLEN (PATTERN (dep_insn), 0) == 2
13022 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
13023 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
13025 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
13026 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
13028 else
13029 return 0;
13031 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
13032 return 0;
13034 /* This test is true if the dependent insn reads the flags but
13035 not any other potentially set register. */
13036 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
13037 return 0;
13039 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
13040 return 0;
13042 return 1;
13045 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
13046 address with operands set by DEP_INSN. */
13048 static int
13049 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
13051 rtx addr;
13053 if (insn_type == TYPE_LEA
13054 && TARGET_PENTIUM)
13056 addr = PATTERN (insn);
13058 if (GET_CODE (addr) == PARALLEL)
13059 addr = XVECEXP (addr, 0, 0);
13061 gcc_assert (GET_CODE (addr) == SET);
13063 addr = SET_SRC (addr);
13065 else
13067 int i;
13068 extract_insn_cached (insn);
13069 for (i = recog_data.n_operands - 1; i >= 0; --i)
13070 if (GET_CODE (recog_data.operand[i]) == MEM)
13072 addr = XEXP (recog_data.operand[i], 0);
13073 goto found;
13075 return 0;
13076 found:;
13079 return modified_in_p (addr, dep_insn);
13082 static int
13083 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
13085 enum attr_type insn_type, dep_insn_type;
13086 enum attr_memory memory;
13087 rtx set, set2;
13088 int dep_insn_code_number;
13090 /* Anti and output dependencies have zero cost on all CPUs. */
13091 if (REG_NOTE_KIND (link) != 0)
13092 return 0;
13094 dep_insn_code_number = recog_memoized (dep_insn);
13096 /* If we can't recognize the insns, we can't really do anything. */
13097 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
13098 return cost;
13100 insn_type = get_attr_type (insn);
13101 dep_insn_type = get_attr_type (dep_insn);
13103 switch (ix86_tune)
13105 case PROCESSOR_PENTIUM:
13106 /* Address Generation Interlock adds a cycle of latency. */
13107 if (ix86_agi_dependant (insn, dep_insn, insn_type))
13108 cost += 1;
13110 /* ??? Compares pair with jump/setcc. */
13111 if (ix86_flags_dependant (insn, dep_insn, insn_type))
13112 cost = 0;
13114 /* Floating point stores require value to be ready one cycle earlier. */
13115 if (insn_type == TYPE_FMOV
13116 && get_attr_memory (insn) == MEMORY_STORE
13117 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13118 cost += 1;
13119 break;
13121 case PROCESSOR_PENTIUMPRO:
13122 memory = get_attr_memory (insn);
13124 /* INT->FP conversion is expensive. */
13125 if (get_attr_fp_int_src (dep_insn))
13126 cost += 5;
13128 /* There is one cycle extra latency between an FP op and a store. */
13129 if (insn_type == TYPE_FMOV
13130 && (set = single_set (dep_insn)) != NULL_RTX
13131 && (set2 = single_set (insn)) != NULL_RTX
13132 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
13133 && GET_CODE (SET_DEST (set2)) == MEM)
13134 cost += 1;
13136 /* Show ability of reorder buffer to hide latency of load by executing
13137 in parallel with previous instruction in case
13138 previous instruction is not needed to compute the address. */
13139 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13140 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13142 /* Claim moves to take one cycle, as the core can issue one load
13143 at a time and the next load can start a cycle later. */
13144 if (dep_insn_type == TYPE_IMOV
13145 || dep_insn_type == TYPE_FMOV)
13146 cost = 1;
13147 else if (cost > 1)
13148 cost--;
13150 break;
13152 case PROCESSOR_K6:
13153 memory = get_attr_memory (insn);
13155 /* The esp dependency is resolved before the instruction is really
13156 finished. */
13157 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
13158 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
13159 return 1;
13161 /* INT->FP conversion is expensive. */
13162 if (get_attr_fp_int_src (dep_insn))
13163 cost += 5;
13165 /* Show ability of reorder buffer to hide latency of load by executing
13166 in parallel with previous instruction in case
13167 previous instruction is not needed to compute the address. */
13168 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13169 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13171 /* Claim moves to take one cycle, as the core can issue one load
13172 at a time and the next load can start a cycle later. */
13173 if (dep_insn_type == TYPE_IMOV
13174 || dep_insn_type == TYPE_FMOV)
13175 cost = 1;
13176 else if (cost > 2)
13177 cost -= 2;
13178 else
13179 cost = 1;
13181 break;
13183 case PROCESSOR_ATHLON:
13184 case PROCESSOR_K8:
13185 memory = get_attr_memory (insn);
13187 /* Show ability of reorder buffer to hide latency of load by executing
13188 in parallel with previous instruction in case
13189 previous instruction is not needed to compute the address. */
13190 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
13191 && !ix86_agi_dependant (insn, dep_insn, insn_type))
13193 enum attr_unit unit = get_attr_unit (insn);
13194 int loadcost = 3;
13196 /* Because of the difference between the length of integer and
13197 floating unit pipeline preparation stages, the memory operands
13198 for floating point are cheaper.
13200 ??? For Athlon the difference is most probably 2. */
13201 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
13202 loadcost = 3;
13203 else
13204 loadcost = TARGET_ATHLON ? 2 : 0;
13206 if (cost >= loadcost)
13207 cost -= loadcost;
13208 else
13209 cost = 0;
13212 default:
13213 break;
13216 return cost;
13219 /* How many alternative schedules to try. This should be as wide as the
13220 scheduling freedom in the DFA, but no wider. Making this value too
13221 large results in extra work for the scheduler. */
13223 static int
13224 ia32_multipass_dfa_lookahead (void)
13226 if (ix86_tune == PROCESSOR_PENTIUM)
13227 return 2;
13229 if (ix86_tune == PROCESSOR_PENTIUMPRO
13230 || ix86_tune == PROCESSOR_K6)
13231 return 1;
13233 else
13234 return 0;
13238 /* Compute the alignment given to a constant that is being placed in memory.
13239 EXP is the constant and ALIGN is the alignment that the object would
13240 ordinarily have.
13241 The value of this function is used instead of that alignment to align
13242 the object. */
13245 ix86_constant_alignment (tree exp, int align)
13247 if (TREE_CODE (exp) == REAL_CST)
13249 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
13250 return 64;
13251 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
13252 return 128;
13254 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
13255 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
13256 return BITS_PER_WORD;
13258 return align;
13261 /* Compute the alignment for a static variable.
13262 TYPE is the data type, and ALIGN is the alignment that
13263 the object would ordinarily have. The value of this function is used
13264 instead of that alignment to align the object. */
13267 ix86_data_alignment (tree type, int align)
13269 if (AGGREGATE_TYPE_P (type)
13270 && TYPE_SIZE (type)
13271 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13272 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
13273 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
13274 return 256;
13276 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
13277 to a 16-byte boundary. */
13278 if (TARGET_64BIT)
13280 if (AGGREGATE_TYPE_P (type)
13281 && TYPE_SIZE (type)
13282 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13283 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
13284 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13285 return 128;
13288 if (TREE_CODE (type) == ARRAY_TYPE)
13290 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13291 return 64;
13292 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13293 return 128;
13295 else if (TREE_CODE (type) == COMPLEX_TYPE)
13298 if (TYPE_MODE (type) == DCmode && align < 64)
13299 return 64;
13300 if (TYPE_MODE (type) == XCmode && align < 128)
13301 return 128;
13303 else if ((TREE_CODE (type) == RECORD_TYPE
13304 || TREE_CODE (type) == UNION_TYPE
13305 || TREE_CODE (type) == QUAL_UNION_TYPE)
13306 && TYPE_FIELDS (type))
13308 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13309 return 64;
13310 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13311 return 128;
13313 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13314 || TREE_CODE (type) == INTEGER_TYPE)
13316 if (TYPE_MODE (type) == DFmode && align < 64)
13317 return 64;
13318 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13319 return 128;
13322 return align;
13325 /* Compute the alignment for a local variable.
13326 TYPE is the data type, and ALIGN is the alignment that
13327 the object would ordinarily have. The value of this macro is used
13328 instead of that alignment to align the object. */
13331 ix86_local_alignment (tree type, int align)
13333 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
13334 to a 16-byte boundary. */
13335 if (TARGET_64BIT)
13337 if (AGGREGATE_TYPE_P (type)
13338 && TYPE_SIZE (type)
13339 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
13340 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
13341 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
13342 return 128;
13344 if (TREE_CODE (type) == ARRAY_TYPE)
13346 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
13347 return 64;
13348 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
13349 return 128;
13351 else if (TREE_CODE (type) == COMPLEX_TYPE)
13353 if (TYPE_MODE (type) == DCmode && align < 64)
13354 return 64;
13355 if (TYPE_MODE (type) == XCmode && align < 128)
13356 return 128;
13358 else if ((TREE_CODE (type) == RECORD_TYPE
13359 || TREE_CODE (type) == UNION_TYPE
13360 || TREE_CODE (type) == QUAL_UNION_TYPE)
13361 && TYPE_FIELDS (type))
13363 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
13364 return 64;
13365 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
13366 return 128;
13368 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
13369 || TREE_CODE (type) == INTEGER_TYPE)
13372 if (TYPE_MODE (type) == DFmode && align < 64)
13373 return 64;
13374 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
13375 return 128;
13377 return align;
13380 /* Emit RTL insns to initialize the variable parts of a trampoline.
13381 FNADDR is an RTX for the address of the function's pure code.
13382 CXT is an RTX for the static chain value for the function. */
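/* The 32-bit trampoline built below is, byte for byte,
   b9 <cxt:4>      movl $CXT, %ecx
   e9 <rel32:4>    jmp FNADDR        (rel32 = FNADDR - (TRAMP + 10))
   and the 64-bit one is roughly
   49 bb <imm64>   movabs $FNADDR, %r11   (41 bb <imm32> when FNADDR fits)
   49 ba <imm64>   movabs $CXT, %r10
   49 ff e3        jmp *%r11  */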
13383 void
13384 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
13386 if (!TARGET_64BIT)
13388 /* Compute offset from the end of the jmp to the target function. */
13389 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
13390 plus_constant (tramp, 10),
13391 NULL_RTX, 1, OPTAB_DIRECT);
13392 emit_move_insn (gen_rtx_MEM (QImode, tramp),
13393 gen_int_mode (0xb9, QImode));
13394 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
13395 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
13396 gen_int_mode (0xe9, QImode));
13397 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
13399 else
13401 int offset = 0;
13402 /* Try to load address using shorter movl instead of movabs.
13403 We may want to support movq for kernel mode, but the kernel does not use
13404 trampolines at the moment. */
13405 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
13407 fnaddr = copy_to_mode_reg (DImode, fnaddr);
13408 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13409 gen_int_mode (0xbb41, HImode));
13410 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
13411 gen_lowpart (SImode, fnaddr));
13412 offset += 6;
13414 else
13416 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13417 gen_int_mode (0xbb49, HImode));
13418 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13419 fnaddr);
13420 offset += 10;
13422 /* Load static chain using movabs to r10. */
13423 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13424 gen_int_mode (0xba49, HImode));
13425 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
13426 cxt);
13427 offset += 10;
13428 /* Jump to r11. */
13429 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
13430 gen_int_mode (0xff49, HImode));
13431 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
13432 gen_int_mode (0xe3, QImode));
13433 offset += 3;
13434 gcc_assert (offset <= TRAMPOLINE_SIZE);
13437 #ifdef ENABLE_EXECUTE_STACK
13438 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
13439 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
13440 #endif
13443 /* Codes for all the SSE/MMX builtins. */
13444 enum ix86_builtins
13446 IX86_BUILTIN_ADDPS,
13447 IX86_BUILTIN_ADDSS,
13448 IX86_BUILTIN_DIVPS,
13449 IX86_BUILTIN_DIVSS,
13450 IX86_BUILTIN_MULPS,
13451 IX86_BUILTIN_MULSS,
13452 IX86_BUILTIN_SUBPS,
13453 IX86_BUILTIN_SUBSS,
13455 IX86_BUILTIN_CMPEQPS,
13456 IX86_BUILTIN_CMPLTPS,
13457 IX86_BUILTIN_CMPLEPS,
13458 IX86_BUILTIN_CMPGTPS,
13459 IX86_BUILTIN_CMPGEPS,
13460 IX86_BUILTIN_CMPNEQPS,
13461 IX86_BUILTIN_CMPNLTPS,
13462 IX86_BUILTIN_CMPNLEPS,
13463 IX86_BUILTIN_CMPNGTPS,
13464 IX86_BUILTIN_CMPNGEPS,
13465 IX86_BUILTIN_CMPORDPS,
13466 IX86_BUILTIN_CMPUNORDPS,
13467 IX86_BUILTIN_CMPNEPS,
13468 IX86_BUILTIN_CMPEQSS,
13469 IX86_BUILTIN_CMPLTSS,
13470 IX86_BUILTIN_CMPLESS,
13471 IX86_BUILTIN_CMPNEQSS,
13472 IX86_BUILTIN_CMPNLTSS,
13473 IX86_BUILTIN_CMPNLESS,
13474 IX86_BUILTIN_CMPNGTSS,
13475 IX86_BUILTIN_CMPNGESS,
13476 IX86_BUILTIN_CMPORDSS,
13477 IX86_BUILTIN_CMPUNORDSS,
13478 IX86_BUILTIN_CMPNESS,
13480 IX86_BUILTIN_COMIEQSS,
13481 IX86_BUILTIN_COMILTSS,
13482 IX86_BUILTIN_COMILESS,
13483 IX86_BUILTIN_COMIGTSS,
13484 IX86_BUILTIN_COMIGESS,
13485 IX86_BUILTIN_COMINEQSS,
13486 IX86_BUILTIN_UCOMIEQSS,
13487 IX86_BUILTIN_UCOMILTSS,
13488 IX86_BUILTIN_UCOMILESS,
13489 IX86_BUILTIN_UCOMIGTSS,
13490 IX86_BUILTIN_UCOMIGESS,
13491 IX86_BUILTIN_UCOMINEQSS,
13493 IX86_BUILTIN_CVTPI2PS,
13494 IX86_BUILTIN_CVTPS2PI,
13495 IX86_BUILTIN_CVTSI2SS,
13496 IX86_BUILTIN_CVTSI642SS,
13497 IX86_BUILTIN_CVTSS2SI,
13498 IX86_BUILTIN_CVTSS2SI64,
13499 IX86_BUILTIN_CVTTPS2PI,
13500 IX86_BUILTIN_CVTTSS2SI,
13501 IX86_BUILTIN_CVTTSS2SI64,
13503 IX86_BUILTIN_MAXPS,
13504 IX86_BUILTIN_MAXSS,
13505 IX86_BUILTIN_MINPS,
13506 IX86_BUILTIN_MINSS,
13508 IX86_BUILTIN_LOADUPS,
13509 IX86_BUILTIN_STOREUPS,
13510 IX86_BUILTIN_MOVSS,
13512 IX86_BUILTIN_MOVHLPS,
13513 IX86_BUILTIN_MOVLHPS,
13514 IX86_BUILTIN_LOADHPS,
13515 IX86_BUILTIN_LOADLPS,
13516 IX86_BUILTIN_STOREHPS,
13517 IX86_BUILTIN_STORELPS,
13519 IX86_BUILTIN_MASKMOVQ,
13520 IX86_BUILTIN_MOVMSKPS,
13521 IX86_BUILTIN_PMOVMSKB,
13523 IX86_BUILTIN_MOVNTPS,
13524 IX86_BUILTIN_MOVNTQ,
13526 IX86_BUILTIN_LOADDQU,
13527 IX86_BUILTIN_STOREDQU,
13529 IX86_BUILTIN_PACKSSWB,
13530 IX86_BUILTIN_PACKSSDW,
13531 IX86_BUILTIN_PACKUSWB,
13533 IX86_BUILTIN_PADDB,
13534 IX86_BUILTIN_PADDW,
13535 IX86_BUILTIN_PADDD,
13536 IX86_BUILTIN_PADDQ,
13537 IX86_BUILTIN_PADDSB,
13538 IX86_BUILTIN_PADDSW,
13539 IX86_BUILTIN_PADDUSB,
13540 IX86_BUILTIN_PADDUSW,
13541 IX86_BUILTIN_PSUBB,
13542 IX86_BUILTIN_PSUBW,
13543 IX86_BUILTIN_PSUBD,
13544 IX86_BUILTIN_PSUBQ,
13545 IX86_BUILTIN_PSUBSB,
13546 IX86_BUILTIN_PSUBSW,
13547 IX86_BUILTIN_PSUBUSB,
13548 IX86_BUILTIN_PSUBUSW,
13550 IX86_BUILTIN_PAND,
13551 IX86_BUILTIN_PANDN,
13552 IX86_BUILTIN_POR,
13553 IX86_BUILTIN_PXOR,
13555 IX86_BUILTIN_PAVGB,
13556 IX86_BUILTIN_PAVGW,
13558 IX86_BUILTIN_PCMPEQB,
13559 IX86_BUILTIN_PCMPEQW,
13560 IX86_BUILTIN_PCMPEQD,
13561 IX86_BUILTIN_PCMPGTB,
13562 IX86_BUILTIN_PCMPGTW,
13563 IX86_BUILTIN_PCMPGTD,
13565 IX86_BUILTIN_PMADDWD,
13567 IX86_BUILTIN_PMAXSW,
13568 IX86_BUILTIN_PMAXUB,
13569 IX86_BUILTIN_PMINSW,
13570 IX86_BUILTIN_PMINUB,
13572 IX86_BUILTIN_PMULHUW,
13573 IX86_BUILTIN_PMULHW,
13574 IX86_BUILTIN_PMULLW,
13576 IX86_BUILTIN_PSADBW,
13577 IX86_BUILTIN_PSHUFW,
13579 IX86_BUILTIN_PSLLW,
13580 IX86_BUILTIN_PSLLD,
13581 IX86_BUILTIN_PSLLQ,
13582 IX86_BUILTIN_PSRAW,
13583 IX86_BUILTIN_PSRAD,
13584 IX86_BUILTIN_PSRLW,
13585 IX86_BUILTIN_PSRLD,
13586 IX86_BUILTIN_PSRLQ,
13587 IX86_BUILTIN_PSLLWI,
13588 IX86_BUILTIN_PSLLDI,
13589 IX86_BUILTIN_PSLLQI,
13590 IX86_BUILTIN_PSRAWI,
13591 IX86_BUILTIN_PSRADI,
13592 IX86_BUILTIN_PSRLWI,
13593 IX86_BUILTIN_PSRLDI,
13594 IX86_BUILTIN_PSRLQI,
13596 IX86_BUILTIN_PUNPCKHBW,
13597 IX86_BUILTIN_PUNPCKHWD,
13598 IX86_BUILTIN_PUNPCKHDQ,
13599 IX86_BUILTIN_PUNPCKLBW,
13600 IX86_BUILTIN_PUNPCKLWD,
13601 IX86_BUILTIN_PUNPCKLDQ,
13603 IX86_BUILTIN_SHUFPS,
13605 IX86_BUILTIN_RCPPS,
13606 IX86_BUILTIN_RCPSS,
13607 IX86_BUILTIN_RSQRTPS,
13608 IX86_BUILTIN_RSQRTSS,
13609 IX86_BUILTIN_SQRTPS,
13610 IX86_BUILTIN_SQRTSS,
13612 IX86_BUILTIN_UNPCKHPS,
13613 IX86_BUILTIN_UNPCKLPS,
13615 IX86_BUILTIN_ANDPS,
13616 IX86_BUILTIN_ANDNPS,
13617 IX86_BUILTIN_ORPS,
13618 IX86_BUILTIN_XORPS,
13620 IX86_BUILTIN_EMMS,
13621 IX86_BUILTIN_LDMXCSR,
13622 IX86_BUILTIN_STMXCSR,
13623 IX86_BUILTIN_SFENCE,
13625 /* 3DNow! Original */
13626 IX86_BUILTIN_FEMMS,
13627 IX86_BUILTIN_PAVGUSB,
13628 IX86_BUILTIN_PF2ID,
13629 IX86_BUILTIN_PFACC,
13630 IX86_BUILTIN_PFADD,
13631 IX86_BUILTIN_PFCMPEQ,
13632 IX86_BUILTIN_PFCMPGE,
13633 IX86_BUILTIN_PFCMPGT,
13634 IX86_BUILTIN_PFMAX,
13635 IX86_BUILTIN_PFMIN,
13636 IX86_BUILTIN_PFMUL,
13637 IX86_BUILTIN_PFRCP,
13638 IX86_BUILTIN_PFRCPIT1,
13639 IX86_BUILTIN_PFRCPIT2,
13640 IX86_BUILTIN_PFRSQIT1,
13641 IX86_BUILTIN_PFRSQRT,
13642 IX86_BUILTIN_PFSUB,
13643 IX86_BUILTIN_PFSUBR,
13644 IX86_BUILTIN_PI2FD,
13645 IX86_BUILTIN_PMULHRW,
13647 /* 3DNow! Athlon Extensions */
13648 IX86_BUILTIN_PF2IW,
13649 IX86_BUILTIN_PFNACC,
13650 IX86_BUILTIN_PFPNACC,
13651 IX86_BUILTIN_PI2FW,
13652 IX86_BUILTIN_PSWAPDSI,
13653 IX86_BUILTIN_PSWAPDSF,
13655 /* SSE2 */
13656 IX86_BUILTIN_ADDPD,
13657 IX86_BUILTIN_ADDSD,
13658 IX86_BUILTIN_DIVPD,
13659 IX86_BUILTIN_DIVSD,
13660 IX86_BUILTIN_MULPD,
13661 IX86_BUILTIN_MULSD,
13662 IX86_BUILTIN_SUBPD,
13663 IX86_BUILTIN_SUBSD,
13665 IX86_BUILTIN_CMPEQPD,
13666 IX86_BUILTIN_CMPLTPD,
13667 IX86_BUILTIN_CMPLEPD,
13668 IX86_BUILTIN_CMPGTPD,
13669 IX86_BUILTIN_CMPGEPD,
13670 IX86_BUILTIN_CMPNEQPD,
13671 IX86_BUILTIN_CMPNLTPD,
13672 IX86_BUILTIN_CMPNLEPD,
13673 IX86_BUILTIN_CMPNGTPD,
13674 IX86_BUILTIN_CMPNGEPD,
13675 IX86_BUILTIN_CMPORDPD,
13676 IX86_BUILTIN_CMPUNORDPD,
13677 IX86_BUILTIN_CMPNEPD,
13678 IX86_BUILTIN_CMPEQSD,
13679 IX86_BUILTIN_CMPLTSD,
13680 IX86_BUILTIN_CMPLESD,
13681 IX86_BUILTIN_CMPNEQSD,
13682 IX86_BUILTIN_CMPNLTSD,
13683 IX86_BUILTIN_CMPNLESD,
13684 IX86_BUILTIN_CMPORDSD,
13685 IX86_BUILTIN_CMPUNORDSD,
13686 IX86_BUILTIN_CMPNESD,
13688 IX86_BUILTIN_COMIEQSD,
13689 IX86_BUILTIN_COMILTSD,
13690 IX86_BUILTIN_COMILESD,
13691 IX86_BUILTIN_COMIGTSD,
13692 IX86_BUILTIN_COMIGESD,
13693 IX86_BUILTIN_COMINEQSD,
13694 IX86_BUILTIN_UCOMIEQSD,
13695 IX86_BUILTIN_UCOMILTSD,
13696 IX86_BUILTIN_UCOMILESD,
13697 IX86_BUILTIN_UCOMIGTSD,
13698 IX86_BUILTIN_UCOMIGESD,
13699 IX86_BUILTIN_UCOMINEQSD,
13701 IX86_BUILTIN_MAXPD,
13702 IX86_BUILTIN_MAXSD,
13703 IX86_BUILTIN_MINPD,
13704 IX86_BUILTIN_MINSD,
13706 IX86_BUILTIN_ANDPD,
13707 IX86_BUILTIN_ANDNPD,
13708 IX86_BUILTIN_ORPD,
13709 IX86_BUILTIN_XORPD,
13711 IX86_BUILTIN_SQRTPD,
13712 IX86_BUILTIN_SQRTSD,
13714 IX86_BUILTIN_UNPCKHPD,
13715 IX86_BUILTIN_UNPCKLPD,
13717 IX86_BUILTIN_SHUFPD,
13719 IX86_BUILTIN_LOADUPD,
13720 IX86_BUILTIN_STOREUPD,
13721 IX86_BUILTIN_MOVSD,
13723 IX86_BUILTIN_LOADHPD,
13724 IX86_BUILTIN_LOADLPD,
13726 IX86_BUILTIN_CVTDQ2PD,
13727 IX86_BUILTIN_CVTDQ2PS,
13729 IX86_BUILTIN_CVTPD2DQ,
13730 IX86_BUILTIN_CVTPD2PI,
13731 IX86_BUILTIN_CVTPD2PS,
13732 IX86_BUILTIN_CVTTPD2DQ,
13733 IX86_BUILTIN_CVTTPD2PI,
13735 IX86_BUILTIN_CVTPI2PD,
13736 IX86_BUILTIN_CVTSI2SD,
13737 IX86_BUILTIN_CVTSI642SD,
13739 IX86_BUILTIN_CVTSD2SI,
13740 IX86_BUILTIN_CVTSD2SI64,
13741 IX86_BUILTIN_CVTSD2SS,
13742 IX86_BUILTIN_CVTSS2SD,
13743 IX86_BUILTIN_CVTTSD2SI,
13744 IX86_BUILTIN_CVTTSD2SI64,
13746 IX86_BUILTIN_CVTPS2DQ,
13747 IX86_BUILTIN_CVTPS2PD,
13748 IX86_BUILTIN_CVTTPS2DQ,
13750 IX86_BUILTIN_MOVNTI,
13751 IX86_BUILTIN_MOVNTPD,
13752 IX86_BUILTIN_MOVNTDQ,
13754 /* SSE2 MMX */
13755 IX86_BUILTIN_MASKMOVDQU,
13756 IX86_BUILTIN_MOVMSKPD,
13757 IX86_BUILTIN_PMOVMSKB128,
13759 IX86_BUILTIN_PACKSSWB128,
13760 IX86_BUILTIN_PACKSSDW128,
13761 IX86_BUILTIN_PACKUSWB128,
13763 IX86_BUILTIN_PADDB128,
13764 IX86_BUILTIN_PADDW128,
13765 IX86_BUILTIN_PADDD128,
13766 IX86_BUILTIN_PADDQ128,
13767 IX86_BUILTIN_PADDSB128,
13768 IX86_BUILTIN_PADDSW128,
13769 IX86_BUILTIN_PADDUSB128,
13770 IX86_BUILTIN_PADDUSW128,
13771 IX86_BUILTIN_PSUBB128,
13772 IX86_BUILTIN_PSUBW128,
13773 IX86_BUILTIN_PSUBD128,
13774 IX86_BUILTIN_PSUBQ128,
13775 IX86_BUILTIN_PSUBSB128,
13776 IX86_BUILTIN_PSUBSW128,
13777 IX86_BUILTIN_PSUBUSB128,
13778 IX86_BUILTIN_PSUBUSW128,
13780 IX86_BUILTIN_PAND128,
13781 IX86_BUILTIN_PANDN128,
13782 IX86_BUILTIN_POR128,
13783 IX86_BUILTIN_PXOR128,
13785 IX86_BUILTIN_PAVGB128,
13786 IX86_BUILTIN_PAVGW128,
13788 IX86_BUILTIN_PCMPEQB128,
13789 IX86_BUILTIN_PCMPEQW128,
13790 IX86_BUILTIN_PCMPEQD128,
13791 IX86_BUILTIN_PCMPGTB128,
13792 IX86_BUILTIN_PCMPGTW128,
13793 IX86_BUILTIN_PCMPGTD128,
13795 IX86_BUILTIN_PMADDWD128,
13797 IX86_BUILTIN_PMAXSW128,
13798 IX86_BUILTIN_PMAXUB128,
13799 IX86_BUILTIN_PMINSW128,
13800 IX86_BUILTIN_PMINUB128,
13802 IX86_BUILTIN_PMULUDQ,
13803 IX86_BUILTIN_PMULUDQ128,
13804 IX86_BUILTIN_PMULHUW128,
13805 IX86_BUILTIN_PMULHW128,
13806 IX86_BUILTIN_PMULLW128,
13808 IX86_BUILTIN_PSADBW128,
13809 IX86_BUILTIN_PSHUFHW,
13810 IX86_BUILTIN_PSHUFLW,
13811 IX86_BUILTIN_PSHUFD,
13813 IX86_BUILTIN_PSLLW128,
13814 IX86_BUILTIN_PSLLD128,
13815 IX86_BUILTIN_PSLLQ128,
13816 IX86_BUILTIN_PSRAW128,
13817 IX86_BUILTIN_PSRAD128,
13818 IX86_BUILTIN_PSRLW128,
13819 IX86_BUILTIN_PSRLD128,
13820 IX86_BUILTIN_PSRLQ128,
13821 IX86_BUILTIN_PSLLDQI128,
13822 IX86_BUILTIN_PSLLWI128,
13823 IX86_BUILTIN_PSLLDI128,
13824 IX86_BUILTIN_PSLLQI128,
13825 IX86_BUILTIN_PSRAWI128,
13826 IX86_BUILTIN_PSRADI128,
13827 IX86_BUILTIN_PSRLDQI128,
13828 IX86_BUILTIN_PSRLWI128,
13829 IX86_BUILTIN_PSRLDI128,
13830 IX86_BUILTIN_PSRLQI128,
13832 IX86_BUILTIN_PUNPCKHBW128,
13833 IX86_BUILTIN_PUNPCKHWD128,
13834 IX86_BUILTIN_PUNPCKHDQ128,
13835 IX86_BUILTIN_PUNPCKHQDQ128,
13836 IX86_BUILTIN_PUNPCKLBW128,
13837 IX86_BUILTIN_PUNPCKLWD128,
13838 IX86_BUILTIN_PUNPCKLDQ128,
13839 IX86_BUILTIN_PUNPCKLQDQ128,
13841 IX86_BUILTIN_CLFLUSH,
13842 IX86_BUILTIN_MFENCE,
13843 IX86_BUILTIN_LFENCE,
13845 /* Prescott New Instructions. */
13846 IX86_BUILTIN_ADDSUBPS,
13847 IX86_BUILTIN_HADDPS,
13848 IX86_BUILTIN_HSUBPS,
13849 IX86_BUILTIN_MOVSHDUP,
13850 IX86_BUILTIN_MOVSLDUP,
13851 IX86_BUILTIN_ADDSUBPD,
13852 IX86_BUILTIN_HADDPD,
13853 IX86_BUILTIN_HSUBPD,
13854 IX86_BUILTIN_LDDQU,
13856 IX86_BUILTIN_MONITOR,
13857 IX86_BUILTIN_MWAIT,
13859 IX86_BUILTIN_VEC_INIT_V2SI,
13860 IX86_BUILTIN_VEC_INIT_V4HI,
13861 IX86_BUILTIN_VEC_INIT_V8QI,
13862 IX86_BUILTIN_VEC_EXT_V2DF,
13863 IX86_BUILTIN_VEC_EXT_V2DI,
13864 IX86_BUILTIN_VEC_EXT_V4SF,
13865 IX86_BUILTIN_VEC_EXT_V4SI,
13866 IX86_BUILTIN_VEC_EXT_V8HI,
13867 IX86_BUILTIN_VEC_EXT_V2SI,
13868 IX86_BUILTIN_VEC_EXT_V4HI,
13869 IX86_BUILTIN_VEC_SET_V8HI,
13870 IX86_BUILTIN_VEC_SET_V4HI,
13872 IX86_BUILTIN_MAX
13875 #define def_builtin(MASK, NAME, TYPE, CODE) \
13876 do { \
13877 if ((MASK) & target_flags \
13878 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
13879 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
13880 NULL, NULL_TREE); \
13881 } while (0)
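/* (Editor's note, not in the original source.)  The masking above means a
   call such as
     def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr",
                  void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
   in ix86_init_mmx_sse_builtins below only registers the builtin when
   MASK_SSE is set in target_flags, and builtins whose mask includes
   MASK_64BIT are additionally gated on TARGET_64BIT.  */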
13883 /* Bits for builtin_description.flag. */
13885 /* Set when we don't support the comparison natively, and should
13886 swap the comparison operands in order to support it. */
13887 #define BUILTIN_DESC_SWAP_OPERANDS 1
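/* (Added example.)  "__builtin_ia32_cmpgtps" in bdesc_2arg below is
   described with comparison code LT plus BUILTIN_DESC_SWAP_OPERANDS:
   a > b has no native SSE compare, so it is emitted as b < a with the
   two operands exchanged.  */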
13889 struct builtin_description
13891 const unsigned int mask;
13892 const enum insn_code icode;
13893 const char *const name;
13894 const enum ix86_builtins code;
13895 const enum rtx_code comparison;
13896 const unsigned int flag;
13899 static const struct builtin_description bdesc_comi[] =
13901 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
13902 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
13903 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
13904 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
13905 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
13906 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
13907 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
13908 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
13909 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
13910 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
13911 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
13912 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
13913 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
13914 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
13915 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
13916 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
13917 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
13918 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
13919 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
13920 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
13921 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
13922 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
13923 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
13924 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
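/* (Added note.)  Each bdesc_comi row ties a comi/ucomi insn pattern to
   the rtx comparison code used when it is expanded; the MASK_SSE2 rows
   operate on v2df operands and the MASK_SSE rows on v4sf operands, which
   is why the registration loop below picks int_ftype_v2df_v2df or
   int_ftype_v4sf_v4sf accordingly.  */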
13927 static const struct builtin_description bdesc_2arg[] =
13929 /* SSE */
13930 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
13931 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
13932 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
13933 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
13934 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
13935 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
13936 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
13937 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
13939 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
13940 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
13941 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
13942 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
13943 BUILTIN_DESC_SWAP_OPERANDS },
13944 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
13945 BUILTIN_DESC_SWAP_OPERANDS },
13946 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
13947 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
13948 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
13949 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
13950 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
13951 BUILTIN_DESC_SWAP_OPERANDS },
13952 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
13953 BUILTIN_DESC_SWAP_OPERANDS },
13954 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
13955 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
13956 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
13957 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
13958 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
13959 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
13960 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
13961 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
13962 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
13963 BUILTIN_DESC_SWAP_OPERANDS },
13964 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
13965 BUILTIN_DESC_SWAP_OPERANDS },
13966 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
13968 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
13969 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
13970 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
13971 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
13973 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
13974 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
13975 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
13976 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
13978 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
13979 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
13980 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
13981 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
13982 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
13984 /* MMX */
13985 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
13986 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
13987 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
13988 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
13989 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
13990 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
13991 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
13992 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
13994 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
13995 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
13996 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
13997 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
13998 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
13999 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
14000 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
14001 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
14003 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
14004 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
14005 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
14007 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
14008 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
14009 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
14010 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
14012 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
14013 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
14015 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
14016 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
14017 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
14018 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
14019 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
14020 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
14022 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
14023 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
14024 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
14025 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
14027 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
14028 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
14029 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
14030 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
14031 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
14032 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
14034 /* Special. */
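/* (Added note.)  Rows with a null name are skipped by the generic
   registration loop in ix86_init_mmx_sse_builtins (d->name == 0); their
   icodes are still used when the corresponding builtins, e.g.
   __builtin_ia32_psllw or __builtin_ia32_pmaddwd, are registered
   explicitly with more precise types further below.  */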
14035 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
14036 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
14037 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
14039 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
14040 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
14041 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
14043 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
14044 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
14045 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
14046 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
14047 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
14048 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
14050 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
14051 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
14052 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
14053 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
14054 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
14055 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
14057 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
14058 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
14059 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
14060 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
14062 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
14063 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
14065 /* SSE2 */
14066 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
14067 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
14068 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
14069 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
14070 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
14071 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
14072 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
14073 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
14075 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
14076 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
14077 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
14078 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
14079 BUILTIN_DESC_SWAP_OPERANDS },
14080 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
14081 BUILTIN_DESC_SWAP_OPERANDS },
14082 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
14083 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
14084 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
14085 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
14086 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
14087 BUILTIN_DESC_SWAP_OPERANDS },
14088 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
14089 BUILTIN_DESC_SWAP_OPERANDS },
14090 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
14091 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
14092 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
14093 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
14094 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
14095 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
14096 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
14097 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
14098 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
14100 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
14101 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
14102 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
14103 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
14105 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
14106 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
14107 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
14108 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
14110 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
14111 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
14112 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
14114 /* SSE2 MMX */
14115 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
14116 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
14117 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
14118 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
14119 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
14120 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
14121 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
14122 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
14124 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
14125 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
14126 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
14127 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
14128 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
14129 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
14130 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
14131 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
14133 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
14134 { MASK_SSE2, CODE_FOR_sse2_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
14136 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
14137 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
14138 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
14139 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
14141 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
14142 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
14144 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
14145 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
14146 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
14147 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
14148 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
14149 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
14151 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
14152 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
14153 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
14154 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
14156 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
14157 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
14158 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
14159 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
14160 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
14161 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
14162 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
14163 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
14165 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
14166 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
14167 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
14169 { MASK_SSE2, CODE_FOR_sse2_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
14170 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
14172 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
14173 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
14175 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
14176 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
14177 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
14179 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
14180 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
14181 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
14183 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
14184 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
14186 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
14188 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
14189 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
14190 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
14191 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
14193 /* SSE3 MMX */
14194 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
14195 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
14196 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
14197 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
14198 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
14199 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
14202 static const struct builtin_description bdesc_1arg[] =
14204 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
14205 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
14207 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
14208 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
14209 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
14211 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
14212 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
14213 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
14214 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
14215 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
14216 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
14218 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
14219 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
14221 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
14223 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
14224 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
14226 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
14227 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
14228 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
14229 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
14230 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
14232 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
14234 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
14235 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
14236 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
14237 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
14239 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
14240 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
14241 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
14243 /* SSE3 */
14244 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
14245 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
14248 static void
14249 ix86_init_builtins (void)
14251 if (TARGET_MMX)
14252 ix86_init_mmx_sse_builtins ();
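/* (Added note.)  TARGET_MMX only gates the call above; inside
   ix86_init_mmx_sse_builtins every def_builtin call still checks its own
   mask, so compiling with -mmmx but without -msse registers the MMX
   builtins and skips the SSE/SSE2/SSE3 ones.  */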
14255 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
14256 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
14257 builtins. */
14258 static void
14259 ix86_init_mmx_sse_builtins (void)
14261 const struct builtin_description * d;
14262 size_t i;
14264 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
14265 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
14266 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
14267 tree V2DI_type_node
14268 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
14269 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
14270 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
14271 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
14272 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
14273 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
14274 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
14276 tree pchar_type_node = build_pointer_type (char_type_node);
14277 tree pcchar_type_node = build_pointer_type (
14278 build_type_variant (char_type_node, 1, 0));
14279 tree pfloat_type_node = build_pointer_type (float_type_node);
14280 tree pcfloat_type_node = build_pointer_type (
14281 build_type_variant (float_type_node, 1, 0));
14282 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
14283 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
14284 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
14286 /* Comparisons. */
14287 tree int_ftype_v4sf_v4sf
14288 = build_function_type_list (integer_type_node,
14289 V4SF_type_node, V4SF_type_node, NULL_TREE);
14290 tree v4si_ftype_v4sf_v4sf
14291 = build_function_type_list (V4SI_type_node,
14292 V4SF_type_node, V4SF_type_node, NULL_TREE);
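/* (Added note.)  build_function_type_list constructs a FUNCTION_TYPE
   from a return type followed by a NULL_TREE-terminated argument list,
   so int_ftype_v4sf_v4sf above is "int (v4sf, v4sf)"; all of the
   signatures declared in this function are built the same way.  */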
14293 /* MMX/SSE/integer conversions. */
14294 tree int_ftype_v4sf
14295 = build_function_type_list (integer_type_node,
14296 V4SF_type_node, NULL_TREE);
14297 tree int64_ftype_v4sf
14298 = build_function_type_list (long_long_integer_type_node,
14299 V4SF_type_node, NULL_TREE);
14300 tree int_ftype_v8qi
14301 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
14302 tree v4sf_ftype_v4sf_int
14303 = build_function_type_list (V4SF_type_node,
14304 V4SF_type_node, integer_type_node, NULL_TREE);
14305 tree v4sf_ftype_v4sf_int64
14306 = build_function_type_list (V4SF_type_node,
14307 V4SF_type_node, long_long_integer_type_node,
14308 NULL_TREE);
14309 tree v4sf_ftype_v4sf_v2si
14310 = build_function_type_list (V4SF_type_node,
14311 V4SF_type_node, V2SI_type_node, NULL_TREE);
14313 /* Miscellaneous. */
14314 tree v8qi_ftype_v4hi_v4hi
14315 = build_function_type_list (V8QI_type_node,
14316 V4HI_type_node, V4HI_type_node, NULL_TREE);
14317 tree v4hi_ftype_v2si_v2si
14318 = build_function_type_list (V4HI_type_node,
14319 V2SI_type_node, V2SI_type_node, NULL_TREE);
14320 tree v4sf_ftype_v4sf_v4sf_int
14321 = build_function_type_list (V4SF_type_node,
14322 V4SF_type_node, V4SF_type_node,
14323 integer_type_node, NULL_TREE);
14324 tree v2si_ftype_v4hi_v4hi
14325 = build_function_type_list (V2SI_type_node,
14326 V4HI_type_node, V4HI_type_node, NULL_TREE);
14327 tree v4hi_ftype_v4hi_int
14328 = build_function_type_list (V4HI_type_node,
14329 V4HI_type_node, integer_type_node, NULL_TREE);
14330 tree v4hi_ftype_v4hi_di
14331 = build_function_type_list (V4HI_type_node,
14332 V4HI_type_node, long_long_unsigned_type_node,
14333 NULL_TREE);
14334 tree v2si_ftype_v2si_di
14335 = build_function_type_list (V2SI_type_node,
14336 V2SI_type_node, long_long_unsigned_type_node,
14337 NULL_TREE);
14338 tree void_ftype_void
14339 = build_function_type (void_type_node, void_list_node);
14340 tree void_ftype_unsigned
14341 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
14342 tree void_ftype_unsigned_unsigned
14343 = build_function_type_list (void_type_node, unsigned_type_node,
14344 unsigned_type_node, NULL_TREE);
14345 tree void_ftype_pcvoid_unsigned_unsigned
14346 = build_function_type_list (void_type_node, const_ptr_type_node,
14347 unsigned_type_node, unsigned_type_node,
14348 NULL_TREE);
14349 tree unsigned_ftype_void
14350 = build_function_type (unsigned_type_node, void_list_node);
14351 tree v2si_ftype_v4sf
14352 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
14353 /* Loads/stores. */
14354 tree void_ftype_v8qi_v8qi_pchar
14355 = build_function_type_list (void_type_node,
14356 V8QI_type_node, V8QI_type_node,
14357 pchar_type_node, NULL_TREE);
14358 tree v4sf_ftype_pcfloat
14359 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
14360 /* @@@ the type is bogus */
14361 tree v4sf_ftype_v4sf_pv2si
14362 = build_function_type_list (V4SF_type_node,
14363 V4SF_type_node, pv2si_type_node, NULL_TREE);
14364 tree void_ftype_pv2si_v4sf
14365 = build_function_type_list (void_type_node,
14366 pv2si_type_node, V4SF_type_node, NULL_TREE);
14367 tree void_ftype_pfloat_v4sf
14368 = build_function_type_list (void_type_node,
14369 pfloat_type_node, V4SF_type_node, NULL_TREE);
14370 tree void_ftype_pdi_di
14371 = build_function_type_list (void_type_node,
14372 pdi_type_node, long_long_unsigned_type_node,
14373 NULL_TREE);
14374 tree void_ftype_pv2di_v2di
14375 = build_function_type_list (void_type_node,
14376 pv2di_type_node, V2DI_type_node, NULL_TREE);
14377 /* Normal vector unops. */
14378 tree v4sf_ftype_v4sf
14379 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14381 /* Normal vector binops. */
14382 tree v4sf_ftype_v4sf_v4sf
14383 = build_function_type_list (V4SF_type_node,
14384 V4SF_type_node, V4SF_type_node, NULL_TREE);
14385 tree v8qi_ftype_v8qi_v8qi
14386 = build_function_type_list (V8QI_type_node,
14387 V8QI_type_node, V8QI_type_node, NULL_TREE);
14388 tree v4hi_ftype_v4hi_v4hi
14389 = build_function_type_list (V4HI_type_node,
14390 V4HI_type_node, V4HI_type_node, NULL_TREE);
14391 tree v2si_ftype_v2si_v2si
14392 = build_function_type_list (V2SI_type_node,
14393 V2SI_type_node, V2SI_type_node, NULL_TREE);
14394 tree di_ftype_di_di
14395 = build_function_type_list (long_long_unsigned_type_node,
14396 long_long_unsigned_type_node,
14397 long_long_unsigned_type_node, NULL_TREE);
14399 tree v2si_ftype_v2sf
14400 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
14401 tree v2sf_ftype_v2si
14402 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
14403 tree v2si_ftype_v2si
14404 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
14405 tree v2sf_ftype_v2sf
14406 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
14407 tree v2sf_ftype_v2sf_v2sf
14408 = build_function_type_list (V2SF_type_node,
14409 V2SF_type_node, V2SF_type_node, NULL_TREE);
14410 tree v2si_ftype_v2sf_v2sf
14411 = build_function_type_list (V2SI_type_node,
14412 V2SF_type_node, V2SF_type_node, NULL_TREE);
14413 tree pint_type_node = build_pointer_type (integer_type_node);
14414 tree pdouble_type_node = build_pointer_type (double_type_node);
14415 tree pcdouble_type_node = build_pointer_type (
14416 build_type_variant (double_type_node, 1, 0));
14417 tree int_ftype_v2df_v2df
14418 = build_function_type_list (integer_type_node,
14419 V2DF_type_node, V2DF_type_node, NULL_TREE);
14421 tree ti_ftype_ti_ti
14422 = build_function_type_list (intTI_type_node,
14423 intTI_type_node, intTI_type_node, NULL_TREE);
14424 tree void_ftype_pcvoid
14425 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
14426 tree v4sf_ftype_v4si
14427 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
14428 tree v4si_ftype_v4sf
14429 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
14430 tree v2df_ftype_v4si
14431 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
14432 tree v4si_ftype_v2df
14433 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
14434 tree v2si_ftype_v2df
14435 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
14436 tree v4sf_ftype_v2df
14437 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
14438 tree v2df_ftype_v2si
14439 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
14440 tree v2df_ftype_v4sf
14441 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
14442 tree int_ftype_v2df
14443 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
14444 tree int64_ftype_v2df
14445 = build_function_type_list (long_long_integer_type_node,
14446 V2DF_type_node, NULL_TREE);
14447 tree v2df_ftype_v2df_int
14448 = build_function_type_list (V2DF_type_node,
14449 V2DF_type_node, integer_type_node, NULL_TREE);
14450 tree v2df_ftype_v2df_int64
14451 = build_function_type_list (V2DF_type_node,
14452 V2DF_type_node, long_long_integer_type_node,
14453 NULL_TREE);
14454 tree v4sf_ftype_v4sf_v2df
14455 = build_function_type_list (V4SF_type_node,
14456 V4SF_type_node, V2DF_type_node, NULL_TREE);
14457 tree v2df_ftype_v2df_v4sf
14458 = build_function_type_list (V2DF_type_node,
14459 V2DF_type_node, V4SF_type_node, NULL_TREE);
14460 tree v2df_ftype_v2df_v2df_int
14461 = build_function_type_list (V2DF_type_node,
14462 V2DF_type_node, V2DF_type_node,
14463 integer_type_node,
14464 NULL_TREE);
14465 tree v2df_ftype_v2df_pcdouble
14466 = build_function_type_list (V2DF_type_node,
14467 V2DF_type_node, pcdouble_type_node, NULL_TREE);
14468 tree void_ftype_pdouble_v2df
14469 = build_function_type_list (void_type_node,
14470 pdouble_type_node, V2DF_type_node, NULL_TREE);
14471 tree void_ftype_pint_int
14472 = build_function_type_list (void_type_node,
14473 pint_type_node, integer_type_node, NULL_TREE);
14474 tree void_ftype_v16qi_v16qi_pchar
14475 = build_function_type_list (void_type_node,
14476 V16QI_type_node, V16QI_type_node,
14477 pchar_type_node, NULL_TREE);
14478 tree v2df_ftype_pcdouble
14479 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
14480 tree v2df_ftype_v2df_v2df
14481 = build_function_type_list (V2DF_type_node,
14482 V2DF_type_node, V2DF_type_node, NULL_TREE);
14483 tree v16qi_ftype_v16qi_v16qi
14484 = build_function_type_list (V16QI_type_node,
14485 V16QI_type_node, V16QI_type_node, NULL_TREE);
14486 tree v8hi_ftype_v8hi_v8hi
14487 = build_function_type_list (V8HI_type_node,
14488 V8HI_type_node, V8HI_type_node, NULL_TREE);
14489 tree v4si_ftype_v4si_v4si
14490 = build_function_type_list (V4SI_type_node,
14491 V4SI_type_node, V4SI_type_node, NULL_TREE);
14492 tree v2di_ftype_v2di_v2di
14493 = build_function_type_list (V2DI_type_node,
14494 V2DI_type_node, V2DI_type_node, NULL_TREE);
14495 tree v2di_ftype_v2df_v2df
14496 = build_function_type_list (V2DI_type_node,
14497 V2DF_type_node, V2DF_type_node, NULL_TREE);
14498 tree v2df_ftype_v2df
14499 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14500 tree v2di_ftype_v2di_int
14501 = build_function_type_list (V2DI_type_node,
14502 V2DI_type_node, integer_type_node, NULL_TREE);
14503 tree v4si_ftype_v4si_int
14504 = build_function_type_list (V4SI_type_node,
14505 V4SI_type_node, integer_type_node, NULL_TREE);
14506 tree v8hi_ftype_v8hi_int
14507 = build_function_type_list (V8HI_type_node,
14508 V8HI_type_node, integer_type_node, NULL_TREE);
14509 tree v8hi_ftype_v8hi_v2di
14510 = build_function_type_list (V8HI_type_node,
14511 V8HI_type_node, V2DI_type_node, NULL_TREE);
14512 tree v4si_ftype_v4si_v2di
14513 = build_function_type_list (V4SI_type_node,
14514 V4SI_type_node, V2DI_type_node, NULL_TREE);
14515 tree v4si_ftype_v8hi_v8hi
14516 = build_function_type_list (V4SI_type_node,
14517 V8HI_type_node, V8HI_type_node, NULL_TREE);
14518 tree di_ftype_v8qi_v8qi
14519 = build_function_type_list (long_long_unsigned_type_node,
14520 V8QI_type_node, V8QI_type_node, NULL_TREE);
14521 tree di_ftype_v2si_v2si
14522 = build_function_type_list (long_long_unsigned_type_node,
14523 V2SI_type_node, V2SI_type_node, NULL_TREE);
14524 tree v2di_ftype_v16qi_v16qi
14525 = build_function_type_list (V2DI_type_node,
14526 V16QI_type_node, V16QI_type_node, NULL_TREE);
14527 tree v2di_ftype_v4si_v4si
14528 = build_function_type_list (V2DI_type_node,
14529 V4SI_type_node, V4SI_type_node, NULL_TREE);
14530 tree int_ftype_v16qi
14531 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
14532 tree v16qi_ftype_pcchar
14533 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
14534 tree void_ftype_pchar_v16qi
14535 = build_function_type_list (void_type_node,
14536 pchar_type_node, V16QI_type_node, NULL_TREE);
14538 tree float80_type;
14539 tree float128_type;
14540 tree ftype;
14542 /* The __float80 type. */
14543 if (TYPE_MODE (long_double_type_node) == XFmode)
14544 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
14545 "__float80");
14546 else
14548 /* The __float80 type. */
14549 float80_type = make_node (REAL_TYPE);
14550 TYPE_PRECISION (float80_type) = 80;
14551 layout_type (float80_type);
14552 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
14555 float128_type = make_node (REAL_TYPE);
14556 TYPE_PRECISION (float128_type) = 128;
14557 layout_type (float128_type);
14558 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
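/* (Illustrative usage, an assumption rather than part of this file.)
   Once registered, the names are available as type specifiers in user
   code on this target, e.g.
     __float80 ext;
     __float128 quad;
   with __float80 naming the 80-bit extended format and __float128 the
   128-bit format laid out above.  */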
14560 /* Add all builtins that are more or less simple operations on two
14561 operands. */
14562 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14564 /* Use one of the operands; the target can have a different mode for
14565 mask-generating compares. */
14566 enum machine_mode mode;
14567 tree type;
14569 if (d->name == 0)
14570 continue;
14571 mode = insn_data[d->icode].operand[1].mode;
14573 switch (mode)
14575 case V16QImode:
14576 type = v16qi_ftype_v16qi_v16qi;
14577 break;
14578 case V8HImode:
14579 type = v8hi_ftype_v8hi_v8hi;
14580 break;
14581 case V4SImode:
14582 type = v4si_ftype_v4si_v4si;
14583 break;
14584 case V2DImode:
14585 type = v2di_ftype_v2di_v2di;
14586 break;
14587 case V2DFmode:
14588 type = v2df_ftype_v2df_v2df;
14589 break;
14590 case TImode:
14591 type = ti_ftype_ti_ti;
14592 break;
14593 case V4SFmode:
14594 type = v4sf_ftype_v4sf_v4sf;
14595 break;
14596 case V8QImode:
14597 type = v8qi_ftype_v8qi_v8qi;
14598 break;
14599 case V4HImode:
14600 type = v4hi_ftype_v4hi_v4hi;
14601 break;
14602 case V2SImode:
14603 type = v2si_ftype_v2si_v2si;
14604 break;
14605 case DImode:
14606 type = di_ftype_di_di;
14607 break;
14609 default:
14610 gcc_unreachable ();
14613 /* Override for comparisons. */
14614 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
14615 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
14616 type = v4si_ftype_v4sf_v4sf;
14618 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
14619 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
14620 type = v2di_ftype_v2df_v2df;
14622 def_builtin (d->mask, d->name, type, d->code);
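/* (Worked example, added for illustration.)  For the bdesc_2arg entry
   { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps",
     IX86_BUILTIN_ADDPS, 0, 0 }
   operand 1 of the insn has mode V4SFmode, so the switch selects
   v4sf_ftype_v4sf_v4sf and the builtin is registered under MASK_SSE;
   the mask-generating compare patterns instead receive the v4si/v2di
   result types from the overrides just above.  */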
14625 /* Add the remaining MMX insns with somewhat more complicated types. */
14626 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
14627 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
14628 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
14629 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
14631 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
14632 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
14633 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
14635 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
14636 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
14638 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
14639 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
14641 /* comi/ucomi insns. */
14642 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14643 if (d->mask == MASK_SSE2)
14644 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
14645 else
14646 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
14648 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
14649 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
14650 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
14652 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
14653 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
14654 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
14655 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
14656 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
14657 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
14658 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
14659 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
14660 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
14661 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
14662 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
14664 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
14666 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
14667 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
14669 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
14670 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
14671 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
14672 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
14674 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
14675 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
14676 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
14677 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
14679 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
14681 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
14683 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
14684 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
14685 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
14686 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
14687 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
14688 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
14690 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
14692 /* Original 3DNow! */
14693 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
14694 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
14695 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
14696 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
14697 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
14698 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
14699 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
14700 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
14701 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
14702 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
14703 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
14704 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
14705 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
14706 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
14707 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
14708 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
14709 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
14710 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
14711 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
14712 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
14714 /* 3DNow! extension as used in the Athlon CPU. */
14715 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
14716 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
14717 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
14718 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
14719 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
14720 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
14722 /* SSE2 */
14723 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
14725 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
14726 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
14728 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
14729 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
14731 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
14732 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
14733 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
14734 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
14735 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
14737 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
14738 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
14739 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
14740 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
14742 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
14743 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
14745 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
14747 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
14748 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
14750 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
14751 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
14752 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
14753 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
14754 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
14756 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
14758 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
14759 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
14760 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
14761 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
14763 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
14764 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
14765 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
14767 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
14768 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
14769 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
14770 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
14772 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
14773 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
14774 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
14776 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
14777 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
14779 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
14780 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
14782 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
14783 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
14784 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
14786 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
14787 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
14788 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
14790 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
14791 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
14793 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
14794 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
14795 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
14796 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
14798 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
14799 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
14800 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
14801 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
14803 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
14804 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
14806 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
14808 /* Prescott New Instructions. */
14809 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
14810 void_ftype_pcvoid_unsigned_unsigned,
14811 IX86_BUILTIN_MONITOR);
14812 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
14813 void_ftype_unsigned_unsigned,
14814 IX86_BUILTIN_MWAIT);
14815 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
14816 v4sf_ftype_v4sf,
14817 IX86_BUILTIN_MOVSHDUP);
14818 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
14819 v4sf_ftype_v4sf,
14820 IX86_BUILTIN_MOVSLDUP);
14821 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
14822 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
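/* (Illustrative sketch, not part of the original file: these SSE3
   builtins are what the pmmintrin.h intrinsics are expected to expand
   to.  A direct use would look roughly like

     __builtin_ia32_monitor (addr, 0, 0);
     __builtin_ia32_mwait (0, 0);

   matching the void_ftype_pcvoid_unsigned_unsigned and
   void_ftype_unsigned_unsigned signatures registered above.)  */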
14824 /* Access to the vec_init patterns. */
14825 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
14826 integer_type_node, NULL_TREE);
14827 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
14828 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
14830 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
14831 short_integer_type_node,
14832 short_integer_type_node,
14833 short_integer_type_node, NULL_TREE);
14834 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
14835 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
14837 ftype = build_function_type_list (V8QI_type_node, char_type_node,
14838 char_type_node, char_type_node,
14839 char_type_node, char_type_node,
14840 char_type_node, char_type_node,
14841 char_type_node, NULL_TREE);
14842 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
14843 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
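/* (Sketch of a direct use of the vec_init builtins registered above,
   assuming the __v2si vector typedef provided by mmintrin.h:

     int a, b;
     __v2si v = __builtin_ia32_vec_init_v2si (a, b);

   Which argument lands in which element is decided by
   ix86_expand_vec_init_builtin below, not promised here.)  */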
14845 /* Access to the vec_extract patterns. */
14846 ftype = build_function_type_list (double_type_node, V2DF_type_node,
14847 integer_type_node, NULL_TREE);
14848 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
14849 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
14851 ftype = build_function_type_list (long_long_integer_type_node,
14852 V2DI_type_node, integer_type_node,
14853 NULL_TREE);
14854 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
14855 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
14857 ftype = build_function_type_list (float_type_node, V4SF_type_node,
14858 integer_type_node, NULL_TREE);
14859 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
14860 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
14862 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14863 integer_type_node, NULL_TREE);
14864 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
14865 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
14867 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14868 integer_type_node, NULL_TREE);
14869 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
14870 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
14872 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
14873 integer_type_node, NULL_TREE);
14874 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
14875 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
14877 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
14878 integer_type_node, NULL_TREE);
14879 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
14880 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
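/* (Sketch: the vec_ext builtins take the vector and a selector that
   must be a compile-time constant within the vector's bounds, e.g.

     float x = __builtin_ia32_vec_ext_v4sf (v, 2);

   extracts one element of a V4SF value.)  */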
14882 /* Access to the vec_set patterns. */
14883 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14884 intHI_type_node,
14885 integer_type_node, NULL_TREE);
14886 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
14887 ftype, IX86_BUILTIN_VEC_SET_V8HI);
14889 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
14890 intHI_type_node,
14891 integer_type_node, NULL_TREE);
14892 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
14893 ftype, IX86_BUILTIN_VEC_SET_V4HI);
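/* (Sketch: the vec_set builtins return the updated vector rather than
   modifying it in place, e.g.

     v = __builtin_ia32_vec_set_v8hi (v, x, 3);

   with the selector again required to be an in-range constant.)  */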
14896 /* Errors in the source file can cause expand_expr to return const0_rtx
14897 where we expect a vector. To avoid crashing, use one of the vector
14898 clear instructions. */
14899 static rtx
14900 safe_vector_operand (rtx x, enum machine_mode mode)
14902 if (x == const0_rtx)
14903 x = CONST0_RTX (mode);
14904 return x;
14907 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
14909 static rtx
14910 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
14912 rtx pat, xops[3];
14913 tree arg0 = TREE_VALUE (arglist);
14914 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14915 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14916 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14917 enum machine_mode tmode = insn_data[icode].operand[0].mode;
14918 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
14919 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
14921 if (VECTOR_MODE_P (mode0))
14922 op0 = safe_vector_operand (op0, mode0);
14923 if (VECTOR_MODE_P (mode1))
14924 op1 = safe_vector_operand (op1, mode1);
14926 if (optimize || !target
14927 || GET_MODE (target) != tmode
14928 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14929 target = gen_reg_rtx (tmode);
14931 if (GET_MODE (op1) == SImode && mode1 == TImode)
14933 rtx x = gen_reg_rtx (V4SImode);
14934 emit_insn (gen_sse2_loadd (x, op1));
14935 op1 = gen_lowpart (TImode, x);
14938 /* The insn must want input operands in the same modes as the
14939 result. */
14940 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
14941 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
14943 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
14944 op0 = copy_to_mode_reg (mode0, op0);
14945 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
14946 op1 = copy_to_mode_reg (mode1, op1);
14948 /* ??? Using ix86_fixup_binary_operands is problematic when
14949 we've got mismatched modes. Fake it. */
14951 xops[0] = target;
14952 xops[1] = op0;
14953 xops[2] = op1;
14955 if (tmode == mode0 && tmode == mode1)
14957 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
14958 op0 = xops[1];
14959 op1 = xops[2];
14961 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
14963 op0 = force_reg (mode0, op0);
14964 op1 = force_reg (mode1, op1);
14965 target = gen_reg_rtx (tmode);
14968 pat = GEN_FCN (icode) (target, op0, op1);
14969 if (! pat)
14970 return 0;
14971 emit_insn (pat);
14972 return target;
14975 /* Subroutine of ix86_expand_builtin to take care of stores. */
14977 static rtx
14978 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
14980 rtx pat;
14981 tree arg0 = TREE_VALUE (arglist);
14982 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14983 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14984 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14985 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
14986 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
14988 if (VECTOR_MODE_P (mode1))
14989 op1 = safe_vector_operand (op1, mode1);
14991 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14992 op1 = copy_to_mode_reg (mode1, op1);
14994 pat = GEN_FCN (icode) (op0, op1);
14995 if (pat)
14996 emit_insn (pat);
14997 return 0;
15000 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
15002 static rtx
15003 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
15004 rtx target, int do_load)
15006 rtx pat;
15007 tree arg0 = TREE_VALUE (arglist);
15008 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15009 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15010 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15012 if (optimize || !target
15013 || GET_MODE (target) != tmode
15014 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15015 target = gen_reg_rtx (tmode);
15016 if (do_load)
15017 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15018 else
15020 if (VECTOR_MODE_P (mode0))
15021 op0 = safe_vector_operand (op0, mode0);
15023 if ((optimize && !register_operand (op0, mode0))
15024 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15025 op0 = copy_to_mode_reg (mode0, op0);
15028 pat = GEN_FCN (icode) (target, op0);
15029 if (! pat)
15030 return 0;
15031 emit_insn (pat);
15032 return target;
15035 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
15036 sqrtss, rsqrtss, rcpss. */
15038 static rtx
15039 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
15041 rtx pat;
15042 tree arg0 = TREE_VALUE (arglist);
15043 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15044 enum machine_mode tmode = insn_data[icode].operand[0].mode;
15045 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
15047 if (optimize || !target
15048 || GET_MODE (target) != tmode
15049 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15050 target = gen_reg_rtx (tmode);
15052 if (VECTOR_MODE_P (mode0))
15053 op0 = safe_vector_operand (op0, mode0);
15055 if ((optimize && !register_operand (op0, mode0))
15056 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15057 op0 = copy_to_mode_reg (mode0, op0);
15059 op1 = op0;
15060 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
15061 op1 = copy_to_mode_reg (mode0, op1);
15063 pat = GEN_FCN (icode) (target, op0, op1);
15064 if (! pat)
15065 return 0;
15066 emit_insn (pat);
15067 return target;
15070 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
15072 static rtx
15073 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
15074 rtx target)
15076 rtx pat;
15077 tree arg0 = TREE_VALUE (arglist);
15078 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15079 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15080 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15081 rtx op2;
15082 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
15083 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
15084 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
15085 enum rtx_code comparison = d->comparison;
15087 if (VECTOR_MODE_P (mode0))
15088 op0 = safe_vector_operand (op0, mode0);
15089 if (VECTOR_MODE_P (mode1))
15090 op1 = safe_vector_operand (op1, mode1);
15092 /* Swap operands if we have a comparison that isn't available in
15093 hardware. */
15094 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15096 rtx tmp = gen_reg_rtx (mode1);
15097 emit_move_insn (tmp, op1);
15098 op1 = op0;
15099 op0 = tmp;
15102 if (optimize || !target
15103 || GET_MODE (target) != tmode
15104 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
15105 target = gen_reg_rtx (tmode);
15107 if ((optimize && !register_operand (op0, mode0))
15108 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
15109 op0 = copy_to_mode_reg (mode0, op0);
15110 if ((optimize && !register_operand (op1, mode1))
15111 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
15112 op1 = copy_to_mode_reg (mode1, op1);
15114 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15115 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
15116 if (! pat)
15117 return 0;
15118 emit_insn (pat);
15119 return target;
15122 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
15124 static rtx
15125 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
15126 rtx target)
15128 rtx pat;
15129 tree arg0 = TREE_VALUE (arglist);
15130 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15131 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15132 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15133 rtx op2;
15134 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
15135 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
15136 enum rtx_code comparison = d->comparison;
15138 if (VECTOR_MODE_P (mode0))
15139 op0 = safe_vector_operand (op0, mode0);
15140 if (VECTOR_MODE_P (mode1))
15141 op1 = safe_vector_operand (op1, mode1);
15143 /* Swap operands if we have a comparison that isn't available in
15144 hardware. */
15145 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
15147 rtx tmp = op1;
15148 op1 = op0;
15149 op0 = tmp;
15152 target = gen_reg_rtx (SImode);
15153 emit_move_insn (target, const0_rtx);
15154 target = gen_rtx_SUBREG (QImode, target, 0);
15156 if ((optimize && !register_operand (op0, mode0))
15157 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15158 op0 = copy_to_mode_reg (mode0, op0);
15159 if ((optimize && !register_operand (op1, mode1))
15160 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15161 op1 = copy_to_mode_reg (mode1, op1);
15163 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
15164 pat = GEN_FCN (d->icode) (op0, op1);
15165 if (! pat)
15166 return 0;
15167 emit_insn (pat);
15168 emit_insn (gen_rtx_SET (VOIDmode,
15169 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
15170 gen_rtx_fmt_ee (comparison, QImode,
15171 SET_DEST (pat),
15172 const0_rtx)));
15174 return SUBREG_REG (target);
15177 /* Return the integer constant in ARG. Constrain it to be in the range
15178 of the subparts of VEC_TYPE; issue an error if not. */
15180 static int
15181 get_element_number (tree vec_type, tree arg)
15183 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15185 if (!host_integerp (arg, 1)
15186 || (elt = tree_low_cst (arg, 1), elt > max))
15188 error ("selector must be an integer constant in the range 0..%wi", max);
15189 return 0;
15192 return elt;
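/* (Example of the check above: a V4SF argument has four subparts, so
   the valid selectors are 0..3; something like
   __builtin_ia32_vec_ext_v4sf (v, 9) draws the "selector must be an
   integer constant in the range 0..3" error and falls back to
   element 0.)  */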
15195 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15196 ix86_expand_vector_init. We DO have language-level syntax for this, in
15197 the form of (type){ init-list }. Except that since we can't place emms
15198 instructions from inside the compiler, we can't allow the use of MMX
15199 registers unless the user explicitly asks for it. So we do *not* define
15200 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
15201 we have builtins invoked by mmintrin.h that give us license to emit
15202 these sorts of instructions. */
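/* (For illustration only: mmintrin.h is expected to wrap these
   builtins in the user-visible intrinsics, roughly along the lines of

     static __inline __m64
     _mm_set_pi32 (int __i1, int __i0)
     {
       return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
     }

   the exact wrapper names and argument order being the header's
   business, not this file's.)  */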
15204 static rtx
15205 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
15207 enum machine_mode tmode = TYPE_MODE (type);
15208 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
15209 int i, n_elt = GET_MODE_NUNITS (tmode);
15210 rtvec v = rtvec_alloc (n_elt);
15212 gcc_assert (VECTOR_MODE_P (tmode));
15214 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
15216 rtx x = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15217 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15220 gcc_assert (arglist == NULL);
15222 if (!target || !register_operand (target, tmode))
15223 target = gen_reg_rtx (tmode);
15225 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
15226 return target;
15229 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15230 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
15231 had a language-level syntax for referencing vector elements. */
15233 static rtx
15234 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
15236 enum machine_mode tmode, mode0;
15237 tree arg0, arg1;
15238 int elt;
15239 rtx op0;
15241 arg0 = TREE_VALUE (arglist);
15242 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15244 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15245 elt = get_element_number (TREE_TYPE (arg0), arg1);
15247 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15248 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15249 gcc_assert (VECTOR_MODE_P (mode0));
15251 op0 = force_reg (mode0, op0);
15253 if (optimize || !target || !register_operand (target, tmode))
15254 target = gen_reg_rtx (tmode);
15256 ix86_expand_vector_extract (true, target, op0, elt);
15258 return target;
15261 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
15262 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
15263 a language-level syntax for referencing vector elements. */
15265 static rtx
15266 ix86_expand_vec_set_builtin (tree arglist)
15268 enum machine_mode tmode, mode1;
15269 tree arg0, arg1, arg2;
15270 int elt;
15271 rtx op0, op1;
15273 arg0 = TREE_VALUE (arglist);
15274 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15275 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15277 tmode = TYPE_MODE (TREE_TYPE (arg0));
15278 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15279 gcc_assert (VECTOR_MODE_P (tmode));
15281 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
15282 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
15283 elt = get_element_number (TREE_TYPE (arg0), arg2);
15285 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15286 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15288 op0 = force_reg (tmode, op0);
15289 op1 = force_reg (mode1, op1);
15291 ix86_expand_vector_set (true, op0, op1, elt);
15293 return op0;
15296 /* Expand an expression EXP that calls a built-in function,
15297 with result going to TARGET if that's convenient
15298 (and in mode MODE if that's convenient).
15299 SUBTARGET may be used as the target for computing one of EXP's operands.
15300 IGNORE is nonzero if the value is to be ignored. */
15302 static rtx
15303 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15304 enum machine_mode mode ATTRIBUTE_UNUSED,
15305 int ignore ATTRIBUTE_UNUSED)
15307 const struct builtin_description *d;
15308 size_t i;
15309 enum insn_code icode;
15310 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
15311 tree arglist = TREE_OPERAND (exp, 1);
15312 tree arg0, arg1, arg2;
15313 rtx op0, op1, op2, pat;
15314 enum machine_mode tmode, mode0, mode1, mode2;
15315 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15317 switch (fcode)
15319 case IX86_BUILTIN_EMMS:
15320 emit_insn (gen_mmx_emms ());
15321 return 0;
15323 case IX86_BUILTIN_SFENCE:
15324 emit_insn (gen_sse_sfence ());
15325 return 0;
15327 case IX86_BUILTIN_MASKMOVQ:
15328 case IX86_BUILTIN_MASKMOVDQU:
15329 icode = (fcode == IX86_BUILTIN_MASKMOVQ
15330 ? CODE_FOR_mmx_maskmovq
15331 : CODE_FOR_sse2_maskmovdqu);
15332 /* Note the arg order is different from the operand order. */
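/* The builtin takes (data, mask, pointer) while the insn pattern puts
   the memory destination in operand 0, so the args are picked up out
   of order below.  */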
15333 arg1 = TREE_VALUE (arglist);
15334 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
15335 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15336 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15337 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15338 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15339 mode0 = insn_data[icode].operand[0].mode;
15340 mode1 = insn_data[icode].operand[1].mode;
15341 mode2 = insn_data[icode].operand[2].mode;
15343 op0 = force_reg (Pmode, op0);
15344 op0 = gen_rtx_MEM (mode1, op0);
15346 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15347 op0 = copy_to_mode_reg (mode0, op0);
15348 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15349 op1 = copy_to_mode_reg (mode1, op1);
15350 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
15351 op2 = copy_to_mode_reg (mode2, op2);
15352 pat = GEN_FCN (icode) (op0, op1, op2);
15353 if (! pat)
15354 return 0;
15355 emit_insn (pat);
15356 return 0;
15358 case IX86_BUILTIN_SQRTSS:
15359 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
15360 case IX86_BUILTIN_RSQRTSS:
15361 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
15362 case IX86_BUILTIN_RCPSS:
15363 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
15365 case IX86_BUILTIN_LOADUPS:
15366 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
15368 case IX86_BUILTIN_STOREUPS:
15369 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
15371 case IX86_BUILTIN_LOADHPS:
15372 case IX86_BUILTIN_LOADLPS:
15373 case IX86_BUILTIN_LOADHPD:
15374 case IX86_BUILTIN_LOADLPD:
15375 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
15376 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
15377 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
15378 : CODE_FOR_sse2_loadlpd);
15379 arg0 = TREE_VALUE (arglist);
15380 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15381 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15382 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15383 tmode = insn_data[icode].operand[0].mode;
15384 mode0 = insn_data[icode].operand[1].mode;
15385 mode1 = insn_data[icode].operand[2].mode;
15387 op0 = force_reg (mode0, op0);
15388 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
15389 if (optimize || target == 0
15390 || GET_MODE (target) != tmode
15391 || !register_operand (target, tmode))
15392 target = gen_reg_rtx (tmode);
15393 pat = GEN_FCN (icode) (target, op0, op1);
15394 if (! pat)
15395 return 0;
15396 emit_insn (pat);
15397 return target;
15399 case IX86_BUILTIN_STOREHPS:
15400 case IX86_BUILTIN_STORELPS:
15401 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
15402 : CODE_FOR_sse_storelps);
15403 arg0 = TREE_VALUE (arglist);
15404 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15405 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15406 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15407 mode0 = insn_data[icode].operand[0].mode;
15408 mode1 = insn_data[icode].operand[1].mode;
15410 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15411 op1 = force_reg (mode1, op1);
15413 pat = GEN_FCN (icode) (op0, op1);
15414 if (! pat)
15415 return 0;
15416 emit_insn (pat);
15417 return const0_rtx;
15419 case IX86_BUILTIN_MOVNTPS:
15420 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
15421 case IX86_BUILTIN_MOVNTQ:
15422 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
15424 case IX86_BUILTIN_LDMXCSR:
15425 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
15426 target = assign_386_stack_local (SImode, SLOT_TEMP);
15427 emit_move_insn (target, op0);
15428 emit_insn (gen_sse_ldmxcsr (target));
15429 return 0;
15431 case IX86_BUILTIN_STMXCSR:
15432 target = assign_386_stack_local (SImode, SLOT_TEMP);
15433 emit_insn (gen_sse_stmxcsr (target));
15434 return copy_to_mode_reg (SImode, target);
15436 case IX86_BUILTIN_SHUFPS:
15437 case IX86_BUILTIN_SHUFPD:
15438 icode = (fcode == IX86_BUILTIN_SHUFPS
15439 ? CODE_FOR_sse_shufps
15440 : CODE_FOR_sse2_shufpd);
15441 arg0 = TREE_VALUE (arglist);
15442 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15443 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15444 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15445 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15446 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15447 tmode = insn_data[icode].operand[0].mode;
15448 mode0 = insn_data[icode].operand[1].mode;
15449 mode1 = insn_data[icode].operand[2].mode;
15450 mode2 = insn_data[icode].operand[3].mode;
15452 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15453 op0 = copy_to_mode_reg (mode0, op0);
15454 if ((optimize && !register_operand (op1, mode1))
15455 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
15456 op1 = copy_to_mode_reg (mode1, op1);
15457 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15459 /* @@@ better error message */
15460 error ("mask must be an immediate");
15461 return gen_reg_rtx (tmode);
15463 if (optimize || target == 0
15464 || GET_MODE (target) != tmode
15465 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15466 target = gen_reg_rtx (tmode);
15467 pat = GEN_FCN (icode) (target, op0, op1, op2);
15468 if (! pat)
15469 return 0;
15470 emit_insn (pat);
15471 return target;
15473 case IX86_BUILTIN_PSHUFW:
15474 case IX86_BUILTIN_PSHUFD:
15475 case IX86_BUILTIN_PSHUFHW:
15476 case IX86_BUILTIN_PSHUFLW:
15477 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
15478 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
15479 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
15480 : CODE_FOR_mmx_pshufw);
15481 arg0 = TREE_VALUE (arglist);
15482 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15483 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15484 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15485 tmode = insn_data[icode].operand[0].mode;
15486 mode1 = insn_data[icode].operand[1].mode;
15487 mode2 = insn_data[icode].operand[2].mode;
15489 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15490 op0 = copy_to_mode_reg (mode1, op0);
15491 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15493 /* @@@ better error message */
15494 error ("mask must be an immediate");
15495 return const0_rtx;
15497 if (target == 0
15498 || GET_MODE (target) != tmode
15499 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15500 target = gen_reg_rtx (tmode);
15501 pat = GEN_FCN (icode) (target, op0, op1);
15502 if (! pat)
15503 return 0;
15504 emit_insn (pat);
15505 return target;
15507 case IX86_BUILTIN_PSLLDQI128:
15508 case IX86_BUILTIN_PSRLDQI128:
15509 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
15510 : CODE_FOR_sse2_lshrti3);
15511 arg0 = TREE_VALUE (arglist);
15512 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15513 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15514 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15515 tmode = insn_data[icode].operand[0].mode;
15516 mode1 = insn_data[icode].operand[1].mode;
15517 mode2 = insn_data[icode].operand[2].mode;
15519 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
15521 op0 = copy_to_reg (op0);
15522 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
15524 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
15526 error ("shift must be an immediate");
15527 return const0_rtx;
15529 target = gen_reg_rtx (V2DImode);
15530 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
15531 if (! pat)
15532 return 0;
15533 emit_insn (pat);
15534 return target;
15536 case IX86_BUILTIN_FEMMS:
15537 emit_insn (gen_mmx_femms ());
15538 return NULL_RTX;
15540 case IX86_BUILTIN_PAVGUSB:
15541 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
15543 case IX86_BUILTIN_PF2ID:
15544 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
15546 case IX86_BUILTIN_PFACC:
15547 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
15549 case IX86_BUILTIN_PFADD:
15550 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
15552 case IX86_BUILTIN_PFCMPEQ:
15553 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
15555 case IX86_BUILTIN_PFCMPGE:
15556 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
15558 case IX86_BUILTIN_PFCMPGT:
15559 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
15561 case IX86_BUILTIN_PFMAX:
15562 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
15564 case IX86_BUILTIN_PFMIN:
15565 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
15567 case IX86_BUILTIN_PFMUL:
15568 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
15570 case IX86_BUILTIN_PFRCP:
15571 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
15573 case IX86_BUILTIN_PFRCPIT1:
15574 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
15576 case IX86_BUILTIN_PFRCPIT2:
15577 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
15579 case IX86_BUILTIN_PFRSQIT1:
15580 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
15582 case IX86_BUILTIN_PFRSQRT:
15583 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
15585 case IX86_BUILTIN_PFSUB:
15586 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
15588 case IX86_BUILTIN_PFSUBR:
15589 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
15591 case IX86_BUILTIN_PI2FD:
15592 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
15594 case IX86_BUILTIN_PMULHRW:
15595 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
15597 case IX86_BUILTIN_PF2IW:
15598 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
15600 case IX86_BUILTIN_PFNACC:
15601 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
15603 case IX86_BUILTIN_PFPNACC:
15604 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
15606 case IX86_BUILTIN_PI2FW:
15607 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
15609 case IX86_BUILTIN_PSWAPDSI:
15610 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
15612 case IX86_BUILTIN_PSWAPDSF:
15613 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
15615 case IX86_BUILTIN_SQRTSD:
15616 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
15617 case IX86_BUILTIN_LOADUPD:
15618 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
15619 case IX86_BUILTIN_STOREUPD:
15620 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
15622 case IX86_BUILTIN_MFENCE:
15623 emit_insn (gen_sse2_mfence ());
15624 return 0;
15625 case IX86_BUILTIN_LFENCE:
15626 emit_insn (gen_sse2_lfence ());
15627 return 0;
15629 case IX86_BUILTIN_CLFLUSH:
15630 arg0 = TREE_VALUE (arglist);
15631 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15632 icode = CODE_FOR_sse2_clflush;
15633 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
15634 op0 = copy_to_mode_reg (Pmode, op0);
15636 emit_insn (gen_sse2_clflush (op0));
15637 return 0;
15639 case IX86_BUILTIN_MOVNTPD:
15640 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
15641 case IX86_BUILTIN_MOVNTDQ:
15642 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
15643 case IX86_BUILTIN_MOVNTI:
15644 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
15646 case IX86_BUILTIN_LOADDQU:
15647 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
15648 case IX86_BUILTIN_STOREDQU:
15649 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
15651 case IX86_BUILTIN_MONITOR:
15652 arg0 = TREE_VALUE (arglist);
15653 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15654 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
15655 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15656 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15657 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
15658 if (!REG_P (op0))
15659 op0 = copy_to_mode_reg (SImode, op0);
15660 if (!REG_P (op1))
15661 op1 = copy_to_mode_reg (SImode, op1);
15662 if (!REG_P (op2))
15663 op2 = copy_to_mode_reg (SImode, op2);
15664 emit_insn (gen_sse3_monitor (op0, op1, op2));
15665 return 0;
15667 case IX86_BUILTIN_MWAIT:
15668 arg0 = TREE_VALUE (arglist);
15669 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
15670 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
15671 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
15672 if (!REG_P (op0))
15673 op0 = copy_to_mode_reg (SImode, op0);
15674 if (!REG_P (op1))
15675 op1 = copy_to_mode_reg (SImode, op1);
15676 emit_insn (gen_sse3_mwait (op0, op1));
15677 return 0;
15679 case IX86_BUILTIN_LDDQU:
15680 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
15681 target, 1);
15683 case IX86_BUILTIN_VEC_INIT_V2SI:
15684 case IX86_BUILTIN_VEC_INIT_V4HI:
15685 case IX86_BUILTIN_VEC_INIT_V8QI:
15686 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
15688 case IX86_BUILTIN_VEC_EXT_V2DF:
15689 case IX86_BUILTIN_VEC_EXT_V2DI:
15690 case IX86_BUILTIN_VEC_EXT_V4SF:
15691 case IX86_BUILTIN_VEC_EXT_V4SI:
15692 case IX86_BUILTIN_VEC_EXT_V8HI:
15693 case IX86_BUILTIN_VEC_EXT_V2SI:
15694 case IX86_BUILTIN_VEC_EXT_V4HI:
15695 return ix86_expand_vec_ext_builtin (arglist, target);
15697 case IX86_BUILTIN_VEC_SET_V8HI:
15698 case IX86_BUILTIN_VEC_SET_V4HI:
15699 return ix86_expand_vec_set_builtin (arglist);
15701 default:
15702 break;
15705 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15706 if (d->code == fcode)
15708 /* Compares are treated specially. */
15709 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
15710 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
15711 || d->icode == CODE_FOR_sse2_maskcmpv2df3
15712 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
15713 return ix86_expand_sse_compare (d, arglist, target);
15715 return ix86_expand_binop_builtin (d->icode, arglist, target);
15718 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15719 if (d->code == fcode)
15720 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
15722 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
15723 if (d->code == fcode)
15724 return ix86_expand_sse_comi (d, arglist, target);
15726 gcc_unreachable ();
15729 /* Store OPERAND to memory after reload is completed. This means
15730 that we can't easily use assign_stack_local. */
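/* (For example, on x86_64 with the red zone available the value is
   simply stored just below the stack pointer, at -RED_ZONE_SIZE(%rsp),
   without adjusting %rsp at all -- the TARGET_RED_ZONE path below.)  */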
15731 rtx
15732 ix86_force_to_memory (enum machine_mode mode, rtx operand)
15734 rtx result;
15736 gcc_assert (reload_completed);
15737 if (TARGET_RED_ZONE)
15739 result = gen_rtx_MEM (mode,
15740 gen_rtx_PLUS (Pmode,
15741 stack_pointer_rtx,
15742 GEN_INT (-RED_ZONE_SIZE)));
15743 emit_move_insn (result, operand);
15745 else if (!TARGET_RED_ZONE && TARGET_64BIT)
15747 switch (mode)
15749 case HImode:
15750 case SImode:
15751 operand = gen_lowpart (DImode, operand);
15752 /* FALLTHRU */
15753 case DImode:
15754 emit_insn (
15755 gen_rtx_SET (VOIDmode,
15756 gen_rtx_MEM (DImode,
15757 gen_rtx_PRE_DEC (DImode,
15758 stack_pointer_rtx)),
15759 operand));
15760 break;
15761 default:
15762 gcc_unreachable ();
15764 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15766 else
15768 switch (mode)
15770 case DImode:
15772 rtx operands[2];
15773 split_di (&operand, 1, operands, operands + 1);
15774 emit_insn (
15775 gen_rtx_SET (VOIDmode,
15776 gen_rtx_MEM (SImode,
15777 gen_rtx_PRE_DEC (Pmode,
15778 stack_pointer_rtx)),
15779 operands[1]));
15780 emit_insn (
15781 gen_rtx_SET (VOIDmode,
15782 gen_rtx_MEM (SImode,
15783 gen_rtx_PRE_DEC (Pmode,
15784 stack_pointer_rtx)),
15785 operands[0]));
15787 break;
15788 case HImode:
15789 /* It is better to store HImode values as SImode. */
15790 if (!TARGET_PARTIAL_REG_STALL)
15791 operand = gen_lowpart (SImode, operand);
15792 /* FALLTHRU */
15793 case SImode:
15794 emit_insn (
15795 gen_rtx_SET (VOIDmode,
15796 gen_rtx_MEM (GET_MODE (operand),
15797 gen_rtx_PRE_DEC (SImode,
15798 stack_pointer_rtx)),
15799 operand));
15800 break;
15801 default:
15802 gcc_unreachable ();
15804 result = gen_rtx_MEM (mode, stack_pointer_rtx);
15806 return result;
15809 /* Free the operand from memory. */
15810 void
15811 ix86_free_from_memory (enum machine_mode mode)
15813 if (!TARGET_RED_ZONE)
15815 int size;
15817 if (mode == DImode || TARGET_64BIT)
15818 size = 8;
15819 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
15820 size = 2;
15821 else
15822 size = 4;
15823 /* Use LEA to deallocate stack space. In peephole2 it will be converted
15824 to a pop or add instruction if registers are available. */
15825 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
15826 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
15827 GEN_INT (size))));
15831 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
15832 QImode must go into class Q_REGS.
15833 Narrow ALL_REGS to GENERAL_REGS. This allows movsf and
15834 movdf to do mem-to-mem moves through integer regs. */
15835 enum reg_class
15836 ix86_preferred_reload_class (rtx x, enum reg_class class)
15838 /* We're only allowed to return a subclass of CLASS. Many of the
15839 following checks fail for NO_REGS, so eliminate that early. */
15840 if (class == NO_REGS)
15841 return NO_REGS;
15843 /* All classes can load zeros. */
15844 if (x == CONST0_RTX (GET_MODE (x)))
15845 return class;
15847 /* Floating-point constants need more complex checks. */
15848 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
15850 /* General regs can load everything. */
15851 if (reg_class_subset_p (class, GENERAL_REGS))
15852 return class;
15854 /* Floats can load 0 and 1 plus some others. Note that we eliminated
15855 zero above. We only want to wind up preferring 80387 registers if
15856 we plan on doing computation with them. */
15857 if (TARGET_80387
15858 && (TARGET_MIX_SSE_I387
15859 || !(TARGET_SSE_MATH && SSE_FLOAT_MODE_P (GET_MODE (x))))
15860 && standard_80387_constant_p (x))
15862 /* Limit class to non-sse. */
15863 if (class == FLOAT_SSE_REGS)
15864 return FLOAT_REGS;
15865 if (class == FP_TOP_SSE_REGS)
15866 return FP_TOP_REG;
15867 if (class == FP_SECOND_SSE_REGS)
15868 return FP_SECOND_REG;
15869 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
15870 return class;
15873 return NO_REGS;
15875 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
15876 return NO_REGS;
15877 if (MAYBE_SSE_CLASS_P (class) && CONSTANT_P (x))
15878 return NO_REGS;
15880 /* Generally when we see PLUS here, it's the function invariant
15881 (plus soft-fp const_int), which can only be computed into general
15882 regs. */
15883 if (GET_CODE (x) == PLUS)
15884 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
15886 /* QImode constants are easy to load, but non-constant QImode data
15887 must go into Q_REGS. */
15888 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
15890 if (reg_class_subset_p (class, Q_REGS))
15891 return class;
15892 if (reg_class_subset_p (Q_REGS, class))
15893 return Q_REGS;
15894 return NO_REGS;
15897 return class;
15900 /* If we are copying between general and FP registers, we need a memory
15901 location. The same is true for SSE and MMX registers.
15903 The macro can't work reliably when one of the CLASSES is a class containing
15904 registers from multiple units (SSE, MMX, integer). We avoid this by never
15905 combining those units in a single alternative in the machine description.
15906 Ensure that this constraint holds to avoid unexpected surprises.
15908 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
15909 enforce these sanity checks. */
15911 int
15912 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
15913 enum machine_mode mode, int strict)
15915 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
15916 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
15917 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
15918 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
15919 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
15920 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
15922 gcc_assert (!strict);
15923 return true;
15926 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
15927 return true;
15929 /* ??? This is a lie. We do have moves between mmx/general, and between
15930 mmx/sse2. But by saying we need secondary memory we discourage the
15931 register allocator from using the mmx registers unless needed. */
15932 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
15933 return true;
15935 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
15937 /* SSE1 doesn't have any direct moves from other classes. */
15938 if (!TARGET_SSE2)
15939 return true;
15941 /* If the target says that inter-unit moves are more expensive
15942 than moving through memory, then don't generate them. */
15943 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
15944 return true;
15946 /* Between SSE and general, we have moves no larger than word size. */
15947 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
15948 return true;
15950 /* ??? For the cost of one register reformat penalty, we could use
15951 the same instructions to move SFmode and DFmode data, but the
15952 relevant move patterns don't support those alternatives. */
15953 if (mode == SFmode || mode == DFmode)
15954 return true;
15957 return false;
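/* (Worked example of the function above: with -msse2 and inter-unit
   moves enabled, an SImode copy between SSE_REGS and GENERAL_REGS is
   allowed directly, but an SFmode or DFmode copy between the same
   classes still reports "secondary memory needed" because of the
   final SFmode/DFmode check.)  */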
15960 /* Return true if the registers in CLASS cannot represent the change from
15961 modes FROM to TO. */
15963 bool
15964 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
15965 enum reg_class class)
15967 if (from == to)
15968 return false;
15970 /* x87 registers can't do subreg at all, as all values are reformatted
15971 to extended precision. */
15972 if (MAYBE_FLOAT_CLASS_P (class))
15973 return true;
15975 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
15977 /* Vector registers do not support QI or HImode loads. If we don't
15978 disallow a change to these modes, reload will assume it's ok to
15979 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
15980 the vec_dupv4hi pattern. */
15981 if (GET_MODE_SIZE (from) < 4)
15982 return true;
15984 /* Vector registers do not support subreg with nonzero offsets, which
15985 are otherwise valid for integer registers. Since we can't see
15986 whether we have a nonzero offset from here, prohibit all
15987 nonparadoxical subregs changing size. */
15988 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
15989 return true;
15992 return false;
15995 /* Return the cost of moving data from a register in class CLASS1 to
15996 one in class CLASS2.
15998 It is not required that the cost always equal 2 when FROM is the same as TO;
15999 on some machines it is expensive to move between registers if they are not
16000 general registers. */
16002 int
16003 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
16004 enum reg_class class2)
16006 /* In case we require secondary memory, compute cost of the store followed
16007 by load. In order to avoid bad register allocation choices, we need
16008 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
16010 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
16012 int cost = 1;
16014 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
16015 MEMORY_MOVE_COST (mode, class1, 1));
16016 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
16017 MEMORY_MOVE_COST (mode, class2, 1));
16019 /* In the case of copying from a general purpose register we may emit multiple
16020 stores followed by a single load, causing a memory size mismatch stall.
16021 Count this as an arbitrarily high cost of 20. */
16022 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
16023 cost += 20;
16025 /* In the case of FP/MMX moves, the registers actually overlap, and we
16026 have to switch modes in order to treat them differently. */
16027 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
16028 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
16029 cost += 20;
16031 return cost;
16034 /* Moves between SSE/MMX and integer unit are expensive. */
16035 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
16036 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
16037 return ix86_cost->mmxsse_to_integer;
16038 if (MAYBE_FLOAT_CLASS_P (class1))
16039 return ix86_cost->fp_move;
16040 if (MAYBE_SSE_CLASS_P (class1))
16041 return ix86_cost->sse_move;
16042 if (MAYBE_MMX_CLASS_P (class1))
16043 return ix86_cost->mmx_move;
16044 return 2;
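/* (Sketch of the computation above: copying DFmode between FLOAT_REGS
   and SSE_REGS requires secondary memory, so the reported cost is

     1 + MAX (fp_load[1], fp_store[1]) + MAX (sse_load[1], sse_store[1])

   taken from the active ix86_cost table via MEMORY_MOVE_COST; the +20
   penalties do not apply since both classes hold DFmode in a single
   register and neither overlaps the x87 stack.)  */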
16047 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
16049 bool
16050 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
16052 /* The flags registers, and only they, can hold CCmode values. */
16053 if (CC_REGNO_P (regno))
16054 return GET_MODE_CLASS (mode) == MODE_CC;
16055 if (GET_MODE_CLASS (mode) == MODE_CC
16056 || GET_MODE_CLASS (mode) == MODE_RANDOM
16057 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
16058 return 0;
16059 if (FP_REGNO_P (regno))
16060 return VALID_FP_MODE_P (mode);
16061 if (SSE_REGNO_P (regno))
16063 /* We implement the move patterns for all vector modes into and
16064 out of SSE registers, even when no operation instructions
16065 are available. */
16066 return (VALID_SSE_REG_MODE (mode)
16067 || VALID_SSE2_REG_MODE (mode)
16068 || VALID_MMX_REG_MODE (mode)
16069 || VALID_MMX_REG_MODE_3DNOW (mode));
16071 if (MMX_REGNO_P (regno))
16073 /* We implement the move patterns for 3DNOW modes even in MMX mode,
16074 so if the register is available at all, then we can move data of
16075 the given mode into or out of it. */
16076 return (VALID_MMX_REG_MODE (mode)
16077 || VALID_MMX_REG_MODE_3DNOW (mode));
16080 if (mode == QImode)
16082 /* Take care with QImode values - they can be in non-QI regs,
16083 but then they do cause partial register stalls. */
16084 if (regno < 4 || TARGET_64BIT)
16085 return 1;
16086 if (!TARGET_PARTIAL_REG_STALL)
16087 return 1;
16088 return reload_in_progress || reload_completed;
16090 /* We handle both integer and floats in the general purpose registers. */
16091 else if (VALID_INT_MODE_P (mode))
16092 return 1;
16093 else if (VALID_FP_MODE_P (mode))
16094 return 1;
16095 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
16096 on to use that value in smaller contexts, this can easily force a
16097 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
16098 supporting DImode, allow it. */
16099 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
16100 return 1;
16102 return 0;
16105 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
16106 tieable integer mode. */
16108 static bool
16109 ix86_tieable_integer_mode_p (enum machine_mode mode)
16111 switch (mode)
16113 case HImode:
16114 case SImode:
16115 return true;
16117 case QImode:
16118 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
16120 case DImode:
16121 return TARGET_64BIT;
16123 default:
16124 return false;
16128 /* Return true if MODE1 is accessible in a register that can hold MODE2
16129 without copying. That is, all register classes that can hold MODE2
16130 can also hold MODE1. */
16132 bool
16133 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
16135 if (mode1 == mode2)
16136 return true;
16138 if (ix86_tieable_integer_mode_p (mode1)
16139 && ix86_tieable_integer_mode_p (mode2))
16140 return true;
16142 /* MODE2 being XFmode implies fp stack or general regs, which means we
16143 can tie any smaller floating point modes to it. Note that we do not
16144 tie this with TFmode. */
16145 if (mode2 == XFmode)
16146 return mode1 == SFmode || mode1 == DFmode;
16148 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
16149 that we can tie it with SFmode. */
16150 if (mode2 == DFmode)
16151 return mode1 == SFmode;
16153 /* If MODE2 is only appropriate for an SSE register, then tie with
16154 any other mode acceptable to SSE registers. */
16155 if (GET_MODE_SIZE (mode2) >= 8
16156 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
16157 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
16159 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
16160 with any other mode acceptable to MMX registers. */
16161 if (GET_MODE_SIZE (mode2) == 8
16162 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
16163 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
16165 return false;
16168 /* Return the cost of moving data of mode M between a
16169 register and memory. A value of 2 is the default; this cost is
16170 relative to those in `REGISTER_MOVE_COST'.
16172 If moving between registers and memory is more expensive than
16173 between two registers, you should define this macro to express the
16174 relative cost.
16176 Also model the increased cost of moving QImode registers in
16177 non-Q_REGS classes. */
16179 int
16180 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
16182 if (FLOAT_CLASS_P (class))
16184 int index;
16185 switch (mode)
16187 case SFmode:
16188 index = 0;
16189 break;
16190 case DFmode:
16191 index = 1;
16192 break;
16193 case XFmode:
16194 index = 2;
16195 break;
16196 default:
16197 return 100;
16199 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
16201 if (SSE_CLASS_P (class))
16203 int index;
16204 switch (GET_MODE_SIZE (mode))
16206 case 4:
16207 index = 0;
16208 break;
16209 case 8:
16210 index = 1;
16211 break;
16212 case 16:
16213 index = 2;
16214 break;
16215 default:
16216 return 100;
16218 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
16220 if (MMX_CLASS_P (class))
16222 int index;
16223 switch (GET_MODE_SIZE (mode))
16225 case 4:
16226 index = 0;
16227 break;
16228 case 8:
16229 index = 1;
16230 break;
16231 default:
16232 return 100;
16234 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
16236 switch (GET_MODE_SIZE (mode))
16238 case 1:
16239 if (in)
16240 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
16241 : ix86_cost->movzbl_load);
16242 else
16243 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
16244 : ix86_cost->int_store[0] + 4);
16245 break;
16246 case 2:
16247 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
16248 default:
16249 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
16250 if (mode == TFmode)
16251 mode = XFmode;
16252 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
16253 * (((int) GET_MODE_SIZE (mode)
16254 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
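/* (Example of the default case above: on 32-bit targets a DImode value
   in an integer class costs int_load[2] * 2 or int_store[2] * 2, one
   word-sized move per 4 bytes; on 64-bit targets the same DImode value
   is a single word-sized move.)  */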
16258 /* Compute a (partial) cost for rtx X. Return true if the complete
16259 cost has been computed, and false if subexpressions should be
16260 scanned. In either case, *TOTAL contains the cost result. */
16262 static bool
16263 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
16265 enum machine_mode mode = GET_MODE (x);
16267 switch (code)
16269 case CONST_INT:
16270 case CONST:
16271 case LABEL_REF:
16272 case SYMBOL_REF:
16273 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
16274 *total = 3;
16275 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
16276 *total = 2;
16277 else if (flag_pic && SYMBOLIC_CONST (x)
16278 && (!TARGET_64BIT
16279 || (GET_CODE (x) != LABEL_REF
16280 && (GET_CODE (x) != SYMBOL_REF
16281 || !SYMBOL_REF_LOCAL_P (x)))))
16282 *total = 1;
16283 else
16284 *total = 0;
16285 return true;
16287 case CONST_DOUBLE:
16288 if (mode == VOIDmode)
16289 *total = 0;
16290 else
16291 switch (standard_80387_constant_p (x))
16293 case 1: /* 0.0 */
16294 *total = 1;
16295 break;
16296 default: /* Other constants */
16297 *total = 2;
16298 break;
16299 case 0:
16300 case -1:
16301 /* Start with (MEM (SYMBOL_REF)), since that's where
16302 it'll probably end up. Add a penalty for size. */
16303 *total = (COSTS_N_INSNS (1)
16304 + (flag_pic != 0 && !TARGET_64BIT)
16305 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
16306 break;
16308 return true;
16310 case ZERO_EXTEND:
16311 /* The zero extension is often completely free on x86_64, so make
16312 it as cheap as possible. */
16313 if (TARGET_64BIT && mode == DImode
16314 && GET_MODE (XEXP (x, 0)) == SImode)
16315 *total = 1;
16316 else if (TARGET_ZERO_EXTEND_WITH_AND)
16317 *total = COSTS_N_INSNS (ix86_cost->add);
16318 else
16319 *total = COSTS_N_INSNS (ix86_cost->movzx);
16320 return false;
16322 case SIGN_EXTEND:
16323 *total = COSTS_N_INSNS (ix86_cost->movsx);
16324 return false;
16326 case ASHIFT:
16327 if (GET_CODE (XEXP (x, 1)) == CONST_INT
16328 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
16330 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16331 if (value == 1)
16333 *total = COSTS_N_INSNS (ix86_cost->add);
16334 return false;
16336 if ((value == 2 || value == 3)
16337 && ix86_cost->lea <= ix86_cost->shift_const)
16339 *total = COSTS_N_INSNS (ix86_cost->lea);
16340 return false;
16343 /* FALLTHRU */
16345 case ROTATE:
16346 case ASHIFTRT:
16347 case LSHIFTRT:
16348 case ROTATERT:
16349 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
16351 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16353 if (INTVAL (XEXP (x, 1)) > 32)
16354 *total = COSTS_N_INSNS (ix86_cost->shift_const + 2);
16355 else
16356 *total = COSTS_N_INSNS (ix86_cost->shift_const * 2);
16358 else
16360 if (GET_CODE (XEXP (x, 1)) == AND)
16361 *total = COSTS_N_INSNS (ix86_cost->shift_var * 2);
16362 else
16363 *total = COSTS_N_INSNS (ix86_cost->shift_var * 6 + 2);
16366 else
16368 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16369 *total = COSTS_N_INSNS (ix86_cost->shift_const);
16370 else
16371 *total = COSTS_N_INSNS (ix86_cost->shift_var);
16373 return false;
16375 case MULT:
16376 if (FLOAT_MODE_P (mode))
16378 *total = COSTS_N_INSNS (ix86_cost->fmul);
16379 return false;
16381 else
16383 rtx op0 = XEXP (x, 0);
16384 rtx op1 = XEXP (x, 1);
16385 int nbits;
16386 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
16388 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
16389 for (nbits = 0; value != 0; value &= value - 1)
16390 nbits++;
16392 else
16393 /* This is arbitrary. */
16394 nbits = 7;
16396 /* Compute costs correctly for widening multiplication. */
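/* E.g. on x86_64,
     (mult:DI (sign_extend:DI (reg:SI a)) (sign_extend:DI (reg:SI b)))
   is a single widening imul, so it should be priced as an SImode
   multiply; the code below strips the extension and adjusts MODE
   accordingly (illustration only).  */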
16397 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
16398 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
16399 == GET_MODE_SIZE (mode))
16401 int is_mulwiden = 0;
16402 enum machine_mode inner_mode = GET_MODE (op0);
16404 if (GET_CODE (op0) == GET_CODE (op1))
16405 is_mulwiden = 1, op1 = XEXP (op1, 0);
16406 else if (GET_CODE (op1) == CONST_INT)
16408 if (GET_CODE (op0) == SIGN_EXTEND)
16409 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
16410 == INTVAL (op1);
16411 else
16412 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
16415 if (is_mulwiden)
16416 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
16419 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
16420 + nbits * ix86_cost->mult_bit)
16421 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
16423 return true;
16426 case DIV:
16427 case UDIV:
16428 case MOD:
16429 case UMOD:
16430 if (FLOAT_MODE_P (mode))
16431 *total = COSTS_N_INSNS (ix86_cost->fdiv);
16432 else
16433 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
16434 return false;
16436 case PLUS:
16437 if (FLOAT_MODE_P (mode))
16438 *total = COSTS_N_INSNS (ix86_cost->fadd);
16439 else if (GET_MODE_CLASS (mode) == MODE_INT
16440 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
16442 if (GET_CODE (XEXP (x, 0)) == PLUS
16443 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
16444 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
16445 && CONSTANT_P (XEXP (x, 1)))
16447 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
16448 if (val == 2 || val == 4 || val == 8)
16450 *total = COSTS_N_INSNS (ix86_cost->lea);
16451 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16452 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
16453 outer_code);
16454 *total += rtx_cost (XEXP (x, 1), outer_code);
16455 return true;
16458 else if (GET_CODE (XEXP (x, 0)) == MULT
16459 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
16461 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
16462 if (val == 2 || val == 4 || val == 8)
16464 *total = COSTS_N_INSNS (ix86_cost->lea);
16465 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16466 *total += rtx_cost (XEXP (x, 1), outer_code);
16467 return true;
16470 else if (GET_CODE (XEXP (x, 0)) == PLUS)
16472 *total = COSTS_N_INSNS (ix86_cost->lea);
16473 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
16474 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
16475 *total += rtx_cost (XEXP (x, 1), outer_code);
16476 return true;
16479 /* FALLTHRU */
16481 case MINUS:
16482 if (FLOAT_MODE_P (mode))
16484 *total = COSTS_N_INSNS (ix86_cost->fadd);
16485 return false;
16487 /* FALLTHRU */
16489 case AND:
16490 case IOR:
16491 case XOR:
16492 if (!TARGET_64BIT && mode == DImode)
16494 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
16495 + (rtx_cost (XEXP (x, 0), outer_code)
16496 << (GET_MODE (XEXP (x, 0)) != DImode))
16497 + (rtx_cost (XEXP (x, 1), outer_code)
16498 << (GET_MODE (XEXP (x, 1)) != DImode)));
16499 return true;
16501 /* FALLTHRU */
16503 case NEG:
16504 if (FLOAT_MODE_P (mode))
16506 *total = COSTS_N_INSNS (ix86_cost->fchs);
16507 return false;
16509 /* FALLTHRU */
16511 case NOT:
16512 if (!TARGET_64BIT && mode == DImode)
16513 *total = COSTS_N_INSNS (ix86_cost->add * 2);
16514 else
16515 *total = COSTS_N_INSNS (ix86_cost->add);
16516 return false;
16518 case COMPARE:
16519 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
16520 && XEXP (XEXP (x, 0), 1) == const1_rtx
16521 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
16522 && XEXP (x, 1) == const0_rtx)
16524 /* This kind of construct is implemented using test[bwl].
16525 Treat it as if we had an AND. */
16526 *total = (COSTS_N_INSNS (ix86_cost->add)
16527 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
16528 + rtx_cost (const1_rtx, outer_code));
16529 return true;
16531 return false;
16533 case FLOAT_EXTEND:
16534 if (!TARGET_SSE_MATH
16535 || mode == XFmode
16536 || (mode == DFmode && !TARGET_SSE2))
16537 *total = 0;
16538 return false;
16540 case ABS:
16541 if (FLOAT_MODE_P (mode))
16542 *total = COSTS_N_INSNS (ix86_cost->fabs);
16543 return false;
16545 case SQRT:
16546 if (FLOAT_MODE_P (mode))
16547 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
16548 return false;
16550 case UNSPEC:
16551 if (XINT (x, 1) == UNSPEC_TP)
16552 *total = 0;
16553 return false;
16555 default:
16556 return false;
16560 #if TARGET_MACHO
16562 static int current_machopic_label_num;
16564 /* Given a symbol name and its associated stub, write out the
16565 definition of the stub. */
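/* For illustration, derived from the fprintf calls below: in the
   MACHOPIC_PURE case the emitted stub looks roughly like

	<stub>:
		.indirect_symbol <symbol>
		call LPC$N
	LPC$N:	popl %eax
		movl LN$lz-LPC$N(%eax),%edx
		jmp %edx

   followed by the binder fragment and the lazy pointer LN$lz itself; in the
   non-pure case the stub body is simply "jmp *LN$lz".  The label spellings
   above are schematic, not exact.  */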
16567 void
16568 machopic_output_stub (FILE *file, const char *symb, const char *stub)
16570 unsigned int length;
16571 char *binder_name, *symbol_name, lazy_ptr_name[32];
16572 int label = ++current_machopic_label_num;
16574 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
16575 symb = (*targetm.strip_name_encoding) (symb);
16577 length = strlen (stub);
16578 binder_name = alloca (length + 32);
16579 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
16581 length = strlen (symb);
16582 symbol_name = alloca (length + 32);
16583 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
16585 sprintf (lazy_ptr_name, "L%d$lz", label);
16587 if (MACHOPIC_PURE)
16588 machopic_picsymbol_stub_section ();
16589 else
16590 machopic_symbol_stub_section ();
16592 fprintf (file, "%s:\n", stub);
16593 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16595 if (MACHOPIC_PURE)
16597 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
16598 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
16599 fprintf (file, "\tjmp %%edx\n");
16601 else
16602 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
16604 fprintf (file, "%s:\n", binder_name);
16606 if (MACHOPIC_PURE)
16608 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
16609 fprintf (file, "\tpushl %%eax\n");
16611 else
16612 fprintf (file, "\tpushl $%s\n", lazy_ptr_name);
16614 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
16616 machopic_lazy_symbol_ptr_section ();
16617 fprintf (file, "%s:\n", lazy_ptr_name);
16618 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
16619 fprintf (file, "\t.long %s\n", binder_name);
16621 #endif /* TARGET_MACHO */
16623 /* Order the registers for the register allocator. */
16625 void
16626 x86_order_regs_for_local_alloc (void)
16628 int pos = 0;
16629 int i;
16631 /* First allocate the local general purpose registers. */
16632 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16633 if (GENERAL_REGNO_P (i) && call_used_regs[i])
16634 reg_alloc_order [pos++] = i;
16636 /* Global general purpose registers. */
16637 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16638 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
16639 reg_alloc_order [pos++] = i;
16641 /* x87 registers come first in case we are doing FP math
16642 using them. */
16643 if (!TARGET_SSE_MATH)
16644 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16645 reg_alloc_order [pos++] = i;
16647 /* SSE registers. */
16648 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
16649 reg_alloc_order [pos++] = i;
16650 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
16651 reg_alloc_order [pos++] = i;
16653 /* x87 registers. */
16654 if (TARGET_SSE_MATH)
16655 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
16656 reg_alloc_order [pos++] = i;
16658 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
16659 reg_alloc_order [pos++] = i;
16661 /* Initialize the rest of the array, as we do not allocate some registers
16662 at all. */
16663 while (pos < FIRST_PSEUDO_REGISTER)
16664 reg_alloc_order [pos++] = 0;
16667 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
16668 struct attribute_spec.handler. */
16669 static tree
16670 ix86_handle_struct_attribute (tree *node, tree name,
16671 tree args ATTRIBUTE_UNUSED,
16672 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
16674 tree *type = NULL;
16675 if (DECL_P (*node))
16677 if (TREE_CODE (*node) == TYPE_DECL)
16678 type = &TREE_TYPE (*node);
16680 else
16681 type = node;
16683 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
16684 || TREE_CODE (*type) == UNION_TYPE)))
16686 warning (OPT_Wattributes, "%qs attribute ignored",
16687 IDENTIFIER_POINTER (name));
16688 *no_add_attrs = true;
16691 else if ((is_attribute_p ("ms_struct", name)
16692 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
16693 || ((is_attribute_p ("gcc_struct", name)
16694 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
16696 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
16697 IDENTIFIER_POINTER (name));
16698 *no_add_attrs = true;
16701 return NULL_TREE;
16704 static bool
16705 ix86_ms_bitfield_layout_p (tree record_type)
16707 return (TARGET_MS_BITFIELD_LAYOUT &&
16708 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
16709 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
16712 /* Returns an expression indicating where the this parameter is
16713 located on entry to the FUNCTION. */
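/* A rough summary of the cases handled below: on 64-bit targets `this'
   arrives in %rdi, or in %rsi when a hidden aggregate-return pointer takes
   the first register; with a nonzero regparm count (and a non-variadic
   prototype) it arrives in %eax, or in %ecx for fastcall; otherwise it is
   the first stack argument, at 4(%esp), or at 8(%esp) when an
   aggregate-return pointer is passed.  */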
16715 static rtx
16716 x86_this_parameter (tree function)
16718 tree type = TREE_TYPE (function);
16720 if (TARGET_64BIT)
16722 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
16723 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
16726 if (ix86_function_regparm (type, function) > 0)
16728 tree parm;
16730 parm = TYPE_ARG_TYPES (type);
16731 /* Figure out whether or not the function has a variable number of
16732 arguments. */
16733 for (; parm; parm = TREE_CHAIN (parm))
16734 if (TREE_VALUE (parm) == void_type_node)
16735 break;
16736 /* If not, the this parameter is in the first argument. */
16737 if (parm)
16739 int regno = 0;
16740 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
16741 regno = 2;
16742 return gen_rtx_REG (SImode, regno);
16746 if (aggregate_value_p (TREE_TYPE (type), type))
16747 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
16748 else
16749 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
16752 /* Determine whether x86_output_mi_thunk can succeed. */
16754 static bool
16755 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
16756 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
16757 HOST_WIDE_INT vcall_offset, tree function)
16759 /* 64-bit can handle anything. */
16760 if (TARGET_64BIT)
16761 return true;
16763 /* For 32-bit, everything's fine if we have one free register. */
16764 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
16765 return true;
16767 /* Need a free register for vcall_offset. */
16768 if (vcall_offset)
16769 return false;
16771 /* Need a free register for GOT references. */
16772 if (flag_pic && !(*targetm.binds_local_p) (function))
16773 return false;
16775 /* Otherwise ok. */
16776 return true;
16779 /* Output the assembler code for a thunk function. THUNK_DECL is the
16780 declaration for the thunk function itself, FUNCTION is the decl for
16781 the target function. DELTA is an immediate constant offset to be
16782 added to THIS. If VCALL_OFFSET is nonzero, the word at
16783 *(*this + vcall_offset) should be added to THIS. */
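/* A minimal sketch of the output for the simplest 32-bit case (DELTA only,
   `this' left in its stack slot, non-PIC target):

	addl	$DELTA, 4(%esp)
	jmp	FUNCTION

   With a nonzero VCALL_OFFSET, `this' is first pulled into a register and a
   scratch register is used to fetch and add the word at
   *(*this + vcall_offset).  */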
16785 static void
16786 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
16787 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
16788 HOST_WIDE_INT vcall_offset, tree function)
16790 rtx xops[3];
16791 rtx this = x86_this_parameter (function);
16792 rtx this_reg, tmp;
16794 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
16795 pull it in now and let DELTA benefit. */
16796 if (REG_P (this))
16797 this_reg = this;
16798 else if (vcall_offset)
16800 /* Put the this parameter into %eax. */
16801 xops[0] = this;
16802 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
16803 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16805 else
16806 this_reg = NULL_RTX;
16808 /* Adjust the this parameter by a fixed constant. */
16809 if (delta)
16811 xops[0] = GEN_INT (delta);
16812 xops[1] = this_reg ? this_reg : this;
16813 if (TARGET_64BIT)
16815 if (!x86_64_general_operand (xops[0], DImode))
16817 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16818 xops[1] = tmp;
16819 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
16820 xops[0] = tmp;
16821 xops[1] = this;
16823 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16825 else
16826 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16829 /* Adjust the this parameter by a value stored in the vtable. */
16830 if (vcall_offset)
16832 if (TARGET_64BIT)
16833 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
16834 else
16836 int tmp_regno = 2 /* ECX */;
16837 if (lookup_attribute ("fastcall",
16838 TYPE_ATTRIBUTES (TREE_TYPE (function))))
16839 tmp_regno = 0 /* EAX */;
16840 tmp = gen_rtx_REG (SImode, tmp_regno);
16843 xops[0] = gen_rtx_MEM (Pmode, this_reg);
16844 xops[1] = tmp;
16845 if (TARGET_64BIT)
16846 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16847 else
16848 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16850 /* Adjust the this parameter. */
16851 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
16852 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
16854 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
16855 xops[0] = GEN_INT (vcall_offset);
16856 xops[1] = tmp2;
16857 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
16858 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
16860 xops[1] = this_reg;
16861 if (TARGET_64BIT)
16862 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
16863 else
16864 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
16867 /* If necessary, drop THIS back to its stack slot. */
16868 if (this_reg && this_reg != this)
16870 xops[0] = this_reg;
16871 xops[1] = this;
16872 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
16875 xops[0] = XEXP (DECL_RTL (function), 0);
16876 if (TARGET_64BIT)
16878 if (!flag_pic || (*targetm.binds_local_p) (function))
16879 output_asm_insn ("jmp\t%P0", xops);
16880 else
16882 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
16883 tmp = gen_rtx_CONST (Pmode, tmp);
16884 tmp = gen_rtx_MEM (QImode, tmp);
16885 xops[0] = tmp;
16886 output_asm_insn ("jmp\t%A0", xops);
16889 else
16891 if (!flag_pic || (*targetm.binds_local_p) (function))
16892 output_asm_insn ("jmp\t%P0", xops);
16893 else
16894 #if TARGET_MACHO
16895 if (TARGET_MACHO)
16897 rtx sym_ref = XEXP (DECL_RTL (function), 0);
16898 tmp = (gen_rtx_SYMBOL_REF
16899 (Pmode,
16900 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
16901 tmp = gen_rtx_MEM (QImode, tmp);
16902 xops[0] = tmp;
16903 output_asm_insn ("jmp\t%0", xops);
16905 else
16906 #endif /* TARGET_MACHO */
16908 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
16909 output_set_got (tmp);
16911 xops[1] = tmp;
16912 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
16913 output_asm_insn ("jmp\t{*}%1", xops);
16918 static void
16919 x86_file_start (void)
16921 default_file_start ();
16922 if (X86_FILE_START_VERSION_DIRECTIVE)
16923 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
16924 if (X86_FILE_START_FLTUSED)
16925 fputs ("\t.global\t__fltused\n", asm_out_file);
16926 if (ix86_asm_dialect == ASM_INTEL)
16927 fputs ("\t.intel_syntax\n", asm_out_file);
16930 int
16931 x86_field_alignment (tree field, int computed)
16933 enum machine_mode mode;
16934 tree type = TREE_TYPE (field);
16936 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
16937 return computed;
16938 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
16939 ? get_inner_array_type (type) : type);
16940 if (mode == DFmode || mode == DCmode
16941 || GET_MODE_CLASS (mode) == MODE_INT
16942 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
16943 return MIN (32, computed);
16944 return computed;
16947 /* Output assembler code to FILE to increment profiler label # LABELNO
16948 for profiling a function entry. */
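/* For illustration, derived from the fprintf calls below: 32-bit PIC code
   emits roughly

	leal	<LPREFIX>P<labelno>@GOTOFF(%ebx), %PROFILE_COUNT_REGISTER
	call	*<MCOUNT_NAME>@GOT(%ebx)

   while non-PIC code loads the counter label with a movl and uses a direct
   call; the counter load is omitted entirely when NO_PROFILE_COUNTERS is
   defined.  */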
16949 void
16950 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
16952 if (TARGET_64BIT)
16953 if (flag_pic)
16955 #ifndef NO_PROFILE_COUNTERS
16956 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
16957 #endif
16958 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
16960 else
16962 #ifndef NO_PROFILE_COUNTERS
16963 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
16964 #endif
16965 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16967 else if (flag_pic)
16969 #ifndef NO_PROFILE_COUNTERS
16970 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
16971 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
16972 #endif
16973 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
16975 else
16977 #ifndef NO_PROFILE_COUNTERS
16978 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
16979 PROFILE_COUNT_REGISTER);
16980 #endif
16981 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
16985 /* We don't have exact information about the insn sizes, but we may assume
16986 quite safely that we are informed about all 1 byte insns and memory
16987 address sizes. This is enough to eliminate unnecessary padding in
16988 99% of cases. */
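/* Rough examples of the estimate below: a direct, non-sibling call always
   counts as 5 bytes; an instruction whose length attribute is at most 1
   counts as 1; anything mentioning a symbolic address is assumed to need at
   least 4 address bytes, i.e. at least 5 bytes in total.  */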
16990 static int
16991 min_insn_size (rtx insn)
16993 int l = 0;
16995 if (!INSN_P (insn) || !active_insn_p (insn))
16996 return 0;
16998 /* Discard alignments we've emitted and jump instructions. */
16999 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
17000 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
17001 return 0;
17002 if (GET_CODE (insn) == JUMP_INSN
17003 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
17004 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
17005 return 0;
17007 /* Important case - calls are always 5 bytes.
17008 It is common to have many calls in a row. */
17009 if (GET_CODE (insn) == CALL_INSN
17010 && symbolic_reference_mentioned_p (PATTERN (insn))
17011 && !SIBLING_CALL_P (insn))
17012 return 5;
17013 if (get_attr_length (insn) <= 1)
17014 return 1;
17016 /* For normal instructions we may rely on the sizes of addresses
17017 and the presence of a symbol to require 4 bytes of encoding.
17018 This is not the case for jumps, where references are PC-relative. */
17019 if (GET_CODE (insn) != JUMP_INSN)
17021 l = get_attr_length_address (insn);
17022 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
17023 l = 4;
17025 if (l)
17026 return 1+l;
17027 else
17028 return 2;
17031 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
17032 16-byte window. */
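/* A worked example of the padding inserted below: if the interval holding
   the previous three jumps plus INSN is NBYTES == 12 bytes, we emit an
   alignment insn that may skip up to 15 - 12 + sizeof (INSN) bytes, pushing
   the fourth jump toward the next 16-byte window in the worst-case
   placement.  */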
17034 static void
17035 ix86_avoid_jump_misspredicts (void)
17037 rtx insn, start = get_insns ();
17038 int nbytes = 0, njumps = 0;
17039 int isjump = 0;
17041 /* Look for all minimal intervals of instructions containing 4 jumps.
17042 The intervals are bounded by START and INSN. NBYTES is the total
17043 size of instructions in the interval including INSN and not including
17044 START. When NBYTES is smaller than 16 bytes, it is possible
17045 that the end of START and INSN end up in the same 16-byte page.
17047 The smallest offset in the page at which INSN can start is the case where
17048 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
17049 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
17051 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
17054 nbytes += min_insn_size (insn);
17055 if (dump_file)
17056 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
17057 INSN_UID (insn), min_insn_size (insn));
17058 if ((GET_CODE (insn) == JUMP_INSN
17059 && GET_CODE (PATTERN (insn)) != ADDR_VEC
17060 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
17061 || GET_CODE (insn) == CALL_INSN)
17062 njumps++;
17063 else
17064 continue;
17066 while (njumps > 3)
17068 start = NEXT_INSN (start);
17069 if ((GET_CODE (start) == JUMP_INSN
17070 && GET_CODE (PATTERN (start)) != ADDR_VEC
17071 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
17072 || GET_CODE (start) == CALL_INSN)
17073 njumps--, isjump = 1;
17074 else
17075 isjump = 0;
17076 nbytes -= min_insn_size (start);
17078 gcc_assert (njumps >= 0);
17079 if (dump_file)
17080 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
17081 INSN_UID (start), INSN_UID (insn), nbytes);
17083 if (njumps == 3 && isjump && nbytes < 16)
17085 int padsize = 15 - nbytes + min_insn_size (insn);
17087 if (dump_file)
17088 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
17089 INSN_UID (insn), padsize);
17090 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
17095 /* AMD Athlon works faster
17096 when RET is not the destination of a conditional jump or directly preceded
17097 by another jump instruction. We avoid the penalty by inserting a NOP just
17098 before the RET instructions in such cases. */
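/* For illustration: a ret reached directly from a conditional jump, or one
   immediately preceded by another jump or a call, is replaced below by
   gen_return_internal_long, i.e. a longer encoding of the return (typically
   assembled as "rep ; ret"), which sidesteps the predictor penalty.  */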
17099 static void
17100 ix86_pad_returns (void)
17102 edge e;
17103 edge_iterator ei;
17105 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
17107 basic_block bb = e->src;
17108 rtx ret = BB_END (bb);
17109 rtx prev;
17110 bool replace = false;
17112 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
17113 || !maybe_hot_bb_p (bb))
17114 continue;
17115 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
17116 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
17117 break;
17118 if (prev && GET_CODE (prev) == CODE_LABEL)
17120 edge e;
17121 edge_iterator ei;
17123 FOR_EACH_EDGE (e, ei, bb->preds)
17124 if (EDGE_FREQUENCY (e) && e->src->index >= 0
17125 && !(e->flags & EDGE_FALLTHRU))
17126 replace = true;
17128 if (!replace)
17130 prev = prev_active_insn (ret);
17131 if (prev
17132 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
17133 || GET_CODE (prev) == CALL_INSN))
17134 replace = true;
17135 /* Empty functions get a branch mispredict even when the jump destination
17136 is not visible to us. */
17137 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
17138 replace = true;
17140 if (replace)
17142 emit_insn_before (gen_return_internal_long (), ret);
17143 delete_insn (ret);
17148 /* Implement machine-specific optimizations. We implement padding of returns
17149 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
17150 static void
17151 ix86_reorg (void)
17153 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
17154 ix86_pad_returns ();
17155 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
17156 ix86_avoid_jump_misspredicts ();
17159 /* Return nonzero when a QImode register that must be represented via a REX
17160 prefix is used. */
17161 bool
17162 x86_extended_QIreg_mentioned_p (rtx insn)
17164 int i;
17165 extract_insn_cached (insn);
17166 for (i = 0; i < recog_data.n_operands; i++)
17167 if (REG_P (recog_data.operand[i])
17168 && REGNO (recog_data.operand[i]) >= 4)
17169 return true;
17170 return false;
17173 /* Return nonzero when P points to a register encoded via a REX prefix.
17174 Called via for_each_rtx. */
17175 static int
17176 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
17178 unsigned int regno;
17179 if (!REG_P (*p))
17180 return 0;
17181 regno = REGNO (*p);
17182 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
17185 /* Return true when INSN mentions a register that must be encoded using a
17186 REX prefix. */
17187 bool
17188 x86_extended_reg_mentioned_p (rtx insn)
17190 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
17193 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
17194 optabs would emit if we didn't have TFmode patterns. */
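/* Sketch of the expansion below, in C-like pseudo code:

	if ((signed) in >= 0)
	  out = (FLOAT) in;
	else
	  out = 2 * (FLOAT) ((in >> 1) | (in & 1));

   ORing the low bit back in preserves correct rounding of the halved
   value.  */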
17196 void
17197 x86_emit_floatuns (rtx operands[2])
17199 rtx neglab, donelab, i0, i1, f0, in, out;
17200 enum machine_mode mode, inmode;
17202 inmode = GET_MODE (operands[1]);
17203 gcc_assert (inmode == SImode || inmode == DImode);
17205 out = operands[0];
17206 in = force_reg (inmode, operands[1]);
17207 mode = GET_MODE (out);
17208 neglab = gen_label_rtx ();
17209 donelab = gen_label_rtx ();
17210 i1 = gen_reg_rtx (Pmode);
17211 f0 = gen_reg_rtx (mode);
17213 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
17215 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
17216 emit_jump_insn (gen_jump (donelab));
17217 emit_barrier ();
17219 emit_label (neglab);
17221 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17222 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
17223 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
17224 expand_float (f0, i0, 0);
17225 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
17227 emit_label (donelab);
17230 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17231 with all elements equal to VAR. Return true if successful. */
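/* For illustration of the "widen" strategy used below: a V16QImode
   broadcast of byte B first forms the HImode value (B << 8) | B, recurses
   to broadcast that halfword as V8HImode, and then reuses the result as a
   V16QImode register via gen_lowpart.  */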
17233 static bool
17234 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
17235 rtx target, rtx val)
17237 enum machine_mode smode, wsmode, wvmode;
17238 rtx x;
17240 switch (mode)
17242 case V2SImode:
17243 case V2SFmode:
17244 if (!mmx_ok && !TARGET_SSE)
17245 return false;
17246 /* FALLTHRU */
17248 case V2DFmode:
17249 case V2DImode:
17250 case V4SFmode:
17251 case V4SImode:
17252 val = force_reg (GET_MODE_INNER (mode), val);
17253 x = gen_rtx_VEC_DUPLICATE (mode, val);
17254 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17255 return true;
17257 case V4HImode:
17258 if (!mmx_ok)
17259 return false;
17260 if (TARGET_SSE || TARGET_3DNOW_A)
17262 val = gen_lowpart (SImode, val);
17263 x = gen_rtx_TRUNCATE (HImode, val);
17264 x = gen_rtx_VEC_DUPLICATE (mode, x);
17265 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17266 return true;
17268 else
17270 smode = HImode;
17271 wsmode = SImode;
17272 wvmode = V2SImode;
17273 goto widen;
17276 case V8QImode:
17277 if (!mmx_ok)
17278 return false;
17279 smode = QImode;
17280 wsmode = HImode;
17281 wvmode = V4HImode;
17282 goto widen;
17283 case V8HImode:
17284 smode = HImode;
17285 wsmode = SImode;
17286 wvmode = V4SImode;
17287 goto widen;
17288 case V16QImode:
17289 smode = QImode;
17290 wsmode = HImode;
17291 wvmode = V8HImode;
17292 goto widen;
17293 widen:
17294 /* Replicate the value once into the next wider mode and recurse. */
17295 val = convert_modes (wsmode, smode, val, true);
17296 x = expand_simple_binop (wsmode, ASHIFT, val,
17297 GEN_INT (GET_MODE_BITSIZE (smode)),
17298 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17299 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
17301 x = gen_reg_rtx (wvmode);
17302 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
17303 gcc_unreachable ();
17304 emit_move_insn (target, gen_lowpart (mode, x));
17305 return true;
17307 default:
17308 return false;
17312 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17313 whose low element is VAR, and other elements are zero. Return true
17314 if successful. */
17316 static bool
17317 ix86_expand_vector_init_low_nonzero (bool mmx_ok, enum machine_mode mode,
17318 rtx target, rtx var)
17320 enum machine_mode vsimode;
17321 rtx x;
17323 switch (mode)
17325 case V2SFmode:
17326 case V2SImode:
17327 if (!mmx_ok && !TARGET_SSE)
17328 return false;
17329 /* FALLTHRU */
17331 case V2DFmode:
17332 case V2DImode:
17333 var = force_reg (GET_MODE_INNER (mode), var);
17334 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
17335 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17336 return true;
17338 case V4SFmode:
17339 case V4SImode:
17340 var = force_reg (GET_MODE_INNER (mode), var);
17341 x = gen_rtx_VEC_DUPLICATE (mode, var);
17342 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
17343 emit_insn (gen_rtx_SET (VOIDmode, target, x));
17344 return true;
17346 case V8HImode:
17347 case V16QImode:
17348 vsimode = V4SImode;
17349 goto widen;
17350 case V4HImode:
17351 case V8QImode:
17352 if (!mmx_ok)
17353 return false;
17354 vsimode = V2SImode;
17355 goto widen;
17356 widen:
17357 /* Zero extend the variable element to SImode and recurse. */
17358 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
17360 x = gen_reg_rtx (vsimode);
17361 if (!ix86_expand_vector_init_low_nonzero (mmx_ok, vsimode, x, var))
17362 gcc_unreachable ();
17364 emit_move_insn (target, gen_lowpart (mode, x));
17365 return true;
17367 default:
17368 return false;
17372 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
17373 consisting of the values in VALS. It is known that all elements
17374 except ONE_VAR are constants. Return true if successful. */
17376 static bool
17377 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
17378 rtx target, rtx vals, int one_var)
17380 rtx var = XVECEXP (vals, 0, one_var);
17381 enum machine_mode wmode;
17382 rtx const_vec, x;
17384 const_vec = copy_rtx (vals);
17385 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
17386 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
17388 switch (mode)
17390 case V2DFmode:
17391 case V2DImode:
17392 case V2SFmode:
17393 case V2SImode:
17394 /* For the two element vectors, it's just as easy to use
17395 the general case. */
17396 return false;
17398 case V4SFmode:
17399 case V4SImode:
17400 case V8HImode:
17401 case V4HImode:
17402 break;
17404 case V16QImode:
17405 wmode = V8HImode;
17406 goto widen;
17407 case V8QImode:
17408 wmode = V4HImode;
17409 goto widen;
17410 widen:
17411 /* There's no way to set one QImode entry easily. Combine
17412 the variable value with its adjacent constant value, and
17413 promote to an HImode set. */
17414 x = XVECEXP (vals, 0, one_var ^ 1);
17415 if (one_var & 1)
17417 var = convert_modes (HImode, QImode, var, true);
17418 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
17419 NULL_RTX, 1, OPTAB_LIB_WIDEN);
17420 x = GEN_INT (INTVAL (x) & 0xff);
17422 else
17424 var = convert_modes (HImode, QImode, var, true);
17425 x = gen_int_mode (INTVAL (x) << 8, HImode);
17427 if (x != const0_rtx)
17428 var = expand_simple_binop (HImode, IOR, var, x, var,
17429 1, OPTAB_LIB_WIDEN);
17431 x = gen_reg_rtx (wmode);
17432 emit_move_insn (x, gen_lowpart (wmode, const_vec));
17433 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
17435 emit_move_insn (target, gen_lowpart (mode, x));
17436 return true;
17438 default:
17439 return false;
17442 emit_move_insn (target, const_vec);
17443 ix86_expand_vector_set (mmx_ok, target, var, one_var);
17444 return true;
17447 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
17448 all values variable, and none identical. */
17450 static void
17451 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
17452 rtx target, rtx vals)
17454 enum machine_mode half_mode = GET_MODE_INNER (mode);
17455 rtx op0 = NULL, op1 = NULL;
17456 bool use_vec_concat = false;
17458 switch (mode)
17460 case V2SFmode:
17461 case V2SImode:
17462 if (!mmx_ok && !TARGET_SSE)
17463 break;
17464 /* FALLTHRU */
17466 case V2DFmode:
17467 case V2DImode:
17468 /* For the two element vectors, we always implement VEC_CONCAT. */
17469 op0 = XVECEXP (vals, 0, 0);
17470 op1 = XVECEXP (vals, 0, 1);
17471 use_vec_concat = true;
17472 break;
17474 case V4SFmode:
17475 half_mode = V2SFmode;
17476 goto half;
17477 case V4SImode:
17478 half_mode = V2SImode;
17479 goto half;
17480 half:
17482 rtvec v;
17484 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
17485 Recurse to load the two halves. */
17487 op0 = gen_reg_rtx (half_mode);
17488 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
17489 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
17491 op1 = gen_reg_rtx (half_mode);
17492 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
17493 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
17495 use_vec_concat = true;
17497 break;
17499 case V8HImode:
17500 case V16QImode:
17501 case V4HImode:
17502 case V8QImode:
17503 break;
17505 default:
17506 gcc_unreachable ();
17509 if (use_vec_concat)
17511 if (!register_operand (op0, half_mode))
17512 op0 = force_reg (half_mode, op0);
17513 if (!register_operand (op1, half_mode))
17514 op1 = force_reg (half_mode, op1);
17516 emit_insn (gen_rtx_SET (VOIDmode, target,
17517 gen_rtx_VEC_CONCAT (mode, op0, op1)));
17519 else
17521 int i, j, n_elts, n_words, n_elt_per_word;
17522 enum machine_mode inner_mode;
17523 rtx words[4], shift;
17525 inner_mode = GET_MODE_INNER (mode);
17526 n_elts = GET_MODE_NUNITS (mode);
17527 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
17528 n_elt_per_word = n_elts / n_words;
17529 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
17531 for (i = 0; i < n_words; ++i)
17533 rtx word = NULL_RTX;
17535 for (j = 0; j < n_elt_per_word; ++j)
17537 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
17538 elt = convert_modes (word_mode, inner_mode, elt, true);
17540 if (j == 0)
17541 word = elt;
17542 else
17544 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
17545 word, 1, OPTAB_LIB_WIDEN);
17546 word = expand_simple_binop (word_mode, IOR, word, elt,
17547 word, 1, OPTAB_LIB_WIDEN);
17551 words[i] = word;
17554 if (n_words == 1)
17555 emit_move_insn (target, gen_lowpart (mode, words[0]));
17556 else if (n_words == 2)
17558 rtx tmp = gen_reg_rtx (mode);
17559 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
17560 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
17561 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
17562 emit_move_insn (target, tmp);
17564 else if (n_words == 4)
17566 rtx tmp = gen_reg_rtx (V4SImode);
17567 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
17568 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
17569 emit_move_insn (target, gen_lowpart (mode, tmp));
17571 else
17572 gcc_unreachable ();
17576 /* Initialize vector TARGET via VALS. Suppress the use of MMX
17577 instructions unless MMX_OK is true. */
17579 void
17580 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
17582 enum machine_mode mode = GET_MODE (target);
17583 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17584 int n_elts = GET_MODE_NUNITS (mode);
17585 int n_var = 0, one_var = -1;
17586 bool all_same = true, all_const_zero = true;
17587 int i;
17588 rtx x;
17590 for (i = 0; i < n_elts; ++i)
17592 x = XVECEXP (vals, 0, i);
17593 if (!CONSTANT_P (x))
17594 n_var++, one_var = i;
17595 else if (x != CONST0_RTX (inner_mode))
17596 all_const_zero = false;
17597 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17598 all_same = false;
17601 /* Constants are best loaded from the constant pool. */
17602 if (n_var == 0)
17604 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
17605 return;
17608 /* If all values are identical, broadcast the value. */
17609 if (all_same
17610 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
17611 XVECEXP (vals, 0, 0)))
17612 return;
17614 /* Values where only one field is non-constant are best loaded from
17615 the pool and overwritten via move later. */
17616 if (n_var == 1)
17618 if (all_const_zero && one_var == 0
17619 && ix86_expand_vector_init_low_nonzero (mmx_ok, mode, target,
17620 XVECEXP (vals, 0, 0)))
17621 return;
17623 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
17624 return;
17627 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
17630 void
17631 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
17633 enum machine_mode mode = GET_MODE (target);
17634 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17635 bool use_vec_merge = false;
17636 rtx tmp;
17638 switch (mode)
17640 case V2SFmode:
17641 case V2SImode:
17642 if (mmx_ok)
17644 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
17645 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
17646 if (elt == 0)
17647 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
17648 else
17649 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
17650 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17651 return;
17653 break;
17655 case V2DFmode:
17656 case V2DImode:
17658 rtx op0, op1;
17660 /* For the two element vectors, we implement a VEC_CONCAT with
17661 the extraction of the other element. */
17663 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
17664 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
17666 if (elt == 0)
17667 op0 = val, op1 = tmp;
17668 else
17669 op0 = tmp, op1 = val;
17671 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
17672 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17674 return;
17676 case V4SFmode:
17677 switch (elt)
17679 case 0:
17680 use_vec_merge = true;
17681 break;
17683 case 1:
17684 /* tmp = target = A B C D */
17685 tmp = copy_to_reg (target);
17686 /* target = A A B B */
17687 emit_insn (gen_sse_unpcklps (target, target, target));
17688 /* target = X A B B */
17689 ix86_expand_vector_set (false, target, val, 0);
17690 /* target = A X C D */
17691 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17692 GEN_INT (1), GEN_INT (0),
17693 GEN_INT (2+4), GEN_INT (3+4)));
17694 return;
17696 case 2:
17697 /* tmp = target = A B C D */
17698 tmp = copy_to_reg (target);
17699 /* tmp = X B C D */
17700 ix86_expand_vector_set (false, tmp, val, 0);
17701 /* target = A B X D */
17702 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17703 GEN_INT (0), GEN_INT (1),
17704 GEN_INT (0+4), GEN_INT (3+4)));
17705 return;
17707 case 3:
17708 /* tmp = target = A B C D */
17709 tmp = copy_to_reg (target);
17710 /* tmp = X B C D */
17711 ix86_expand_vector_set (false, tmp, val, 0);
17712 /* target = A B C X */
17713 emit_insn (gen_sse_shufps_1 (target, target, tmp,
17714 GEN_INT (0), GEN_INT (1),
17715 GEN_INT (2+4), GEN_INT (0+4)));
17716 return;
17718 default:
17719 gcc_unreachable ();
17721 break;
17723 case V4SImode:
17724 /* Element 0 handled by vec_merge below. */
17725 if (elt == 0)
17727 use_vec_merge = true;
17728 break;
17731 if (TARGET_SSE2)
17733 /* With SSE2, use integer shuffles to swap element 0 and ELT,
17734 store into element 0, then shuffle them back. */
17736 rtx order[4];
17738 order[0] = GEN_INT (elt);
17739 order[1] = const1_rtx;
17740 order[2] = const2_rtx;
17741 order[3] = GEN_INT (3);
17742 order[elt] = const0_rtx;
17744 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17745 order[1], order[2], order[3]));
17747 ix86_expand_vector_set (false, target, val, 0);
17749 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
17750 order[1], order[2], order[3]));
17752 else
17754 /* For SSE1, we have to reuse the V4SF code. */
17755 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
17756 gen_lowpart (SFmode, val), elt);
17758 return;
17760 case V8HImode:
17761 use_vec_merge = TARGET_SSE2;
17762 break;
17763 case V4HImode:
17764 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17765 break;
17767 case V16QImode:
17768 case V8QImode:
17769 default:
17770 break;
17773 if (use_vec_merge)
17775 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
17776 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
17777 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17779 else
17781 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17783 emit_move_insn (mem, target);
17785 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17786 emit_move_insn (tmp, val);
17788 emit_move_insn (target, mem);
17792 void
17793 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
17795 enum machine_mode mode = GET_MODE (vec);
17796 enum machine_mode inner_mode = GET_MODE_INNER (mode);
17797 bool use_vec_extr = false;
17798 rtx tmp;
17800 switch (mode)
17802 case V2SImode:
17803 case V2SFmode:
17804 if (!mmx_ok)
17805 break;
17806 /* FALLTHRU */
17808 case V2DFmode:
17809 case V2DImode:
17810 use_vec_extr = true;
17811 break;
17813 case V4SFmode:
17814 switch (elt)
17816 case 0:
17817 tmp = vec;
17818 break;
17820 case 1:
17821 case 3:
17822 tmp = gen_reg_rtx (mode);
17823 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
17824 GEN_INT (elt), GEN_INT (elt),
17825 GEN_INT (elt+4), GEN_INT (elt+4)));
17826 break;
17828 case 2:
17829 tmp = gen_reg_rtx (mode);
17830 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
17831 break;
17833 default:
17834 gcc_unreachable ();
17836 vec = tmp;
17837 use_vec_extr = true;
17838 elt = 0;
17839 break;
17841 case V4SImode:
17842 if (TARGET_SSE2)
17844 switch (elt)
17846 case 0:
17847 tmp = vec;
17848 break;
17850 case 1:
17851 case 3:
17852 tmp = gen_reg_rtx (mode);
17853 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
17854 GEN_INT (elt), GEN_INT (elt),
17855 GEN_INT (elt), GEN_INT (elt)));
17856 break;
17858 case 2:
17859 tmp = gen_reg_rtx (mode);
17860 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
17861 break;
17863 default:
17864 gcc_unreachable ();
17866 vec = tmp;
17867 use_vec_extr = true;
17868 elt = 0;
17870 else
17872 /* For SSE1, we have to reuse the V4SF code. */
17873 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
17874 gen_lowpart (V4SFmode, vec), elt);
17875 return;
17877 break;
17879 case V8HImode:
17880 use_vec_extr = TARGET_SSE2;
17881 break;
17882 case V4HImode:
17883 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
17884 break;
17886 case V16QImode:
17887 case V8QImode:
17888 /* ??? Could extract the appropriate HImode element and shift. */
17889 default:
17890 break;
17893 if (use_vec_extr)
17895 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
17896 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
17898 /* Let the rtl optimizers know about the zero extension performed. */
17899 if (inner_mode == HImode)
17901 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
17902 target = gen_lowpart (SImode, target);
17905 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
17907 else
17909 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
17911 emit_move_insn (mem, vec);
17913 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
17914 emit_move_insn (target, tmp);
17918 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
17919 pattern to reduce; DEST is the destination; IN is the input vector. */
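/* For example, reducing {a, b, c, d} with addition proceeds roughly as
   tmp1 = movhlps (in, in), giving {c, d, c, d};
   tmp2 = tmp1 OP in, giving {a+c, b+d, ...};
   tmp3 = splat of element 1 of tmp2, giving {b+d, b+d, b+d, b+d};
   dest = tmp2 OP tmp3, whose element 0 is then a+b+c+d.  */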
17921 void
17922 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
17924 rtx tmp1, tmp2, tmp3;
17926 tmp1 = gen_reg_rtx (V4SFmode);
17927 tmp2 = gen_reg_rtx (V4SFmode);
17928 tmp3 = gen_reg_rtx (V4SFmode);
17930 emit_insn (gen_sse_movhlps (tmp1, in, in));
17931 emit_insn (fn (tmp2, tmp1, in));
17933 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
17934 GEN_INT (1), GEN_INT (1),
17935 GEN_INT (1+4), GEN_INT (1+4)));
17936 emit_insn (fn (dest, tmp2, tmp3));
17939 /* Implements target hook vector_mode_supported_p. */
17940 static bool
17941 ix86_vector_mode_supported_p (enum machine_mode mode)
17943 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
17944 return true;
17945 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
17946 return true;
17947 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
17948 return true;
17949 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
17950 return true;
17951 return false;
17954 /* Worker function for TARGET_MD_ASM_CLOBBERS.
17956 We do this in the new i386 backend to maintain source compatibility
17957 with the old cc0-based compiler. */
17959 static tree
17960 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
17961 tree inputs ATTRIBUTE_UNUSED,
17962 tree clobbers)
17964 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
17965 clobbers);
17966 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
17967 clobbers);
17968 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
17969 clobbers);
17970 return clobbers;
17973 /* Return true if this goes in large data/bss. */
17975 static bool
17976 ix86_in_large_data_p (tree exp)
17978 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
17979 return false;
17981 /* Functions are never large data. */
17982 if (TREE_CODE (exp) == FUNCTION_DECL)
17983 return false;
17985 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
17987 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
17988 if (strcmp (section, ".ldata") == 0
17989 || strcmp (section, ".lbss") == 0)
17990 return true;
17991 return false;
17993 else
17995 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
17997 /* If this is an incomplete type with size 0, then we can't put it
17998 in data because it might be too big when completed. */
17999 if (!size || size > ix86_section_threshold)
18000 return true;
18003 return false;
18005 static void
18006 ix86_encode_section_info (tree decl, rtx rtl, int first)
18008 default_encode_section_info (decl, rtl, first);
18010 if (TREE_CODE (decl) == VAR_DECL
18011 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
18012 && ix86_in_large_data_p (decl))
18013 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
18016 /* Worker function for REVERSE_CONDITION. */
18018 enum rtx_code
18019 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
18021 return (mode != CCFPmode && mode != CCFPUmode
18022 ? reverse_condition (code)
18023 : reverse_condition_maybe_unordered (code));
18026 /* Output code to perform an x87 FP register move, from OPERANDS[1]
18027 to OPERANDS[0]. */
18029 const char *
18030 output_387_reg_move (rtx insn, rtx *operands)
18032 if (REG_P (operands[1])
18033 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
18035 if (REGNO (operands[0]) == FIRST_STACK_REG
18036 && TARGET_USE_FFREEP)
18037 return "ffreep\t%y0";
18038 return "fstp\t%y0";
18040 if (STACK_TOP_P (operands[0]))
18041 return "fld%z1\t%y1";
18042 return "fst\t%y0";
18045 /* Output code to perform a conditional jump to LABEL, if the C2 flag in
18046 the FP status register is set. */
18048 void
18049 ix86_emit_fp_unordered_jump (rtx label)
18051 rtx reg = gen_reg_rtx (HImode);
18052 rtx temp;
18054 emit_insn (gen_x86_fnstsw_1 (reg));
18056 if (TARGET_USE_SAHF)
18058 emit_insn (gen_x86_sahf_1 (reg));
18060 temp = gen_rtx_REG (CCmode, FLAGS_REG);
18061 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
18063 else
18065 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
18067 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18068 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
18071 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
18072 gen_rtx_LABEL_REF (VOIDmode, label),
18073 pc_rtx);
18074 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
18075 emit_jump_insn (temp);
18078 /* Output code to perform a log1p XFmode calculation. */
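/* The threshold 0.2928... used below is 1 - sqrt(2)/2.  For |op1| under
   that bound we use fyl2xp1, computing log1p (op1) as ln (2) * log2 (1 + op1)
   without forming 1 + op1 explicitly; for larger |op1| we fall back to fyl2x
   on the explicitly computed 1 + op1.  standard_80387_constant_rtx (4) loads
   the ln (2) constant (fldln2).  */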
18080 void ix86_emit_i387_log1p (rtx op0, rtx op1)
18082 rtx label1 = gen_label_rtx ();
18083 rtx label2 = gen_label_rtx ();
18085 rtx tmp = gen_reg_rtx (XFmode);
18086 rtx tmp2 = gen_reg_rtx (XFmode);
18088 emit_insn (gen_absxf2 (tmp, op1));
18089 emit_insn (gen_cmpxf (tmp,
18090 CONST_DOUBLE_FROM_REAL_VALUE (
18091 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
18092 XFmode)));
18093 emit_jump_insn (gen_bge (label1));
18095 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18096 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
18097 emit_jump (label2);
18099 emit_label (label1);
18100 emit_move_insn (tmp, CONST1_RTX (XFmode));
18101 emit_insn (gen_addxf3 (tmp, op1, tmp));
18102 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
18103 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
18105 emit_label (label2);
18108 /* Solaris named-section hook. Parameters are as for
18109 named_section_real. */
18111 static void
18112 i386_solaris_elf_named_section (const char *name, unsigned int flags,
18113 tree decl)
18115 /* With Binutils 2.15, the "@unwind" marker must be specified on
18116 every occurrence of the ".eh_frame" section, not just the first
18117 one. */
18118 if (TARGET_64BIT
18119 && strcmp (name, ".eh_frame") == 0)
18121 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
18122 flags & SECTION_WRITE ? "aw" : "a");
18123 return;
18125 default_elf_asm_named_section (name, flags, decl);
18128 /* Return the mangling of TYPE if it is an extended fundamental type. */
18130 static const char *
18131 ix86_mangle_fundamental_type (tree type)
18133 switch (TYPE_MODE (type))
18135 case TFmode:
18136 /* __float128 is "g". */
18137 return "g";
18138 case XFmode:
18139 /* "long double" or __float80 is "e". */
18140 return "e";
18141 default:
18142 return NULL;
18146 /* For 32-bit code we can save the PIC register setup by using the
18147 __stack_chk_fail_local hidden function instead of calling
18148 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
18149 register, so it is better to call __stack_chk_fail directly. */
18151 static tree
18152 ix86_stack_protect_fail (void)
18154 return TARGET_64BIT
18155 ? default_external_stack_protect_fail ()
18156 : default_hidden_stack_protect_fail ();
18159 /* Select a format to encode pointers in exception handling data. CODE
18160 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
18161 true if the symbol may be affected by dynamic relocations.
18163 ??? All x86 object file formats are capable of representing this.
18164 After all, the relocation needed is the same as for the call insn.
18165 Whether or not a particular assembler allows us to enter such, I
18166 guess we'll have to see. */
18167 int
18168 asm_preferred_eh_data_format (int code, int global)
18170 if (flag_pic)
18172 int type = DW_EH_PE_sdata8;
18173 if (!TARGET_64BIT
18174 || ix86_cmodel == CM_SMALL_PIC
18175 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
18176 type = DW_EH_PE_sdata4;
18177 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
18179 if (ix86_cmodel == CM_SMALL
18180 || (ix86_cmodel == CM_MEDIUM && code))
18181 return DW_EH_PE_udata4;
18182 return DW_EH_PE_absptr;
18185 #include "gt-i386.h"