1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "tree-gimple.h"
52 #ifndef CHECK_STACK_LIMIT
53 #define CHECK_STACK_LIMIT (-1)
54 #endif
56 /* Return index of given mode in mult and division cost tables. */
57 #define MODE_INDEX(mode) \
58 ((mode) == QImode ? 0 \
59 : (mode) == HImode ? 1 \
60 : (mode) == SImode ? 2 \
61 : (mode) == DImode ? 3 \
62 : 4)
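/* Illustration only (cost-table field names assumed from struct
   processor_costs in i386.h): the index computed above selects the
   per-mode slot in the multiply and divide cost arrays, e.g.
     ix86_cost->mult_init[MODE_INDEX (mode)]
     ix86_cost->divide[MODE_INDEX (mode)]
   QImode..DImode map to slots 0-3; any other mode shares slot 4.  */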
64 /* Processor costs (relative to an add) */
65 static const
66 struct processor_costs size_cost = { /* costs for tuning for size */
67 2, /* cost of an add instruction */
68 3, /* cost of a lea instruction */
69 2, /* variable shift costs */
70 3, /* constant shift costs */
71 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
72 0, /* cost of multiply per each bit set */
73 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
74 3, /* cost of movsx */
75 3, /* cost of movzx */
76 0, /* "large" insn */
77 2, /* MOVE_RATIO */
78 2, /* cost for loading QImode using movzbl */
79 {2, 2, 2}, /* cost of loading integer registers
80 in QImode, HImode and SImode.
81 Relative to reg-reg move (2). */
82 {2, 2, 2}, /* cost of storing integer registers */
83 2, /* cost of reg,reg fld/fst */
84 {2, 2, 2}, /* cost of loading fp registers
85 in SFmode, DFmode and XFmode */
86 {2, 2, 2}, /* cost of loading integer registers */
87 3, /* cost of moving MMX register */
88 {3, 3}, /* cost of loading MMX registers
89 in SImode and DImode */
90 {3, 3}, /* cost of storing MMX registers
91 in SImode and DImode */
92 3, /* cost of moving SSE register */
93 {3, 3, 3}, /* cost of loading SSE registers
94 in SImode, DImode and TImode */
95 {3, 3, 3}, /* cost of storing SSE registers
96 in SImode, DImode and TImode */
97 3, /* MMX or SSE register to integer */
98 0, /* size of prefetch block */
99 0, /* number of parallel prefetches */
100 1, /* Branch cost */
101 2, /* cost of FADD and FSUB insns. */
102 2, /* cost of FMUL instruction. */
103 2, /* cost of FDIV instruction. */
104 2, /* cost of FABS instruction. */
105 2, /* cost of FCHS instruction. */
106 2, /* cost of FSQRT instruction. */
109 /* Processor costs (relative to an add) */
110 static const
111 struct processor_costs i386_cost = { /* 386 specific costs */
112 1, /* cost of an add instruction */
113 1, /* cost of a lea instruction */
114 3, /* variable shift costs */
115 2, /* constant shift costs */
116 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
117 1, /* cost of multiply per each bit set */
118 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
119 3, /* cost of movsx */
120 2, /* cost of movzx */
121 15, /* "large" insn */
122 3, /* MOVE_RATIO */
123 4, /* cost for loading QImode using movzbl */
124 {2, 4, 2}, /* cost of loading integer registers
125 in QImode, HImode and SImode.
126 Relative to reg-reg move (2). */
127 {2, 4, 2}, /* cost of storing integer registers */
128 2, /* cost of reg,reg fld/fst */
129 {8, 8, 8}, /* cost of loading fp registers
130 in SFmode, DFmode and XFmode */
131 {8, 8, 8}, /* cost of loading integer registers */
132 2, /* cost of moving MMX register */
133 {4, 8}, /* cost of loading MMX registers
134 in SImode and DImode */
135 {4, 8}, /* cost of storing MMX registers
136 in SImode and DImode */
137 2, /* cost of moving SSE register */
138 {4, 8, 16}, /* cost of loading SSE registers
139 in SImode, DImode and TImode */
140 {4, 8, 16}, /* cost of storing SSE registers
141 in SImode, DImode and TImode */
142 3, /* MMX or SSE register to integer */
143 0, /* size of prefetch block */
144 0, /* number of parallel prefetches */
145 1, /* Branch cost */
146 23, /* cost of FADD and FSUB insns. */
147 27, /* cost of FMUL instruction. */
148 88, /* cost of FDIV instruction. */
149 22, /* cost of FABS instruction. */
150 24, /* cost of FCHS instruction. */
151 122, /* cost of FSQRT instruction. */
154 static const
155 struct processor_costs i486_cost = { /* 486 specific costs */
156 1, /* cost of an add instruction */
157 1, /* cost of a lea instruction */
158 3, /* variable shift costs */
159 2, /* constant shift costs */
160 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
161 1, /* cost of multiply per each bit set */
162 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
163 3, /* cost of movsx */
164 2, /* cost of movzx */
165 15, /* "large" insn */
166 3, /* MOVE_RATIO */
167 4, /* cost for loading QImode using movzbl */
168 {2, 4, 2}, /* cost of loading integer registers
169 in QImode, HImode and SImode.
170 Relative to reg-reg move (2). */
171 {2, 4, 2}, /* cost of storing integer registers */
172 2, /* cost of reg,reg fld/fst */
173 {8, 8, 8}, /* cost of loading fp registers
174 in SFmode, DFmode and XFmode */
175 {8, 8, 8}, /* cost of loading integer registers */
176 2, /* cost of moving MMX register */
177 {4, 8}, /* cost of loading MMX registers
178 in SImode and DImode */
179 {4, 8}, /* cost of storing MMX registers
180 in SImode and DImode */
181 2, /* cost of moving SSE register */
182 {4, 8, 16}, /* cost of loading SSE registers
183 in SImode, DImode and TImode */
184 {4, 8, 16}, /* cost of storing SSE registers
185 in SImode, DImode and TImode */
186 3, /* MMX or SSE register to integer */
187 0, /* size of prefetch block */
188 0, /* number of parallel prefetches */
189 1, /* Branch cost */
190 8, /* cost of FADD and FSUB insns. */
191 16, /* cost of FMUL instruction. */
192 73, /* cost of FDIV instruction. */
193 3, /* cost of FABS instruction. */
194 3, /* cost of FCHS instruction. */
195 83, /* cost of FSQRT instruction. */
198 static const
199 struct processor_costs pentium_cost = {
200 1, /* cost of an add instruction */
201 1, /* cost of a lea instruction */
202 4, /* variable shift costs */
203 1, /* constant shift costs */
204 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
205 0, /* cost of multiply per each bit set */
206 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
207 3, /* cost of movsx */
208 2, /* cost of movzx */
209 8, /* "large" insn */
210 6, /* MOVE_RATIO */
211 6, /* cost for loading QImode using movzbl */
212 {2, 4, 2}, /* cost of loading integer registers
213 in QImode, HImode and SImode.
214 Relative to reg-reg move (2). */
215 {2, 4, 2}, /* cost of storing integer registers */
216 2, /* cost of reg,reg fld/fst */
217 {2, 2, 6}, /* cost of loading fp registers
218 in SFmode, DFmode and XFmode */
219 {4, 4, 6}, /* cost of loading integer registers */
220 8, /* cost of moving MMX register */
221 {8, 8}, /* cost of loading MMX registers
222 in SImode and DImode */
223 {8, 8}, /* cost of storing MMX registers
224 in SImode and DImode */
225 2, /* cost of moving SSE register */
226 {4, 8, 16}, /* cost of loading SSE registers
227 in SImode, DImode and TImode */
228 {4, 8, 16}, /* cost of storing SSE registers
229 in SImode, DImode and TImode */
230 3, /* MMX or SSE register to integer */
231 0, /* size of prefetch block */
232 0, /* number of parallel prefetches */
233 2, /* Branch cost */
234 3, /* cost of FADD and FSUB insns. */
235 3, /* cost of FMUL instruction. */
236 39, /* cost of FDIV instruction. */
237 1, /* cost of FABS instruction. */
238 1, /* cost of FCHS instruction. */
239 70, /* cost of FSQRT instruction. */
242 static const
243 struct processor_costs pentiumpro_cost = {
244 1, /* cost of an add instruction */
245 1, /* cost of a lea instruction */
246 1, /* variable shift costs */
247 1, /* constant shift costs */
248 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
249 0, /* cost of multiply per each bit set */
250 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
251 1, /* cost of movsx */
252 1, /* cost of movzx */
253 8, /* "large" insn */
254 6, /* MOVE_RATIO */
255 2, /* cost for loading QImode using movzbl */
256 {4, 4, 4}, /* cost of loading integer registers
257 in QImode, HImode and SImode.
258 Relative to reg-reg move (2). */
259 {2, 2, 2}, /* cost of storing integer registers */
260 2, /* cost of reg,reg fld/fst */
261 {2, 2, 6}, /* cost of loading fp registers
262 in SFmode, DFmode and XFmode */
263 {4, 4, 6}, /* cost of loading integer registers */
264 2, /* cost of moving MMX register */
265 {2, 2}, /* cost of loading MMX registers
266 in SImode and DImode */
267 {2, 2}, /* cost of storing MMX registers
268 in SImode and DImode */
269 2, /* cost of moving SSE register */
270 {2, 2, 8}, /* cost of loading SSE registers
271 in SImode, DImode and TImode */
272 {2, 2, 8}, /* cost of storing SSE registers
273 in SImode, DImode and TImode */
274 3, /* MMX or SSE register to integer */
275 32, /* size of prefetch block */
276 6, /* number of parallel prefetches */
277 2, /* Branch cost */
278 3, /* cost of FADD and FSUB insns. */
279 5, /* cost of FMUL instruction. */
280 56, /* cost of FDIV instruction. */
281 2, /* cost of FABS instruction. */
282 2, /* cost of FCHS instruction. */
283 56, /* cost of FSQRT instruction. */
286 static const
287 struct processor_costs k6_cost = {
288 1, /* cost of an add instruction */
289 2, /* cost of a lea instruction */
290 1, /* variable shift costs */
291 1, /* constant shift costs */
292 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
293 0, /* cost of multiply per each bit set */
294 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
295 2, /* cost of movsx */
296 2, /* cost of movzx */
297 8, /* "large" insn */
298 4, /* MOVE_RATIO */
299 3, /* cost for loading QImode using movzbl */
300 {4, 5, 4}, /* cost of loading integer registers
301 in QImode, HImode and SImode.
302 Relative to reg-reg move (2). */
303 {2, 3, 2}, /* cost of storing integer registers */
304 4, /* cost of reg,reg fld/fst */
305 {6, 6, 6}, /* cost of loading fp registers
306 in SFmode, DFmode and XFmode */
307 {4, 4, 4}, /* cost of loading integer registers */
308 2, /* cost of moving MMX register */
309 {2, 2}, /* cost of loading MMX registers
310 in SImode and DImode */
311 {2, 2}, /* cost of storing MMX registers
312 in SImode and DImode */
313 2, /* cost of moving SSE register */
314 {2, 2, 8}, /* cost of loading SSE registers
315 in SImode, DImode and TImode */
316 {2, 2, 8}, /* cost of storing SSE registers
317 in SImode, DImode and TImode */
318 6, /* MMX or SSE register to integer */
319 32, /* size of prefetch block */
320 1, /* number of parallel prefetches */
321 1, /* Branch cost */
322 2, /* cost of FADD and FSUB insns. */
323 2, /* cost of FMUL instruction. */
324 56, /* cost of FDIV instruction. */
325 2, /* cost of FABS instruction. */
326 2, /* cost of FCHS instruction. */
327 56, /* cost of FSQRT instruction. */
330 static const
331 struct processor_costs athlon_cost = {
332 1, /* cost of an add instruction */
333 2, /* cost of a lea instruction */
334 1, /* variable shift costs */
335 1, /* constant shift costs */
336 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
337 0, /* cost of multiply per each bit set */
338 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
339 1, /* cost of movsx */
340 1, /* cost of movzx */
341 8, /* "large" insn */
342 9, /* MOVE_RATIO */
343 4, /* cost for loading QImode using movzbl */
344 {3, 4, 3}, /* cost of loading integer registers
345 in QImode, HImode and SImode.
346 Relative to reg-reg move (2). */
347 {3, 4, 3}, /* cost of storing integer registers */
348 4, /* cost of reg,reg fld/fst */
349 {4, 4, 12}, /* cost of loading fp registers
350 in SFmode, DFmode and XFmode */
351 {6, 6, 8}, /* cost of loading integer registers */
352 2, /* cost of moving MMX register */
353 {4, 4}, /* cost of loading MMX registers
354 in SImode and DImode */
355 {4, 4}, /* cost of storing MMX registers
356 in SImode and DImode */
357 2, /* cost of moving SSE register */
358 {4, 4, 6}, /* cost of loading SSE registers
359 in SImode, DImode and TImode */
360 {4, 4, 5}, /* cost of storing SSE registers
361 in SImode, DImode and TImode */
362 5, /* MMX or SSE register to integer */
363 64, /* size of prefetch block */
364 6, /* number of parallel prefetches */
365 2, /* Branch cost */
366 4, /* cost of FADD and FSUB insns. */
367 4, /* cost of FMUL instruction. */
368 24, /* cost of FDIV instruction. */
369 2, /* cost of FABS instruction. */
370 2, /* cost of FCHS instruction. */
371 35, /* cost of FSQRT instruction. */
374 static const
375 struct processor_costs k8_cost = {
376 1, /* cost of an add instruction */
377 2, /* cost of a lea instruction */
378 1, /* variable shift costs */
379 1, /* constant shift costs */
380 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
381 0, /* cost of multiply per each bit set */
382 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
383 1, /* cost of movsx */
384 1, /* cost of movzx */
385 8, /* "large" insn */
386 9, /* MOVE_RATIO */
387 4, /* cost for loading QImode using movzbl */
388 {3, 4, 3}, /* cost of loading integer registers
389 in QImode, HImode and SImode.
390 Relative to reg-reg move (2). */
391 {3, 4, 3}, /* cost of storing integer registers */
392 4, /* cost of reg,reg fld/fst */
393 {4, 4, 12}, /* cost of loading fp registers
394 in SFmode, DFmode and XFmode */
395 {6, 6, 8}, /* cost of loading integer registers */
396 2, /* cost of moving MMX register */
397 {3, 3}, /* cost of loading MMX registers
398 in SImode and DImode */
399 {4, 4}, /* cost of storing MMX registers
400 in SImode and DImode */
401 2, /* cost of moving SSE register */
402 {4, 3, 6}, /* cost of loading SSE registers
403 in SImode, DImode and TImode */
404 {4, 4, 5}, /* cost of storing SSE registers
405 in SImode, DImode and TImode */
406 5, /* MMX or SSE register to integer */
407 64, /* size of prefetch block */
408 6, /* number of parallel prefetches */
409 2, /* Branch cost */
410 4, /* cost of FADD and FSUB insns. */
411 4, /* cost of FMUL instruction. */
412 19, /* cost of FDIV instruction. */
413 2, /* cost of FABS instruction. */
414 2, /* cost of FCHS instruction. */
415 35, /* cost of FSQRT instruction. */
418 static const
419 struct processor_costs pentium4_cost = {
420 1, /* cost of an add instruction */
421 3, /* cost of a lea instruction */
422 4, /* variable shift costs */
423 4, /* constant shift costs */
424 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
425 0, /* cost of multiply per each bit set */
426 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
427 1, /* cost of movsx */
428 1, /* cost of movzx */
429 16, /* "large" insn */
430 6, /* MOVE_RATIO */
431 2, /* cost for loading QImode using movzbl */
432 {4, 5, 4}, /* cost of loading integer registers
433 in QImode, HImode and SImode.
434 Relative to reg-reg move (2). */
435 {2, 3, 2}, /* cost of storing integer registers */
436 2, /* cost of reg,reg fld/fst */
437 {2, 2, 6}, /* cost of loading fp registers
438 in SFmode, DFmode and XFmode */
439 {4, 4, 6}, /* cost of loading integer registers */
440 2, /* cost of moving MMX register */
441 {2, 2}, /* cost of loading MMX registers
442 in SImode and DImode */
443 {2, 2}, /* cost of storing MMX registers
444 in SImode and DImode */
445 12, /* cost of moving SSE register */
446 {12, 12, 12}, /* cost of loading SSE registers
447 in SImode, DImode and TImode */
448 {2, 2, 8}, /* cost of storing SSE registers
449 in SImode, DImode and TImode */
450 10, /* MMX or SSE register to integer */
451 64, /* size of prefetch block */
452 6, /* number of parallel prefetches */
453 2, /* Branch cost */
454 5, /* cost of FADD and FSUB insns. */
455 7, /* cost of FMUL instruction. */
456 43, /* cost of FDIV instruction. */
457 2, /* cost of FABS instruction. */
458 2, /* cost of FCHS instruction. */
459 43, /* cost of FSQRT instruction. */
462 static const
463 struct processor_costs nocona_cost = {
464 1, /* cost of an add instruction */
465 1, /* cost of a lea instruction */
466 1, /* variable shift costs */
467 1, /* constant shift costs */
468 {10, 10, 10, 10, 10}, /* cost of starting a multiply */
469 0, /* cost of multiply per each bit set */
470 {66, 66, 66, 66, 66}, /* cost of a divide/mod */
471 1, /* cost of movsx */
472 1, /* cost of movzx */
473 16, /* "large" insn */
474 9, /* MOVE_RATIO */
475 4, /* cost for loading QImode using movzbl */
476 {4, 4, 4}, /* cost of loading integer registers
477 in QImode, HImode and SImode.
478 Relative to reg-reg move (2). */
479 {4, 4, 4}, /* cost of storing integer registers */
480 3, /* cost of reg,reg fld/fst */
481 {12, 12, 12}, /* cost of loading fp registers
482 in SFmode, DFmode and XFmode */
483 {4, 4, 4}, /* cost of loading integer registers */
484 6, /* cost of moving MMX register */
485 {12, 12}, /* cost of loading MMX registers
486 in SImode and DImode */
487 {12, 12}, /* cost of storing MMX registers
488 in SImode and DImode */
489 6, /* cost of moving SSE register */
490 {12, 12, 12}, /* cost of loading SSE registers
491 in SImode, DImode and TImode */
492 {12, 12, 12}, /* cost of storing SSE registers
493 in SImode, DImode and TImode */
494 8, /* MMX or SSE register to integer */
495 128, /* size of prefetch block */
496 8, /* number of parallel prefetches */
497 1, /* Branch cost */
498 6, /* cost of FADD and FSUB insns. */
499 8, /* cost of FMUL instruction. */
500 40, /* cost of FDIV instruction. */
501 3, /* cost of FABS instruction. */
502 3, /* cost of FCHS instruction. */
503 44, /* cost of FSQRT instruction. */
506 const struct processor_costs *ix86_cost = &pentium_cost;
508 /* Processor feature/optimization bitmasks. */
509 #define m_386 (1<<PROCESSOR_I386)
510 #define m_486 (1<<PROCESSOR_I486)
511 #define m_PENT (1<<PROCESSOR_PENTIUM)
512 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
513 #define m_K6 (1<<PROCESSOR_K6)
514 #define m_ATHLON (1<<PROCESSOR_ATHLON)
515 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
516 #define m_K8 (1<<PROCESSOR_K8)
517 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
518 #define m_NOCONA (1<<PROCESSOR_NOCONA)
520 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
521 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
522 const int x86_zero_extend_with_and = m_486 | m_PENT;
523 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA /* m_386 | m_K6 */;
524 const int x86_double_with_add = ~m_386;
525 const int x86_use_bit_test = m_386;
526 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
527 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
528 const int x86_3dnow_a = m_ATHLON_K8;
529 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
530 /* Branch hints were put in P4 based on simulation results, but
531 after P4 was made, no performance benefit was observed with
532 branch hints. They also increase code size. As a result,
533 icc never generates branch hints. */
534 const int x86_branch_hints = 0;
535 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4 | m_NOCONA;
536 const int x86_partial_reg_stall = m_PPRO;
537 const int x86_use_loop = m_K6;
538 const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
539 const int x86_use_mov0 = m_K6;
540 const int x86_use_cltd = ~(m_PENT | m_K6);
541 const int x86_read_modify_write = ~m_PENT;
542 const int x86_read_modify = ~(m_PENT | m_PPRO);
543 const int x86_split_long_moves = m_PPRO;
544 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
545 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
546 const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
547 const int x86_qimode_math = ~(0);
548 const int x86_promote_qi_regs = 0;
549 const int x86_himode_math = ~(m_PPRO);
550 const int x86_promote_hi_regs = m_PPRO;
551 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA;
552 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA;
553 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4 | m_NOCONA;
554 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4 | m_NOCONA;
555 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO);
556 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
557 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA;
558 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO;
559 const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO;
560 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO;
561 const int x86_decompose_lea = m_PENT4 | m_NOCONA;
562 const int x86_shift1 = ~m_486;
563 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
564 const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO;
565 /* Set for machines where the type and dependencies are resolved on SSE register
566 parts instead of whole registers, so we may maintain just the lower part of
567 scalar values in the proper format, leaving the upper part undefined. */
568 const int x86_sse_partial_regs = m_ATHLON_K8;
569 /* Athlon optimizes partial-register FPS special case, thus avoiding the
570 need for extra instructions beforehand */
571 const int x86_sse_partial_regs_for_cvtsd2ss = 0;
572 const int x86_sse_typeless_stores = m_ATHLON_K8;
573 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
574 const int x86_use_ffreep = m_ATHLON_K8;
575 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
576 const int x86_inter_unit_moves = ~(m_ATHLON_K8);
577 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_NOCONA | m_PPRO;
578 /* Some CPU cores are not able to predict more than 4 branch instructions in
579 the 16 byte window. */
580 const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
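/* Rough sketch of how the bitmasks above are consumed (macro names
   assumed from i386.h): each tuning macro tests the bit of the CPU
   selected by -mtune, along the lines of
     #define TUNEMASK (1 << ix86_tune)
     #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
   so adding a CPU to one of these masks enables the corresponding
   heuristic for that -mtune setting.  */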
582 /* In case the average insn count for a single function invocation is
583 lower than this constant, emit fast (but longer) prologue and
584 epilogue code. */
585 #define FAST_PROLOGUE_INSN_COUNT 20
587 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
588 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
589 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
590 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
592 /* Array of the smallest class containing reg number REGNO, indexed by
593 REGNO. Used by REGNO_REG_CLASS in i386.h. */
595 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
597 /* ax, dx, cx, bx */
598 AREG, DREG, CREG, BREG,
599 /* si, di, bp, sp */
600 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
601 /* FP registers */
602 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
603 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
604 /* arg pointer */
605 NON_Q_REGS,
606 /* flags, fpsr, dirflag, frame */
607 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
608 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
609 SSE_REGS, SSE_REGS,
610 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
611 MMX_REGS, MMX_REGS,
612 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
613 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
614 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
615 SSE_REGS, SSE_REGS,
618 /* The "default" register map used in 32bit mode. */
620 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
622 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
623 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
624 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
625 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
626 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
627 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
628 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
631 static int const x86_64_int_parameter_registers[6] =
633 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
634 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
637 static int const x86_64_int_return_registers[4] =
639 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
642 /* The "default" register map used in 64bit mode. */
643 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
645 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
646 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
647 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
648 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
649 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
650 8,9,10,11,12,13,14,15, /* extended integer registers */
651 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
654 /* Define the register numbers to be used in Dwarf debugging information.
655 The SVR4 reference port C compiler uses the following register numbers
656 in its Dwarf output code:
657 0 for %eax (gcc regno = 0)
658 1 for %ecx (gcc regno = 2)
659 2 for %edx (gcc regno = 1)
660 3 for %ebx (gcc regno = 3)
661 4 for %esp (gcc regno = 7)
662 5 for %ebp (gcc regno = 6)
663 6 for %esi (gcc regno = 4)
664 7 for %edi (gcc regno = 5)
665 The following three DWARF register numbers are never generated by
666 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
667 believes these numbers have these meanings.
668 8 for %eip (no gcc equivalent)
669 9 for %eflags (gcc regno = 17)
670 10 for %trapno (no gcc equivalent)
671 It is not at all clear how we should number the FP stack registers
672 for the x86 architecture. If the version of SDB on x86/svr4 were
673 a bit less brain dead with respect to floating-point then we would
674 have a precedent to follow with respect to DWARF register numbers
675 for x86 FP registers, but the SDB on x86/svr4 is so completely
676 broken with respect to FP registers that it is hardly worth thinking
677 of it as something to strive for compatibility with.
678 The version of x86/svr4 SDB I have at the moment does (partially)
679 seem to believe that DWARF register number 11 is associated with
680 the x86 register %st(0), but that's about all. Higher DWARF
681 register numbers don't seem to be associated with anything in
682 particular, and even for DWARF regno 11, SDB only seems to under-
683 stand that it should say that a variable lives in %st(0) (when
684 asked via an `=' command) if we said it was in DWARF regno 11,
685 but SDB still prints garbage when asked for the value of the
686 variable in question (via a `/' command).
687 (Also note that the labels SDB prints for various FP stack regs
688 when doing an `x' command are all wrong.)
689 Note that these problems generally don't affect the native SVR4
690 C compiler because it doesn't allow the use of -O with -g and
691 because when it is *not* optimizing, it allocates a memory
692 location for each floating-point variable, and the memory
693 location is what gets described in the DWARF AT_location
694 attribute for the variable in question.
695 Regardless of the severe mental illness of the x86/svr4 SDB, we
696 do something sensible here and we use the following DWARF
697 register numbers. Note that these are all stack-top-relative
698 numbers.
699 11 for %st(0) (gcc regno = 8)
700 12 for %st(1) (gcc regno = 9)
701 13 for %st(2) (gcc regno = 10)
702 14 for %st(3) (gcc regno = 11)
703 15 for %st(4) (gcc regno = 12)
704 16 for %st(5) (gcc regno = 13)
705 17 for %st(6) (gcc regno = 14)
706 18 for %st(7) (gcc regno = 15)
708 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
710 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
711 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
712 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
713 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
714 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
715 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
716 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
719 /* Test and compare insns in i386.md store the information needed to
720 generate branch and scc insns here. */
722 rtx ix86_compare_op0 = NULL_RTX;
723 rtx ix86_compare_op1 = NULL_RTX;
725 #define MAX_386_STACK_LOCALS 3
726 /* Size of the register save area. */
727 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
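/* Worked example, assuming the usual 64-bit ABI values REGPARM_MAX == 6,
   UNITS_PER_WORD == 8 and SSE_REGPARM_MAX == 8: the register save area is
   6*8 + 8*16 = 176 bytes, enough to spill the six integer and eight SSE
   argument registers for va_arg.  */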
729 /* Define the structure for the machine field in struct function. */
731 struct stack_local_entry GTY(())
733 unsigned short mode;
734 unsigned short n;
735 rtx rtl;
736 struct stack_local_entry *next;
739 /* Structure describing stack frame layout.
740 Stack grows downward:
742 [arguments]
743 <- ARG_POINTER
744 saved pc
746 saved frame pointer if frame_pointer_needed
747 <- HARD_FRAME_POINTER
748 [saved regs]
750 [padding1] \
752 [va_arg registers] (
753 > to_allocate <- FRAME_POINTER
754 [frame] (
756 [padding2] /
758 struct ix86_frame
760 int nregs;
761 int padding1;
762 int va_arg_size;
763 HOST_WIDE_INT frame;
764 int padding2;
765 int outgoing_arguments_size;
766 int red_zone_size;
768 HOST_WIDE_INT to_allocate;
769 /* The offsets relative to ARG_POINTER. */
770 HOST_WIDE_INT frame_pointer_offset;
771 HOST_WIDE_INT hard_frame_pointer_offset;
772 HOST_WIDE_INT stack_pointer_offset;
774 /* When save_regs_using_mov is set, emit prologue using
775 move instead of push instructions. */
776 bool save_regs_using_mov;
779 /* Used to enable/disable debugging features. */
780 const char *ix86_debug_arg_string, *ix86_debug_addr_string;
781 /* Code model option as passed by user. */
782 const char *ix86_cmodel_string;
783 /* Parsed value. */
784 enum cmodel ix86_cmodel;
785 /* Asm dialect. */
786 const char *ix86_asm_string;
787 enum asm_dialect ix86_asm_dialect = ASM_ATT;
788 /* TLS dialect. */
789 const char *ix86_tls_dialect_string;
790 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
792 /* Which unit we are generating floating point math for. */
793 enum fpmath_unit ix86_fpmath;
795 /* Which CPU we are scheduling for. */
796 enum processor_type ix86_tune;
797 /* Which instruction set architecture to use. */
798 enum processor_type ix86_arch;
800 /* Strings to hold which cpu and instruction set architecture to use. */
801 const char *ix86_tune_string; /* for -mtune=<xxx> */
802 const char *ix86_arch_string; /* for -march=<xxx> */
803 const char *ix86_fpmath_string; /* for -mfpmath=<xxx> */
805 /* # of registers to use to pass arguments. */
806 const char *ix86_regparm_string;
808 /* True if the SSE prefetch instruction is not a NOP. */
809 int x86_prefetch_sse;
811 /* ix86_regparm_string as a number */
812 int ix86_regparm;
814 /* Alignment to use for loops and jumps: */
816 /* Power of two alignment for loops. */
817 const char *ix86_align_loops_string;
819 /* Power of two alignment for non-loop jumps. */
820 const char *ix86_align_jumps_string;
822 /* Power of two alignment for stack boundary in bytes. */
823 const char *ix86_preferred_stack_boundary_string;
825 /* Preferred alignment for stack boundary in bits. */
826 unsigned int ix86_preferred_stack_boundary;
828 /* Values 1-5: see jump.c */
829 int ix86_branch_cost;
830 const char *ix86_branch_cost_string;
832 /* Power of two alignment for functions. */
833 const char *ix86_align_funcs_string;
835 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
836 char internal_label_prefix[16];
837 int internal_label_prefix_len;
839 static void output_pic_addr_const (FILE *, rtx, int);
840 static void put_condition_code (enum rtx_code, enum machine_mode,
841 int, int, FILE *);
842 static const char *get_some_local_dynamic_name (void);
843 static int get_some_local_dynamic_name_1 (rtx *, void *);
844 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
845 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
846 rtx *);
847 static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
848 static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
849 enum machine_mode);
850 static rtx get_thread_pointer (int);
851 static rtx legitimize_tls_address (rtx, enum tls_model, int);
852 static void get_pc_thunk_name (char [32], unsigned int);
853 static rtx gen_push (rtx);
854 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
855 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
856 static struct machine_function * ix86_init_machine_status (void);
857 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
858 static int ix86_nsaved_regs (void);
859 static void ix86_emit_save_regs (void);
860 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
861 static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
862 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
863 static HOST_WIDE_INT ix86_GOT_alias_set (void);
864 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
865 static rtx ix86_expand_aligntest (rtx, int);
866 static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
867 static int ix86_issue_rate (void);
868 static int ix86_adjust_cost (rtx, rtx, rtx, int);
869 static int ia32_multipass_dfa_lookahead (void);
870 static void ix86_init_mmx_sse_builtins (void);
871 static rtx x86_this_parameter (tree);
872 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
873 HOST_WIDE_INT, tree);
874 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
875 static void x86_file_start (void);
876 static void ix86_reorg (void);
877 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
878 static tree ix86_build_builtin_va_list (void);
879 static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
880 tree, int *, int);
881 static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
882 static bool ix86_vector_mode_supported_p (enum machine_mode);
884 static int ix86_address_cost (rtx);
885 static bool ix86_cannot_force_const_mem (rtx);
886 static rtx ix86_delegitimize_address (rtx);
888 struct builtin_description;
889 static rtx ix86_expand_sse_comi (const struct builtin_description *,
890 tree, rtx);
891 static rtx ix86_expand_sse_compare (const struct builtin_description *,
892 tree, rtx);
893 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
894 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
895 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
896 static rtx ix86_expand_store_builtin (enum insn_code, tree);
897 static rtx safe_vector_operand (rtx, enum machine_mode);
898 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
899 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
900 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
901 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
902 static int ix86_fp_comparison_cost (enum rtx_code code);
903 static unsigned int ix86_select_alt_pic_regnum (void);
904 static int ix86_save_reg (unsigned int, int);
905 static void ix86_compute_frame_layout (struct ix86_frame *);
906 static int ix86_comp_type_attributes (tree, tree);
907 static int ix86_function_regparm (tree, tree);
908 const struct attribute_spec ix86_attribute_table[];
909 static bool ix86_function_ok_for_sibcall (tree, tree);
910 static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
911 static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
912 static int ix86_value_regno (enum machine_mode);
913 static bool contains_128bit_aligned_vector_p (tree);
914 static rtx ix86_struct_value_rtx (tree, int);
915 static bool ix86_ms_bitfield_layout_p (tree);
916 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
917 static int extended_reg_mentioned_1 (rtx *, void *);
918 static bool ix86_rtx_costs (rtx, int, int, int *);
919 static int min_insn_size (rtx);
920 static tree ix86_md_asm_clobbers (tree clobbers);
921 static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
922 static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
923 tree, bool);
925 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
926 static void ix86_svr3_asm_out_constructor (rtx, int);
927 #endif
929 /* Register class used for passing a given 64-bit part of the argument.
930 These represent classes as documented by the psABI, with the exception
931 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
932 uses SF or DFmode moves instead of DImode to avoid reformatting penalties.
934 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
935 whenever possible (the upper half does contain padding). */
937 enum x86_64_reg_class
939 X86_64_NO_CLASS,
940 X86_64_INTEGER_CLASS,
941 X86_64_INTEGERSI_CLASS,
942 X86_64_SSE_CLASS,
943 X86_64_SSESF_CLASS,
944 X86_64_SSEDF_CLASS,
945 X86_64_SSEUP_CLASS,
946 X86_64_X87_CLASS,
947 X86_64_X87UP_CLASS,
948 X86_64_MEMORY_CLASS
950 static const char * const x86_64_reg_class_name[] =
951 {"no", "integer", "integerSI", "sse", "sseSF", "sseDF", "sseup", "x87", "x87up", "no"};
953 #define MAX_CLASSES 4
954 static int classify_argument (enum machine_mode, tree,
955 enum x86_64_reg_class [MAX_CLASSES], int);
956 static int examine_argument (enum machine_mode, tree, int, int *, int *);
957 static rtx construct_container (enum machine_mode, tree, int, int, int,
958 const int *, int);
959 static enum x86_64_reg_class merge_classes (enum x86_64_reg_class,
960 enum x86_64_reg_class);
962 /* Table of constants used by fldpi, fldln2, etc.... */
963 static REAL_VALUE_TYPE ext_80387_constants_table [5];
964 static bool ext_80387_constants_init = 0;
965 static void init_ext_80387_constants (void);
967 /* Initialize the GCC target structure. */
968 #undef TARGET_ATTRIBUTE_TABLE
969 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
970 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
971 # undef TARGET_MERGE_DECL_ATTRIBUTES
972 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
973 #endif
975 #undef TARGET_COMP_TYPE_ATTRIBUTES
976 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
978 #undef TARGET_INIT_BUILTINS
979 #define TARGET_INIT_BUILTINS ix86_init_builtins
981 #undef TARGET_EXPAND_BUILTIN
982 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
984 #undef TARGET_ASM_FUNCTION_EPILOGUE
985 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
987 #undef TARGET_ASM_OPEN_PAREN
988 #define TARGET_ASM_OPEN_PAREN ""
989 #undef TARGET_ASM_CLOSE_PAREN
990 #define TARGET_ASM_CLOSE_PAREN ""
992 #undef TARGET_ASM_ALIGNED_HI_OP
993 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
994 #undef TARGET_ASM_ALIGNED_SI_OP
995 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
996 #ifdef ASM_QUAD
997 #undef TARGET_ASM_ALIGNED_DI_OP
998 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
999 #endif
1001 #undef TARGET_ASM_UNALIGNED_HI_OP
1002 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1003 #undef TARGET_ASM_UNALIGNED_SI_OP
1004 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1005 #undef TARGET_ASM_UNALIGNED_DI_OP
1006 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1008 #undef TARGET_SCHED_ADJUST_COST
1009 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1010 #undef TARGET_SCHED_ISSUE_RATE
1011 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1012 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1013 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1014 ia32_multipass_dfa_lookahead
1016 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1017 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1019 #ifdef HAVE_AS_TLS
1020 #undef TARGET_HAVE_TLS
1021 #define TARGET_HAVE_TLS true
1022 #endif
1023 #undef TARGET_CANNOT_FORCE_CONST_MEM
1024 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1026 #undef TARGET_DELEGITIMIZE_ADDRESS
1027 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1029 #undef TARGET_MS_BITFIELD_LAYOUT_P
1030 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1032 #undef TARGET_ASM_OUTPUT_MI_THUNK
1033 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1034 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1035 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1037 #undef TARGET_ASM_FILE_START
1038 #define TARGET_ASM_FILE_START x86_file_start
1040 #undef TARGET_RTX_COSTS
1041 #define TARGET_RTX_COSTS ix86_rtx_costs
1042 #undef TARGET_ADDRESS_COST
1043 #define TARGET_ADDRESS_COST ix86_address_cost
1045 #undef TARGET_FIXED_CONDITION_CODE_REGS
1046 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1047 #undef TARGET_CC_MODES_COMPATIBLE
1048 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1050 #undef TARGET_MACHINE_DEPENDENT_REORG
1051 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1053 #undef TARGET_BUILD_BUILTIN_VA_LIST
1054 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1056 #undef TARGET_MD_ASM_CLOBBERS
1057 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1059 #undef TARGET_PROMOTE_PROTOTYPES
1060 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1061 #undef TARGET_STRUCT_VALUE_RTX
1062 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1063 #undef TARGET_SETUP_INCOMING_VARARGS
1064 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1065 #undef TARGET_MUST_PASS_IN_STACK
1066 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1067 #undef TARGET_PASS_BY_REFERENCE
1068 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1070 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1071 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1073 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1074 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1076 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1077 #undef TARGET_INSERT_ATTRIBUTES
1078 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1079 #endif
1081 struct gcc_target targetm = TARGET_INITIALIZER;
1084 /* The svr4 ABI for the i386 says that records and unions are returned
1085 in memory. */
1086 #ifndef DEFAULT_PCC_STRUCT_RETURN
1087 #define DEFAULT_PCC_STRUCT_RETURN 1
1088 #endif
1090 /* Sometimes certain combinations of command options do not make
1091 sense on a particular target machine. You can define a macro
1092 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1093 defined, is executed once just after all the command options have
1094 been parsed.
1096 Don't use this macro to turn on various extra optimizations for
1097 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1099 void
1100 override_options (void)
1102 int i;
1103 int ix86_tune_defaulted = 0;
1105 /* Comes from final.c -- no real reason to change it. */
1106 #define MAX_CODE_ALIGN 16
1108 static struct ptt
1110 const struct processor_costs *cost; /* Processor costs */
1111 const int target_enable; /* Target flags to enable. */
1112 const int target_disable; /* Target flags to disable. */
1113 const int align_loop; /* Default alignments. */
1114 const int align_loop_max_skip;
1115 const int align_jump;
1116 const int align_jump_max_skip;
1117 const int align_func;
1119 const processor_target_table[PROCESSOR_max] =
1121 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1122 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1123 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1124 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1125 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1126 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1127 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1128 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1129 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0}
1132 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1133 static struct pta
1135 const char *const name; /* processor name or nickname. */
1136 const enum processor_type processor;
1137 const enum pta_flags
1139 PTA_SSE = 1,
1140 PTA_SSE2 = 2,
1141 PTA_SSE3 = 4,
1142 PTA_MMX = 8,
1143 PTA_PREFETCH_SSE = 16,
1144 PTA_3DNOW = 32,
1145 PTA_3DNOW_A = 64,
1146 PTA_64BIT = 128
1147 } flags;
1149 const processor_alias_table[] =
1151 {"i386", PROCESSOR_I386, 0},
1152 {"i486", PROCESSOR_I486, 0},
1153 {"i586", PROCESSOR_PENTIUM, 0},
1154 {"pentium", PROCESSOR_PENTIUM, 0},
1155 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1156 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1157 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1158 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1159 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1160 {"i686", PROCESSOR_PENTIUMPRO, 0},
1161 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1162 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1163 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1164 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1165 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1166 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1167 | PTA_MMX | PTA_PREFETCH_SSE},
1168 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1169 | PTA_MMX | PTA_PREFETCH_SSE},
1170 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1171 | PTA_MMX | PTA_PREFETCH_SSE},
1172 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1173 | PTA_MMX | PTA_PREFETCH_SSE},
1174 {"k6", PROCESSOR_K6, PTA_MMX},
1175 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1176 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1177 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1178 | PTA_3DNOW_A},
1179 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1180 | PTA_3DNOW | PTA_3DNOW_A},
1181 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1182 | PTA_3DNOW_A | PTA_SSE},
1183 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1184 | PTA_3DNOW_A | PTA_SSE},
1185 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1186 | PTA_3DNOW_A | PTA_SSE},
1187 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1188 | PTA_SSE | PTA_SSE2 },
1189 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1190 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1191 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1192 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1193 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1194 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1195 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1196 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1199 int const pta_size = ARRAY_SIZE (processor_alias_table);
1201 /* Set the default values for switches whose default depends on TARGET_64BIT
1202 in case they weren't overwritten by command line options. */
1203 if (TARGET_64BIT)
1205 if (flag_omit_frame_pointer == 2)
1206 flag_omit_frame_pointer = 1;
1207 if (flag_asynchronous_unwind_tables == 2)
1208 flag_asynchronous_unwind_tables = 1;
1209 if (flag_pcc_struct_return == 2)
1210 flag_pcc_struct_return = 0;
1212 else
1214 if (flag_omit_frame_pointer == 2)
1215 flag_omit_frame_pointer = 0;
1216 if (flag_asynchronous_unwind_tables == 2)
1217 flag_asynchronous_unwind_tables = 0;
1218 if (flag_pcc_struct_return == 2)
1219 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1222 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1223 SUBTARGET_OVERRIDE_OPTIONS;
1224 #endif
1226 if (!ix86_tune_string && ix86_arch_string)
1227 ix86_tune_string = ix86_arch_string;
1228 if (!ix86_tune_string)
1230 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1231 ix86_tune_defaulted = 1;
1233 if (!ix86_arch_string)
1234 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1236 if (ix86_cmodel_string != 0)
1238 if (!strcmp (ix86_cmodel_string, "small"))
1239 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1240 else if (flag_pic)
1241 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1242 else if (!strcmp (ix86_cmodel_string, "32"))
1243 ix86_cmodel = CM_32;
1244 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1245 ix86_cmodel = CM_KERNEL;
1246 else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
1247 ix86_cmodel = CM_MEDIUM;
1248 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1249 ix86_cmodel = CM_LARGE;
1250 else
1251 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1253 else
1255 ix86_cmodel = CM_32;
1256 if (TARGET_64BIT)
1257 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1259 if (ix86_asm_string != 0)
1261 if (!strcmp (ix86_asm_string, "intel"))
1262 ix86_asm_dialect = ASM_INTEL;
1263 else if (!strcmp (ix86_asm_string, "att"))
1264 ix86_asm_dialect = ASM_ATT;
1265 else
1266 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1268 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1269 error ("code model `%s' not supported in the %s bit mode",
1270 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1271 if (ix86_cmodel == CM_LARGE)
1272 sorry ("code model `large' not supported yet");
1273 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1274 sorry ("%i-bit mode not compiled in",
1275 (target_flags & MASK_64BIT) ? 64 : 32);
1277 for (i = 0; i < pta_size; i++)
1278 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1280 ix86_arch = processor_alias_table[i].processor;
1281 /* Default cpu tuning to the architecture. */
1282 ix86_tune = ix86_arch;
1283 if (processor_alias_table[i].flags & PTA_MMX
1284 && !(target_flags_explicit & MASK_MMX))
1285 target_flags |= MASK_MMX;
1286 if (processor_alias_table[i].flags & PTA_3DNOW
1287 && !(target_flags_explicit & MASK_3DNOW))
1288 target_flags |= MASK_3DNOW;
1289 if (processor_alias_table[i].flags & PTA_3DNOW_A
1290 && !(target_flags_explicit & MASK_3DNOW_A))
1291 target_flags |= MASK_3DNOW_A;
1292 if (processor_alias_table[i].flags & PTA_SSE
1293 && !(target_flags_explicit & MASK_SSE))
1294 target_flags |= MASK_SSE;
1295 if (processor_alias_table[i].flags & PTA_SSE2
1296 && !(target_flags_explicit & MASK_SSE2))
1297 target_flags |= MASK_SSE2;
1298 if (processor_alias_table[i].flags & PTA_SSE3
1299 && !(target_flags_explicit & MASK_SSE3))
1300 target_flags |= MASK_SSE3;
1301 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1302 x86_prefetch_sse = true;
1303 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1305 if (ix86_tune_defaulted)
1307 ix86_tune_string = "x86-64";
1308 for (i = 0; i < pta_size; i++)
1309 if (! strcmp (ix86_tune_string,
1310 processor_alias_table[i].name))
1311 break;
1312 ix86_tune = processor_alias_table[i].processor;
1314 else
1315 error ("CPU you selected does not support x86-64 "
1316 "instruction set");
1318 break;
1321 if (i == pta_size)
1322 error ("bad value (%s) for -march= switch", ix86_arch_string);
1324 for (i = 0; i < pta_size; i++)
1325 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1327 ix86_tune = processor_alias_table[i].processor;
1328 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1329 error ("CPU you selected does not support x86-64 instruction set");
1331 /* Intel CPUs have always interpreted SSE prefetch instructions as
1332 NOPs; so, we can enable SSE prefetch instructions even when
1333 -mtune (rather than -march) points us to a processor that has them.
1334 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1335 higher processors. */
1336 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1337 x86_prefetch_sse = true;
1338 break;
1340 if (i == pta_size)
1341 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1343 if (optimize_size)
1344 ix86_cost = &size_cost;
1345 else
1346 ix86_cost = processor_target_table[ix86_tune].cost;
1347 target_flags |= processor_target_table[ix86_tune].target_enable;
1348 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1350 /* Arrange to set up i386_stack_locals for all functions. */
1351 init_machine_status = ix86_init_machine_status;
1353 /* Validate -mregparm= value. */
1354 if (ix86_regparm_string)
1356 i = atoi (ix86_regparm_string);
1357 if (i < 0 || i > REGPARM_MAX)
1358 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1359 else
1360 ix86_regparm = i;
1362 else
1363 if (TARGET_64BIT)
1364 ix86_regparm = REGPARM_MAX;
1366 /* If the user has provided any of the -malign-* options,
1367 warn and use that value only if -falign-* is not set.
1368 Remove this code in GCC 3.2 or later. */
1369 if (ix86_align_loops_string)
1371 warning ("-malign-loops is obsolete, use -falign-loops");
1372 if (align_loops == 0)
1374 i = atoi (ix86_align_loops_string);
1375 if (i < 0 || i > MAX_CODE_ALIGN)
1376 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1377 else
1378 align_loops = 1 << i;
1382 if (ix86_align_jumps_string)
1384 warning ("-malign-jumps is obsolete, use -falign-jumps");
1385 if (align_jumps == 0)
1387 i = atoi (ix86_align_jumps_string);
1388 if (i < 0 || i > MAX_CODE_ALIGN)
1389 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1390 else
1391 align_jumps = 1 << i;
1395 if (ix86_align_funcs_string)
1397 warning ("-malign-functions is obsolete, use -falign-functions");
1398 if (align_functions == 0)
1400 i = atoi (ix86_align_funcs_string);
1401 if (i < 0 || i > MAX_CODE_ALIGN)
1402 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1403 else
1404 align_functions = 1 << i;
1408 /* Default align_* from the processor table. */
1409 if (align_loops == 0)
1411 align_loops = processor_target_table[ix86_tune].align_loop;
1412 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1414 if (align_jumps == 0)
1416 align_jumps = processor_target_table[ix86_tune].align_jump;
1417 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1419 if (align_functions == 0)
1421 align_functions = processor_target_table[ix86_tune].align_func;
1424 /* Validate -mpreferred-stack-boundary= value, or provide default.
1425 The default of 128 bits is for Pentium III's SSE __m128, but we
1426 don't want additional code to keep the stack aligned when
1427 optimizing for code size. */
1428 ix86_preferred_stack_boundary = (optimize_size
1429 ? TARGET_64BIT ? 128 : 32
1430 : 128);
1431 if (ix86_preferred_stack_boundary_string)
1433 i = atoi (ix86_preferred_stack_boundary_string);
1434 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1435 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1436 TARGET_64BIT ? 4 : 2);
1437 else
1438 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
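/* For example, -mpreferred-stack-boundary=4 yields (1 << 4) * 8 = 128 bits,
   i.e. a 16-byte aligned stack; the option value is the base-2 log of the
   alignment in bytes.  */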
1441 /* Validate -mbranch-cost= value, or provide default. */
1442 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1443 if (ix86_branch_cost_string)
1445 i = atoi (ix86_branch_cost_string);
1446 if (i < 0 || i > 5)
1447 error ("-mbranch-cost=%d is not between 0 and 5", i);
1448 else
1449 ix86_branch_cost = i;
1452 if (ix86_tls_dialect_string)
1454 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1455 ix86_tls_dialect = TLS_DIALECT_GNU;
1456 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1457 ix86_tls_dialect = TLS_DIALECT_SUN;
1458 else
1459 error ("bad value (%s) for -mtls-dialect= switch",
1460 ix86_tls_dialect_string);
1463 /* Keep nonleaf frame pointers. */
1464 if (TARGET_OMIT_LEAF_FRAME_POINTER)
1465 flag_omit_frame_pointer = 1;
1467 /* If we're doing fast math, we don't care about comparison order
1468 wrt NaNs. This lets us use a shorter comparison sequence. */
1469 if (flag_unsafe_math_optimizations)
1470 target_flags &= ~MASK_IEEE_FP;
1472 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1473 since the insns won't need emulation. */
1474 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1475 target_flags &= ~MASK_NO_FANCY_MATH_387;
1477 /* Turn on SSE2 builtins for -msse3. */
1478 if (TARGET_SSE3)
1479 target_flags |= MASK_SSE2;
1481 /* Turn on SSE builtins for -msse2. */
1482 if (TARGET_SSE2)
1483 target_flags |= MASK_SSE;
1485 if (TARGET_64BIT)
1487 if (TARGET_ALIGN_DOUBLE)
1488 error ("-malign-double makes no sense in the 64bit mode");
1489 if (TARGET_RTD)
1490 error ("-mrtd calling convention not supported in the 64bit mode");
1491 /* Enable by default the SSE and MMX builtins. */
1492 target_flags |= (MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE);
1493 ix86_fpmath = FPMATH_SSE;
1495 else
1497 ix86_fpmath = FPMATH_387;
1498 /* The i386 ABI does not specify a red zone.  It still makes sense to use
1499 it when the programmer takes care to keep the stack from being destroyed. */
1500 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1501 target_flags |= MASK_NO_RED_ZONE;
1504 if (ix86_fpmath_string != 0)
1506 if (! strcmp (ix86_fpmath_string, "387"))
1507 ix86_fpmath = FPMATH_387;
1508 else if (! strcmp (ix86_fpmath_string, "sse"))
1510 if (!TARGET_SSE)
1512 warning ("SSE instruction set disabled, using 387 arithmetics");
1513 ix86_fpmath = FPMATH_387;
1515 else
1516 ix86_fpmath = FPMATH_SSE;
1518 else if (! strcmp (ix86_fpmath_string, "387,sse")
1519 || ! strcmp (ix86_fpmath_string, "sse,387"))
1521 if (!TARGET_SSE)
1523 warning ("SSE instruction set disabled, using 387 arithmetics");
1524 ix86_fpmath = FPMATH_387;
1526 else if (!TARGET_80387)
1528 warning ("387 instruction set disabled, using SSE arithmetics");
1529 ix86_fpmath = FPMATH_SSE;
1531 else
1532 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1534 else
1535 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1538 /* It makes no sense to ask for just SSE builtins, so MMX is also turned
1539 on by -msse. */
1540 if (TARGET_SSE)
1542 target_flags |= MASK_MMX;
1543 x86_prefetch_sse = true;
1546 /* If it has 3DNow!, it also has MMX, so MMX is also turned on by -m3dnow. */
1547 if (TARGET_3DNOW)
1549 target_flags |= MASK_MMX;
1550 /* If we are targeting the Athlon architecture, enable the 3Dnow/MMX
1551 extensions it adds. */
1552 if (x86_3dnow_a & (1 << ix86_arch))
1553 target_flags |= MASK_3DNOW_A;
1555 if ((x86_accumulate_outgoing_args & TUNEMASK)
1556 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1557 && !optimize_size)
1558 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1560 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1562 char *p;
1563 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1564 p = strchr (internal_label_prefix, 'X');
1565 internal_label_prefix_len = p - internal_label_prefix;
1566 *p = '\0';
1570 void
1571 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1573 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1574 make the problem with not enough registers even worse. */
1575 #ifdef INSN_SCHEDULING
1576 if (level > 1)
1577 flag_schedule_insns = 0;
1578 #endif
1580 /* The default values of these switches depend on TARGET_64BIT,
1581 which is not known at this moment. Mark these values with 2 and
1582 let the user override them. In case there is no command line option
1583 specifying them, we will set the defaults in override_options. */
1584 if (optimize >= 1)
1585 flag_omit_frame_pointer = 2;
1586 flag_pcc_struct_return = 2;
1587 flag_asynchronous_unwind_tables = 2;
1590 /* Table of valid machine attributes. */
1591 const struct attribute_spec ix86_attribute_table[] =
1593 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1594 /* Stdcall attribute says callee is responsible for popping arguments
1595 if they are not variable. */
1596 { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1597 /* Fastcall attribute says callee is responsible for popping arguments
1598 if they are not variable; the first two integer args go in ECX and EDX. */
1599 { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1600 /* Cdecl attribute says the callee is a normal C declaration */
1601 { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1602 /* Regparm attribute specifies how many integer arguments are to be
1603 passed in registers. */
1604 { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute },
1605 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1606 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
1607 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
1608 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1609 #endif
1610 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1611 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1612 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1613 SUBTARGET_ATTRIBUTE_TABLE,
1614 #endif
1615 { NULL, 0, 0, false, false, false, NULL }
1618 /* Decide whether we can make a sibling call to a function. DECL is the
1619 declaration of the function being targeted by the call and EXP is the
1620 CALL_EXPR representing the call. */
1622 static bool
1623 ix86_function_ok_for_sibcall (tree decl, tree exp)
1625 /* If we are generating position-independent code, we cannot sibcall
1626 optimize any indirect call, or a direct call to a global function,
1627 as the PLT requires %ebx be live. */
1628 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1629 return false;
1631 /* If we are returning floats on the 80387 register stack, we cannot
1632 make a sibcall from a function that doesn't return a float to a
1633 function that does or, conversely, from a function that does return
1634 a float to a function that doesn't; the necessary stack adjustment
1635 would not be executed. */
1636 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
1637 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
1638 return false;
1640 /* If this call is indirect, we'll need to be able to use a call-clobbered
1641 register for the address of the target function. Make sure that all
1642 such registers are not used for passing parameters. */
1643 if (!decl && !TARGET_64BIT)
1645 tree type;
1647 /* We're looking at the CALL_EXPR, we need the type of the function. */
1648 type = TREE_OPERAND (exp, 0); /* pointer expression */
1649 type = TREE_TYPE (type); /* pointer type */
1650 type = TREE_TYPE (type); /* function type */
1652 if (ix86_function_regparm (type, NULL) >= 3)
1654 /* ??? Need to count the actual number of registers to be used,
1655 not the possible number of registers. Fix later. */
1656 return false;
1660 /* Otherwise okay. That also includes certain types of indirect calls. */
1661 return true;
1664 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
1665 arguments as in struct attribute_spec.handler. */
1666 static tree
1667 ix86_handle_cdecl_attribute (tree *node, tree name,
1668 tree args ATTRIBUTE_UNUSED,
1669 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1671 if (TREE_CODE (*node) != FUNCTION_TYPE
1672 && TREE_CODE (*node) != METHOD_TYPE
1673 && TREE_CODE (*node) != FIELD_DECL
1674 && TREE_CODE (*node) != TYPE_DECL)
1676 warning ("`%s' attribute only applies to functions",
1677 IDENTIFIER_POINTER (name));
1678 *no_add_attrs = true;
1680 else
1682 if (is_attribute_p ("fastcall", name))
1684 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1686 error ("fastcall and stdcall attributes are not compatible");
1688 else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1690 error ("fastcall and regparm attributes are not compatible");
1693 else if (is_attribute_p ("stdcall", name))
1695 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1697 error ("fastcall and stdcall attributes are not compatible");
1702 if (TARGET_64BIT)
1704 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1705 *no_add_attrs = true;
1708 return NULL_TREE;
1711 /* Handle a "regparm" attribute;
1712 arguments as in struct attribute_spec.handler. */
1713 static tree
1714 ix86_handle_regparm_attribute (tree *node, tree name, tree args,
1715 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1717 if (TREE_CODE (*node) != FUNCTION_TYPE
1718 && TREE_CODE (*node) != METHOD_TYPE
1719 && TREE_CODE (*node) != FIELD_DECL
1720 && TREE_CODE (*node) != TYPE_DECL)
1722 warning ("`%s' attribute only applies to functions",
1723 IDENTIFIER_POINTER (name));
1724 *no_add_attrs = true;
1726 else
1728 tree cst;
1730 cst = TREE_VALUE (args);
1731 if (TREE_CODE (cst) != INTEGER_CST)
1733 warning ("`%s' attribute requires an integer constant argument",
1734 IDENTIFIER_POINTER (name));
1735 *no_add_attrs = true;
1737 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1739 warning ("argument to `%s' attribute larger than %d",
1740 IDENTIFIER_POINTER (name), REGPARM_MAX);
1741 *no_add_attrs = true;
1744 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1746 error ("fastcall and regparm attributes are not compatible");
1750 return NULL_TREE;
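/* A sketch of the user-visible effect of the calling-convention attributes
   handled above (hypothetical declarations; register assignments assume the
   usual IA-32 conventions):
       int __attribute__ ((stdcall))     fs (int a, int b);
           callee pops its 8 bytes of arguments on return
       int __attribute__ ((fastcall))    ff (int a, int b);
           a passed in %ecx, b in %edx, callee pops any stack arguments
       int __attribute__ ((regparm (3))) fr (int a, int b, int c);
           a in %eax, b in %edx, c in %ecx
   Combining fastcall with stdcall, or fastcall with regparm, is rejected by
   the handlers above.  */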
1753 /* Return 0 if the attributes for two types are incompatible, 1 if they
1754 are compatible, and 2 if they are nearly compatible (which causes a
1755 warning to be generated). */
1757 static int
1758 ix86_comp_type_attributes (tree type1, tree type2)
1760 /* Check for mismatch of non-default calling convention. */
1761 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1763 if (TREE_CODE (type1) != FUNCTION_TYPE)
1764 return 1;
1766 /* Check for mismatched fastcall types */
1767 if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1768 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1769 return 0;
1771 /* Check for mismatched calling conventions (cdecl vs stdcall). */
1772 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1773 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1774 return 0;
1775 if (ix86_function_regparm (type1, NULL)
1776 != ix86_function_regparm (type2, NULL))
1777 return 0;
1778 return 1;
1781 /* Return the regparm value for a function with the indicated TYPE and DECL.
1782 DECL may be NULL when calling function indirectly
1783 or considering a libcall. */
1785 static int
1786 ix86_function_regparm (tree type, tree decl)
1788 tree attr;
1789 int regparm = ix86_regparm;
1790 bool user_convention = false;
1792 if (!TARGET_64BIT)
1794 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1795 if (attr)
1797 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1798 user_convention = true;
1801 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1803 regparm = 2;
1804 user_convention = true;
1807 /* Use register calling convention for local functions when possible. */
1808 if (!TARGET_64BIT && !user_convention && decl
1809 && flag_unit_at_a_time && !profile_flag)
1811 struct cgraph_local_info *i = cgraph_local_info (decl);
1812 if (i && i->local)
1814 /* We can't use regparm(3) for nested functions as these use
1815 static chain pointer in third argument. */
1816 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1817 regparm = 2;
1818 else
1819 regparm = 3;
1823 return regparm;
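/* For example (assuming -funit-at-a-time and no profiling), a function that
   cgraph considers local, such as
       static int sum3 (int a, int b, int c) { return a + b + c; }
   is promoted to regparm = 3 by the code above even without an explicit
   attribute, while a nested function, which needs the static chain in the
   third register, is limited to regparm = 2.  */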
1826 /* Return true if EAX is live at the start of the function. Used by
1827 ix86_expand_prologue to determine if we need special help before
1828 calling allocate_stack_worker. */
1830 static bool
1831 ix86_eax_live_at_start_p (void)
1833 /* Cheat. Don't bother working forward from ix86_function_regparm
1834 to the function type to whether an actual argument is located in
1835 eax. Instead just look at cfg info, which is still close enough
1836 to correct at this point. This gives false positives for broken
1837 functions that might use uninitialized data that happens to be
1838 allocated in eax, but who cares? */
1839 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
1842 /* Value is the number of bytes of arguments automatically
1843 popped when returning from a subroutine call.
1844 FUNDECL is the declaration node of the function (as a tree),
1845 FUNTYPE is the data type of the function (as a tree),
1846 or for a library call it is an identifier node for the subroutine name.
1847 SIZE is the number of bytes of arguments passed on the stack.
1849 On the 80386, the RTD insn may be used to pop them if the number
1850 of args is fixed, but if the number is variable then the caller
1851 must pop them all. RTD can't be used for library calls now
1852 because the library is compiled with the Unix compiler.
1853 Use of RTD is a selectable option, since it is incompatible with
1854 standard Unix calling sequences. If the option is not selected,
1855 the caller must always pop the args.
1857 The attribute stdcall is equivalent to RTD on a per module basis. */
1860 ix86_return_pops_args (tree fundecl, tree funtype, int size)
1862 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
1864 /* Cdecl functions override -mrtd, and never pop the stack. */
1865 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
1867 /* Stdcall and fastcall functions will pop the stack if not
1868 variable args. */
1869 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
1870 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
1871 rtd = 1;
1873 if (rtd
1874 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
1875 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
1876 == void_type_node)))
1877 return size;
1880 /* Lose any fake structure return argument if it is passed on the stack. */
1881 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
1882 && !TARGET_64BIT
1883 && !KEEP_AGGREGATE_RETURN_POINTER)
1885 int nregs = ix86_function_regparm (funtype, fundecl);
1887 if (!nregs)
1888 return GET_MODE_SIZE (Pmode);
1891 return 0;
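/* Worked examples of the rules above (hypothetical 32-bit prototypes):
       int __attribute__ ((stdcall)) f (int a, int b);
           fixed argument list, so the callee pops SIZE = 8 bytes (ret $8)
       int __attribute__ ((stdcall)) g (int a, ...);
           variable arguments, so the caller pops and 0 is returned
       struct big h (void);
           aggregate returned via a hidden pointer; with regparm == 0 and
           KEEP_AGGREGATE_RETURN_POINTER unset the callee pops that 4-byte slot
   -mrtd makes the callee-pop behaviour the default for non-cdecl functions
   with fixed argument lists.  */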
1894 /* Argument support functions. */
1896 /* Return true when register may be used to pass function parameters. */
1897 bool
1898 ix86_function_arg_regno_p (int regno)
1900 int i;
1901 if (!TARGET_64BIT)
1902 return (regno < REGPARM_MAX
1903 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
1904 if (SSE_REGNO_P (regno) && TARGET_SSE)
1905 return true;
1906 /* RAX is used as hidden argument to va_arg functions. */
1907 if (!regno)
1908 return true;
1909 for (i = 0; i < REGPARM_MAX; i++)
1910 if (regno == x86_64_int_parameter_registers[i])
1911 return true;
1912 return false;
1915 /* Return true if we do not know how to pass TYPE solely in registers. */
1917 static bool
1918 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
1920 if (must_pass_in_stack_var_size_or_pad (mode, type))
1921 return true;
1922 return (!TARGET_64BIT && type && mode == TImode);
1925 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1926 for a call to a function whose data type is FNTYPE.
1927 For a library call, FNTYPE is 0. */
1929 void
1930 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
1931 tree fntype, /* tree ptr for function decl */
1932 rtx libname, /* SYMBOL_REF of library name or 0 */
1933 tree fndecl)
1935 static CUMULATIVE_ARGS zero_cum;
1936 tree param, next_param;
1938 if (TARGET_DEBUG_ARG)
1940 fprintf (stderr, "\ninit_cumulative_args (");
1941 if (fntype)
1942 fprintf (stderr, "fntype code = %s, ret code = %s",
1943 tree_code_name[(int) TREE_CODE (fntype)],
1944 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
1945 else
1946 fprintf (stderr, "no fntype");
1948 if (libname)
1949 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
1952 *cum = zero_cum;
1954 /* Set up the number of registers to use for passing arguments. */
1955 if (fntype)
1956 cum->nregs = ix86_function_regparm (fntype, fndecl);
1957 else
1958 cum->nregs = ix86_regparm;
1959 if (TARGET_SSE)
1960 cum->sse_nregs = SSE_REGPARM_MAX;
1961 if (TARGET_MMX)
1962 cum->mmx_nregs = MMX_REGPARM_MAX;
1963 cum->warn_sse = true;
1964 cum->warn_mmx = true;
1965 cum->maybe_vaarg = false;
1967 /* Use ecx and edx registers if function has fastcall attribute */
1968 if (fntype && !TARGET_64BIT)
1970 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
1972 cum->nregs = 2;
1973 cum->fastcall = 1;
1977 /* Determine if this function has variable arguments. This is
1978 indicated by the last argument being 'void_type_node' if there
1979 are no variable arguments. If there are variable arguments, then
1980 we won't pass anything in registers in 32-bit mode. */
1982 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
1984 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
1985 param != 0; param = next_param)
1987 next_param = TREE_CHAIN (param);
1988 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
1990 if (!TARGET_64BIT)
1992 cum->nregs = 0;
1993 cum->sse_nregs = 0;
1994 cum->mmx_nregs = 0;
1995 cum->warn_sse = 0;
1996 cum->warn_mmx = 0;
1997 cum->fastcall = 0;
1999 cum->maybe_vaarg = true;
2003 if ((!fntype && !libname)
2004 || (fntype && !TYPE_ARG_TYPES (fntype)))
2005 cum->maybe_vaarg = 1;
2007 if (TARGET_DEBUG_ARG)
2008 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
2010 return;
2013 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
2014 of this code is to classify each 8bytes of incoming argument by the register
2015 class and assign registers accordingly. */
2017 /* Return the union class of CLASS1 and CLASS2.
2018 See the x86-64 PS ABI for details. */
2020 static enum x86_64_reg_class
2021 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
2023 /* Rule #1: If both classes are equal, this is the resulting class. */
2024 if (class1 == class2)
2025 return class1;
2027 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
2028 the other class. */
2029 if (class1 == X86_64_NO_CLASS)
2030 return class2;
2031 if (class2 == X86_64_NO_CLASS)
2032 return class1;
2034 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
2035 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
2036 return X86_64_MEMORY_CLASS;
2038 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
2039 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
2040 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
2041 return X86_64_INTEGERSI_CLASS;
2042 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
2043 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
2044 return X86_64_INTEGER_CLASS;
2046 /* Rule #5: If one of the classes is X87 or X87UP class, MEMORY is used. */
2047 if (class1 == X86_64_X87_CLASS || class1 == X86_64_X87UP_CLASS
2048 || class2 == X86_64_X87_CLASS || class2 == X86_64_X87UP_CLASS)
2049 return X86_64_MEMORY_CLASS;
2051 /* Rule #6: Otherwise class SSE is used. */
2052 return X86_64_SSE_CLASS;
2055 /* Classify the argument of type TYPE and mode MODE.
2056 CLASSES will be filled by the register class used to pass each word
2057 of the operand. The number of words is returned. In case the parameter
2058 should be passed in memory, 0 is returned. As a special case for zero
2059 sized containers, classes[0] will be NO_CLASS and 1 is returned.
2061 BIT_OFFSET is used internally for handling records and specifies the
2062 offset in bits modulo 256 to avoid overflow cases.
2064 See the x86-64 PS ABI for details.
2067 static int
2068 classify_argument (enum machine_mode mode, tree type,
2069 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
2071 HOST_WIDE_INT bytes =
2072 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2073 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2075 /* Variable sized entities are always passed/returned in memory. */
2076 if (bytes < 0)
2077 return 0;
2079 if (mode != VOIDmode
2080 && targetm.calls.must_pass_in_stack (mode, type))
2081 return 0;
2083 if (type && AGGREGATE_TYPE_P (type))
2085 int i;
2086 tree field;
2087 enum x86_64_reg_class subclasses[MAX_CLASSES];
2089 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
2090 if (bytes > 16)
2091 return 0;
2093 for (i = 0; i < words; i++)
2094 classes[i] = X86_64_NO_CLASS;
2096 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
2097 signal the memory class, so handle this as a special case. */
2098 if (!words)
2100 classes[0] = X86_64_NO_CLASS;
2101 return 1;
2104 /* Classify each field of record and merge classes. */
2105 if (TREE_CODE (type) == RECORD_TYPE)
2107 /* For classes first merge in the field of the subclasses. */
2108 if (TYPE_BINFO (type))
2110 tree binfo, base_binfo;
2111 int i;
2113 for (binfo = TYPE_BINFO (type), i = 0;
2114 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2116 int num;
2117 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2118 tree type = BINFO_TYPE (base_binfo);
2120 num = classify_argument (TYPE_MODE (type),
2121 type, subclasses,
2122 (offset + bit_offset) % 256);
2123 if (!num)
2124 return 0;
2125 for (i = 0; i < num; i++)
2127 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2128 classes[i + pos] =
2129 merge_classes (subclasses[i], classes[i + pos]);
2133 /* And now merge the fields of structure. */
2134 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2136 if (TREE_CODE (field) == FIELD_DECL)
2138 int num;
2140 /* Bitfields are always classified as integer. Handle them
2141 early, since later code would consider them to be
2142 misaligned integers. */
2143 if (DECL_BIT_FIELD (field))
2145 for (i = int_bit_position (field) / 8 / 8;
2146 i < (int_bit_position (field)
2147 + tree_low_cst (DECL_SIZE (field), 0)
2148 + 63) / 8 / 8; i++)
2149 classes[i] =
2150 merge_classes (X86_64_INTEGER_CLASS,
2151 classes[i]);
2153 else
2155 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2156 TREE_TYPE (field), subclasses,
2157 (int_bit_position (field)
2158 + bit_offset) % 256);
2159 if (!num)
2160 return 0;
2161 for (i = 0; i < num; i++)
2163 int pos =
2164 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2165 classes[i + pos] =
2166 merge_classes (subclasses[i], classes[i + pos]);
2172 /* Arrays are handled as small records. */
2173 else if (TREE_CODE (type) == ARRAY_TYPE)
2175 int num;
2176 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2177 TREE_TYPE (type), subclasses, bit_offset);
2178 if (!num)
2179 return 0;
2181 /* The partial classes are now full classes. */
2182 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2183 subclasses[0] = X86_64_SSE_CLASS;
2184 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2185 subclasses[0] = X86_64_INTEGER_CLASS;
2187 for (i = 0; i < words; i++)
2188 classes[i] = subclasses[i % num];
2190 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2191 else if (TREE_CODE (type) == UNION_TYPE
2192 || TREE_CODE (type) == QUAL_UNION_TYPE)
2194 /* For classes first merge in the field of the subclasses. */
2195 if (TYPE_BINFO (type))
2197 tree binfo, base_binfo;
2198 int i;
2200 for (binfo = TYPE_BINFO (type), i = 0;
2201 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2203 int num;
2204 int offset = tree_low_cst (BINFO_OFFSET (base_binfo), 0) * 8;
2205 tree type = BINFO_TYPE (base_binfo);
2207 num = classify_argument (TYPE_MODE (type),
2208 type, subclasses,
2209 (offset + (bit_offset % 64)) % 256);
2210 if (!num)
2211 return 0;
2212 for (i = 0; i < num; i++)
2214 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2215 classes[i + pos] =
2216 merge_classes (subclasses[i], classes[i + pos]);
2220 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2222 if (TREE_CODE (field) == FIELD_DECL)
2224 int num;
2225 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2226 TREE_TYPE (field), subclasses,
2227 bit_offset);
2228 if (!num)
2229 return 0;
2230 for (i = 0; i < num; i++)
2231 classes[i] = merge_classes (subclasses[i], classes[i]);
2235 else if (TREE_CODE (type) == SET_TYPE)
2237 if (bytes <= 4)
2239 classes[0] = X86_64_INTEGERSI_CLASS;
2240 return 1;
2242 else if (bytes <= 8)
2244 classes[0] = X86_64_INTEGER_CLASS;
2245 return 1;
2247 else if (bytes <= 12)
2249 classes[0] = X86_64_INTEGER_CLASS;
2250 classes[1] = X86_64_INTEGERSI_CLASS;
2251 return 2;
2253 else
2255 classes[0] = X86_64_INTEGER_CLASS;
2256 classes[1] = X86_64_INTEGER_CLASS;
2257 return 2;
2260 else
2261 abort ();
2263 /* Final merger cleanup. */
2264 for (i = 0; i < words; i++)
2266 /* If one class is MEMORY, everything should be passed in
2267 memory. */
2268 if (classes[i] == X86_64_MEMORY_CLASS)
2269 return 0;
2271 /* The X86_64_SSEUP_CLASS should be always preceded by
2272 X86_64_SSE_CLASS. */
2273 if (classes[i] == X86_64_SSEUP_CLASS
2274 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2275 classes[i] = X86_64_SSE_CLASS;
2277 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2278 if (classes[i] == X86_64_X87UP_CLASS
2279 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2280 classes[i] = X86_64_SSE_CLASS;
2282 return words;
2285 /* Compute alignment needed. We align all types to natural boundaries,
2286 with the exception of XFmode, which is checked against a 128-bit boundary. */
2287 if (mode != VOIDmode && mode != BLKmode)
2289 int mode_alignment = GET_MODE_BITSIZE (mode);
2291 if (mode == XFmode)
2292 mode_alignment = 128;
2293 else if (mode == XCmode)
2294 mode_alignment = 256;
2295 if (COMPLEX_MODE_P (mode))
2296 mode_alignment /= 2;
2297 /* Misaligned fields are always returned in memory. */
2298 if (bit_offset % mode_alignment)
2299 return 0;
2302 /* for V1xx modes, just use the base mode */
2303 if (VECTOR_MODE_P (mode)
2304 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
2305 mode = GET_MODE_INNER (mode);
2307 /* Classification of atomic types. */
2308 switch (mode)
2310 case DImode:
2311 case SImode:
2312 case HImode:
2313 case QImode:
2314 case CSImode:
2315 case CHImode:
2316 case CQImode:
2317 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2318 classes[0] = X86_64_INTEGERSI_CLASS;
2319 else
2320 classes[0] = X86_64_INTEGER_CLASS;
2321 return 1;
2322 case CDImode:
2323 case TImode:
2324 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2325 return 2;
2326 case CTImode:
2327 return 0;
2328 case SFmode:
2329 if (!(bit_offset % 64))
2330 classes[0] = X86_64_SSESF_CLASS;
2331 else
2332 classes[0] = X86_64_SSE_CLASS;
2333 return 1;
2334 case DFmode:
2335 classes[0] = X86_64_SSEDF_CLASS;
2336 return 1;
2337 case XFmode:
2338 classes[0] = X86_64_X87_CLASS;
2339 classes[1] = X86_64_X87UP_CLASS;
2340 return 2;
2341 case TFmode:
2342 classes[0] = X86_64_SSE_CLASS;
2343 classes[1] = X86_64_SSEUP_CLASS;
2344 return 2;
2345 case SCmode:
2346 classes[0] = X86_64_SSE_CLASS;
2347 return 1;
2348 case DCmode:
2349 classes[0] = X86_64_SSEDF_CLASS;
2350 classes[1] = X86_64_SSEDF_CLASS;
2351 return 2;
2352 case XCmode:
2353 case TCmode:
2354 /* These modes are larger than 16 bytes. */
2355 return 0;
2356 case V4SFmode:
2357 case V4SImode:
2358 case V16QImode:
2359 case V8HImode:
2360 case V2DFmode:
2361 case V2DImode:
2362 classes[0] = X86_64_SSE_CLASS;
2363 classes[1] = X86_64_SSEUP_CLASS;
2364 return 2;
2365 case V2SFmode:
2366 case V2SImode:
2367 case V4HImode:
2368 case V8QImode:
2369 classes[0] = X86_64_SSE_CLASS;
2370 return 1;
2371 case BLKmode:
2372 case VOIDmode:
2373 return 0;
2374 default:
2375 if (VECTOR_MODE_P (mode))
2377 if (bytes > 16)
2378 return 0;
2379 if (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT)
2381 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2382 classes[0] = X86_64_INTEGERSI_CLASS;
2383 else
2384 classes[0] = X86_64_INTEGER_CLASS;
2385 classes[1] = X86_64_INTEGER_CLASS;
2386 return 1 + (bytes > 8);
2389 abort ();
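/* A worked example of the classification implemented above (x86-64):
       struct s1 { int a; int b; };      8 bytes -> a single INTEGER eightbyte,
                                         passed in one integer register
       struct s2 { double d; int i; };  16 bytes -> eightbyte 0 is SSE (the
                                         double), eightbyte 1 is INTEGER (the
                                         int), so one SSE and one GP register
       struct s3 { long a, b, c; };     24 bytes -> larger than 16 bytes, so
                                         classify_argument returns 0 and the
                                         argument is passed in memory  */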
2393 /* Examine the argument and return set number of register required in each
2394 class. Return 0 iff parameter should be passed in memory. */
2395 static int
2396 examine_argument (enum machine_mode mode, tree type, int in_return,
2397 int *int_nregs, int *sse_nregs)
2399 enum x86_64_reg_class class[MAX_CLASSES];
2400 int n = classify_argument (mode, type, class, 0);
2402 *int_nregs = 0;
2403 *sse_nregs = 0;
2404 if (!n)
2405 return 0;
2406 for (n--; n >= 0; n--)
2407 switch (class[n])
2409 case X86_64_INTEGER_CLASS:
2410 case X86_64_INTEGERSI_CLASS:
2411 (*int_nregs)++;
2412 break;
2413 case X86_64_SSE_CLASS:
2414 case X86_64_SSESF_CLASS:
2415 case X86_64_SSEDF_CLASS:
2416 (*sse_nregs)++;
2417 break;
2418 case X86_64_NO_CLASS:
2419 case X86_64_SSEUP_CLASS:
2420 break;
2421 case X86_64_X87_CLASS:
2422 case X86_64_X87UP_CLASS:
2423 if (!in_return)
2424 return 0;
2425 break;
2426 case X86_64_MEMORY_CLASS:
2427 abort ();
2429 return 1;
2431 /* Construct container for the argument used by GCC interface. See
2432 FUNCTION_ARG for the detailed description. */
2433 static rtx
2434 construct_container (enum machine_mode mode, tree type, int in_return,
2435 int nintregs, int nsseregs, const int * intreg,
2436 int sse_regno)
2438 enum machine_mode tmpmode;
2439 int bytes =
2440 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2441 enum x86_64_reg_class class[MAX_CLASSES];
2442 int n;
2443 int i;
2444 int nexps = 0;
2445 int needed_sseregs, needed_intregs;
2446 rtx exp[MAX_CLASSES];
2447 rtx ret;
2449 n = classify_argument (mode, type, class, 0);
2450 if (TARGET_DEBUG_ARG)
2452 if (!n)
2453 fprintf (stderr, "Memory class\n");
2454 else
2456 fprintf (stderr, "Classes:");
2457 for (i = 0; i < n; i++)
2459 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2461 fprintf (stderr, "\n");
2464 if (!n)
2465 return NULL;
2466 if (!examine_argument (mode, type, in_return, &needed_intregs, &needed_sseregs))
2467 return NULL;
2468 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2469 return NULL;
2471 /* First construct simple cases. Avoid SCmode, since we want to use
2472 single register to pass this type. */
2473 if (n == 1 && mode != SCmode)
2474 switch (class[0])
2476 case X86_64_INTEGER_CLASS:
2477 case X86_64_INTEGERSI_CLASS:
2478 return gen_rtx_REG (mode, intreg[0]);
2479 case X86_64_SSE_CLASS:
2480 case X86_64_SSESF_CLASS:
2481 case X86_64_SSEDF_CLASS:
2482 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2483 case X86_64_X87_CLASS:
2484 return gen_rtx_REG (mode, FIRST_STACK_REG);
2485 case X86_64_NO_CLASS:
2486 /* Zero sized array, struct or class. */
2487 return NULL;
2488 default:
2489 abort ();
2491 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
2492 && mode != BLKmode)
2493 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2494 if (n == 2
2495 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2496 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2497 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2498 && class[1] == X86_64_INTEGER_CLASS
2499 && (mode == CDImode || mode == TImode || mode == TFmode)
2500 && intreg[0] + 1 == intreg[1])
2501 return gen_rtx_REG (mode, intreg[0]);
2502 if (n == 4
2503 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS
2504 && class[2] == X86_64_X87_CLASS && class[3] == X86_64_X87UP_CLASS
2505 && mode != BLKmode)
2506 return gen_rtx_REG (XCmode, FIRST_STACK_REG);
2508 /* Otherwise figure out the entries of the PARALLEL. */
2509 for (i = 0; i < n; i++)
2511 switch (class[i])
2513 case X86_64_NO_CLASS:
2514 break;
2515 case X86_64_INTEGER_CLASS:
2516 case X86_64_INTEGERSI_CLASS:
2517 /* Merge TImodes on aligned occasions here too. */
2518 if (i * 8 + 8 > bytes)
2519 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2520 else if (class[i] == X86_64_INTEGERSI_CLASS)
2521 tmpmode = SImode;
2522 else
2523 tmpmode = DImode;
2524 /* We've requested 24 bytes we don't have mode for. Use DImode. */
2525 if (tmpmode == BLKmode)
2526 tmpmode = DImode;
2527 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2528 gen_rtx_REG (tmpmode, *intreg),
2529 GEN_INT (i*8));
2530 intreg++;
2531 break;
2532 case X86_64_SSESF_CLASS:
2533 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2534 gen_rtx_REG (SFmode,
2535 SSE_REGNO (sse_regno)),
2536 GEN_INT (i*8));
2537 sse_regno++;
2538 break;
2539 case X86_64_SSEDF_CLASS:
2540 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2541 gen_rtx_REG (DFmode,
2542 SSE_REGNO (sse_regno)),
2543 GEN_INT (i*8));
2544 sse_regno++;
2545 break;
2546 case X86_64_SSE_CLASS:
2547 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2548 tmpmode = TImode;
2549 else
2550 tmpmode = DImode;
2551 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2552 gen_rtx_REG (tmpmode,
2553 SSE_REGNO (sse_regno)),
2554 GEN_INT (i*8));
2555 if (tmpmode == TImode)
2556 i++;
2557 sse_regno++;
2558 break;
2559 default:
2560 abort ();
2563 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2564 for (i = 0; i < nexps; i++)
2565 XVECEXP (ret, 0, i) = exp [i];
2566 return ret;
2569 /* Update the data in CUM to advance over an argument
2570 of mode MODE and data type TYPE.
2571 (TYPE is null for libcalls where that information may not be available.) */
2573 void
2574 function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
2575 enum machine_mode mode, /* current arg mode */
2576 tree type, /* type of the argument or 0 if lib support */
2577 int named) /* whether or not the argument was named */
2579 int bytes =
2580 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2581 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2583 if (TARGET_DEBUG_ARG)
2584 fprintf (stderr,
2585 "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, mode=%s, named=%d)\n\n",
2586 words, cum->words, cum->nregs, cum->sse_nregs, GET_MODE_NAME (mode), named);
2587 if (TARGET_64BIT)
2589 int int_nregs, sse_nregs;
2590 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2591 cum->words += words;
2592 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2594 cum->nregs -= int_nregs;
2595 cum->sse_nregs -= sse_nregs;
2596 cum->regno += int_nregs;
2597 cum->sse_regno += sse_nregs;
2599 else
2600 cum->words += words;
2602 else
2604 if (TARGET_SSE && SSE_REG_MODE_P (mode)
2605 && (!type || !AGGREGATE_TYPE_P (type)))
2607 cum->sse_words += words;
2608 cum->sse_nregs -= 1;
2609 cum->sse_regno += 1;
2610 if (cum->sse_nregs <= 0)
2612 cum->sse_nregs = 0;
2613 cum->sse_regno = 0;
2616 else if (TARGET_MMX && MMX_REG_MODE_P (mode)
2617 && (!type || !AGGREGATE_TYPE_P (type)))
2619 cum->mmx_words += words;
2620 cum->mmx_nregs -= 1;
2621 cum->mmx_regno += 1;
2622 if (cum->mmx_nregs <= 0)
2624 cum->mmx_nregs = 0;
2625 cum->mmx_regno = 0;
2628 else
2630 cum->words += words;
2631 cum->nregs -= words;
2632 cum->regno += words;
2634 if (cum->nregs <= 0)
2636 cum->nregs = 0;
2637 cum->regno = 0;
2641 return;
2644 /* Define where to put the arguments to a function.
2645 Value is zero to push the argument on the stack,
2646 or a hard register in which to store the argument.
2648 MODE is the argument's machine mode.
2649 TYPE is the data type of the argument (as a tree).
2650 This is null for libcalls where that information may
2651 not be available.
2652 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2653 the preceding args and about the function being called.
2654 NAMED is nonzero if this argument is a named parameter
2655 (otherwise it is an extra parameter matching an ellipsis). */
2658 function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
2659 enum machine_mode mode, /* current arg mode */
2660 tree type, /* type of the argument or 0 if lib support */
2661 int named) /* != 0 for normal args, == 0 for ... args */
2663 rtx ret = NULL_RTX;
2664 int bytes =
2665 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2666 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2667 static bool warnedsse, warnedmmx;
2669 /* To simplify the code below, represent vector types with a vector mode
2670 even if MMX/SSE are not active. */
2671 if (type
2672 && TREE_CODE (type) == VECTOR_TYPE
2673 && (bytes == 8 || bytes == 16)
2674 && GET_MODE_CLASS (TYPE_MODE (type)) != MODE_VECTOR_INT
2675 && GET_MODE_CLASS (TYPE_MODE (type)) != MODE_VECTOR_FLOAT)
2677 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
2678 enum machine_mode newmode
2679 = TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
2680 ? MIN_MODE_VECTOR_FLOAT : MIN_MODE_VECTOR_INT;
2682 /* Get the mode which has this inner mode and number of units. */
2683 for (; newmode != VOIDmode; newmode = GET_MODE_WIDER_MODE (newmode))
2684 if (GET_MODE_NUNITS (newmode) == TYPE_VECTOR_SUBPARTS (type)
2685 && GET_MODE_INNER (newmode) == innermode)
2687 mode = newmode;
2688 break;
2692 /* Handle a hidden AL argument containing number of registers for varargs
2693 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
2694 any AL settings. */
2695 if (mode == VOIDmode)
2697 if (TARGET_64BIT)
2698 return GEN_INT (cum->maybe_vaarg
2699 ? (cum->sse_nregs < 0
2700 ? SSE_REGPARM_MAX
2701 : cum->sse_regno)
2702 : -1);
2703 else
2704 return constm1_rtx;
2706 if (TARGET_64BIT)
2707 ret = construct_container (mode, type, 0, cum->nregs, cum->sse_nregs,
2708 &x86_64_int_parameter_registers [cum->regno],
2709 cum->sse_regno);
2710 else
2711 switch (mode)
2713 /* For now, pass fp/complex values on the stack. */
2714 default:
2715 break;
2717 case BLKmode:
2718 if (bytes < 0)
2719 break;
2720 /* FALLTHRU */
2721 case DImode:
2722 case SImode:
2723 case HImode:
2724 case QImode:
2725 if (words <= cum->nregs)
2727 int regno = cum->regno;
2729 /* Fastcall allocates the first two DWORD (SImode) or
2730 smaller arguments to ECX and EDX. */
2731 if (cum->fastcall)
2733 if (mode == BLKmode || mode == DImode)
2734 break;
2736 /* ECX not EAX is the first allocated register. */
2737 if (regno == 0)
2738 regno = 2;
2740 ret = gen_rtx_REG (mode, regno);
2742 break;
2743 case TImode:
2744 case V16QImode:
2745 case V8HImode:
2746 case V4SImode:
2747 case V2DImode:
2748 case V4SFmode:
2749 case V2DFmode:
2750 if (!type || !AGGREGATE_TYPE_P (type))
2752 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
2754 warnedsse = true;
2755 warning ("SSE vector argument without SSE enabled "
2756 "changes the ABI");
2758 if (cum->sse_nregs)
2759 ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG);
2761 break;
2762 case V8QImode:
2763 case V4HImode:
2764 case V2SImode:
2765 case V2SFmode:
2766 if (!type || !AGGREGATE_TYPE_P (type))
2768 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
2770 warnedmmx = true;
2771 warning ("MMX vector argument without MMX enabled "
2772 "changes the ABI");
2774 if (cum->mmx_nregs)
2775 ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG);
2777 break;
2780 if (TARGET_DEBUG_ARG)
2782 fprintf (stderr,
2783 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2784 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
2786 if (ret)
2787 print_simple_rtl (stderr, ret);
2788 else
2789 fprintf (stderr, ", stack");
2791 fprintf (stderr, " )\n");
2794 return ret;
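/* For illustration, on 32-bit targets the code above places a 16-byte vector
   argument such as __m128 in %xmm0-%xmm2 when SSE is enabled (assuming
   SSE_REGPARM_MAX is 3 there); with SSE disabled it goes on the stack and the
   one-time "changes the ABI" warning is emitted.  An 8-byte vector likewise
   uses an MMX register only when MMX is enabled.  */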
2797 /* A C expression that indicates when an argument must be passed by
2798 reference. If nonzero for an argument, a copy of that argument is
2799 made in memory and a pointer to the argument is passed instead of
2800 the argument itself. The pointer is passed in whatever way is
2801 appropriate for passing a pointer to that type. */
2803 static bool
2804 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2805 enum machine_mode mode ATTRIBUTE_UNUSED,
2806 tree type, bool named ATTRIBUTE_UNUSED)
2808 if (!TARGET_64BIT)
2809 return 0;
2811 if (type && int_size_in_bytes (type) == -1)
2813 if (TARGET_DEBUG_ARG)
2814 fprintf (stderr, "function_arg_pass_by_reference\n");
2815 return 1;
2818 return 0;
2821 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
2822 ABI. Only called if TARGET_SSE. */
2823 static bool
2824 contains_128bit_aligned_vector_p (tree type)
2826 enum machine_mode mode = TYPE_MODE (type);
2827 if (SSE_REG_MODE_P (mode)
2828 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
2829 return true;
2830 if (TYPE_ALIGN (type) < 128)
2831 return false;
2833 if (AGGREGATE_TYPE_P (type))
2835 /* Walk the aggregates recursively. */
2836 if (TREE_CODE (type) == RECORD_TYPE
2837 || TREE_CODE (type) == UNION_TYPE
2838 || TREE_CODE (type) == QUAL_UNION_TYPE)
2840 tree field;
2842 if (TYPE_BINFO (type))
2844 tree binfo, base_binfo;
2845 int i;
2847 for (binfo = TYPE_BINFO (type), i = 0;
2848 BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
2849 if (contains_128bit_aligned_vector_p (BINFO_TYPE (base_binfo)))
2850 return true;
2852 /* And now merge the fields of structure. */
2853 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2855 if (TREE_CODE (field) == FIELD_DECL
2856 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
2857 return true;
2860 /* Just for use if some languages pass arrays by value. */
2861 else if (TREE_CODE (type) == ARRAY_TYPE)
2863 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
2864 return true;
2866 else
2867 abort ();
2869 return false;
2872 /* Gives the alignment boundary, in bits, of an argument with the
2873 specified mode and type. */
2876 ix86_function_arg_boundary (enum machine_mode mode, tree type)
2878 int align;
2879 if (type)
2880 align = TYPE_ALIGN (type);
2881 else
2882 align = GET_MODE_ALIGNMENT (mode);
2883 if (align < PARM_BOUNDARY)
2884 align = PARM_BOUNDARY;
2885 if (!TARGET_64BIT)
2887 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
2888 make an exception for SSE modes since these require 128bit
2889 alignment.
2891 The handling here differs from field_alignment. ICC aligns MMX
2892 arguments to 4 byte boundaries, while structure fields are aligned
2893 to 8 byte boundaries. */
2894 if (!TARGET_SSE)
2895 align = PARM_BOUNDARY;
2896 else if (!type)
2898 if (!SSE_REG_MODE_P (mode))
2899 align = PARM_BOUNDARY;
2901 else
2903 if (!contains_128bit_aligned_vector_p (type))
2904 align = PARM_BOUNDARY;
2907 if (align > 128)
2908 align = 128;
2909 return align;
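/* Examples of the resulting boundaries: on 32-bit targets every non-vector
   argument ends up at PARM_BOUNDARY (32 bits), while an __m128 argument, or
   an aggregate containing one, is aligned to 128 bits; no argument is ever
   aligned beyond 128 bits by this hook.  */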
2912 /* Return true if N is a possible register number of function value. */
2913 bool
2914 ix86_function_value_regno_p (int regno)
2916 if (!TARGET_64BIT)
2918 return ((regno) == 0
2919 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
2920 || ((regno) == FIRST_SSE_REG && TARGET_SSE));
2922 return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
2923 || ((regno) == FIRST_SSE_REG && TARGET_SSE)
2924 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
2927 /* Define how to find the value returned by a function.
2928 VALTYPE is the data type of the value (as a tree).
2929 If the precise function being called is known, FUNC is its FUNCTION_DECL;
2930 otherwise, FUNC is 0. */
2932 ix86_function_value (tree valtype)
2934 if (TARGET_64BIT)
2936 rtx ret = construct_container (TYPE_MODE (valtype), valtype, 1,
2937 REGPARM_MAX, SSE_REGPARM_MAX,
2938 x86_64_int_return_registers, 0);
2939 /* For zero sized structures, construct_container returns NULL, but we need
2940 to keep the rest of the compiler happy by returning a meaningful value. */
2941 if (!ret)
2942 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
2943 return ret;
2945 else
2946 return gen_rtx_REG (TYPE_MODE (valtype),
2947 ix86_value_regno (TYPE_MODE (valtype)));
2950 /* Return nonzero iff TYPE should be returned in memory. */
2952 ix86_return_in_memory (tree type)
2954 int needed_intregs, needed_sseregs, size;
2955 enum machine_mode mode = TYPE_MODE (type);
2957 if (TARGET_64BIT)
2958 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
2960 if (mode == BLKmode)
2961 return 1;
2963 size = int_size_in_bytes (type);
2965 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
2966 return 0;
2968 if (VECTOR_MODE_P (mode) || mode == TImode)
2970 /* User-created vectors small enough to fit in EAX. */
2971 if (size < 8)
2972 return 0;
2974 /* MMX/3dNow values are returned on the stack, since we've
2975 got to EMMS/FEMMS before returning. */
2976 if (size == 8)
2977 return 1;
2979 /* SSE values are returned in XMM0, except when it doesn't exist. */
2980 if (size == 16)
2981 return (TARGET_SSE ? 0 : 1);
2984 if (mode == XFmode)
2985 return 0;
2987 if (size > 12)
2988 return 1;
2989 return 0;
2992 /* When returning SSE vector types, we have a choice of either
2993 (1) being abi incompatible with a -march switch, or
2994 (2) generating an error.
2995 Given no good solution, I think the safest thing is one warning.
2996 The user won't be able to use -Werror, but....
2998 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
2999 called in response to actually generating a caller or callee that
3000 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
3001 via aggregate_value_p for general type probing from tree-ssa. */
3003 static rtx
3004 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
3006 static bool warned;
3008 if (!TARGET_SSE && type && !warned)
3010 /* Look at the return type of the function, not the function type. */
3011 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
3013 if (mode == TImode
3014 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3016 warned = true;
3017 warning ("SSE vector return without SSE enabled changes the ABI");
3021 return NULL;
3024 /* Define how to find the value returned by a library function
3025 assuming the value has mode MODE. */
3027 ix86_libcall_value (enum machine_mode mode)
3029 if (TARGET_64BIT)
3031 switch (mode)
3033 case SFmode:
3034 case SCmode:
3035 case DFmode:
3036 case DCmode:
3037 case TFmode:
3038 return gen_rtx_REG (mode, FIRST_SSE_REG);
3039 case XFmode:
3040 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
3041 case XCmode:
3042 case TCmode:
3043 return NULL;
3044 default:
3045 return gen_rtx_REG (mode, 0);
3048 else
3049 return gen_rtx_REG (mode, ix86_value_regno (mode));
3052 /* Given a mode, return the register to use for a return value. */
3054 static int
3055 ix86_value_regno (enum machine_mode mode)
3057 /* Floating point return values in %st(0). */
3058 if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
3059 return FIRST_FLOAT_REG;
3060 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
3061 we prevent this case when sse is not available. */
3062 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
3063 return FIRST_SSE_REG;
3064 /* Everything else in %eax. */
3065 return 0;
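/* Illustrative register choices made above for the 32-bit case: an int is
   returned in %eax, a float/double/long double in %st(0) when
   TARGET_FLOAT_RETURNS_IN_80387 is set, and a 16-byte vector such as __m128
   in %xmm0 (ix86_return_in_memory keeps that case off this path when SSE is
   disabled).  */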
3068 /* Create the va_list data type. */
3070 static tree
3071 ix86_build_builtin_va_list (void)
3073 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
3075 /* For i386 we use plain pointer to argument area. */
3076 if (!TARGET_64BIT)
3077 return build_pointer_type (char_type_node);
3079 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3080 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
3082 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
3083 unsigned_type_node);
3084 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
3085 unsigned_type_node);
3086 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
3087 ptr_type_node);
3088 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
3089 ptr_type_node);
3091 DECL_FIELD_CONTEXT (f_gpr) = record;
3092 DECL_FIELD_CONTEXT (f_fpr) = record;
3093 DECL_FIELD_CONTEXT (f_ovf) = record;
3094 DECL_FIELD_CONTEXT (f_sav) = record;
3096 TREE_CHAIN (record) = type_decl;
3097 TYPE_NAME (record) = type_decl;
3098 TYPE_FIELDS (record) = f_gpr;
3099 TREE_CHAIN (f_gpr) = f_fpr;
3100 TREE_CHAIN (f_fpr) = f_ovf;
3101 TREE_CHAIN (f_ovf) = f_sav;
3103 layout_type (record);
3105 /* The correct type is an array type of one element. */
3106 return build_array_type (record, build_index_type (size_zero_node));
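/* The record built above corresponds to the usual x86-64 va_list layout,
   sketched here in user-level C for reference:
       typedef struct __va_list_tag {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } va_list[1];
   va_list itself is a one-element array of the record, so it decays to a
   pointer when passed to functions.  */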
3109 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
3111 static void
3112 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3113 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3114 int no_rtl)
3116 CUMULATIVE_ARGS next_cum;
3117 rtx save_area = NULL_RTX, mem;
3118 rtx label;
3119 rtx label_ref;
3120 rtx tmp_reg;
3121 rtx nsse_reg;
3122 int set;
3123 tree fntype;
3124 int stdarg_p;
3125 int i;
3127 if (!TARGET_64BIT)
3128 return;
3130 /* Indicate to allocate space on the stack for varargs save area. */
3131 ix86_save_varrargs_registers = 1;
3133 cfun->stack_alignment_needed = 128;
3135 fntype = TREE_TYPE (current_function_decl);
3136 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
3137 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
3138 != void_type_node));
3140 /* For varargs, we do not want to skip the dummy va_dcl argument.
3141 For stdargs, we do want to skip the last named argument. */
3142 next_cum = *cum;
3143 if (stdarg_p)
3144 function_arg_advance (&next_cum, mode, type, 1);
3146 if (!no_rtl)
3147 save_area = frame_pointer_rtx;
3149 set = get_varargs_alias_set ();
3151 for (i = next_cum.regno; i < ix86_regparm; i++)
3153 mem = gen_rtx_MEM (Pmode,
3154 plus_constant (save_area, i * UNITS_PER_WORD));
3155 set_mem_alias_set (mem, set);
3156 emit_move_insn (mem, gen_rtx_REG (Pmode,
3157 x86_64_int_parameter_registers[i]));
3160 if (next_cum.sse_nregs)
3162 /* Now emit code to save SSE registers. The AX parameter contains number
3163 of SSE parameter registers used to call this function. We use
3164 sse_prologue_save insn template that produces computed jump across
3165 SSE saves. We need some preparation work to get this working. */
3167 label = gen_label_rtx ();
3168 label_ref = gen_rtx_LABEL_REF (Pmode, label);
3170 /* Compute the address to jump to:
3171 label - eax*4 + nnamed_sse_arguments*4 */
3172 tmp_reg = gen_reg_rtx (Pmode);
3173 nsse_reg = gen_reg_rtx (Pmode);
3174 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3175 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3176 gen_rtx_MULT (Pmode, nsse_reg,
3177 GEN_INT (4))));
3178 if (next_cum.sse_regno)
3179 emit_move_insn
3180 (nsse_reg,
3181 gen_rtx_CONST (DImode,
3182 gen_rtx_PLUS (DImode,
3183 label_ref,
3184 GEN_INT (next_cum.sse_regno * 4))));
3185 else
3186 emit_move_insn (nsse_reg, label_ref);
3187 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3189 /* Compute the address of the memory block we save into. We always use a
3190 pointer pointing 127 bytes after the first byte to store - this keeps the
3191 instruction size limited to 4 bytes (every offset fits in a signed 8-bit displacement). */
3192 tmp_reg = gen_reg_rtx (Pmode);
3193 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3194 plus_constant (save_area,
3195 8 * REGPARM_MAX + 127)));
3196 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3197 set_mem_alias_set (mem, set);
3198 set_mem_align (mem, BITS_PER_WORD);
3200 /* And finally do the dirty job! */
3201 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3202 GEN_INT (next_cum.sse_regno), label));
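/* A numeric sketch of the computed jump set up above, assuming each save
   instruction emitted by sse_prologue_save is 4 bytes long (as the address
   arithmetic implies): the jump target is
       &label + 4 * n_named_sse_args - 4 * %al
   so with two named SSE arguments and %al = 5 at the call, execution lands
   12 bytes before the label and the last three register saves are run.  */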
3207 /* Implement va_start. */
3209 void
3210 ix86_va_start (tree valist, rtx nextarg)
3212 HOST_WIDE_INT words, n_gpr, n_fpr;
3213 tree f_gpr, f_fpr, f_ovf, f_sav;
3214 tree gpr, fpr, ovf, sav, t;
3216 /* Only 64bit target needs something special. */
3217 if (!TARGET_64BIT)
3219 std_expand_builtin_va_start (valist, nextarg);
3220 return;
3223 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3224 f_fpr = TREE_CHAIN (f_gpr);
3225 f_ovf = TREE_CHAIN (f_fpr);
3226 f_sav = TREE_CHAIN (f_ovf);
3228 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3229 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3230 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3231 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3232 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3234 /* Count number of gp and fp argument registers used. */
3235 words = current_function_args_info.words;
3236 n_gpr = current_function_args_info.regno;
3237 n_fpr = current_function_args_info.sse_regno;
3239 if (TARGET_DEBUG_ARG)
3240 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3241 (int) words, (int) n_gpr, (int) n_fpr);
3243 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3244 build_int_cst (NULL_TREE, n_gpr * 8));
3245 TREE_SIDE_EFFECTS (t) = 1;
3246 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3248 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3249 build_int_cst (NULL_TREE, n_fpr * 16 + 8*REGPARM_MAX));
3250 TREE_SIDE_EFFECTS (t) = 1;
3251 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3253 /* Find the overflow area. */
3254 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3255 if (words != 0)
3256 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3257 build_int_cst (NULL_TREE, words * UNITS_PER_WORD));
3258 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3259 TREE_SIDE_EFFECTS (t) = 1;
3260 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3262 /* Find the register save area.
3263 The prologue of the function saves it right above the stack frame. */
3264 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3265 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3266 TREE_SIDE_EFFECTS (t) = 1;
3267 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
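/* A worked example of the initialization above: for
       int f (int a, double x, ...);
   one integer and one SSE register are consumed by the named arguments, so
   va_start stores gp_offset = 1 * 8 = 8 and fp_offset = 1 * 16 + 8 *
   REGPARM_MAX = 64 (assuming REGPARM_MAX is 6, i.e. a 48-byte integer part
   of the register save area), points overflow_arg_area at the incoming stack
   arguments, and points reg_save_area at the block saved by
   ix86_setup_incoming_varargs.  */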
3270 /* Implement va_arg. */
3272 tree
3273 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3275 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3276 tree f_gpr, f_fpr, f_ovf, f_sav;
3277 tree gpr, fpr, ovf, sav, t;
3278 int size, rsize;
3279 tree lab_false, lab_over = NULL_TREE;
3280 tree addr, t2;
3281 rtx container;
3282 int indirect_p = 0;
3283 tree ptrtype;
3285 /* Only 64bit target needs something special. */
3286 if (!TARGET_64BIT)
3287 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3289 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3290 f_fpr = TREE_CHAIN (f_gpr);
3291 f_ovf = TREE_CHAIN (f_fpr);
3292 f_sav = TREE_CHAIN (f_ovf);
3294 valist = build_va_arg_indirect_ref (valist);
3295 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
3296 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
3297 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
3298 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
3300 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3301 if (indirect_p)
3302 type = build_pointer_type (type);
3303 size = int_size_in_bytes (type);
3304 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3306 container = construct_container (TYPE_MODE (type), type, 0,
3307 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3309 /* Pull the value out of the saved registers ...  */
3312 addr = create_tmp_var (ptr_type_node, "addr");
3313 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
3315 if (container)
3317 int needed_intregs, needed_sseregs;
3318 bool need_temp;
3319 tree int_addr, sse_addr;
3321 lab_false = create_artificial_label ();
3322 lab_over = create_artificial_label ();
3324 examine_argument (TYPE_MODE (type), type, 0,
3325 &needed_intregs, &needed_sseregs);
3327 need_temp = (!REG_P (container)
3328 && ((needed_intregs && TYPE_ALIGN (type) > 64)
3329 || TYPE_ALIGN (type) > 128));
3331 /* In case we are passing a structure, verify that it forms a consecutive
3332 block in the register save area. If not, we need to do moves. */
3333 if (!need_temp && !REG_P (container))
3335 /* Verify that all registers are strictly consecutive */
3336 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3338 int i;
3340 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3342 rtx slot = XVECEXP (container, 0, i);
3343 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3344 || INTVAL (XEXP (slot, 1)) != i * 16)
3345 need_temp = 1;
3348 else
3350 int i;
3352 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3354 rtx slot = XVECEXP (container, 0, i);
3355 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3356 || INTVAL (XEXP (slot, 1)) != i * 8)
3357 need_temp = 1;
3361 if (!need_temp)
3363 int_addr = addr;
3364 sse_addr = addr;
3366 else
3368 int_addr = create_tmp_var (ptr_type_node, "int_addr");
3369 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
3370 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
3371 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
3373 /* First ensure that we fit completely in registers. */
3374 if (needed_intregs)
3376 t = build_int_cst (TREE_TYPE (gpr),
3377 (REGPARM_MAX - needed_intregs + 1) * 8);
3378 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
3379 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3380 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3381 gimplify_and_add (t, pre_p);
3383 if (needed_sseregs)
3385 t = build_int_cst (TREE_TYPE (fpr),
3386 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
3387 + REGPARM_MAX * 8);
3388 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
3389 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
3390 t = build (COND_EXPR, void_type_node, t, t2, NULL_TREE);
3391 gimplify_and_add (t, pre_p);
3394 /* Compute index to start of area used for integer regs. */
3395 if (needed_intregs)
3397 /* int_addr = gpr + sav; */
3398 t = build2 (PLUS_EXPR, ptr_type_node, sav, gpr);
3399 t = build2 (MODIFY_EXPR, void_type_node, int_addr, t);
3400 gimplify_and_add (t, pre_p);
3402 if (needed_sseregs)
3404 /* sse_addr = fpr + sav; */
3405 t = build2 (PLUS_EXPR, ptr_type_node, sav, fpr);
3406 t = build2 (MODIFY_EXPR, void_type_node, sse_addr, t);
3407 gimplify_and_add (t, pre_p);
3409 if (need_temp)
3411 int i;
3412 tree temp = create_tmp_var (type, "va_arg_tmp");
3414 /* addr = &temp; */
3415 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
3416 t = build2 (MODIFY_EXPR, void_type_node, addr, t);
3417 gimplify_and_add (t, pre_p);
3419 for (i = 0; i < XVECLEN (container, 0); i++)
3421 rtx slot = XVECEXP (container, 0, i);
3422 rtx reg = XEXP (slot, 0);
3423 enum machine_mode mode = GET_MODE (reg);
3424 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
3425 tree addr_type = build_pointer_type (piece_type);
3426 tree src_addr, src;
3427 int src_offset;
3428 tree dest_addr, dest;
3430 if (SSE_REGNO_P (REGNO (reg)))
3432 src_addr = sse_addr;
3433 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3435 else
3437 src_addr = int_addr;
3438 src_offset = REGNO (reg) * 8;
3440 src_addr = fold_convert (addr_type, src_addr);
3441 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
3442 size_int (src_offset)));
3443 src = build_va_arg_indirect_ref (src_addr);
3445 dest_addr = fold_convert (addr_type, addr);
3446 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
3447 size_int (INTVAL (XEXP (slot, 1)))));
3448 dest = build_va_arg_indirect_ref (dest_addr);
3450 t = build2 (MODIFY_EXPR, void_type_node, dest, src);
3451 gimplify_and_add (t, pre_p);
3455 if (needed_intregs)
3457 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3458 build_int_cst (NULL_TREE, needed_intregs * 8));
3459 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3460 gimplify_and_add (t, pre_p);
3462 if (needed_sseregs)
3464 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3465 build_int_cst (NULL_TREE, needed_sseregs * 16));
3466 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3467 gimplify_and_add (t, pre_p);
3470 t = build1 (GOTO_EXPR, void_type_node, lab_over);
3471 gimplify_and_add (t, pre_p);
3473 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3474 append_to_statement_list (t, pre_p);
3477 /* ... otherwise out of the overflow area. */
3479 /* Care for on-stack alignment if needed. */
3480 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3481 t = ovf;
3482 else
3484 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3485 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf,
3486 build_int_cst (NULL_TREE, align - 1));
3487 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3488 build_int_cst (NULL_TREE, -align));
3490 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
3492 t2 = build2 (MODIFY_EXPR, void_type_node, addr, t);
3493 gimplify_and_add (t2, pre_p);
3495 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
3496 build_int_cst (NULL_TREE, rsize * UNITS_PER_WORD));
3497 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3498 gimplify_and_add (t, pre_p);
3500 if (container)
3502 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3503 append_to_statement_list (t, pre_p);
3506 ptrtype = build_pointer_type (type);
3507 addr = fold_convert (ptrtype, addr);
3509 if (indirect_p)
3510 addr = build_va_arg_indirect_ref (addr);
3511 return build_va_arg_indirect_ref (addr);
3514 /* Return nonzero if OPNUM's MEM should be matched
3515 in movabs* patterns. */
3518 ix86_check_movabs (rtx insn, int opnum)
3520 rtx set, mem;
3522 set = PATTERN (insn);
3523 if (GET_CODE (set) == PARALLEL)
3524 set = XVECEXP (set, 0, 0);
3525 if (GET_CODE (set) != SET)
3526 abort ();
3527 mem = XEXP (set, opnum);
3528 while (GET_CODE (mem) == SUBREG)
3529 mem = SUBREG_REG (mem);
3530 if (GET_CODE (mem) != MEM)
3531 abort ();
3532 return (volatile_ok || !MEM_VOLATILE_P (mem));
3535 /* Initialize the table of extra 80387 mathematical constants. */
3537 static void
3538 init_ext_80387_constants (void)
3540 static const char * cst[5] =
3542 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
3543 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
3544 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
3545 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
3546 "3.1415926535897932385128089594061862044", /* 4: fldpi */
3548 int i;
3550 for (i = 0; i < 5; i++)
3552 real_from_string (&ext_80387_constants_table[i], cst[i]);
3553 /* Ensure each constant is rounded to XFmode precision. */
3554 real_convert (&ext_80387_constants_table[i],
3555 XFmode, &ext_80387_constants_table[i]);
3558 ext_80387_constants_init = 1;
3561 /* Return a nonzero index (1..7) if the constant X can be loaded with a special
3562 80387 instruction, 0 if it cannot, and -1 if X is not a float CONST_DOUBLE. */
3565 standard_80387_constant_p (rtx x)
3567 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
3568 return -1;
3570 if (x == CONST0_RTX (GET_MODE (x)))
3571 return 1;
3572 if (x == CONST1_RTX (GET_MODE (x)))
3573 return 2;
3575 /* For XFmode constants, try to find a special 80387 instruction when
3576 optimizing for size or on those CPUs that benefit from them. */
3577 if (GET_MODE (x) == XFmode
3578 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
3580 REAL_VALUE_TYPE r;
3581 int i;
3583 if (! ext_80387_constants_init)
3584 init_ext_80387_constants ();
3586 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
3587 for (i = 0; i < 5; i++)
3588 if (real_identical (&r, &ext_80387_constants_table[i]))
3589 return i + 3;
3592 return 0;
3595 /* Return the opcode of the special instruction to be used to load
3596 the constant X. */
3598 const char *
3599 standard_80387_constant_opcode (rtx x)
3601 switch (standard_80387_constant_p (x))
3603 case 1:
3604 return "fldz";
3605 case 2:
3606 return "fld1";
3607 case 3:
3608 return "fldlg2";
3609 case 4:
3610 return "fldln2";
3611 case 5:
3612 return "fldl2e";
3613 case 6:
3614 return "fldl2t";
3615 case 7:
3616 return "fldpi";
3618 abort ();
3621 /* Return the CONST_DOUBLE representing the 80387 constant that is
3622 loaded by the specified special instruction. The argument IDX
3623 matches the return value from standard_80387_constant_p. */
3626 standard_80387_constant_rtx (int idx)
3628 int i;
3630 if (! ext_80387_constants_init)
3631 init_ext_80387_constants ();
3633 switch (idx)
3635 case 3:
3636 case 4:
3637 case 5:
3638 case 6:
3639 case 7:
3640 i = idx - 3;
3641 break;
3643 default:
3644 abort ();
3647 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
3648 XFmode);
3651 /* Return 1 if X is an FP constant we can load into an SSE register without using memory. */
3654 standard_sse_constant_p (rtx x)
3656 if (x == const0_rtx)
3657 return 1;
3658 return (x == CONST0_RTX (GET_MODE (x)));
3661 /* Returns 1 if OP contains a symbol reference */
3664 symbolic_reference_mentioned_p (rtx op)
3666 const char *fmt;
3667 int i;
3669 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
3670 return 1;
3672 fmt = GET_RTX_FORMAT (GET_CODE (op));
3673 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
3675 if (fmt[i] == 'E')
3677 int j;
3679 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
3680 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
3681 return 1;
3684 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
3685 return 1;
3688 return 0;
3691 /* Return 1 if it is appropriate to emit `ret' instructions in the
3692 body of a function. Do this only if the epilogue is simple, needing a
3693 couple of insns. Prior to reloading, we can't tell how many registers
3694 must be saved, so return 0 then. Return 0 if there is no frame
3695 marker to de-allocate.
3697 If NON_SAVING_SETJMP is defined and true, then it is not possible
3698 for the epilogue to be simple, so return 0. This is a special case
3699 since NON_SAVING_SETJMP will not cause regs_ever_live to change
3700 until final, but jump_optimize may need to know sooner if a
3701 `return' is OK. */
3704 ix86_can_use_return_insn_p (void)
3706 struct ix86_frame frame;
3708 #ifdef NON_SAVING_SETJMP
3709 if (NON_SAVING_SETJMP && current_function_calls_setjmp)
3710 return 0;
3711 #endif
3713 if (! reload_completed || frame_pointer_needed)
3714 return 0;
3716 /* Don't allow popping more than 32768 bytes of arguments, since that's
3717 all we can do with one instruction. */
3718 if (current_function_pops_args
3719 && current_function_args_size >= 32768)
3720 return 0;
3722 ix86_compute_frame_layout (&frame);
3723 return frame.to_allocate == 0 && frame.nregs == 0;
3726 /* Value should be nonzero if functions must have frame pointers.
3727 Zero means the frame pointer need not be set up (and parms may
3728 be accessed via the stack pointer) in functions that seem suitable. */
3731 ix86_frame_pointer_required (void)
3733 /* If we accessed previous frames, then the generated code expects
3734 to be able to access the saved ebp value in our frame. */
3735 if (cfun->machine->accesses_prev_frame)
3736 return 1;
3738 /* Several x86 OSes need a frame pointer for other reasons,
3739 usually pertaining to setjmp. */
3740 if (SUBTARGET_FRAME_POINTER_REQUIRED)
3741 return 1;
3743 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
3744 the frame pointer by default. Turn it back on now if we've not
3745 got a leaf function. */
3746 if (TARGET_OMIT_LEAF_FRAME_POINTER
3747 && (!current_function_is_leaf))
3748 return 1;
3750 if (current_function_profile)
3751 return 1;
3753 return 0;
3756 /* Record that the current function accesses previous call frames. */
3758 void
3759 ix86_setup_frame_addresses (void)
3761 cfun->machine->accesses_prev_frame = 1;
3764 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
3765 # define USE_HIDDEN_LINKONCE 1
3766 #else
3767 # define USE_HIDDEN_LINKONCE 0
3768 #endif
3770 static int pic_labels_used;
3772 /* Fills in the label name that should be used for a pc thunk for
3773 the given register. */
3775 static void
3776 get_pc_thunk_name (char name[32], unsigned int regno)
3778 if (USE_HIDDEN_LINKONCE)
3779 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
3780 else
3781 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
3785 /* This function generates code for -fpic that loads %ebx with
3786 the return address of the caller and then returns. */
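/* A sketch of the thunk body emitted per requested register below, shown
   here for %ebx (the exact label form depends on USE_HIDDEN_LINKONCE):

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret
*/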
3788 void
3789 ix86_file_end (void)
3791 rtx xops[2];
3792 int regno;
3794 for (regno = 0; regno < 8; ++regno)
3796 char name[32];
3798 if (! ((pic_labels_used >> regno) & 1))
3799 continue;
3801 get_pc_thunk_name (name, regno);
3803 if (USE_HIDDEN_LINKONCE)
3805 tree decl;
3807 decl = build_decl (FUNCTION_DECL, get_identifier (name),
3808 error_mark_node);
3809 TREE_PUBLIC (decl) = 1;
3810 TREE_STATIC (decl) = 1;
3811 DECL_ONE_ONLY (decl) = 1;
3813 (*targetm.asm_out.unique_section) (decl, 0);
3814 named_section (decl, NULL, 0);
3816 (*targetm.asm_out.globalize_label) (asm_out_file, name);
3817 fputs ("\t.hidden\t", asm_out_file);
3818 assemble_name (asm_out_file, name);
3819 fputc ('\n', asm_out_file);
3820 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
3822 else
3824 text_section ();
3825 ASM_OUTPUT_LABEL (asm_out_file, name);
3828 xops[0] = gen_rtx_REG (SImode, regno);
3829 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
3830 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
3831 output_asm_insn ("ret", xops);
3834 if (NEED_INDICATE_EXEC_STACK)
3835 file_end_indicate_exec_stack ();
3838 /* Emit code for the SET_GOT patterns. */
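/* As an illustrative sketch, the common -fpic sequence produced below when
   the pc thunk is not used (assuming %ebx as DEST, with GOT_SYMBOL_NAME
   usually expanding to _GLOBAL_OFFSET_TABLE_) looks like:

	call	.L2
   .L2:	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx
*/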
3840 const char *
3841 output_set_got (rtx dest)
3843 rtx xops[3];
3845 xops[0] = dest;
3846 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
3848 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
3850 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
3852 if (!flag_pic)
3853 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
3854 else
3855 output_asm_insn ("call\t%a2", xops);
3857 #if TARGET_MACHO
3858 /* Output the "canonical" label name ("Lxx$pb") here too. This
3859 is what will be referred to by the Mach-O PIC subsystem. */
3860 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
3861 #endif
3862 (*targetm.asm_out.internal_label) (asm_out_file, "L",
3863 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
3865 if (flag_pic)
3866 output_asm_insn ("pop{l}\t%0", xops);
3868 else
3870 char name[32];
3871 get_pc_thunk_name (name, REGNO (dest));
3872 pic_labels_used |= 1 << REGNO (dest);
3874 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
3875 xops[2] = gen_rtx_MEM (QImode, xops[2]);
3876 output_asm_insn ("call\t%X2", xops);
3879 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
3880 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
3881 else if (!TARGET_MACHO)
3882 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
3884 return "";
3887 /* Generate a "push" pattern for input ARG. */
3889 static rtx
3890 gen_push (rtx arg)
3892 return gen_rtx_SET (VOIDmode,
3893 gen_rtx_MEM (Pmode,
3894 gen_rtx_PRE_DEC (Pmode,
3895 stack_pointer_rtx)),
3896 arg);
3899 /* Return >= 0 if there is an unused call-clobbered register available
3900 for the entire function. */
3902 static unsigned int
3903 ix86_select_alt_pic_regnum (void)
3905 if (current_function_is_leaf && !current_function_profile)
3907 int i;
3908 for (i = 2; i >= 0; --i)
3909 if (!regs_ever_live[i])
3910 return i;
3913 return INVALID_REGNUM;
3916 /* Return 1 if we need to save REGNO. */
3917 static int
3918 ix86_save_reg (unsigned int regno, int maybe_eh_return)
3920 if (pic_offset_table_rtx
3921 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
3922 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
3923 || current_function_profile
3924 || current_function_calls_eh_return
3925 || current_function_uses_const_pool))
3927 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
3928 return 0;
3929 return 1;
3932 if (current_function_calls_eh_return && maybe_eh_return)
3934 unsigned i;
3935 for (i = 0; ; i++)
3937 unsigned test = EH_RETURN_DATA_REGNO (i);
3938 if (test == INVALID_REGNUM)
3939 break;
3940 if (test == regno)
3941 return 1;
3945 return (regs_ever_live[regno]
3946 && !call_used_regs[regno]
3947 && !fixed_regs[regno]
3948 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
3951 /* Return number of registers to be saved on the stack. */
3953 static int
3954 ix86_nsaved_regs (void)
3956 int nregs = 0;
3957 int regno;
3959 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
3960 if (ix86_save_reg (regno, true))
3961 nregs++;
3962 return nregs;
3965 /* Return the offset between two registers, one to be eliminated, and the other
3966 its replacement, at the start of a routine. */
3968 HOST_WIDE_INT
3969 ix86_initial_elimination_offset (int from, int to)
3971 struct ix86_frame frame;
3972 ix86_compute_frame_layout (&frame);
3974 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
3975 return frame.hard_frame_pointer_offset;
3976 else if (from == FRAME_POINTER_REGNUM
3977 && to == HARD_FRAME_POINTER_REGNUM)
3978 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
3979 else
3981 if (to != STACK_POINTER_REGNUM)
3982 abort ();
3983 else if (from == ARG_POINTER_REGNUM)
3984 return frame.stack_pointer_offset;
3985 else if (from != FRAME_POINTER_REGNUM)
3986 abort ();
3987 else
3988 return frame.stack_pointer_offset - frame.frame_pointer_offset;
3992 /* Fill the structure ix86_frame describing the frame of the currently compiled function. */
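/* A rough sketch of the layout computed below, from higher to lower
   addresses (inferred from the code, not a normative description):

	return address
	saved %ebp			(if frame_pointer_needed)
	register save area		(nregs words)
	va_arg register save area	(if ix86_save_varrargs_registers)
	padding1			(aligns the locals)
	local variables			(get_frame_size () bytes)
	outgoing arguments area
	padding2			(aligns the stack boundary)

   hard_frame_pointer_offset, frame_pointer_offset and stack_pointer_offset
   record how far each of these boundaries lies below the incoming frame.  */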
3994 static void
3995 ix86_compute_frame_layout (struct ix86_frame *frame)
3997 HOST_WIDE_INT total_size;
3998 unsigned int stack_alignment_needed;
3999 HOST_WIDE_INT offset;
4000 unsigned int preferred_alignment;
4001 HOST_WIDE_INT size = get_frame_size ();
4003 frame->nregs = ix86_nsaved_regs ();
4004 total_size = size;
4006 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4007 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4009 /* During reload iterations the number of registers saved can change.
4010 Recompute the value as needed. Do not recompute when the number of registers
4011 didn't change, as reload makes multiple calls to this function and does not
4012 expect the decision to change within a single iteration. */
4013 if (!optimize_size
4014 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4016 int count = frame->nregs;
4018 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4019 /* The fast prologue uses move instead of push to save registers. This
4020 is significantly longer, but also executes faster as modern hardware
4021 can execute the moves in parallel, but can't do that for push/pop.
4023 Be careful about choosing which prologue to emit: when the function takes
4024 many instructions to execute we may use the slow version, just as we do
4025 when the function is known to be outside a hot spot (known only with
4026 profile feedback). Weight the size of the function by the number of
4027 registers to save, as it is cheap to use one or two push instructions
4028 but very slow to use many of them. */
4029 if (count)
4030 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4031 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4032 || (flag_branch_probabilities
4033 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4034 cfun->machine->use_fast_prologue_epilogue = false;
4035 else
4036 cfun->machine->use_fast_prologue_epilogue
4037 = !expensive_function_p (count);
4039 if (TARGET_PROLOGUE_USING_MOVE
4040 && cfun->machine->use_fast_prologue_epilogue)
4041 frame->save_regs_using_mov = true;
4042 else
4043 frame->save_regs_using_mov = false;
4046 /* Skip return address and saved base pointer. */
4047 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4049 frame->hard_frame_pointer_offset = offset;
4051 /* Do some sanity checking of stack_alignment_needed and
4052 preferred_alignment, since the i386 port is the only one using these
4053 features, and they may break easily. */
4055 if (size && !stack_alignment_needed)
4056 abort ();
4057 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
4058 abort ();
4059 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4060 abort ();
4061 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4062 abort ();
4064 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4065 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
4067 /* Register save area */
4068 offset += frame->nregs * UNITS_PER_WORD;
4070 /* Va-arg area */
4071 if (ix86_save_varrargs_registers)
4073 offset += X86_64_VARARGS_SIZE;
4074 frame->va_arg_size = X86_64_VARARGS_SIZE;
4076 else
4077 frame->va_arg_size = 0;
4079 /* Align start of frame for local function. */
4080 frame->padding1 = ((offset + stack_alignment_needed - 1)
4081 & -stack_alignment_needed) - offset;
4083 offset += frame->padding1;
4085 /* Frame pointer points here. */
4086 frame->frame_pointer_offset = offset;
4088 offset += size;
4090 /* Add the outgoing arguments area. It can be skipped if we eliminated
4091 all the function calls as dead code.
4092 Skipping is however impossible when the function calls alloca, since the
4093 alloca expander assumes that the last current_function_outgoing_args_size
4094 bytes of the stack frame are unused. */
4095 if (ACCUMULATE_OUTGOING_ARGS
4096 && (!current_function_is_leaf || current_function_calls_alloca))
4098 offset += current_function_outgoing_args_size;
4099 frame->outgoing_arguments_size = current_function_outgoing_args_size;
4101 else
4102 frame->outgoing_arguments_size = 0;
4104 /* Align stack boundary. Only needed if we're calling another function
4105 or using alloca. */
4106 if (!current_function_is_leaf || current_function_calls_alloca)
4107 frame->padding2 = ((offset + preferred_alignment - 1)
4108 & -preferred_alignment) - offset;
4109 else
4110 frame->padding2 = 0;
4112 offset += frame->padding2;
4114 /* We've reached end of stack frame. */
4115 frame->stack_pointer_offset = offset;
4117 /* Size prologue needs to allocate. */
4118 frame->to_allocate =
4119 (size + frame->padding1 + frame->padding2
4120 + frame->outgoing_arguments_size + frame->va_arg_size);
4122 if ((!frame->to_allocate && frame->nregs <= 1)
4123 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
4124 frame->save_regs_using_mov = false;
4126 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
4127 && current_function_is_leaf)
4129 frame->red_zone_size = frame->to_allocate;
4130 if (frame->save_regs_using_mov)
4131 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
4132 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
4133 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
4135 else
4136 frame->red_zone_size = 0;
4137 frame->to_allocate -= frame->red_zone_size;
4138 frame->stack_pointer_offset -= frame->red_zone_size;
4139 #if 0
4140 fprintf (stderr, "nregs: %i\n", frame->nregs);
4141 fprintf (stderr, "size: %i\n", size);
4142 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
4143 fprintf (stderr, "padding1: %i\n", frame->padding1);
4144 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
4145 fprintf (stderr, "padding2: %i\n", frame->padding2);
4146 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
4147 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
4148 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
4149 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
4150 frame->hard_frame_pointer_offset);
4151 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
4152 #endif
4155 /* Emit code to save registers in the prologue. */
4157 static void
4158 ix86_emit_save_regs (void)
4160 int regno;
4161 rtx insn;
4163 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4164 if (ix86_save_reg (regno, true))
4166 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
4167 RTX_FRAME_RELATED_P (insn) = 1;
4171 /* Emit code to save registers using MOV insns. The first register
4172 is stored at POINTER + OFFSET. */
4173 static void
4174 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
4176 int regno;
4177 rtx insn;
4179 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4180 if (ix86_save_reg (regno, true))
4182 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
4183 Pmode, offset),
4184 gen_rtx_REG (Pmode, regno));
4185 RTX_FRAME_RELATED_P (insn) = 1;
4186 offset += UNITS_PER_WORD;
4190 /* Expand prologue or epilogue stack adjustment.
4191 The pattern exists to put a dependency on all ebp-based memory accesses.
4192 STYLE should be negative if instructions should be marked as frame related,
4193 zero if the %r11 register is live and cannot be freely used, and positive
4194 otherwise. */
4196 static void
4197 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
4199 rtx insn;
4201 if (! TARGET_64BIT)
4202 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
4203 else if (x86_64_immediate_operand (offset, DImode))
4204 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
4205 else
4207 rtx r11;
4208 /* r11 is used by indirect sibcall return as well, set before the
4209 epilogue and used after the epilogue. ATM indirect sibcall
4210 shouldn't be used together with huge frame sizes in one
4211 function because of the frame_size check in sibcall.c. */
4212 if (style == 0)
4213 abort ();
4214 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4215 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
4216 if (style < 0)
4217 RTX_FRAME_RELATED_P (insn) = 1;
4218 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
4219 offset));
4221 if (style < 0)
4222 RTX_FRAME_RELATED_P (insn) = 1;
4225 /* Expand the prologue into a bunch of separate insns. */
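/* For a typical function using a frame pointer and push-based register
   saves, the insns emitted below amount to something like this sketch
   (32-bit mnemonics, FRAME standing for frame.to_allocate):

	pushl	%ebp
	movl	%esp, %ebp
	pushl	<each saved register>
	subl	$FRAME, %esp
*/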
4227 void
4228 ix86_expand_prologue (void)
4230 rtx insn;
4231 bool pic_reg_used;
4232 struct ix86_frame frame;
4233 HOST_WIDE_INT allocate;
4235 ix86_compute_frame_layout (&frame);
4237 /* Note: AT&T enter does NOT have reversed args. Enter is probably
4238 slower on all targets. Also sdb doesn't like it. */
4240 if (frame_pointer_needed)
4242 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
4243 RTX_FRAME_RELATED_P (insn) = 1;
4245 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
4246 RTX_FRAME_RELATED_P (insn) = 1;
4249 allocate = frame.to_allocate;
4251 if (!frame.save_regs_using_mov)
4252 ix86_emit_save_regs ();
4253 else
4254 allocate += frame.nregs * UNITS_PER_WORD;
4256 /* When using the red zone we may start saving registers before allocating
4257 the stack frame, saving one cycle of the prologue. */
4258 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
4259 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
4260 : stack_pointer_rtx,
4261 -frame.nregs * UNITS_PER_WORD);
4263 if (allocate == 0)
4265 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
4266 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4267 GEN_INT (-allocate), -1);
4268 else
4270 /* Only valid for Win32. */
4271 rtx eax = gen_rtx_REG (SImode, 0);
4272 bool eax_live = ix86_eax_live_at_start_p ();
4274 if (TARGET_64BIT)
4275 abort ();
4277 if (eax_live)
4279 emit_insn (gen_push (eax));
4280 allocate -= 4;
4283 insn = emit_move_insn (eax, GEN_INT (allocate));
4284 RTX_FRAME_RELATED_P (insn) = 1;
4286 insn = emit_insn (gen_allocate_stack_worker (eax));
4287 RTX_FRAME_RELATED_P (insn) = 1;
4289 if (eax_live)
4291 rtx t;
4292 if (frame_pointer_needed)
4293 t = plus_constant (hard_frame_pointer_rtx,
4294 allocate
4295 - frame.to_allocate
4296 - frame.nregs * UNITS_PER_WORD);
4297 else
4298 t = plus_constant (stack_pointer_rtx, allocate);
4299 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
4303 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
4305 if (!frame_pointer_needed || !frame.to_allocate)
4306 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
4307 else
4308 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
4309 -frame.nregs * UNITS_PER_WORD);
4312 pic_reg_used = false;
4313 if (pic_offset_table_rtx
4314 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4315 || current_function_profile))
4317 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
4319 if (alt_pic_reg_used != INVALID_REGNUM)
4320 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
4322 pic_reg_used = true;
4325 if (pic_reg_used)
4327 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
4329 /* Even with accurate pre-reload life analysis, we can wind up
4330 deleting all references to the pic register after reload.
4331 Consider if cross-jumping unifies two sides of a branch
4332 controlled by a comparison vs the only read from a global.
4333 In which case, allow the set_got to be deleted, though we're
4334 too late to do anything about the ebx save in the prologue. */
4335 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
4338 /* Prevent function calls from being scheduled before the call to mcount.
4339 In the pic_reg_used case, make sure that the got load isn't deleted. */
4340 if (current_function_profile)
4341 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
4344 /* Emit code to restore saved registers using MOV insns. First register
4345 is restored from POINTER + OFFSET. */
4346 static void
4347 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
4348 int maybe_eh_return)
4350 int regno;
4351 rtx base_address = gen_rtx_MEM (Pmode, pointer);
4353 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4354 if (ix86_save_reg (regno, maybe_eh_return))
4356 /* Ensure that adjust_address won't be forced to produce pointer
4357 out of range allowed by x86-64 instruction set. */
4358 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
4360 rtx r11;
4362 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
4363 emit_move_insn (r11, GEN_INT (offset));
4364 emit_insn (gen_adddi3 (r11, r11, pointer));
4365 base_address = gen_rtx_MEM (Pmode, r11);
4366 offset = 0;
4368 emit_move_insn (gen_rtx_REG (Pmode, regno),
4369 adjust_address (base_address, Pmode, offset));
4370 offset += UNITS_PER_WORD;
4374 /* Restore function stack, frame, and registers. */
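/* Two strategies are used below: either restore registers with moves
   relative to %ebp or %esp and then rebuild the stack pointer (often via
   "leave"), or deallocate the frame first and pop the saved registers one
   by one.  The heuristic choosing between them is described in the comment
   further down.  */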
4376 void
4377 ix86_expand_epilogue (int style)
4379 int regno;
4380 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
4381 struct ix86_frame frame;
4382 HOST_WIDE_INT offset;
4384 ix86_compute_frame_layout (&frame);
4386 /* Calculate start of saved registers relative to ebp. Special care
4387 must be taken for the normal return case of a function using
4388 eh_return: the eax and edx registers are marked as saved, but not
4389 restored along this path. */
4390 offset = frame.nregs;
4391 if (current_function_calls_eh_return && style != 2)
4392 offset -= 2;
4393 offset *= -UNITS_PER_WORD;
4395 /* If we're only restoring one register and sp is not valid then
4396 use a move instruction to restore the register, since it's
4397 less work than reloading sp and popping the register.
4399 The default code results in a stack adjustment using an add/lea instruction,
4400 while this code results in a LEAVE instruction (or discrete equivalent),
4401 so it is profitable in some other cases as well, especially when there
4402 are no registers to restore. We also use this code when TARGET_USE_LEAVE
4403 and there is exactly one register to pop. This heuristic may need some
4404 tuning in the future. */
4405 if ((!sp_valid && frame.nregs <= 1)
4406 || (TARGET_EPILOGUE_USING_MOVE
4407 && cfun->machine->use_fast_prologue_epilogue
4408 && (frame.nregs > 1 || frame.to_allocate))
4409 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
4410 || (frame_pointer_needed && TARGET_USE_LEAVE
4411 && cfun->machine->use_fast_prologue_epilogue
4412 && frame.nregs == 1)
4413 || current_function_calls_eh_return)
4415 /* Restore registers. We can use ebp or esp to address the memory
4416 locations. If both are available, default to ebp, since offsets
4417 are known to be small. The only exception is when esp points directly to
4418 the end of the block of saved registers, where we may simplify the
4419 addressing mode. */
4421 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
4422 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
4423 frame.to_allocate, style == 2);
4424 else
4425 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
4426 offset, style == 2);
4428 /* eh_return epilogues need %ecx added to the stack pointer. */
4429 if (style == 2)
4431 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
4433 if (frame_pointer_needed)
4435 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
4436 tmp = plus_constant (tmp, UNITS_PER_WORD);
4437 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
4439 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
4440 emit_move_insn (hard_frame_pointer_rtx, tmp);
4442 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
4443 const0_rtx, style);
4445 else
4447 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
4448 tmp = plus_constant (tmp, (frame.to_allocate
4449 + frame.nregs * UNITS_PER_WORD));
4450 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
4453 else if (!frame_pointer_needed)
4454 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4455 GEN_INT (frame.to_allocate
4456 + frame.nregs * UNITS_PER_WORD),
4457 style);
4458 /* If not an i386, mov & pop is faster than "leave". */
4459 else if (TARGET_USE_LEAVE || optimize_size
4460 || !cfun->machine->use_fast_prologue_epilogue)
4461 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4462 else
4464 pro_epilogue_adjust_stack (stack_pointer_rtx,
4465 hard_frame_pointer_rtx,
4466 const0_rtx, style);
4467 if (TARGET_64BIT)
4468 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4469 else
4470 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4473 else
4475 /* First step is to deallocate the stack frame so that we can
4476 pop the registers. */
4477 if (!sp_valid)
4479 if (!frame_pointer_needed)
4480 abort ();
4481 pro_epilogue_adjust_stack (stack_pointer_rtx,
4482 hard_frame_pointer_rtx,
4483 GEN_INT (offset), style);
4485 else if (frame.to_allocate)
4486 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
4487 GEN_INT (frame.to_allocate), style);
4489 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4490 if (ix86_save_reg (regno, false))
4492 if (TARGET_64BIT)
4493 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
4494 else
4495 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
4497 if (frame_pointer_needed)
4499 /* Leave results in shorter dependency chains on CPUs that are
4500 able to grok it fast. */
4501 if (TARGET_USE_LEAVE)
4502 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
4503 else if (TARGET_64BIT)
4504 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
4505 else
4506 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
4510 /* Sibcall epilogues don't want a return instruction. */
4511 if (style == 0)
4512 return;
4514 if (current_function_pops_args && current_function_args_size)
4516 rtx popc = GEN_INT (current_function_pops_args);
4518 /* i386 can only pop 64K bytes. If asked to pop more, pop
4519 return address, do explicit add, and jump indirectly to the
4520 caller. */
4522 if (current_function_pops_args >= 65536)
4524 rtx ecx = gen_rtx_REG (SImode, 2);
4526 /* There is no "pascal" calling convention in the 64-bit ABI. */
4527 if (TARGET_64BIT)
4528 abort ();
4530 emit_insn (gen_popsi1 (ecx));
4531 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
4532 emit_jump_insn (gen_return_indirect_internal (ecx));
4534 else
4535 emit_jump_insn (gen_return_pop_internal (popc));
4537 else
4538 emit_jump_insn (gen_return_internal ());
4541 /* Reset from the function's potential modifications. */
4543 static void
4544 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4545 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4547 if (pic_offset_table_rtx)
4548 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
4551 /* Extract the parts of an RTL expression that is a valid memory address
4552 for an instruction. Return 0 if the structure of the address is
4553 grossly off. Return -1 if the address contains ASHIFT, so it is not
4554 strictly valid, but is still usable for computing the length of a lea instruction. */
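/* For example (a sketch, assuming 32-bit Pmode), an address such as

	(plus (plus (mult (reg %eax) (const_int 4)) (reg %ebx)) (const_int 12))

   decomposes into base = %ebx, index = %eax, scale = 4, disp = 12, i.e. the
   operand of a "12(%ebx,%eax,4)" memory reference.  */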
4557 ix86_decompose_address (rtx addr, struct ix86_address *out)
4559 rtx base = NULL_RTX;
4560 rtx index = NULL_RTX;
4561 rtx disp = NULL_RTX;
4562 HOST_WIDE_INT scale = 1;
4563 rtx scale_rtx = NULL_RTX;
4564 int retval = 1;
4565 enum ix86_address_seg seg = SEG_DEFAULT;
4567 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
4568 base = addr;
4569 else if (GET_CODE (addr) == PLUS)
4571 rtx addends[4], op;
4572 int n = 0, i;
4574 op = addr;
4577 if (n >= 4)
4578 return 0;
4579 addends[n++] = XEXP (op, 1);
4580 op = XEXP (op, 0);
4582 while (GET_CODE (op) == PLUS);
4583 if (n >= 4)
4584 return 0;
4585 addends[n] = op;
4587 for (i = n; i >= 0; --i)
4589 op = addends[i];
4590 switch (GET_CODE (op))
4592 case MULT:
4593 if (index)
4594 return 0;
4595 index = XEXP (op, 0);
4596 scale_rtx = XEXP (op, 1);
4597 break;
4599 case UNSPEC:
4600 if (XINT (op, 1) == UNSPEC_TP
4601 && TARGET_TLS_DIRECT_SEG_REFS
4602 && seg == SEG_DEFAULT)
4603 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
4604 else
4605 return 0;
4606 break;
4608 case REG:
4609 case SUBREG:
4610 if (!base)
4611 base = op;
4612 else if (!index)
4613 index = op;
4614 else
4615 return 0;
4616 break;
4618 case CONST:
4619 case CONST_INT:
4620 case SYMBOL_REF:
4621 case LABEL_REF:
4622 if (disp)
4623 return 0;
4624 disp = op;
4625 break;
4627 default:
4628 return 0;
4632 else if (GET_CODE (addr) == MULT)
4634 index = XEXP (addr, 0); /* index*scale */
4635 scale_rtx = XEXP (addr, 1);
4637 else if (GET_CODE (addr) == ASHIFT)
4639 rtx tmp;
4641 /* We're called for lea too, which implements ashift on occasion. */
4642 index = XEXP (addr, 0);
4643 tmp = XEXP (addr, 1);
4644 if (GET_CODE (tmp) != CONST_INT)
4645 return 0;
4646 scale = INTVAL (tmp);
4647 if ((unsigned HOST_WIDE_INT) scale > 3)
4648 return 0;
4649 scale = 1 << scale;
4650 retval = -1;
4652 else
4653 disp = addr; /* displacement */
4655 /* Extract the integral value of scale. */
4656 if (scale_rtx)
4658 if (GET_CODE (scale_rtx) != CONST_INT)
4659 return 0;
4660 scale = INTVAL (scale_rtx);
4663 /* Allow the arg pointer and stack pointer as an index if there is no scaling. */
4664 if (base && index && scale == 1
4665 && (index == arg_pointer_rtx
4666 || index == frame_pointer_rtx
4667 || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))
4669 rtx tmp = base;
4670 base = index;
4671 index = tmp;
4674 /* Special case: %ebp cannot be encoded as a base without a displacement. */
4675 if ((base == hard_frame_pointer_rtx
4676 || base == frame_pointer_rtx
4677 || base == arg_pointer_rtx) && !disp)
4678 disp = const0_rtx;
4680 /* Special case: on K6, [%esi] forces the instruction to be vector decoded.
4681 Avoid this by transforming to [%esi+0]. */
4682 if (ix86_tune == PROCESSOR_K6 && !optimize_size
4683 && base && !index && !disp
4684 && REG_P (base)
4685 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
4686 disp = const0_rtx;
4688 /* Special case: encode reg+reg instead of reg*2. */
4689 if (!base && index && scale && scale == 2)
4690 base = index, scale = 1;
4692 /* Special case: scaling cannot be encoded without base or displacement. */
4693 if (!base && !disp && index && scale != 1)
4694 disp = const0_rtx;
4696 out->base = base;
4697 out->index = index;
4698 out->disp = disp;
4699 out->scale = scale;
4700 out->seg = seg;
4702 return retval;
4705 /* Return the cost of the memory address X.
4706 For i386, it is better to use a complex address than let gcc copy
4707 the address into a reg and make a new pseudo. But not if the address
4708 requires two regs - that would mean more pseudos with longer
4709 lifetimes. */
4710 static int
4711 ix86_address_cost (rtx x)
4713 struct ix86_address parts;
4714 int cost = 1;
4716 if (!ix86_decompose_address (x, &parts))
4717 abort ();
4719 /* More complex memory references are better. */
4720 if (parts.disp && parts.disp != const0_rtx)
4721 cost--;
4722 if (parts.seg != SEG_DEFAULT)
4723 cost--;
4725 /* Attempt to minimize number of registers in the address. */
4726 if ((parts.base
4727 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
4728 || (parts.index
4729 && (!REG_P (parts.index)
4730 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
4731 cost++;
4733 if (parts.base
4734 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
4735 && parts.index
4736 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
4737 && parts.base != parts.index)
4738 cost++;
4740 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
4741 since its predecode logic can't detect the length of instructions
4742 and decoding degenerates to vector decoding. Increase the cost of such
4743 addresses here. The penalty is at least 2 cycles. It may be worthwhile
4744 to split such addresses or even refuse them at all.
4746 The following addressing modes are affected:
4747 [base+scale*index]
4748 [scale*index+disp]
4749 [base+index]
4751 The first and last cases may be avoidable by explicitly coding the zero into
4752 the memory address, but I don't have an AMD-K6 machine handy to check this
4753 theory. */
4755 if (TARGET_K6
4756 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
4757 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
4758 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
4759 cost += 10;
4761 return cost;
4764 /* If X is a machine specific address (i.e. a symbol or label being
4765 referenced as a displacement from the GOT implemented using an
4766 UNSPEC), then return the base term. Otherwise return X. */
4769 ix86_find_base_term (rtx x)
4771 rtx term;
4773 if (TARGET_64BIT)
4775 if (GET_CODE (x) != CONST)
4776 return x;
4777 term = XEXP (x, 0);
4778 if (GET_CODE (term) == PLUS
4779 && (GET_CODE (XEXP (term, 1)) == CONST_INT
4780 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
4781 term = XEXP (term, 0);
4782 if (GET_CODE (term) != UNSPEC
4783 || XINT (term, 1) != UNSPEC_GOTPCREL)
4784 return x;
4786 term = XVECEXP (term, 0, 0);
4788 if (GET_CODE (term) != SYMBOL_REF
4789 && GET_CODE (term) != LABEL_REF)
4790 return x;
4792 return term;
4795 term = ix86_delegitimize_address (x);
4797 if (GET_CODE (term) != SYMBOL_REF
4798 && GET_CODE (term) != LABEL_REF)
4799 return x;
4801 return term;
4804 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
4805 this is used to form addresses to local data when -fPIC is in
4806 use. */
4808 static bool
4809 darwin_local_data_pic (rtx disp)
4811 if (GET_CODE (disp) == MINUS)
4813 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
4814 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
4815 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
4817 const char *sym_name = XSTR (XEXP (disp, 1), 0);
4818 if (! strcmp (sym_name, "<pic base>"))
4819 return true;
4823 return false;
4826 /* Determine if a given RTX is a valid constant. We already know this
4827 satisfies CONSTANT_P. */
4829 bool
4830 legitimate_constant_p (rtx x)
4832 switch (GET_CODE (x))
4834 case CONST:
4835 x = XEXP (x, 0);
4837 if (GET_CODE (x) == PLUS)
4839 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
4840 return false;
4841 x = XEXP (x, 0);
4844 if (TARGET_MACHO && darwin_local_data_pic (x))
4845 return true;
4847 /* Only some unspecs are valid as "constants". */
4848 if (GET_CODE (x) == UNSPEC)
4849 switch (XINT (x, 1))
4851 case UNSPEC_TPOFF:
4852 case UNSPEC_NTPOFF:
4853 return local_exec_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4854 case UNSPEC_DTPOFF:
4855 return local_dynamic_symbolic_operand (XVECEXP (x, 0, 0), Pmode);
4856 default:
4857 return false;
4860 /* We must have drilled down to a symbol. */
4861 if (!symbolic_operand (x, Pmode))
4862 return false;
4863 /* FALLTHRU */
4865 case SYMBOL_REF:
4866 /* TLS symbols are never valid. */
4867 if (tls_symbolic_operand (x, Pmode))
4868 return false;
4869 break;
4871 default:
4872 break;
4875 /* Otherwise we handle everything else in the move patterns. */
4876 return true;
4879 /* Determine if it's legal to put X into the constant pool. This
4880 is not possible for the address of thread-local symbols, which
4881 is checked above. */
4883 static bool
4884 ix86_cannot_force_const_mem (rtx x)
4886 return !legitimate_constant_p (x);
4889 /* Determine if a given RTX is a valid constant address. */
4891 bool
4892 constant_address_p (rtx x)
4894 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
4897 /* Nonzero if the constant value X is a legitimate general operand
4898 when generating PIC code. It is given that flag_pic is on and
4899 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
4901 bool
4902 legitimate_pic_operand_p (rtx x)
4904 rtx inner;
4906 switch (GET_CODE (x))
4908 case CONST:
4909 inner = XEXP (x, 0);
4911 /* Only some unspecs are valid as "constants". */
4912 if (GET_CODE (inner) == UNSPEC)
4913 switch (XINT (inner, 1))
4915 case UNSPEC_TPOFF:
4916 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
4917 default:
4918 return false;
4920 /* FALLTHRU */
4922 case SYMBOL_REF:
4923 case LABEL_REF:
4924 return legitimate_pic_address_disp_p (x);
4926 default:
4927 return true;
4931 /* Determine if a given CONST RTX is a valid memory displacement
4932 in PIC mode. */
4935 legitimate_pic_address_disp_p (rtx disp)
4937 bool saw_plus;
4939 /* In 64bit mode we can allow direct addresses of symbols and labels
4940 when they are not dynamic symbols. */
4941 if (TARGET_64BIT)
4943 /* TLS references should always be enclosed in UNSPEC. */
4944 if (tls_symbolic_operand (disp, GET_MODE (disp)))
4945 return 0;
4946 if (GET_CODE (disp) == SYMBOL_REF
4947 && ix86_cmodel == CM_SMALL_PIC
4948 && SYMBOL_REF_LOCAL_P (disp))
4949 return 1;
4950 if (GET_CODE (disp) == LABEL_REF)
4951 return 1;
4952 if (GET_CODE (disp) == CONST
4953 && GET_CODE (XEXP (disp, 0)) == PLUS)
4955 rtx op0 = XEXP (XEXP (disp, 0), 0);
4956 rtx op1 = XEXP (XEXP (disp, 0), 1);
4958 /* TLS references should always be enclosed in UNSPEC. */
4959 if (tls_symbolic_operand (op0, GET_MODE (op0)))
4960 return 0;
4961 if (((GET_CODE (op0) == SYMBOL_REF
4962 && ix86_cmodel == CM_SMALL_PIC
4963 && SYMBOL_REF_LOCAL_P (op0))
4964 || GET_CODE (op0) == LABEL_REF)
4965 && GET_CODE (op1) == CONST_INT
4966 && INTVAL (op1) < 16*1024*1024
4967 && INTVAL (op1) >= -16*1024*1024)
4968 return 1;
4971 if (GET_CODE (disp) != CONST)
4972 return 0;
4973 disp = XEXP (disp, 0);
4975 if (TARGET_64BIT)
4977 /* It is unsafe to allow PLUS expressions; this limits the allowed
4978 distance of GOT tables. We should not need these anyway. */
4979 if (GET_CODE (disp) != UNSPEC
4980 || XINT (disp, 1) != UNSPEC_GOTPCREL)
4981 return 0;
4983 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
4984 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
4985 return 0;
4986 return 1;
4989 saw_plus = false;
4990 if (GET_CODE (disp) == PLUS)
4992 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
4993 return 0;
4994 disp = XEXP (disp, 0);
4995 saw_plus = true;
4998 if (TARGET_MACHO && darwin_local_data_pic (disp))
4999 return 1;
5001 if (GET_CODE (disp) != UNSPEC)
5002 return 0;
5004 switch (XINT (disp, 1))
5006 case UNSPEC_GOT:
5007 if (saw_plus)
5008 return false;
5009 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5010 case UNSPEC_GOTOFF:
5011 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5012 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5013 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5014 return false;
5015 case UNSPEC_GOTTPOFF:
5016 case UNSPEC_GOTNTPOFF:
5017 case UNSPEC_INDNTPOFF:
5018 if (saw_plus)
5019 return false;
5020 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5021 case UNSPEC_NTPOFF:
5022 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5023 case UNSPEC_DTPOFF:
5024 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5027 return 0;
5030 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5031 memory address for an instruction. The MODE argument is the machine mode
5032 for the MEM expression that wants to use this address.
5034 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5035 convert common non-canonical forms to canonical form so that they will
5036 be recognized. */
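/* The checks below validate, in order: the base register (a REG in Pmode
   that is valid as a base), the index register, the scale factor (1, 2, 4
   or 8, and only together with an index), and finally the displacement
   (a legitimate constant, or a valid PIC/TLS construct when flag_pic is
   in effect).  */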
5039 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5041 struct ix86_address parts;
5042 rtx base, index, disp;
5043 HOST_WIDE_INT scale;
5044 const char *reason = NULL;
5045 rtx reason_rtx = NULL_RTX;
5047 if (TARGET_DEBUG_ADDR)
5049 fprintf (stderr,
5050 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5051 GET_MODE_NAME (mode), strict);
5052 debug_rtx (addr);
5055 if (ix86_decompose_address (addr, &parts) <= 0)
5057 reason = "decomposition failed";
5058 goto report_error;
5061 base = parts.base;
5062 index = parts.index;
5063 disp = parts.disp;
5064 scale = parts.scale;
5066 /* Validate base register.
5068 Don't allow SUBREGs here; they can lead to spill failures when the base
5069 is one word out of a two word structure, which is represented internally
5070 as a DImode int. */
5072 if (base)
5074 reason_rtx = base;
5076 if (GET_CODE (base) != REG)
5078 reason = "base is not a register";
5079 goto report_error;
5082 if (GET_MODE (base) != Pmode)
5084 reason = "base is not in Pmode";
5085 goto report_error;
5088 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
5089 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
5091 reason = "base is not valid";
5092 goto report_error;
5096 /* Validate index register.
5098 Don't allow SUBREGs here; they can lead to spill failures when the index
5099 is one word out of a two word structure, which is represented internally
5100 as a DImode int. */
5102 if (index)
5104 reason_rtx = index;
5106 if (GET_CODE (index) != REG)
5108 reason = "index is not a register";
5109 goto report_error;
5112 if (GET_MODE (index) != Pmode)
5114 reason = "index is not in Pmode";
5115 goto report_error;
5118 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (index))
5119 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (index)))
5121 reason = "index is not valid";
5122 goto report_error;
5126 /* Validate scale factor. */
5127 if (scale != 1)
5129 reason_rtx = GEN_INT (scale);
5130 if (!index)
5132 reason = "scale without index";
5133 goto report_error;
5136 if (scale != 2 && scale != 4 && scale != 8)
5138 reason = "scale is not a valid multiplier";
5139 goto report_error;
5143 /* Validate displacement. */
5144 if (disp)
5146 reason_rtx = disp;
5148 if (GET_CODE (disp) == CONST
5149 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
5150 switch (XINT (XEXP (disp, 0), 1))
5152 case UNSPEC_GOT:
5153 case UNSPEC_GOTOFF:
5154 case UNSPEC_GOTPCREL:
5155 if (!flag_pic)
5156 abort ();
5157 goto is_legitimate_pic;
5159 case UNSPEC_GOTTPOFF:
5160 case UNSPEC_GOTNTPOFF:
5161 case UNSPEC_INDNTPOFF:
5162 case UNSPEC_NTPOFF:
5163 case UNSPEC_DTPOFF:
5164 break;
5166 default:
5167 reason = "invalid address unspec";
5168 goto report_error;
5171 else if (flag_pic && (SYMBOLIC_CONST (disp)
5172 #if TARGET_MACHO
5173 && !machopic_operand_p (disp)
5174 #endif
5177 is_legitimate_pic:
5178 if (TARGET_64BIT && (index || base))
5180 /* foo@dtpoff(%rX) is ok. */
5181 if (GET_CODE (disp) != CONST
5182 || GET_CODE (XEXP (disp, 0)) != PLUS
5183 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
5184 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
5185 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
5186 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
5188 reason = "non-constant pic memory reference";
5189 goto report_error;
5192 else if (! legitimate_pic_address_disp_p (disp))
5194 reason = "displacement is an invalid pic construct";
5195 goto report_error;
5198 /* This code used to verify that a symbolic pic displacement
5199 includes the pic_offset_table_rtx register.
5201 While this is a good idea, unfortunately these constructs may
5202 be created by the "adds using lea" optimization for incorrect
5203 code like:
5205 int a;
5206 int foo(int i)
5208 return *(&a+i);
5211 This code is nonsensical, but results in addressing the
5212 GOT table with a pic_offset_table_rtx base. We can't
5213 just refuse it easily, since it gets matched by the
5214 "addsi3" pattern, which later gets split to lea when the
5215 output register differs from the input. While this
5216 could be handled by a separate addsi pattern for this case
5217 that never results in lea, disabling this test seems to be
5218 the easier and correct fix for the crash. */
5220 else if (GET_CODE (disp) != LABEL_REF
5221 && GET_CODE (disp) != CONST_INT
5222 && (GET_CODE (disp) != CONST
5223 || !legitimate_constant_p (disp))
5224 && (GET_CODE (disp) != SYMBOL_REF
5225 || !legitimate_constant_p (disp)))
5227 reason = "displacement is not constant";
5228 goto report_error;
5230 else if (TARGET_64BIT
5231 && !x86_64_immediate_operand (disp, VOIDmode))
5233 reason = "displacement is out of range";
5234 goto report_error;
5238 /* Everything looks valid. */
5239 if (TARGET_DEBUG_ADDR)
5240 fprintf (stderr, "Success.\n");
5241 return TRUE;
5243 report_error:
5244 if (TARGET_DEBUG_ADDR)
5246 fprintf (stderr, "Error: %s\n", reason);
5247 debug_rtx (reason_rtx);
5249 return FALSE;
5252 /* Return a unique alias set for the GOT. */
5254 static HOST_WIDE_INT
5255 ix86_GOT_alias_set (void)
5257 static HOST_WIDE_INT set = -1;
5258 if (set == -1)
5259 set = new_alias_set ();
5260 return set;
5263 /* Return a legitimate reference for ORIG (an address) using the
5264 register REG. If REG is 0, a new pseudo is generated.
5266 There are two types of references that must be handled:
5268 1. Global data references must load the address from the GOT, via
5269 the PIC reg. An insn is emitted to do this load, and the reg is
5270 returned.
5272 2. Static data references, constant pool addresses, and code labels
5273 compute the address as an offset from the GOT, whose base is in
5274 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
5275 differentiate them from global data objects. The returned
5276 address is the PIC reg + an unspec constant.
5278 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
5279 reg also appears in the address. */
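/* As a sketch of the forms produced below: 32-bit local data becomes
   pic_reg + const (unspec [symbol] GOTOFF), 32-bit global data becomes a
   load from mem (pic_reg + const (unspec [symbol] GOT)), and the 64-bit
   case instead uses a RIP-relative mem (const (unspec [symbol] GOTPCREL)).  */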
5281 static rtx
5282 legitimize_pic_address (rtx orig, rtx reg)
5284 rtx addr = orig;
5285 rtx new = orig;
5286 rtx base;
5288 #if TARGET_MACHO
5289 if (reg == 0)
5290 reg = gen_reg_rtx (Pmode);
5291 /* Use the generic Mach-O PIC machinery. */
5292 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
5293 #endif
5295 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
5296 new = addr;
5297 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
5299 /* This symbol may be referenced via a displacement from the PIC
5300 base address (@GOTOFF). */
5302 if (reload_in_progress)
5303 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5304 if (GET_CODE (addr) == CONST)
5305 addr = XEXP (addr, 0);
5306 if (GET_CODE (addr) == PLUS)
5308 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
5309 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
5311 else
5312 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
5313 new = gen_rtx_CONST (Pmode, new);
5314 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5316 if (reg != 0)
5318 emit_move_insn (reg, new);
5319 new = reg;
5322 else if (GET_CODE (addr) == SYMBOL_REF)
5324 if (TARGET_64BIT)
5326 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
5327 new = gen_rtx_CONST (Pmode, new);
5328 new = gen_const_mem (Pmode, new);
5329 set_mem_alias_set (new, ix86_GOT_alias_set ());
5331 if (reg == 0)
5332 reg = gen_reg_rtx (Pmode);
5333 /* Use gen_movsi directly, otherwise the address is loaded
5334 into a register for CSE. We don't want to CSE these addresses;
5335 instead we CSE addresses from the GOT table, so skip this. */
5336 emit_insn (gen_movsi (reg, new));
5337 new = reg;
5339 else
5341 /* This symbol must be referenced via a load from the
5342 Global Offset Table (@GOT). */
5344 if (reload_in_progress)
5345 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5346 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
5347 new = gen_rtx_CONST (Pmode, new);
5348 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5349 new = gen_const_mem (Pmode, new);
5350 set_mem_alias_set (new, ix86_GOT_alias_set ());
5352 if (reg == 0)
5353 reg = gen_reg_rtx (Pmode);
5354 emit_move_insn (reg, new);
5355 new = reg;
5358 else
5360 if (GET_CODE (addr) == CONST)
5362 addr = XEXP (addr, 0);
5364 /* We must match stuff we generate before. Assume the only
5365 unspecs that can get here are ours. Not that we could do
5366 anything with them anyway.... */
5367 if (GET_CODE (addr) == UNSPEC
5368 || (GET_CODE (addr) == PLUS
5369 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
5370 return orig;
5371 if (GET_CODE (addr) != PLUS)
5372 abort ();
5374 if (GET_CODE (addr) == PLUS)
5376 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
5378 /* Check first to see if this is a constant offset from a @GOTOFF
5379 symbol reference. */
5380 if (local_symbolic_operand (op0, Pmode)
5381 && GET_CODE (op1) == CONST_INT)
5383 if (!TARGET_64BIT)
5385 if (reload_in_progress)
5386 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5387 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
5388 UNSPEC_GOTOFF);
5389 new = gen_rtx_PLUS (Pmode, new, op1);
5390 new = gen_rtx_CONST (Pmode, new);
5391 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
5393 if (reg != 0)
5395 emit_move_insn (reg, new);
5396 new = reg;
5399 else
5401 if (INTVAL (op1) < -16*1024*1024
5402 || INTVAL (op1) >= 16*1024*1024)
5403 new = gen_rtx_PLUS (Pmode, op0, force_reg (Pmode, op1));
5406 else
5408 base = legitimize_pic_address (XEXP (addr, 0), reg);
5409 new = legitimize_pic_address (XEXP (addr, 1),
5410 base == reg ? NULL_RTX : reg);
5412 if (GET_CODE (new) == CONST_INT)
5413 new = plus_constant (base, INTVAL (new));
5414 else
5416 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
5418 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
5419 new = XEXP (new, 1);
5421 new = gen_rtx_PLUS (Pmode, base, new);
5426 return new;
5429 /* Load the thread pointer. If TO_REG is true, force it into a register. */
5431 static rtx
5432 get_thread_pointer (int to_reg)
5434 rtx tp, reg, insn;
5436 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
5437 if (!to_reg)
5438 return tp;
5440 reg = gen_reg_rtx (Pmode);
5441 insn = gen_rtx_SET (VOIDmode, reg, tp);
5442 insn = emit_insn (insn);
5444 return reg;
5447 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
5448 false if we expect this to be used for a memory address and true if
5449 we expect to load the address into a register. */
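/* Roughly, the models handled below map to the usual ELF TLS sequences
   (a summary inferred from the cases, not a specification): global dynamic
   calls the tls_get_addr helper, local dynamic adds a DTPOFF offset to the
   module base it obtains the same way, initial exec loads a GOTTPOFF or
   (GOT)NTPOFF offset from the GOT and combines it with the thread pointer,
   and local exec folds a known TPOFF/NTPOFF offset directly against the
   thread pointer.  */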
5451 static rtx
5452 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
5454 rtx dest, base, off, pic;
5455 int type;
5457 switch (model)
5459 case TLS_MODEL_GLOBAL_DYNAMIC:
5460 dest = gen_reg_rtx (Pmode);
5461 if (TARGET_64BIT)
5463 rtx rax = gen_rtx_REG (Pmode, 0), insns;
5465 start_sequence ();
5466 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
5467 insns = get_insns ();
5468 end_sequence ();
5470 emit_libcall_block (insns, dest, rax, x);
5472 else
5473 emit_insn (gen_tls_global_dynamic_32 (dest, x));
5474 break;
5476 case TLS_MODEL_LOCAL_DYNAMIC:
5477 base = gen_reg_rtx (Pmode);
5478 if (TARGET_64BIT)
5480 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
5482 start_sequence ();
5483 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
5484 insns = get_insns ();
5485 end_sequence ();
5487 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
5488 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
5489 emit_libcall_block (insns, base, rax, note);
5491 else
5492 emit_insn (gen_tls_local_dynamic_base_32 (base));
5494 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
5495 off = gen_rtx_CONST (Pmode, off);
5497 return gen_rtx_PLUS (Pmode, base, off);
5499 case TLS_MODEL_INITIAL_EXEC:
5500 if (TARGET_64BIT)
5502 pic = NULL;
5503 type = UNSPEC_GOTNTPOFF;
5505 else if (flag_pic)
5507 if (reload_in_progress)
5508 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5509 pic = pic_offset_table_rtx;
5510 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
5512 else if (!TARGET_GNU_TLS)
5514 pic = gen_reg_rtx (Pmode);
5515 emit_insn (gen_set_got (pic));
5516 type = UNSPEC_GOTTPOFF;
5518 else
5520 pic = NULL;
5521 type = UNSPEC_INDNTPOFF;
5524 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
5525 off = gen_rtx_CONST (Pmode, off);
5526 if (pic)
5527 off = gen_rtx_PLUS (Pmode, pic, off);
5528 off = gen_const_mem (Pmode, off);
5529 set_mem_alias_set (off, ix86_GOT_alias_set ());
5531 if (TARGET_64BIT || TARGET_GNU_TLS)
5533 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5534 off = force_reg (Pmode, off);
5535 return gen_rtx_PLUS (Pmode, base, off);
5537 else
5539 base = get_thread_pointer (true);
5540 dest = gen_reg_rtx (Pmode);
5541 emit_insn (gen_subsi3 (dest, base, off));
5543 break;
5545 case TLS_MODEL_LOCAL_EXEC:
5546 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
5547 (TARGET_64BIT || TARGET_GNU_TLS)
5548 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
5549 off = gen_rtx_CONST (Pmode, off);
5551 if (TARGET_64BIT || TARGET_GNU_TLS)
5553 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
5554 return gen_rtx_PLUS (Pmode, base, off);
5556 else
5558 base = get_thread_pointer (true);
5559 dest = gen_reg_rtx (Pmode);
5560 emit_insn (gen_subsi3 (dest, base, off));
5562 break;
5564 default:
5565 abort ();
5568 return dest;
5571 /* Try machine-dependent ways of modifying an illegitimate address
5572 to be legitimate. If we find one, return the new, valid address.
5573 This macro is used in only one place: `memory_address' in explow.c.
5575 OLDX is the address as it was before break_out_memory_refs was called.
5576 In some cases it is useful to look at this to decide what needs to be done.
5578 MODE and WIN are passed so that this macro can use
5579 GO_IF_LEGITIMATE_ADDRESS.
5581 It is always safe for this macro to do nothing. It exists to recognize
5582 opportunities to optimize the output.
5584 For the 80386, we handle X+REG by loading X into a register R and
5585 using R+REG. R will go in a general reg and indexing will be used.
5586 However, if REG is a broken-out memory address or multiplication,
5587 nothing needs to be done because REG can certainly go in a general reg.
5589 When -fpic is used, special handling is needed for symbolic references.
5590 See comments by legitimize_pic_address in i386.c for details. */
5592 rtx
5593 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
5595 int changed = 0;
5596 unsigned log;
5598 if (TARGET_DEBUG_ADDR)
5600 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
5601 GET_MODE_NAME (mode));
5602 debug_rtx (x);
5605 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
5606 if (log)
5607 return legitimize_tls_address (x, log, false);
5608 if (GET_CODE (x) == CONST
5609 && GET_CODE (XEXP (x, 0)) == PLUS
5610 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
5611 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
5613 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
5614 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
5617 if (flag_pic && SYMBOLIC_CONST (x))
5618 return legitimize_pic_address (x, 0);
5620 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
5621 if (GET_CODE (x) == ASHIFT
5622 && GET_CODE (XEXP (x, 1)) == CONST_INT
5623 && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
5625 changed = 1;
5626 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
5627 GEN_INT (1 << log));
5630 if (GET_CODE (x) == PLUS)
5632 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
5634 if (GET_CODE (XEXP (x, 0)) == ASHIFT
5635 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
5636 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
5638 changed = 1;
5639 XEXP (x, 0) = gen_rtx_MULT (Pmode,
5640 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
5641 GEN_INT (1 << log));
5644 if (GET_CODE (XEXP (x, 1)) == ASHIFT
5645 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5646 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
5648 changed = 1;
5649 XEXP (x, 1) = gen_rtx_MULT (Pmode,
5650 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
5651 GEN_INT (1 << log));
5654 /* Put multiply first if it isn't already. */
5655 if (GET_CODE (XEXP (x, 1)) == MULT)
5657 rtx tmp = XEXP (x, 0);
5658 XEXP (x, 0) = XEXP (x, 1);
5659 XEXP (x, 1) = tmp;
5660 changed = 1;
5663 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
5664 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
5665 created by virtual register instantiation, register elimination, and
5666 similar optimizations. */
5667 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
5669 changed = 1;
5670 x = gen_rtx_PLUS (Pmode,
5671 gen_rtx_PLUS (Pmode, XEXP (x, 0),
5672 XEXP (XEXP (x, 1), 0)),
5673 XEXP (XEXP (x, 1), 1));
5676 /* Canonicalize
5677 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
5678 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
5679 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
5680 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5681 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
5682 && CONSTANT_P (XEXP (x, 1)))
5684 rtx constant;
5685 rtx other = NULL_RTX;
5687 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5689 constant = XEXP (x, 1);
5690 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
5692 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
5694 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
5695 other = XEXP (x, 1);
5697 else
5698 constant = 0;
5700 if (constant)
5702 changed = 1;
5703 x = gen_rtx_PLUS (Pmode,
5704 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
5705 XEXP (XEXP (XEXP (x, 0), 1), 0)),
5706 plus_constant (other, INTVAL (constant)));
5710 if (changed && legitimate_address_p (mode, x, FALSE))
5711 return x;
5713 if (GET_CODE (XEXP (x, 0)) == MULT)
5715 changed = 1;
5716 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
5719 if (GET_CODE (XEXP (x, 1)) == MULT)
5721 changed = 1;
5722 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
5725 if (changed
5726 && GET_CODE (XEXP (x, 1)) == REG
5727 && GET_CODE (XEXP (x, 0)) == REG)
5728 return x;
5730 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
5732 changed = 1;
5733 x = legitimize_pic_address (x, 0);
5736 if (changed && legitimate_address_p (mode, x, FALSE))
5737 return x;
5739 if (GET_CODE (XEXP (x, 0)) == REG)
5741 rtx temp = gen_reg_rtx (Pmode);
5742 rtx val = force_operand (XEXP (x, 1), temp);
5743 if (val != temp)
5744 emit_move_insn (temp, val);
5746 XEXP (x, 1) = temp;
5747 return x;
5750 else if (GET_CODE (XEXP (x, 1)) == REG)
5752 rtx temp = gen_reg_rtx (Pmode);
5753 rtx val = force_operand (XEXP (x, 0), temp);
5754 if (val != temp)
5755 emit_move_insn (temp, val);
5757 XEXP (x, 0) = temp;
5758 return x;
5762 return x;
5765 /* Print an integer constant expression in assembler syntax. Addition
5766 and subtraction are the only arithmetic that may appear in these
5767 expressions. FILE is the stdio stream to write to, X is the rtx, and
5768 CODE is the operand print code from the output string. */
5770 static void
5771 output_pic_addr_const (FILE *file, rtx x, int code)
5773 char buf[256];
5775 switch (GET_CODE (x))
5777 case PC:
5778 if (flag_pic)
5779 putc ('.', file);
5780 else
5781 abort ();
5782 break;
5784 case SYMBOL_REF:
5785 /* Mark the decl as referenced so that cgraph will output the function. */
5786 if (SYMBOL_REF_DECL (x))
5787 mark_decl_referenced (SYMBOL_REF_DECL (x));
5789 assemble_name (file, XSTR (x, 0));
5790 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
5791 fputs ("@PLT", file);
5792 break;
5794 case LABEL_REF:
5795 x = XEXP (x, 0);
5796 /* FALLTHRU */
5797 case CODE_LABEL:
5798 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
5799 assemble_name (asm_out_file, buf);
5800 break;
5802 case CONST_INT:
5803 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
5804 break;
5806 case CONST:
5807 /* This used to output parentheses around the expression,
5808 but that does not work on the 386 (either ATT or BSD assembler). */
5809 output_pic_addr_const (file, XEXP (x, 0), code);
5810 break;
5812 case CONST_DOUBLE:
5813 if (GET_MODE (x) == VOIDmode)
5815 /* We can use %d if the number is <32 bits and positive. */
5816 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
5817 fprintf (file, "0x%lx%08lx",
5818 (unsigned long) CONST_DOUBLE_HIGH (x),
5819 (unsigned long) CONST_DOUBLE_LOW (x));
5820 else
5821 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
5823 else
5824 /* We can't handle floating point constants;
5825 PRINT_OPERAND must handle them. */
5826 output_operand_lossage ("floating constant misused");
5827 break;
5829 case PLUS:
5830 /* Some assemblers need integer constants to appear first. */
5831 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5833 output_pic_addr_const (file, XEXP (x, 0), code);
5834 putc ('+', file);
5835 output_pic_addr_const (file, XEXP (x, 1), code);
5837 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5839 output_pic_addr_const (file, XEXP (x, 1), code);
5840 putc ('+', file);
5841 output_pic_addr_const (file, XEXP (x, 0), code);
5843 else
5844 abort ();
5845 break;
5847 case MINUS:
5848 if (!TARGET_MACHO)
5849 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
5850 output_pic_addr_const (file, XEXP (x, 0), code);
5851 putc ('-', file);
5852 output_pic_addr_const (file, XEXP (x, 1), code);
5853 if (!TARGET_MACHO)
5854 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
5855 break;
5857 case UNSPEC:
5858 if (XVECLEN (x, 0) != 1)
5859 abort ();
5860 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
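/* Emit the relocation operator that matches the kind of UNSPEC
   generated earlier by legitimize_pic_address or legitimize_tls_address.  */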
5861 switch (XINT (x, 1))
5863 case UNSPEC_GOT:
5864 fputs ("@GOT", file);
5865 break;
5866 case UNSPEC_GOTOFF:
5867 fputs ("@GOTOFF", file);
5868 break;
5869 case UNSPEC_GOTPCREL:
5870 fputs ("@GOTPCREL(%rip)", file);
5871 break;
5872 case UNSPEC_GOTTPOFF:
5873 /* FIXME: This might be @TPOFF in Sun ld too. */
5874 fputs ("@GOTTPOFF", file);
5875 break;
5876 case UNSPEC_TPOFF:
5877 fputs ("@TPOFF", file);
5878 break;
5879 case UNSPEC_NTPOFF:
5880 if (TARGET_64BIT)
5881 fputs ("@TPOFF", file);
5882 else
5883 fputs ("@NTPOFF", file);
5884 break;
5885 case UNSPEC_DTPOFF:
5886 fputs ("@DTPOFF", file);
5887 break;
5888 case UNSPEC_GOTNTPOFF:
5889 if (TARGET_64BIT)
5890 fputs ("@GOTTPOFF(%rip)", file);
5891 else
5892 fputs ("@GOTNTPOFF", file);
5893 break;
5894 case UNSPEC_INDNTPOFF:
5895 fputs ("@INDNTPOFF", file);
5896 break;
5897 default:
5898 output_operand_lossage ("invalid UNSPEC as operand");
5899 break;
5901 break;
5903 default:
5904 output_operand_lossage ("invalid expression as operand");
5908 /* This is called from dwarfout.c via ASM_OUTPUT_DWARF_ADDR_CONST.
5909 We need to handle our special PIC relocations. */
5911 void
5912 i386_dwarf_output_addr_const (FILE *file, rtx x)
5914 #ifdef ASM_QUAD
5915 fprintf (file, "%s", TARGET_64BIT ? ASM_QUAD : ASM_LONG);
5916 #else
5917 if (TARGET_64BIT)
5918 abort ();
5919 fprintf (file, "%s", ASM_LONG);
5920 #endif
5921 if (flag_pic)
5922 output_pic_addr_const (file, x, '\0');
5923 else
5924 output_addr_const (file, x);
5925 fputc ('\n', file);
5928 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
5929 We need to emit DTP-relative relocations. */
5931 void
5932 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
5934 fputs (ASM_LONG, file);
5935 output_addr_const (file, x);
5936 fputs ("@DTPOFF", file);
5937 switch (size)
5939 case 4:
5940 break;
5941 case 8:
5942 fputs (", 0", file);
5943 break;
5944 default:
5945 abort ();
5949 /* In the name of slightly smaller debug output, and to cater to
5950 general assembler lossage, recognize PIC+GOTOFF and turn it back
5951 into a direct symbol reference. */
5953 static rtx
5954 ix86_delegitimize_address (rtx orig_x)
5956 rtx x = orig_x, y;
5958 if (GET_CODE (x) == MEM)
5959 x = XEXP (x, 0);
5961 if (TARGET_64BIT)
5963 if (GET_CODE (x) != CONST
5964 || GET_CODE (XEXP (x, 0)) != UNSPEC
5965 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
5966 || GET_CODE (orig_x) != MEM)
5967 return orig_x;
5968 return XVECEXP (XEXP (x, 0), 0, 0);
5971 if (GET_CODE (x) != PLUS
5972 || GET_CODE (XEXP (x, 1)) != CONST)
5973 return orig_x;
5975 if (GET_CODE (XEXP (x, 0)) == REG
5976 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
5977 /* %ebx + GOT/GOTOFF */
5978 y = NULL;
5979 else if (GET_CODE (XEXP (x, 0)) == PLUS)
5981 /* %ebx + %reg * scale + GOT/GOTOFF */
5982 y = XEXP (x, 0);
5983 if (GET_CODE (XEXP (y, 0)) == REG
5984 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
5985 y = XEXP (y, 1);
5986 else if (GET_CODE (XEXP (y, 1)) == REG
5987 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
5988 y = XEXP (y, 0);
5989 else
5990 return orig_x;
5991 if (GET_CODE (y) != REG
5992 && GET_CODE (y) != MULT
5993 && GET_CODE (y) != ASHIFT)
5994 return orig_x;
5996 else
5997 return orig_x;
5999 x = XEXP (XEXP (x, 1), 0);
6000 if (GET_CODE (x) == UNSPEC
6001 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6002 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6004 if (y)
6005 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6006 return XVECEXP (x, 0, 0);
6009 if (GET_CODE (x) == PLUS
6010 && GET_CODE (XEXP (x, 0)) == UNSPEC
6011 && GET_CODE (XEXP (x, 1)) == CONST_INT
6012 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6013 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6014 && GET_CODE (orig_x) != MEM)))
6016 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6017 if (y)
6018 return gen_rtx_PLUS (Pmode, y, x);
6019 return x;
6022 return orig_x;
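/* Print to FILE the condition-code suffix ("e", "ne", "g", ...) for
   comparison CODE in condition mode MODE.  REVERSE means print the
   suffix of the reversed condition; FP selects the spellings needed
   for fcmov (e.g. "nbe" instead of "a").  */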
6025 static void
6026 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6027 int fp, FILE *file)
6029 const char *suffix;
6031 if (mode == CCFPmode || mode == CCFPUmode)
6033 enum rtx_code second_code, bypass_code;
6034 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6035 if (bypass_code != UNKNOWN || second_code != UNKNOWN)
6036 abort ();
6037 code = ix86_fp_compare_code_to_integer (code);
6038 mode = CCmode;
6040 if (reverse)
6041 code = reverse_condition (code);
6043 switch (code)
6045 case EQ:
6046 suffix = "e";
6047 break;
6048 case NE:
6049 suffix = "ne";
6050 break;
6051 case GT:
6052 if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)
6053 abort ();
6054 suffix = "g";
6055 break;
6056 case GTU:
6057 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6058 Those same assemblers have the same but opposite lossage on cmov. */
6059 if (mode != CCmode)
6060 abort ();
6061 suffix = fp ? "nbe" : "a";
6062 break;
6063 case LT:
6064 if (mode == CCNOmode || mode == CCGOCmode)
6065 suffix = "s";
6066 else if (mode == CCmode || mode == CCGCmode)
6067 suffix = "l";
6068 else
6069 abort ();
6070 break;
6071 case LTU:
6072 if (mode != CCmode)
6073 abort ();
6074 suffix = "b";
6075 break;
6076 case GE:
6077 if (mode == CCNOmode || mode == CCGOCmode)
6078 suffix = "ns";
6079 else if (mode == CCmode || mode == CCGCmode)
6080 suffix = "ge";
6081 else
6082 abort ();
6083 break;
6084 case GEU:
6085 /* ??? As above. */
6086 if (mode != CCmode)
6087 abort ();
6088 suffix = fp ? "nb" : "ae";
6089 break;
6090 case LE:
6091 if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)
6092 abort ();
6093 suffix = "le";
6094 break;
6095 case LEU:
6096 if (mode != CCmode)
6097 abort ();
6098 suffix = "be";
6099 break;
6100 case UNORDERED:
6101 suffix = fp ? "u" : "p";
6102 break;
6103 case ORDERED:
6104 suffix = fp ? "nu" : "np";
6105 break;
6106 default:
6107 abort ();
6109 fputs (suffix, file);
6112 /* Print the name of register X to FILE based on its machine mode and number.
6113 If CODE is 'w', pretend the mode is HImode.
6114 If CODE is 'b', pretend the mode is QImode.
6115 If CODE is 'k', pretend the mode is SImode.
6116 If CODE is 'q', pretend the mode is DImode.
6117 If CODE is 'h', pretend the reg is the `high' byte register.
6118 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
6120 void
6121 print_reg (rtx x, int code, FILE *file)
6123 if (REGNO (x) == ARG_POINTER_REGNUM
6124 || REGNO (x) == FRAME_POINTER_REGNUM
6125 || REGNO (x) == FLAGS_REG
6126 || REGNO (x) == FPSR_REG)
6127 abort ();
6129 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
6130 putc ('%', file);
6132 if (code == 'w' || MMX_REG_P (x))
6133 code = 2;
6134 else if (code == 'b')
6135 code = 1;
6136 else if (code == 'k')
6137 code = 4;
6138 else if (code == 'q')
6139 code = 8;
6140 else if (code == 'y')
6141 code = 3;
6142 else if (code == 'h')
6143 code = 0;
6144 else
6145 code = GET_MODE_SIZE (GET_MODE (x));
6147 /* Irritatingly, AMD extended registers use a different naming convention
6148 from the normal registers. */
6149 if (REX_INT_REG_P (x))
6151 if (!TARGET_64BIT)
6152 abort ();
6153 switch (code)
6155 case 0:
6156 error ("extended registers have no high halves");
6157 break;
6158 case 1:
6159 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
6160 break;
6161 case 2:
6162 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
6163 break;
6164 case 4:
6165 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
6166 break;
6167 case 8:
6168 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
6169 break;
6170 default:
6171 error ("unsupported operand size for extended register");
6172 break;
6174 return;
6176 switch (code)
6178 case 3:
6179 if (STACK_TOP_P (x))
6181 fputs ("st(0)", file);
6182 break;
6184 /* FALLTHRU */
6185 case 8:
6186 case 4:
6187 case 12:
6188 if (! ANY_FP_REG_P (x))
6189 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
6190 /* FALLTHRU */
6191 case 16:
6192 case 2:
6193 normal:
6194 fputs (hi_reg_name[REGNO (x)], file);
6195 break;
6196 case 1:
6197 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
6198 goto normal;
6199 fputs (qi_reg_name[REGNO (x)], file);
6200 break;
6201 case 0:
6202 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
6203 goto normal;
6204 fputs (qi_high_reg_name[REGNO (x)], file);
6205 break;
6206 default:
6207 abort ();
6211 /* Locate some local-dynamic symbol still in use by this function
6212 so that we can print its name in some tls_local_dynamic_base
6213 pattern. */
6215 static const char *
6216 get_some_local_dynamic_name (void)
6218 rtx insn;
6220 if (cfun->machine->some_ld_name)
6221 return cfun->machine->some_ld_name;
6223 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
6224 if (INSN_P (insn)
6225 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
6226 return cfun->machine->some_ld_name;
6228 abort ();
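/* Callback for for_each_rtx, used by get_some_local_dynamic_name above:
   record the name of the first local-dynamic TLS SYMBOL_REF seen and
   stop the walk.  */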
6231 static int
6232 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
6234 rtx x = *px;
6236 if (GET_CODE (x) == SYMBOL_REF
6237 && local_dynamic_symbolic_operand (x, Pmode))
6239 cfun->machine->some_ld_name = XSTR (x, 0);
6240 return 1;
6243 return 0;
6246 /* Meaning of CODE:
6247 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
6248 C -- print opcode suffix for set/cmov insn.
6249 c -- like C, but print reversed condition
6250 F,f -- likewise, but for floating-point.
6251 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
6252 otherwise nothing
6253 R -- print the prefix for register names.
6254 z -- print the opcode suffix for the size of the current operand.
6255 * -- print a star (in certain assembler syntax)
6256 A -- print an absolute memory reference.
6257 w -- print the operand as if it's a "word" (HImode) even if it isn't.
6258 s -- print a shift double count, followed by the assembler's argument
6259 delimiter.
6260 b -- print the QImode name of the register for the indicated operand.
6261 %b0 would print %al if operands[0] is reg 0.
6262 w -- likewise, print the HImode name of the register.
6263 k -- likewise, print the SImode name of the register.
6264 q -- likewise, print the DImode name of the register.
6265 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
6266 y -- print "st(0)" instead of "st" as a register.
6267 D -- print condition for SSE cmp instruction.
6268 P -- if PIC, print an @PLT suffix.
6269 X -- don't print any sort of PIC '@' suffix for a symbol.
6270 & -- print some in-use local-dynamic symbol name. */
6273 void
6274 print_operand (FILE *file, rtx x, int code)
6276 if (code)
6278 switch (code)
6280 case '*':
6281 if (ASSEMBLER_DIALECT == ASM_ATT)
6282 putc ('*', file);
6283 return;
6285 case '&':
6286 assemble_name (file, get_some_local_dynamic_name ());
6287 return;
6289 case 'A':
6290 if (ASSEMBLER_DIALECT == ASM_ATT)
6291 putc ('*', file);
6292 else if (ASSEMBLER_DIALECT == ASM_INTEL)
6294 /* Intel syntax. For absolute addresses, registers should not
6295 be surrounded by brackets. */
6296 if (GET_CODE (x) != REG)
6298 putc ('[', file);
6299 PRINT_OPERAND (file, x, 0);
6300 putc (']', file);
6301 return;
6304 else
6305 abort ();
6307 PRINT_OPERAND (file, x, 0);
6308 return;
6311 case 'L':
6312 if (ASSEMBLER_DIALECT == ASM_ATT)
6313 putc ('l', file);
6314 return;
6316 case 'W':
6317 if (ASSEMBLER_DIALECT == ASM_ATT)
6318 putc ('w', file);
6319 return;
6321 case 'B':
6322 if (ASSEMBLER_DIALECT == ASM_ATT)
6323 putc ('b', file);
6324 return;
6326 case 'Q':
6327 if (ASSEMBLER_DIALECT == ASM_ATT)
6328 putc ('l', file);
6329 return;
6331 case 'S':
6332 if (ASSEMBLER_DIALECT == ASM_ATT)
6333 putc ('s', file);
6334 return;
6336 case 'T':
6337 if (ASSEMBLER_DIALECT == ASM_ATT)
6338 putc ('t', file);
6339 return;
6341 case 'z':
6342 /* 387 opcodes don't get size suffixes if the operands are
6343 registers. */
6344 if (STACK_REG_P (x))
6345 return;
6347 /* Likewise if using Intel opcodes. */
6348 if (ASSEMBLER_DIALECT == ASM_INTEL)
6349 return;
6351 /* Derive the opcode size suffix from the size of the operand. */
6352 switch (GET_MODE_SIZE (GET_MODE (x)))
6354 case 2:
6355 #ifdef HAVE_GAS_FILDS_FISTS
6356 putc ('s', file);
6357 #endif
6358 return;
6360 case 4:
6361 if (GET_MODE (x) == SFmode)
6363 putc ('s', file);
6364 return;
6366 else
6367 putc ('l', file);
6368 return;
6370 case 12:
6371 case 16:
6372 putc ('t', file);
6373 return;
6375 case 8:
6376 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
6378 #ifdef GAS_MNEMONICS
6379 putc ('q', file);
6380 #else
6381 putc ('l', file);
6382 putc ('l', file);
6383 #endif
6385 else
6386 putc ('l', file);
6387 return;
6389 default:
6390 abort ();
6393 case 'b':
6394 case 'w':
6395 case 'k':
6396 case 'q':
6397 case 'h':
6398 case 'y':
6399 case 'X':
6400 case 'P':
6401 break;
6403 case 's':
6404 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
6406 PRINT_OPERAND (file, x, 0);
6407 putc (',', file);
6409 return;
6411 case 'D':
6412 /* Little bit of braindamage here. The SSE compare instructions
6413 use completely different names for the comparisons than the
6414 fp conditional moves. */
6415 switch (GET_CODE (x))
6417 case EQ:
6418 case UNEQ:
6419 fputs ("eq", file);
6420 break;
6421 case LT:
6422 case UNLT:
6423 fputs ("lt", file);
6424 break;
6425 case LE:
6426 case UNLE:
6427 fputs ("le", file);
6428 break;
6429 case UNORDERED:
6430 fputs ("unord", file);
6431 break;
6432 case NE:
6433 case LTGT:
6434 fputs ("neq", file);
6435 break;
6436 case UNGE:
6437 case GE:
6438 fputs ("nlt", file);
6439 break;
6440 case UNGT:
6441 case GT:
6442 fputs ("nle", file);
6443 break;
6444 case ORDERED:
6445 fputs ("ord", file);
6446 break;
6447 default:
6448 abort ();
6449 break;
6451 return;
6452 case 'O':
6453 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6454 if (ASSEMBLER_DIALECT == ASM_ATT)
6456 switch (GET_MODE (x))
6458 case HImode: putc ('w', file); break;
6459 case SImode:
6460 case SFmode: putc ('l', file); break;
6461 case DImode:
6462 case DFmode: putc ('q', file); break;
6463 default: abort ();
6465 putc ('.', file);
6467 #endif
6468 return;
6469 case 'C':
6470 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
6471 return;
6472 case 'F':
6473 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6474 if (ASSEMBLER_DIALECT == ASM_ATT)
6475 putc ('.', file);
6476 #endif
6477 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
6478 return;
6480 /* Like above, but reverse condition */
6481 case 'c':
6482 /* Check to see if argument to %c is really a constant
6483 and not a condition code which needs to be reversed. */
6484 if (!COMPARISON_P (x))
6486 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
6487 return;
6489 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
6490 return;
6491 case 'f':
6492 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
6493 if (ASSEMBLER_DIALECT == ASM_ATT)
6494 putc ('.', file);
6495 #endif
6496 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
6497 return;
6498 case '+':
6500 rtx x;
6502 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
6503 return;
6505 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
6506 if (x)
6508 int pred_val = INTVAL (XEXP (x, 0));
6510 if (pred_val < REG_BR_PROB_BASE * 45 / 100
6511 || pred_val > REG_BR_PROB_BASE * 55 / 100)
6513 int taken = pred_val > REG_BR_PROB_BASE / 2;
6514 int cputaken = final_forward_branch_p (current_output_insn) == 0;
6516 /* Emit hints only in cases where the default branch prediction
6517 heuristics would fail. */
6518 if (taken != cputaken)
6520 /* We use 3e (DS) prefix for taken branches and
6521 2e (CS) prefix for not taken branches. */
6522 if (taken)
6523 fputs ("ds ; ", file);
6524 else
6525 fputs ("cs ; ", file);
6529 return;
6531 default:
6532 output_operand_lossage ("invalid operand code `%c'", code);
6536 if (GET_CODE (x) == REG)
6537 print_reg (x, code, file);
6539 else if (GET_CODE (x) == MEM)
6541 /* No `byte ptr' prefix for call instructions. */
6542 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
6544 const char * size;
6545 switch (GET_MODE_SIZE (GET_MODE (x)))
6547 case 1: size = "BYTE"; break;
6548 case 2: size = "WORD"; break;
6549 case 4: size = "DWORD"; break;
6550 case 8: size = "QWORD"; break;
6551 case 12: size = "XWORD"; break;
6552 case 16: size = "XMMWORD"; break;
6553 default:
6554 abort ();
6557 /* Check for explicit size override (codes 'b', 'w' and 'k') */
6558 if (code == 'b')
6559 size = "BYTE";
6560 else if (code == 'w')
6561 size = "WORD";
6562 else if (code == 'k')
6563 size = "DWORD";
6565 fputs (size, file);
6566 fputs (" PTR ", file);
6569 x = XEXP (x, 0);
6570 /* Avoid (%rip) for call operands. */
6571 if (CONSTANT_ADDRESS_P (x) && code == 'P'
6572 && GET_CODE (x) != CONST_INT)
6573 output_addr_const (file, x);
6574 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
6575 output_operand_lossage ("invalid constraints for operand");
6576 else
6577 output_address (x);
6580 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
6582 REAL_VALUE_TYPE r;
6583 long l;
6585 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6586 REAL_VALUE_TO_TARGET_SINGLE (r, l);
6588 if (ASSEMBLER_DIALECT == ASM_ATT)
6589 putc ('$', file);
6590 fprintf (file, "0x%08lx", l);
6593 /* These float cases don't actually occur as immediate operands. */
6594 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
6596 char dstr[30];
6598 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6599 fprintf (file, "%s", dstr);
6602 else if (GET_CODE (x) == CONST_DOUBLE
6603 && GET_MODE (x) == XFmode)
6605 char dstr[30];
6607 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
6608 fprintf (file, "%s", dstr);
6611 else
6613 if (code != 'P')
6615 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
6617 if (ASSEMBLER_DIALECT == ASM_ATT)
6618 putc ('$', file);
6620 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
6621 || GET_CODE (x) == LABEL_REF)
6623 if (ASSEMBLER_DIALECT == ASM_ATT)
6624 putc ('$', file);
6625 else
6626 fputs ("OFFSET FLAT:", file);
6629 if (GET_CODE (x) == CONST_INT)
6630 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6631 else if (flag_pic)
6632 output_pic_addr_const (file, x, code);
6633 else
6634 output_addr_const (file, x);
6638 /* Print a memory operand whose address is ADDR. */
6640 void
6641 print_operand_address (FILE *file, rtx addr)
6643 struct ix86_address parts;
6644 rtx base, index, disp;
6645 int scale;
6647 if (! ix86_decompose_address (addr, &parts))
6648 abort ();
6650 base = parts.base;
6651 index = parts.index;
6652 disp = parts.disp;
6653 scale = parts.scale;
6655 switch (parts.seg)
6657 case SEG_DEFAULT:
6658 break;
6659 case SEG_FS:
6660 case SEG_GS:
6661 if (USER_LABEL_PREFIX[0] == 0)
6662 putc ('%', file);
6663 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
6664 break;
6665 default:
6666 abort ();
6669 if (!base && !index)
6671 /* Displacement-only addressing requires special attention. */
6673 if (GET_CODE (disp) == CONST_INT)
6675 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
6677 if (USER_LABEL_PREFIX[0] == 0)
6678 putc ('%', file);
6679 fputs ("ds:", file);
6681 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
6683 else if (flag_pic)
6684 output_pic_addr_const (file, disp, 0);
6685 else
6686 output_addr_const (file, disp);
6688 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
6689 if (TARGET_64BIT
6690 && ((GET_CODE (disp) == SYMBOL_REF
6691 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
6692 || GET_CODE (disp) == LABEL_REF
6693 || (GET_CODE (disp) == CONST
6694 && GET_CODE (XEXP (disp, 0)) == PLUS
6695 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
6696 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
6697 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
6698 fputs ("(%rip)", file);
6700 else
6702 if (ASSEMBLER_DIALECT == ASM_ATT)
6704 if (disp)
6706 if (flag_pic)
6707 output_pic_addr_const (file, disp, 0);
6708 else if (GET_CODE (disp) == LABEL_REF)
6709 output_asm_label (disp);
6710 else
6711 output_addr_const (file, disp);
6714 putc ('(', file);
6715 if (base)
6716 print_reg (base, 0, file);
6717 if (index)
6719 putc (',', file);
6720 print_reg (index, 0, file);
6721 if (scale != 1)
6722 fprintf (file, ",%d", scale);
6724 putc (')', file);
6726 else
6728 rtx offset = NULL_RTX;
6730 if (disp)
6732 /* Pull out the offset of a symbol; print any symbol itself. */
6733 if (GET_CODE (disp) == CONST
6734 && GET_CODE (XEXP (disp, 0)) == PLUS
6735 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
6737 offset = XEXP (XEXP (disp, 0), 1);
6738 disp = gen_rtx_CONST (VOIDmode,
6739 XEXP (XEXP (disp, 0), 0));
6742 if (flag_pic)
6743 output_pic_addr_const (file, disp, 0);
6744 else if (GET_CODE (disp) == LABEL_REF)
6745 output_asm_label (disp);
6746 else if (GET_CODE (disp) == CONST_INT)
6747 offset = disp;
6748 else
6749 output_addr_const (file, disp);
6752 putc ('[', file);
6753 if (base)
6755 print_reg (base, 0, file);
6756 if (offset)
6758 if (INTVAL (offset) >= 0)
6759 putc ('+', file);
6760 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6763 else if (offset)
6764 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
6765 else
6766 putc ('0', file);
6768 if (index)
6770 putc ('+', file);
6771 print_reg (index, 0, file);
6772 if (scale != 1)
6773 fprintf (file, "*%d", scale);
6775 putc (']', file);
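/* Called via OUTPUT_ADDR_CONST_EXTRA to print the TLS relocation suffix
   for an UNSPEC-wrapped constant that output_addr_const cannot handle by
   itself; returns false if the UNSPEC is not recognized.  */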
6780 bool
6781 output_addr_const_extra (FILE *file, rtx x)
6783 rtx op;
6785 if (GET_CODE (x) != UNSPEC)
6786 return false;
6788 op = XVECEXP (x, 0, 0);
6789 switch (XINT (x, 1))
6791 case UNSPEC_GOTTPOFF:
6792 output_addr_const (file, op);
6793 /* FIXME: This might be @TPOFF in Sun ld. */
6794 fputs ("@GOTTPOFF", file);
6795 break;
6796 case UNSPEC_TPOFF:
6797 output_addr_const (file, op);
6798 fputs ("@TPOFF", file);
6799 break;
6800 case UNSPEC_NTPOFF:
6801 output_addr_const (file, op);
6802 if (TARGET_64BIT)
6803 fputs ("@TPOFF", file);
6804 else
6805 fputs ("@NTPOFF", file);
6806 break;
6807 case UNSPEC_DTPOFF:
6808 output_addr_const (file, op);
6809 fputs ("@DTPOFF", file);
6810 break;
6811 case UNSPEC_GOTNTPOFF:
6812 output_addr_const (file, op);
6813 if (TARGET_64BIT)
6814 fputs ("@GOTTPOFF(%rip)", file);
6815 else
6816 fputs ("@GOTNTPOFF", file);
6817 break;
6818 case UNSPEC_INDNTPOFF:
6819 output_addr_const (file, op);
6820 fputs ("@INDNTPOFF", file);
6821 break;
6823 default:
6824 return false;
6827 return true;
6830 /* Split one or more DImode RTL references into pairs of SImode
6831 references. The RTL can be REG, offsettable MEM, integer constant, or
6832 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
6833 split and "num" is its length. lo_half and hi_half are output arrays
6834 that parallel "operands". */
6836 void
6837 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6839 while (num--)
6841 rtx op = operands[num];
6843 /* simplify_subreg refuses to split volatile memory addresses,
6844 but we still have to handle them. */
6845 if (GET_CODE (op) == MEM)
6847 lo_half[num] = adjust_address (op, SImode, 0);
6848 hi_half[num] = adjust_address (op, SImode, 4);
6850 else
6852 lo_half[num] = simplify_gen_subreg (SImode, op,
6853 GET_MODE (op) == VOIDmode
6854 ? DImode : GET_MODE (op), 0);
6855 hi_half[num] = simplify_gen_subreg (SImode, op,
6856 GET_MODE (op) == VOIDmode
6857 ? DImode : GET_MODE (op), 4);
6861 /* Split one or more TImode RTL references into pairs of DImode
6862 references. The RTL can be REG, offsettable MEM, integer constant, or
6863 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
6864 split and "num" is its length. lo_half and hi_half are output arrays
6865 that parallel "operands". */
6867 void
6868 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
6870 while (num--)
6872 rtx op = operands[num];
6874 /* simplify_subreg refuses to split volatile memory addresses, but we
6875 still have to handle them. */
6876 if (GET_CODE (op) == MEM)
6878 lo_half[num] = adjust_address (op, DImode, 0);
6879 hi_half[num] = adjust_address (op, DImode, 8);
6881 else
6883 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
6884 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
6889 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
6890 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
6891 is the expression of the binary operation. The output may either be
6892 emitted here, or returned to the caller, like all output_* functions.
6894 There is no guarantee that the operands are the same mode, as they
6895 might be within FLOAT or FLOAT_EXTEND expressions. */
6897 #ifndef SYSV386_COMPAT
6898 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
6899 wants to fix the assemblers because that causes incompatibility
6900 with gcc. No-one wants to fix gcc because that causes
6901 incompatibility with assemblers... You can use the option of
6902 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
6903 #define SYSV386_COMPAT 1
6904 #endif
6906 const char *
6907 output_387_binary_op (rtx insn, rtx *operands)
6909 static char buf[30];
6910 const char *p;
6911 const char *ssep;
6912 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]) | SSE_REG_P (operands[2]);
6914 #ifdef ENABLE_CHECKING
6915 /* Even if we do not want to check the inputs, this documents input
6916 constraints, which helps in understanding the following code. */
6917 if (STACK_REG_P (operands[0])
6918 && ((REG_P (operands[1])
6919 && REGNO (operands[0]) == REGNO (operands[1])
6920 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
6921 || (REG_P (operands[2])
6922 && REGNO (operands[0]) == REGNO (operands[2])
6923 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
6924 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
6925 ; /* ok */
6926 else if (!is_sse)
6927 abort ();
6928 #endif
6930 switch (GET_CODE (operands[3]))
6932 case PLUS:
6933 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6934 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6935 p = "fiadd";
6936 else
6937 p = "fadd";
6938 ssep = "add";
6939 break;
6941 case MINUS:
6942 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6943 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6944 p = "fisub";
6945 else
6946 p = "fsub";
6947 ssep = "sub";
6948 break;
6950 case MULT:
6951 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6952 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6953 p = "fimul";
6954 else
6955 p = "fmul";
6956 ssep = "mul";
6957 break;
6959 case DIV:
6960 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
6961 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
6962 p = "fidiv";
6963 else
6964 p = "fdiv";
6965 ssep = "div";
6966 break;
6968 default:
6969 abort ();
6972 if (is_sse)
6974 strcpy (buf, ssep);
6975 if (GET_MODE (operands[0]) == SFmode)
6976 strcat (buf, "ss\t{%2, %0|%0, %2}");
6977 else
6978 strcat (buf, "sd\t{%2, %0|%0, %2}");
6979 return buf;
6981 strcpy (buf, p);
6983 switch (GET_CODE (operands[3]))
6985 case MULT:
6986 case PLUS:
6987 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
6989 rtx temp = operands[2];
6990 operands[2] = operands[1];
6991 operands[1] = temp;
6994 /* We now know operands[0] == operands[1]. */
6996 if (GET_CODE (operands[2]) == MEM)
6998 p = "%z2\t%2";
6999 break;
7002 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7004 if (STACK_TOP_P (operands[0]))
7005 /* How is it that we are storing to a dead operand[2]?
7006 Well, presumably operands[1] is dead too. We can't
7007 store the result to st(0) as st(0) gets popped on this
7008 instruction. Instead store to operands[2] (which I
7009 think has to be st(1)). st(1) will be popped later.
7010 gcc <= 2.8.1 didn't have this check and generated
7011 assembly code that the Unixware assembler rejected. */
7012 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7013 else
7014 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7015 break;
7018 if (STACK_TOP_P (operands[0]))
7019 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7020 else
7021 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7022 break;
7024 case MINUS:
7025 case DIV:
7026 if (GET_CODE (operands[1]) == MEM)
7028 p = "r%z1\t%1";
7029 break;
7032 if (GET_CODE (operands[2]) == MEM)
7034 p = "%z2\t%2";
7035 break;
7038 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7040 #if SYSV386_COMPAT
7041 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7042 derived assemblers, confusingly reverse the direction of
7043 the operation for fsub{r} and fdiv{r} when the
7044 destination register is not st(0). The Intel assembler
7045 doesn't have this brain damage. Read !SYSV386_COMPAT to
7046 figure out what the hardware really does. */
7047 if (STACK_TOP_P (operands[0]))
7048 p = "{p\t%0, %2|rp\t%2, %0}";
7049 else
7050 p = "{rp\t%2, %0|p\t%0, %2}";
7051 #else
7052 if (STACK_TOP_P (operands[0]))
7053 /* As above for fmul/fadd, we can't store to st(0). */
7054 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7055 else
7056 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7057 #endif
7058 break;
7061 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7063 #if SYSV386_COMPAT
7064 if (STACK_TOP_P (operands[0]))
7065 p = "{rp\t%0, %1|p\t%1, %0}";
7066 else
7067 p = "{p\t%1, %0|rp\t%0, %1}";
7068 #else
7069 if (STACK_TOP_P (operands[0]))
7070 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7071 else
7072 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7073 #endif
7074 break;
7077 if (STACK_TOP_P (operands[0]))
7079 if (STACK_TOP_P (operands[1]))
7080 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7081 else
7082 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7083 break;
7085 else if (STACK_TOP_P (operands[1]))
7087 #if SYSV386_COMPAT
7088 p = "{\t%1, %0|r\t%0, %1}";
7089 #else
7090 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7091 #endif
7093 else
7095 #if SYSV386_COMPAT
7096 p = "{r\t%2, %0|\t%0, %2}";
7097 #else
7098 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7099 #endif
7101 break;
7103 default:
7104 abort ();
7107 strcat (buf, p);
7108 return buf;
7111 /* Output code to initialize control word copies used by trunc?f?i and
7112 rounding patterns. CURRENT_MODE is set to the current control word,
7113 while NEW_MODE is set to the new control word. */
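/* The rounding control field occupies bits 10-11 of the 387 control word
   (00 = to nearest, 01 = down, 10 = up, 11 = toward zero) and bit 5
   (0x0020) masks the precision exception; those are the fields the
   constants below adjust.  */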
7115 void
7116 emit_i387_cw_initialization (rtx current_mode, rtx new_mode, int mode)
7118 rtx reg = gen_reg_rtx (HImode);
7120 emit_insn (gen_x86_fnstcw_1 (current_mode));
7121 emit_move_insn (reg, current_mode);
7123 if (!TARGET_PARTIAL_REG_STALL && !optimize_size
7124 && !TARGET_64BIT)
7126 switch (mode)
7128 case I387_CW_FLOOR:
7129 /* round down toward -oo */
7130 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
7131 break;
7133 case I387_CW_CEIL:
7134 /* round up toward +oo */
7135 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
7136 break;
7138 case I387_CW_TRUNC:
7139 /* round toward zero (truncate) */
7140 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
7141 break;
7143 case I387_CW_MASK_PM:
7144 /* mask precision exception for nearbyint() */
7145 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7146 break;
7148 default:
7149 abort();
7152 else
7154 switch (mode)
7156 case I387_CW_FLOOR:
7157 /* round down toward -oo */
7158 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7159 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
7160 break;
7162 case I387_CW_CEIL:
7163 /* round up toward +oo */
7164 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
7165 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
7166 break;
7168 case I387_CW_TRUNC:
7169 /* round toward zero (truncate) */
7170 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
7171 break;
7173 case I387_CW_MASK_PM:
7174 /* mask precision exception for nearbyint() */
7175 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
7176 break;
7178 default:
7179 abort();
7183 emit_move_insn (new_mode, reg);
7186 /* Output code for INSN to convert a float to a signed int. OPERANDS
7187 are the insn operands. The output may be [HSD]Imode and the input
7188 operand may be [SDX]Fmode. */
7190 const char *
7191 output_fix_trunc (rtx insn, rtx *operands)
7193 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7194 int dimode_p = GET_MODE (operands[0]) == DImode;
7196 /* Jump through a hoop or two for DImode, since the hardware has no
7197 non-popping instruction. We used to do this a different way, but
7198 that was somewhat fragile and broke with post-reload splitters. */
7199 if (dimode_p && !stack_top_dies)
7200 output_asm_insn ("fld\t%y1", operands);
7202 if (!STACK_TOP_P (operands[1]))
7203 abort ();
7205 if (GET_CODE (operands[0]) != MEM)
7206 abort ();
7208 output_asm_insn ("fldcw\t%3", operands);
7209 if (stack_top_dies || dimode_p)
7210 output_asm_insn ("fistp%z0\t%0", operands);
7211 else
7212 output_asm_insn ("fist%z0\t%0", operands);
7213 output_asm_insn ("fldcw\t%2", operands);
7215 return "";
7218 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
7219 should be used and 2 when fnstsw should be used. UNORDERED_P is true
7220 when fucom should be used. */
7222 const char *
7223 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
7225 int stack_top_dies;
7226 rtx cmp_op0 = operands[0];
7227 rtx cmp_op1 = operands[1];
7228 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]);
7230 if (eflags_p == 2)
7232 cmp_op0 = cmp_op1;
7233 cmp_op1 = operands[2];
7235 if (is_sse)
7237 if (GET_MODE (operands[0]) == SFmode)
7238 if (unordered_p)
7239 return "ucomiss\t{%1, %0|%0, %1}";
7240 else
7241 return "comiss\t{%1, %0|%0, %1}";
7242 else
7243 if (unordered_p)
7244 return "ucomisd\t{%1, %0|%0, %1}";
7245 else
7246 return "comisd\t{%1, %0|%0, %1}";
7249 if (! STACK_TOP_P (cmp_op0))
7250 abort ();
7252 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
7254 if (STACK_REG_P (cmp_op1)
7255 && stack_top_dies
7256 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
7257 && REGNO (cmp_op1) != FIRST_STACK_REG)
7259 /* If both the top of the 387 stack and the other operand (also a
7260 stack register) die, then this must be a `fcompp' float
7261 compare. */
7263 if (eflags_p == 1)
7265 /* There is no double popping fcomi variant. Fortunately,
7266 eflags is immune from the fstp's cc clobbering. */
7267 if (unordered_p)
7268 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
7269 else
7270 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
7271 return TARGET_USE_FFREEP ? "ffreep\t%y0" : "fstp\t%y0";
7273 else
7275 if (eflags_p == 2)
7277 if (unordered_p)
7278 return "fucompp\n\tfnstsw\t%0";
7279 else
7280 return "fcompp\n\tfnstsw\t%0";
7282 else
7284 if (unordered_p)
7285 return "fucompp";
7286 else
7287 return "fcompp";
7291 else
7293 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
7295 static const char * const alt[24] =
7297 "fcom%z1\t%y1",
7298 "fcomp%z1\t%y1",
7299 "fucom%z1\t%y1",
7300 "fucomp%z1\t%y1",
7302 "ficom%z1\t%y1",
7303 "ficomp%z1\t%y1",
7304 NULL,
7305 NULL,
7307 "fcomi\t{%y1, %0|%0, %y1}",
7308 "fcomip\t{%y1, %0|%0, %y1}",
7309 "fucomi\t{%y1, %0|%0, %y1}",
7310 "fucomip\t{%y1, %0|%0, %y1}",
7312 NULL,
7313 NULL,
7314 NULL,
7315 NULL,
7317 "fcom%z2\t%y2\n\tfnstsw\t%0",
7318 "fcomp%z2\t%y2\n\tfnstsw\t%0",
7319 "fucom%z2\t%y2\n\tfnstsw\t%0",
7320 "fucomp%z2\t%y2\n\tfnstsw\t%0",
7322 "ficom%z2\t%y2\n\tfnstsw\t%0",
7323 "ficomp%z2\t%y2\n\tfnstsw\t%0",
7324 NULL,
7325 NULL
7328 int mask;
7329 const char *ret;
7331 mask = eflags_p << 3;
7332 mask |= (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT) << 2;
7333 mask |= unordered_p << 1;
7334 mask |= stack_top_dies;
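/* eflags_p is 0, 1 or 2, so MASK ranges over 0..23 and indexes the
   24-entry table above.  */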
7336 if (mask >= 24)
7337 abort ();
7338 ret = alt[mask];
7339 if (ret == NULL)
7340 abort ();
7342 return ret;
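/* Output an absolute jump-table entry (ASM_OUTPUT_ADDR_VEC_ELT): a
   code-pointer-sized reference to the label numbered VALUE.  */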
7346 void
7347 ix86_output_addr_vec_elt (FILE *file, int value)
7349 const char *directive = ASM_LONG;
7351 if (TARGET_64BIT)
7353 #ifdef ASM_QUAD
7354 directive = ASM_QUAD;
7355 #else
7356 abort ();
7357 #endif
7360 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
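/* Output a relative jump-table entry (ASM_OUTPUT_ADDR_DIFF_ELT): the
   difference between the label numbered VALUE and either the label
   numbered REL or the GOT, so the table needs no load-time relocation.  */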
7363 void
7364 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
7366 if (TARGET_64BIT)
7367 fprintf (file, "%s%s%d-%s%d\n",
7368 ASM_LONG, LPREFIX, value, LPREFIX, rel);
7369 else if (HAVE_AS_GOTOFF_IN_DATA)
7370 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
7371 #if TARGET_MACHO
7372 else if (TARGET_MACHO)
7374 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
7375 machopic_output_function_base_name (file);
7376 fprintf(file, "\n");
7378 #endif
7379 else
7380 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
7381 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
7384 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
7385 for the target. */
7387 void
7388 ix86_expand_clear (rtx dest)
7390 rtx tmp;
7392 /* We play register width games, which are only valid after reload. */
7393 if (!reload_completed)
7394 abort ();
7396 /* Avoid HImode and its attendant prefix byte. */
7397 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
7398 dest = gen_rtx_REG (SImode, REGNO (dest));
7400 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
7402 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
7403 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
7405 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
7406 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
7409 emit_insn (tmp);
7412 /* X is an unchanging MEM. If it is a constant pool reference, return
7413 the constant pool rtx, else NULL. */
7415 rtx
7416 maybe_get_pool_constant (rtx x)
7418 x = ix86_delegitimize_address (XEXP (x, 0));
7420 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
7421 return get_pool_constant (x);
7423 return NULL_RTX;
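/* Expand a scalar move of mode MODE; OPERANDS[0] is the destination,
   OPERANDS[1] the source.  Legitimizes TLS and PIC symbol references and
   forces awkward constants and memory-to-memory moves through registers
   first.  */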
7426 void
7427 ix86_expand_move (enum machine_mode mode, rtx operands[])
7429 int strict = (reload_in_progress || reload_completed);
7430 rtx op0, op1;
7431 enum tls_model model;
7433 op0 = operands[0];
7434 op1 = operands[1];
7436 model = GET_CODE (op1) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (op1) : 0;
7437 if (model)
7439 op1 = legitimize_tls_address (op1, model, true);
7440 op1 = force_operand (op1, op0);
7441 if (op1 == op0)
7442 return;
7445 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
7447 #if TARGET_MACHO
7448 if (MACHOPIC_PURE)
7450 rtx temp = ((reload_in_progress
7451 || ((op0 && GET_CODE (op0) == REG)
7452 && mode == Pmode))
7453 ? op0 : gen_reg_rtx (Pmode));
7454 op1 = machopic_indirect_data_reference (op1, temp);
7455 op1 = machopic_legitimize_pic_address (op1, mode,
7456 temp == op1 ? 0 : temp);
7458 else if (MACHOPIC_INDIRECT)
7459 op1 = machopic_indirect_data_reference (op1, 0);
7460 if (op0 == op1)
7461 return;
7462 #else
7463 if (GET_CODE (op0) == MEM)
7464 op1 = force_reg (Pmode, op1);
7465 else
7466 op1 = legitimize_address (op1, op1, Pmode);
7467 #endif /* TARGET_MACHO */
7469 else
7471 if (GET_CODE (op0) == MEM
7472 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
7473 || !push_operand (op0, mode))
7474 && GET_CODE (op1) == MEM)
7475 op1 = force_reg (mode, op1);
7477 if (push_operand (op0, mode)
7478 && ! general_no_elim_operand (op1, mode))
7479 op1 = copy_to_mode_reg (mode, op1);
7481 /* Force large constants in 64-bit compilation into a register
7482 to get them CSEd. */
7483 if (TARGET_64BIT && mode == DImode
7484 && immediate_operand (op1, mode)
7485 && !x86_64_zext_immediate_operand (op1, VOIDmode)
7486 && !register_operand (op0, mode)
7487 && optimize && !reload_completed && !reload_in_progress)
7488 op1 = copy_to_mode_reg (mode, op1);
7490 if (FLOAT_MODE_P (mode))
7492 /* If we are loading a floating point constant to a register,
7493 force the value to memory now, since we'll get better code
7494 out the back end. */
7496 if (strict)
7498 else if (GET_CODE (op1) == CONST_DOUBLE)
7500 op1 = validize_mem (force_const_mem (mode, op1));
7501 if (!register_operand (op0, mode))
7503 rtx temp = gen_reg_rtx (mode);
7504 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
7505 emit_move_insn (op0, temp);
7506 return;
7512 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
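/* Likewise, but for vector modes: non-zero constants are spilled to the
   constant pool and memory-to-memory moves go through a register.  */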
7515 void
7516 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
7518 /* Force constants other than zero into memory. We do not know how
7519 the instructions used to build constants modify the upper 64 bits
7520 of the register; once we have that information we may be able
7521 to handle some of them more efficiently. */
7522 if ((reload_in_progress | reload_completed) == 0
7523 && register_operand (operands[0], mode)
7524 && CONSTANT_P (operands[1]) && operands[1] != CONST0_RTX (mode))
7525 operands[1] = validize_mem (force_const_mem (mode, operands[1]));
7527 /* Make operand1 a register if it isn't already. */
7528 if (!no_new_pseudos
7529 && !register_operand (operands[0], mode)
7530 && !register_operand (operands[1], mode))
7532 rtx temp = force_reg (GET_MODE (operands[1]), operands[1]);
7533 emit_move_insn (operands[0], temp);
7534 return;
7537 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
7540 /* Attempt to expand a binary operator. Make the expansion closer to the
7541 actual machine, than just general_operand, which will allow 3 separate
7542 memory references (one output, two input) in a single insn. */
7544 void
7545 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
7546 rtx operands[])
7548 int matching_memory;
7549 rtx src1, src2, dst, op, clob;
7551 dst = operands[0];
7552 src1 = operands[1];
7553 src2 = operands[2];
7555 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
7556 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7557 && (rtx_equal_p (dst, src2)
7558 || immediate_operand (src1, mode)))
7560 rtx temp = src1;
7561 src1 = src2;
7562 src2 = temp;
7565 /* If the destination is memory, and we do not have matching source
7566 operands, do things in registers. */
7567 matching_memory = 0;
7568 if (GET_CODE (dst) == MEM)
7570 if (rtx_equal_p (dst, src1))
7571 matching_memory = 1;
7572 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7573 && rtx_equal_p (dst, src2))
7574 matching_memory = 2;
7575 else
7576 dst = gen_reg_rtx (mode);
7579 /* Both source operands cannot be in memory. */
7580 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
7582 if (matching_memory != 2)
7583 src2 = force_reg (mode, src2);
7584 else
7585 src1 = force_reg (mode, src1);
7588 /* If the operation is not commutable, source 1 cannot be a constant
7589 or non-matching memory. */
7590 if ((CONSTANT_P (src1)
7591 || (!matching_memory && GET_CODE (src1) == MEM))
7592 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7593 src1 = force_reg (mode, src1);
7595 /* If optimizing, copy to regs to improve CSE */
7596 if (optimize && ! no_new_pseudos)
7598 if (GET_CODE (dst) == MEM)
7599 dst = gen_reg_rtx (mode);
7600 if (GET_CODE (src1) == MEM)
7601 src1 = force_reg (mode, src1);
7602 if (GET_CODE (src2) == MEM)
7603 src2 = force_reg (mode, src2);
7606 /* Emit the instruction. */
7608 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
7609 if (reload_in_progress)
7611 /* Reload doesn't know about the flags register, and doesn't know that
7612 it doesn't want to clobber it. We can only do this with PLUS. */
7613 if (code != PLUS)
7614 abort ();
7615 emit_insn (op);
7617 else
7619 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7620 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7623 /* Fix up the destination if needed. */
7624 if (dst != operands[0])
7625 emit_move_insn (operands[0], dst);
7628 /* Return TRUE or FALSE depending on whether the binary operator meets the
7629 appropriate constraints. */
7631 int
7632 ix86_binary_operator_ok (enum rtx_code code,
7633 enum machine_mode mode ATTRIBUTE_UNUSED,
7634 rtx operands[3])
7636 /* Both source operands cannot be in memory. */
7637 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
7638 return 0;
7639 /* If the operation is not commutable, source 1 cannot be a constant. */
7640 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
7641 return 0;
7642 /* If the destination is memory, we must have a matching source operand. */
7643 if (GET_CODE (operands[0]) == MEM
7644 && ! (rtx_equal_p (operands[0], operands[1])
7645 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
7646 && rtx_equal_p (operands[0], operands[2]))))
7647 return 0;
7648 /* If the operation is not commutable and the source 1 is memory, we must
7649 have a matching destination. */
7650 if (GET_CODE (operands[1]) == MEM
7651 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
7652 && ! rtx_equal_p (operands[0], operands[1]))
7653 return 0;
7654 return 1;
7657 /* Attempt to expand a unary operator. Make the expansion closer to the
7658 actual machine, than just general_operand, which will allow 2 separate
7659 memory references (one output, one input) in a single insn. */
7661 void
7662 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
7663 rtx operands[])
7665 int matching_memory;
7666 rtx src, dst, op, clob;
7668 dst = operands[0];
7669 src = operands[1];
7671 /* If the destination is memory, and we do not have matching source
7672 operands, do things in registers. */
7673 matching_memory = 0;
7674 if (GET_CODE (dst) == MEM)
7676 if (rtx_equal_p (dst, src))
7677 matching_memory = 1;
7678 else
7679 dst = gen_reg_rtx (mode);
7682 /* When source operand is memory, destination must match. */
7683 if (!matching_memory && GET_CODE (src) == MEM)
7684 src = force_reg (mode, src);
7686 /* If optimizing, copy to regs to improve CSE */
7687 if (optimize && ! no_new_pseudos)
7689 if (GET_CODE (dst) == MEM)
7690 dst = gen_reg_rtx (mode);
7691 if (GET_CODE (src) == MEM)
7692 src = force_reg (mode, src);
7695 /* Emit the instruction. */
7697 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
7698 if (reload_in_progress || code == NOT)
7700 /* Reload doesn't know about the flags register, and doesn't know that
7701 it doesn't want to clobber it. */
7702 if (code != NOT)
7703 abort ();
7704 emit_insn (op);
7706 else
7708 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
7709 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
7712 /* Fix up the destination if needed. */
7713 if (dst != operands[0])
7714 emit_move_insn (operands[0], dst);
7717 /* Return TRUE or FALSE depending on whether the unary operator meets the
7718 appropriate constraints. */
7721 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
7722 enum machine_mode mode ATTRIBUTE_UNUSED,
7723 rtx operands[2] ATTRIBUTE_UNUSED)
7725 /* If one of the operands is memory, source and destination must match. */
7726 if ((GET_CODE (operands[0]) == MEM
7727 || GET_CODE (operands[1]) == MEM)
7728 && ! rtx_equal_p (operands[0], operands[1]))
7729 return FALSE;
7730 return TRUE;
7733 /* Return TRUE or FALSE depending on whether the first SET in INSN
7734 has source and destination with matching CC modes, and whether the
7735 CC mode is at least as constrained as REQ_MODE. */
7738 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
7740 rtx set;
7741 enum machine_mode set_mode;
7743 set = PATTERN (insn);
7744 if (GET_CODE (set) == PARALLEL)
7745 set = XVECEXP (set, 0, 0);
7746 if (GET_CODE (set) != SET)
7747 abort ();
7748 if (GET_CODE (SET_SRC (set)) != COMPARE)
7749 abort ();
7751 set_mode = GET_MODE (SET_DEST (set));
7752 switch (set_mode)
7754 case CCNOmode:
7755 if (req_mode != CCNOmode
7756 && (req_mode != CCmode
7757 || XEXP (SET_SRC (set), 1) != const0_rtx))
7758 return 0;
7759 break;
7760 case CCmode:
7761 if (req_mode == CCGCmode)
7762 return 0;
7763 /* FALLTHRU */
7764 case CCGCmode:
7765 if (req_mode == CCGOCmode || req_mode == CCNOmode)
7766 return 0;
7767 /* FALLTHRU */
7768 case CCGOCmode:
7769 if (req_mode == CCZmode)
7770 return 0;
7771 /* FALLTHRU */
7772 case CCZmode:
7773 break;
7775 default:
7776 abort ();
7779 return (GET_MODE (SET_SRC (set)) == set_mode);
7782 /* Generate insn patterns to do an integer compare of OPERANDS. */
7784 static rtx
7785 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
7787 enum machine_mode cmpmode;
7788 rtx tmp, flags;
7790 cmpmode = SELECT_CC_MODE (code, op0, op1);
7791 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
7793 /* This is very simple, but making the interface the same as in the
7794 FP case makes the rest of the code easier. */
7795 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
7796 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
7798 /* Return the test that should be put into the flags user, i.e.
7799 the bcc, scc, or cmov instruction. */
7800 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
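/* Illustrative sketch (not from the original sources): for a signed SImode
   test such as (lt a b) the code above emits roughly

       (set (reg:CCGC 17) (compare:CCGC (reg:SI a) (reg:SI b)))

   and hands back (lt (reg:CCGC 17) (const_int 0)) for the caller to wrap
   into a jump, setcc or cmov pattern.  */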
7803 /* Figure out whether to use ordered or unordered fp comparisons.
7804 Return the appropriate mode to use. */
7806 enum machine_mode
7807 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
7809 /* ??? In order to make all comparisons reversible, we do all comparisons
7810 non-trapping when compiling for IEEE. Once gcc is able to distinguish
7811 between all forms of trapping and nontrapping comparisons, we can make
7812 inequality comparisons trapping again, since that results in better code
7813 when using FCOM based compares. */
7814 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
7817 enum machine_mode
7818 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
7820 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
7821 return ix86_fp_compare_mode (code);
7822 switch (code)
7824 /* Only zero flag is needed. */
7825 case EQ: /* ZF=0 */
7826 case NE: /* ZF!=0 */
7827 return CCZmode;
7828 /* Codes needing carry flag. */
7829 case GEU: /* CF=0 */
7830 case GTU: /* CF=0 & ZF=0 */
7831 case LTU: /* CF=1 */
7832 case LEU: /* CF=1 | ZF=1 */
7833 return CCmode;
7834 /* Codes possibly doable only with sign flag when
7835 comparing against zero. */
7836 case GE: /* SF=OF or SF=0 */
7837 case LT: /* SF<>OF or SF=1 */
7838 if (op1 == const0_rtx)
7839 return CCGOCmode;
7840 else
7841 /* For other cases the carry flag is not required. */
7842 return CCGCmode;
7843 /* Codes doable only with the sign flag when comparing
7844 against zero, but for which we lack a jump instruction,
7845 so we need to use relational tests against overflow,
7846 which thus needs to be zero. */
7847 case GT: /* ZF=0 & SF=OF */
7848 case LE: /* ZF=1 | SF<>OF */
7849 if (op1 == const0_rtx)
7850 return CCNOmode;
7851 else
7852 return CCGCmode;
7853 /* The strcmp pattern does (use flags) and combine may ask us for the
7854 proper mode. */
7855 case USE:
7856 return CCmode;
7857 default:
7858 abort ();
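/* For instance (illustrative reading of the switch above): EQ/NE only need
   ZF and get CCZmode, an unsigned GTU needs the carry flag and therefore
   full CCmode, while a signed GT against zero can use CCNOmode, where the
   overflow flag is assumed to be clear.  */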
7862 /* Return the fixed registers used for condition codes. */
7864 static bool
7865 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
7867 *p1 = FLAGS_REG;
7868 *p2 = FPSR_REG;
7869 return true;
7872 /* If two condition code modes are compatible, return a condition code
7873 mode which is compatible with both. Otherwise, return
7874 VOIDmode. */
7876 static enum machine_mode
7877 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
7879 if (m1 == m2)
7880 return m1;
7882 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
7883 return VOIDmode;
7885 if ((m1 == CCGCmode && m2 == CCGOCmode)
7886 || (m1 == CCGOCmode && m2 == CCGCmode))
7887 return CCGCmode;
7889 switch (m1)
7891 default:
7892 abort ();
7894 case CCmode:
7895 case CCGCmode:
7896 case CCGOCmode:
7897 case CCNOmode:
7898 case CCZmode:
7899 switch (m2)
7901 default:
7902 return VOIDmode;
7904 case CCmode:
7905 case CCGCmode:
7906 case CCGOCmode:
7907 case CCNOmode:
7908 case CCZmode:
7909 return CCmode;
7912 case CCFPmode:
7913 case CCFPUmode:
7914 /* These are only compatible with themselves, which we already
7915 checked above. */
7916 return VOIDmode;
7920 /* Return true if we should use an FCOMI instruction for this fp comparison. */
7923 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
7925 enum rtx_code swapped_code = swap_condition (code);
7926 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
7927 || (ix86_fp_comparison_cost (swapped_code)
7928 == ix86_fp_comparison_fcomi_cost (swapped_code)));
7931 /* Swap, force into registers, or otherwise massage the two operands
7932 to a fp comparison. The operands are updated in place; the new
7933 comparison code is returned. */
7935 static enum rtx_code
7936 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
7938 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
7939 rtx op0 = *pop0, op1 = *pop1;
7940 enum machine_mode op_mode = GET_MODE (op0);
7941 int is_sse = SSE_REG_P (op0) | SSE_REG_P (op1);
7943 /* All of the unordered compare instructions only work on registers.
7944 The same is true of the fcomi compare instructions. The same is
7945 true of the XFmode compare instructions if not comparing with
7946 zero (ftst insn is used in this case). */
7948 if (!is_sse
7949 && (fpcmp_mode == CCFPUmode
7950 || (op_mode == XFmode
7951 && ! (standard_80387_constant_p (op0) == 1
7952 || standard_80387_constant_p (op1) == 1))
7953 || ix86_use_fcomi_compare (code)))
7955 op0 = force_reg (op_mode, op0);
7956 op1 = force_reg (op_mode, op1);
7958 else
7960 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
7961 things around if they appear profitable, otherwise force op0
7962 into a register. */
7964 if (standard_80387_constant_p (op0) == 0
7965 || (GET_CODE (op0) == MEM
7966 && ! (standard_80387_constant_p (op1) == 0
7967 || GET_CODE (op1) == MEM)))
7969 rtx tmp;
7970 tmp = op0, op0 = op1, op1 = tmp;
7971 code = swap_condition (code);
7974 if (GET_CODE (op0) != REG)
7975 op0 = force_reg (op_mode, op0);
7977 if (CONSTANT_P (op1))
7979 int tmp = standard_80387_constant_p (op1);
7980 if (tmp == 0)
7981 op1 = validize_mem (force_const_mem (op_mode, op1));
7982 else if (tmp == 1)
7984 if (TARGET_CMOVE)
7985 op1 = force_reg (op_mode, op1);
7987 else
7988 op1 = force_reg (op_mode, op1);
7992 /* Try to rearrange the comparison to make it cheaper. */
7993 if (ix86_fp_comparison_cost (code)
7994 > ix86_fp_comparison_cost (swap_condition (code))
7995 && (GET_CODE (op1) == REG || !no_new_pseudos))
7997 rtx tmp;
7998 tmp = op0, op0 = op1, op1 = tmp;
7999 code = swap_condition (code);
8000 if (GET_CODE (op0) != REG)
8001 op0 = force_reg (op_mode, op0);
8004 *pop0 = op0;
8005 *pop1 = op1;
8006 return code;
8009 /* Convert the comparison codes we use to represent FP comparisons into the
8010 integer code that will result in a proper branch. Return UNKNOWN if no
8011 such code is available. */
8013 enum rtx_code
8014 ix86_fp_compare_code_to_integer (enum rtx_code code)
8016 switch (code)
8018 case GT:
8019 return GTU;
8020 case GE:
8021 return GEU;
8022 case ORDERED:
8023 case UNORDERED:
8024 return code;
8025 break;
8026 case UNEQ:
8027 return EQ;
8028 break;
8029 case UNLT:
8030 return LTU;
8031 break;
8032 case UNLE:
8033 return LEU;
8034 break;
8035 case LTGT:
8036 return NE;
8037 break;
8038 default:
8039 return UNKNOWN;
8043 /* Split comparison code CODE into comparisons we can do using branch
8044 instructions. BYPASS_CODE is the comparison code for a branch that will
8045 branch around FIRST_CODE and SECOND_CODE. If one of the branches
8046 is not required, its value is set to UNKNOWN.
8047 We never require more than two branches. */
8049 void
8050 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8051 enum rtx_code *first_code,
8052 enum rtx_code *second_code)
8054 *first_code = code;
8055 *bypass_code = UNKNOWN;
8056 *second_code = UNKNOWN;
8058 /* The fcomi comparison sets flags as follows:
8060 cmp ZF PF CF
8061 > 0 0 0
8062 < 0 0 1
8063 = 1 0 0
8064 un 1 1 1 */
8066 switch (code)
8068 case GT: /* GTU - CF=0 & ZF=0 */
8069 case GE: /* GEU - CF=0 */
8070 case ORDERED: /* PF=0 */
8071 case UNORDERED: /* PF=1 */
8072 case UNEQ: /* EQ - ZF=1 */
8073 case UNLT: /* LTU - CF=1 */
8074 case UNLE: /* LEU - CF=1 | ZF=1 */
8075 case LTGT: /* EQ - ZF=0 */
8076 break;
8077 case LT: /* LTU - CF=1 - fails on unordered */
8078 *first_code = UNLT;
8079 *bypass_code = UNORDERED;
8080 break;
8081 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8082 *first_code = UNLE;
8083 *bypass_code = UNORDERED;
8084 break;
8085 case EQ: /* EQ - ZF=1 - fails on unordered */
8086 *first_code = UNEQ;
8087 *bypass_code = UNORDERED;
8088 break;
8089 case NE: /* NE - ZF=0 - fails on unordered */
8090 *first_code = LTGT;
8091 *second_code = UNORDERED;
8092 break;
8093 case UNGE: /* GEU - CF=0 - fails on unordered */
8094 *first_code = GE;
8095 *second_code = UNORDERED;
8096 break;
8097 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8098 *first_code = GT;
8099 *second_code = UNORDERED;
8100 break;
8101 default:
8102 abort ();
8104 if (!TARGET_IEEE_FP)
8106 *second_code = UNKNOWN;
8107 *bypass_code = UNKNOWN;
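/* Worked example (illustrative): under TARGET_IEEE_FP an EQ comparison comes
   back as *first_code = UNEQ with *bypass_code = UNORDERED, i.e. first branch
   around the test when the operands are unordered (PF set), then test ZF;
   without IEEE both extra codes are reset to UNKNOWN just above.  */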
8111 /* Return the cost of a comparison done with fcom + arithmetic operations
8112 on AX. All following functions use the number of instructions as the
8113 cost metric. In the future this should be tweaked to compute bytes for
8114 optimize_size and take into account instruction performance on various CPUs. */
8115 static int
8116 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8118 if (!TARGET_IEEE_FP)
8119 return 4;
8120 /* The cost of code output by ix86_expand_fp_compare. */
8121 switch (code)
8123 case UNLE:
8124 case UNLT:
8125 case LTGT:
8126 case GT:
8127 case GE:
8128 case UNORDERED:
8129 case ORDERED:
8130 case UNEQ:
8131 return 4;
8132 break;
8133 case LT:
8134 case NE:
8135 case EQ:
8136 case UNGE:
8137 return 5;
8138 break;
8139 case LE:
8140 case UNGT:
8141 return 6;
8142 break;
8143 default:
8144 abort ();
8148 /* Return cost of comparison done using fcomi operation.
8149 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8150 static int
8151 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8153 enum rtx_code bypass_code, first_code, second_code;
8154 /* Return arbitrarily high cost when instruction is not supported - this
8155 prevents gcc from using it. */
8156 if (!TARGET_CMOVE)
8157 return 1024;
8158 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8159 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
8162 /* Return cost of comparison done using sahf operation.
8163 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8164 static int
8165 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8167 enum rtx_code bypass_code, first_code, second_code;
8168 /* Return arbitrarily high cost when instruction is not preferred - this
8169 keeps gcc from using it. */
8170 if (!TARGET_USE_SAHF && !optimize_size)
8171 return 1024;
8172 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8173 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
8176 /* Compute cost of the comparison done using any method.
8177 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8178 static int
8179 ix86_fp_comparison_cost (enum rtx_code code)
8181 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
8182 int min;
8184 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
8185 sahf_cost = ix86_fp_comparison_sahf_cost (code);
8187 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
8188 if (min > sahf_cost)
8189 min = sahf_cost;
8190 if (min > fcomi_cost)
8191 min = fcomi_cost;
8192 return min;
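/* Rough example of the metric (illustrative): for LT under TARGET_IEEE_FP the
   arithmetic sequence costs 5, sahf costs 4 where usable (3 plus the extra
   unordered branch) and fcomi costs 3 (2 plus the extra branch), so the
   fcomi/sahf path is preferred when TARGET_CMOVE is available.  */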
8195 /* Generate insn patterns to do a floating point compare of OPERANDS. */
8197 static rtx
8198 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
8199 rtx *second_test, rtx *bypass_test)
8201 enum machine_mode fpcmp_mode, intcmp_mode;
8202 rtx tmp, tmp2;
8203 int cost = ix86_fp_comparison_cost (code);
8204 enum rtx_code bypass_code, first_code, second_code;
8206 fpcmp_mode = ix86_fp_compare_mode (code);
8207 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
8209 if (second_test)
8210 *second_test = NULL_RTX;
8211 if (bypass_test)
8212 *bypass_test = NULL_RTX;
8214 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8216 /* Do fcomi/sahf based test when profitable. */
8217 if ((bypass_code == UNKNOWN || bypass_test)
8218 && (second_code == UNKNOWN || second_test)
8219 && ix86_fp_comparison_arithmetics_cost (code) > cost)
8221 if (TARGET_CMOVE)
8223 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8224 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
8225 tmp);
8226 emit_insn (tmp);
8228 else
8230 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8231 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8232 if (!scratch)
8233 scratch = gen_reg_rtx (HImode);
8234 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8235 emit_insn (gen_x86_sahf_1 (scratch));
8238 /* The FP codes work out to act like unsigned. */
8239 intcmp_mode = fpcmp_mode;
8240 code = first_code;
8241 if (bypass_code != UNKNOWN)
8242 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
8243 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8244 const0_rtx);
8245 if (second_code != UNKNOWN)
8246 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
8247 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8248 const0_rtx);
8250 else
8252 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
8253 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
8254 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
8255 if (!scratch)
8256 scratch = gen_reg_rtx (HImode);
8257 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
8259 /* In the unordered case, we have to check C2 for NaN's, which
8260 doesn't happen to work out to anything nice combination-wise.
8261 So do some bit twiddling on the value we've got in AH to come
8262 up with an appropriate set of condition codes. */
8264 intcmp_mode = CCNOmode;
8265 switch (code)
8267 case GT:
8268 case UNGT:
8269 if (code == GT || !TARGET_IEEE_FP)
8271 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8272 code = EQ;
8274 else
8276 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8277 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8278 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
8279 intcmp_mode = CCmode;
8280 code = GEU;
8282 break;
8283 case LT:
8284 case UNLT:
8285 if (code == LT && TARGET_IEEE_FP)
8287 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8288 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
8289 intcmp_mode = CCmode;
8290 code = EQ;
8292 else
8294 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
8295 code = NE;
8297 break;
8298 case GE:
8299 case UNGE:
8300 if (code == GE || !TARGET_IEEE_FP)
8302 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
8303 code = EQ;
8305 else
8307 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8308 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8309 GEN_INT (0x01)));
8310 code = NE;
8312 break;
8313 case LE:
8314 case UNLE:
8315 if (code == LE && TARGET_IEEE_FP)
8317 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8318 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
8319 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8320 intcmp_mode = CCmode;
8321 code = LTU;
8323 else
8325 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
8326 code = NE;
8328 break;
8329 case EQ:
8330 case UNEQ:
8331 if (code == EQ && TARGET_IEEE_FP)
8333 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8334 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
8335 intcmp_mode = CCmode;
8336 code = EQ;
8338 else
8340 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8341 code = NE;
8342 break;
8344 break;
8345 case NE:
8346 case LTGT:
8347 if (code == NE && TARGET_IEEE_FP)
8349 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
8350 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
8351 GEN_INT (0x40)));
8352 code = NE;
8354 else
8356 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
8357 code = EQ;
8359 break;
8361 case UNORDERED:
8362 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8363 code = NE;
8364 break;
8365 case ORDERED:
8366 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
8367 code = EQ;
8368 break;
8370 default:
8371 abort ();
8375 /* Return the test that should be put into the flags user, i.e.
8376 the bcc, scc, or cmov instruction. */
8377 return gen_rtx_fmt_ee (code, VOIDmode,
8378 gen_rtx_REG (intcmp_mode, FLAGS_REG),
8379 const0_rtx);
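/* Illustrative reading of the fnstsw path above: after fnstsw the status word
   lands in the AH-like byte with C0 = 0x01, C2 = 0x04 and C3 = 0x40, so e.g.
   UNORDERED becomes a "testb $0x04, %ah" followed by jne, and the 0x45 masks
   test the C0|C2|C3 combination.  */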
8383 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
8385 rtx op0, op1, ret;
8386 op0 = ix86_compare_op0;
8387 op1 = ix86_compare_op1;
8389 if (second_test)
8390 *second_test = NULL_RTX;
8391 if (bypass_test)
8392 *bypass_test = NULL_RTX;
8394 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8395 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8396 second_test, bypass_test);
8397 else
8398 ret = ix86_expand_int_compare (code, op0, op1);
8400 return ret;
8403 /* Return true if CODE will result in a nontrivial jump sequence. */
8404 bool
8405 ix86_fp_jump_nontrivial_p (enum rtx_code code)
8407 enum rtx_code bypass_code, first_code, second_code;
8408 if (!TARGET_CMOVE)
8409 return true;
8410 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8411 return bypass_code != UNKNOWN || second_code != UNKNOWN;
8414 void
8415 ix86_expand_branch (enum rtx_code code, rtx label)
8417 rtx tmp;
8419 switch (GET_MODE (ix86_compare_op0))
8421 case QImode:
8422 case HImode:
8423 case SImode:
8424 simple:
8425 tmp = ix86_expand_compare (code, NULL, NULL);
8426 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8427 gen_rtx_LABEL_REF (VOIDmode, label),
8428 pc_rtx);
8429 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
8430 return;
8432 case SFmode:
8433 case DFmode:
8434 case XFmode:
8436 rtvec vec;
8437 int use_fcomi;
8438 enum rtx_code bypass_code, first_code, second_code;
8440 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
8441 &ix86_compare_op1);
8443 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8445 /* Check whether we will use the natural sequence with one jump. If
8446 so, we can expand the jump early. Otherwise delay expansion by
8447 creating a compound insn so as not to confuse the optimizers. */
8448 if (bypass_code == UNKNOWN && second_code == UNKNOWN
8449 && TARGET_CMOVE)
8451 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
8452 gen_rtx_LABEL_REF (VOIDmode, label),
8453 pc_rtx, NULL_RTX);
8455 else
8457 tmp = gen_rtx_fmt_ee (code, VOIDmode,
8458 ix86_compare_op0, ix86_compare_op1);
8459 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
8460 gen_rtx_LABEL_REF (VOIDmode, label),
8461 pc_rtx);
8462 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
8464 use_fcomi = ix86_use_fcomi_compare (code);
8465 vec = rtvec_alloc (3 + !use_fcomi);
8466 RTVEC_ELT (vec, 0) = tmp;
8467 RTVEC_ELT (vec, 1)
8468 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
8469 RTVEC_ELT (vec, 2)
8470 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
8471 if (! use_fcomi)
8472 RTVEC_ELT (vec, 3)
8473 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
8475 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
8477 return;
8480 case DImode:
8481 if (TARGET_64BIT)
8482 goto simple;
8483 /* Expand DImode branch into multiple compare+branch. */
8485 rtx lo[2], hi[2], label2;
8486 enum rtx_code code1, code2, code3;
8488 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
8490 tmp = ix86_compare_op0;
8491 ix86_compare_op0 = ix86_compare_op1;
8492 ix86_compare_op1 = tmp;
8493 code = swap_condition (code);
8495 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
8496 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
8498 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
8499 avoid two branches. This costs one extra insn, so disable when
8500 optimizing for size. */
8502 if ((code == EQ || code == NE)
8503 && (!optimize_size
8504 || hi[1] == const0_rtx || lo[1] == const0_rtx))
8506 rtx xor0, xor1;
8508 xor1 = hi[0];
8509 if (hi[1] != const0_rtx)
8510 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
8511 NULL_RTX, 0, OPTAB_WIDEN);
8513 xor0 = lo[0];
8514 if (lo[1] != const0_rtx)
8515 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
8516 NULL_RTX, 0, OPTAB_WIDEN);
8518 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
8519 NULL_RTX, 0, OPTAB_WIDEN);
8521 ix86_compare_op0 = tmp;
8522 ix86_compare_op1 = const0_rtx;
8523 ix86_expand_branch (code, label);
8524 return;
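/* Sketch of the trick above (illustrative register choice, AT&T syntax):

       xorl  hi(b), hi(a)
       xorl  lo(b), lo(a)
       orl   lo(a), hi(a)
       j{e,ne} label

   i.e. one extra xor/or instead of a second compare-and-branch.  */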
8527 /* Otherwise, if we are doing a less-than or greater-than-or-equal
8528 comparison, op1 is a constant and the low word is zero, then we can
8529 just examine the high word. */
8531 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
8532 switch (code)
8534 case LT: case LTU: case GE: case GEU:
8535 ix86_compare_op0 = hi[0];
8536 ix86_compare_op1 = hi[1];
8537 ix86_expand_branch (code, label);
8538 return;
8539 default:
8540 break;
8543 /* Otherwise, we need two or three jumps. */
8545 label2 = gen_label_rtx ();
8547 code1 = code;
8548 code2 = swap_condition (code);
8549 code3 = unsigned_condition (code);
8551 switch (code)
8553 case LT: case GT: case LTU: case GTU:
8554 break;
8556 case LE: code1 = LT; code2 = GT; break;
8557 case GE: code1 = GT; code2 = LT; break;
8558 case LEU: code1 = LTU; code2 = GTU; break;
8559 case GEU: code1 = GTU; code2 = LTU; break;
8561 case EQ: code1 = UNKNOWN; code2 = NE; break;
8562 case NE: code2 = UNKNOWN; break;
8564 default:
8565 abort ();
8569 * a < b =>
8570 * if (hi(a) < hi(b)) goto true;
8571 * if (hi(a) > hi(b)) goto false;
8572 * if (lo(a) < lo(b)) goto true;
8573 * false:
8576 ix86_compare_op0 = hi[0];
8577 ix86_compare_op1 = hi[1];
8579 if (code1 != UNKNOWN)
8580 ix86_expand_branch (code1, label);
8581 if (code2 != UNKNOWN)
8582 ix86_expand_branch (code2, label2);
8584 ix86_compare_op0 = lo[0];
8585 ix86_compare_op1 = lo[1];
8586 ix86_expand_branch (code3, label);
8588 if (code2 != UNKNOWN)
8589 emit_label (label2);
8590 return;
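/* Example of the split above (illustrative): for a signed "a <= b" the high
   words are compared with LT (branch to label) and GT (branch to label2),
   and the low words are then compared with the unsigned LEU, exactly the
   "a < b" pattern sketched in the comment above.  */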
8593 default:
8594 abort ();
8598 /* Split branch based on floating point condition. */
8599 void
8600 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
8601 rtx target1, rtx target2, rtx tmp)
8603 rtx second, bypass;
8604 rtx label = NULL_RTX;
8605 rtx condition;
8606 int bypass_probability = -1, second_probability = -1, probability = -1;
8607 rtx i;
8609 if (target2 != pc_rtx)
8611 rtx tmp = target2;
8612 code = reverse_condition_maybe_unordered (code);
8613 target2 = target1;
8614 target1 = tmp;
8617 condition = ix86_expand_fp_compare (code, op1, op2,
8618 tmp, &second, &bypass);
8620 if (split_branch_probability >= 0)
8622 /* Distribute the probabilities across the jumps.
8623 Assume that BYPASS and SECOND always test
8624 for UNORDERED. */
8625 probability = split_branch_probability;
8627 /* A value of 1 is low enough that the probability does not need to be
8628 updated. Later we may run some experiments and see
8629 if unordered values are more frequent in practice. */
8630 if (bypass)
8631 bypass_probability = 1;
8632 if (second)
8633 second_probability = 1;
8635 if (bypass != NULL_RTX)
8637 label = gen_label_rtx ();
8638 i = emit_jump_insn (gen_rtx_SET
8639 (VOIDmode, pc_rtx,
8640 gen_rtx_IF_THEN_ELSE (VOIDmode,
8641 bypass,
8642 gen_rtx_LABEL_REF (VOIDmode,
8643 label),
8644 pc_rtx)));
8645 if (bypass_probability >= 0)
8646 REG_NOTES (i)
8647 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8648 GEN_INT (bypass_probability),
8649 REG_NOTES (i));
8651 i = emit_jump_insn (gen_rtx_SET
8652 (VOIDmode, pc_rtx,
8653 gen_rtx_IF_THEN_ELSE (VOIDmode,
8654 condition, target1, target2)));
8655 if (probability >= 0)
8656 REG_NOTES (i)
8657 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8658 GEN_INT (probability),
8659 REG_NOTES (i));
8660 if (second != NULL_RTX)
8662 i = emit_jump_insn (gen_rtx_SET
8663 (VOIDmode, pc_rtx,
8664 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
8665 target2)));
8666 if (second_probability >= 0)
8667 REG_NOTES (i)
8668 = gen_rtx_EXPR_LIST (REG_BR_PROB,
8669 GEN_INT (second_probability),
8670 REG_NOTES (i));
8672 if (label != NULL_RTX)
8673 emit_label (label);
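/* When a bypass test is present the emitted control flow is roughly
   (illustrative, ignoring the optional SECOND jump):

       jp   .Lbypass        ; unordered operands skip the real test
       jcc  target1
   .Lbypass:

   which is why the bypass label is emitted last above.  */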
8677 ix86_expand_setcc (enum rtx_code code, rtx dest)
8679 rtx ret, tmp, tmpreg, equiv;
8680 rtx second_test, bypass_test;
8682 if (GET_MODE (ix86_compare_op0) == DImode
8683 && !TARGET_64BIT)
8684 return 0; /* FAIL */
8686 if (GET_MODE (dest) != QImode)
8687 abort ();
8689 ret = ix86_expand_compare (code, &second_test, &bypass_test);
8690 PUT_MODE (ret, QImode);
8692 tmp = dest;
8693 tmpreg = dest;
8695 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
8696 if (bypass_test || second_test)
8698 rtx test = second_test;
8699 int bypass = 0;
8700 rtx tmp2 = gen_reg_rtx (QImode);
8701 if (bypass_test)
8703 if (second_test)
8704 abort ();
8705 test = bypass_test;
8706 bypass = 1;
8707 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
8709 PUT_MODE (test, QImode);
8710 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
8712 if (bypass)
8713 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
8714 else
8715 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
8718 /* Attach a REG_EQUAL note describing the comparison result. */
8719 equiv = simplify_gen_relational (code, QImode,
8720 GET_MODE (ix86_compare_op0),
8721 ix86_compare_op0, ix86_compare_op1);
8722 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
8724 return 1; /* DONE */
8727 /* Expand a comparison setting or clearing the carry flag. Return true when
8728 successful and set *POP to the comparison operation. */
8729 static bool
8730 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
8732 enum machine_mode mode =
8733 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
8735 /* Do not handle DImode compares that go through the special path. Also we
8736 can't deal with FP compares yet. This is possible to add. */
8737 if ((mode == DImode && !TARGET_64BIT))
8738 return false;
8739 if (FLOAT_MODE_P (mode))
8741 rtx second_test = NULL, bypass_test = NULL;
8742 rtx compare_op, compare_seq;
8744 /* Shortcut: the following common codes never translate into carry flag compares. */
8745 if (code == EQ || code == NE || code == UNEQ || code == LTGT
8746 || code == ORDERED || code == UNORDERED)
8747 return false;
8749 /* These comparisons require the zero flag; swap operands so they won't. */
8750 if ((code == GT || code == UNLE || code == LE || code == UNGT)
8751 && !TARGET_IEEE_FP)
8753 rtx tmp = op0;
8754 op0 = op1;
8755 op1 = tmp;
8756 code = swap_condition (code);
8759 /* Try to expand the comparison and verify that we end up with a carry flag
8760 based comparison. This fails to be true only when we decide to expand the
8761 comparison using arithmetic, which is not a common scenario. */
8762 start_sequence ();
8763 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
8764 &second_test, &bypass_test);
8765 compare_seq = get_insns ();
8766 end_sequence ();
8768 if (second_test || bypass_test)
8769 return false;
8770 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
8771 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
8772 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
8773 else
8774 code = GET_CODE (compare_op);
8775 if (code != LTU && code != GEU)
8776 return false;
8777 emit_insn (compare_seq);
8778 *pop = compare_op;
8779 return true;
8781 if (!INTEGRAL_MODE_P (mode))
8782 return false;
8783 switch (code)
8785 case LTU:
8786 case GEU:
8787 break;
8789 /* Convert a==0 into (unsigned)a<1. */
8790 case EQ:
8791 case NE:
8792 if (op1 != const0_rtx)
8793 return false;
8794 op1 = const1_rtx;
8795 code = (code == EQ ? LTU : GEU);
8796 break;
8798 /* Convert a>b into b<a or a>=b+1. */
8799 case GTU:
8800 case LEU:
8801 if (GET_CODE (op1) == CONST_INT)
8803 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
8804 /* Bail out on overflow. We still can swap operands but that
8805 would force loading of the constant into register. */
8806 if (op1 == const0_rtx
8807 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
8808 return false;
8809 code = (code == GTU ? GEU : LTU);
8811 else
8813 rtx tmp = op1;
8814 op1 = op0;
8815 op0 = tmp;
8816 code = (code == GTU ? LTU : GEU);
8818 break;
8820 /* Convert a>=0 into (unsigned)a<0x80000000. */
8821 case LT:
8822 case GE:
8823 if (mode == DImode || op1 != const0_rtx)
8824 return false;
8825 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
8826 code = (code == LT ? GEU : LTU);
8827 break;
8828 case LE:
8829 case GT:
8830 if (mode == DImode || op1 != constm1_rtx)
8831 return false;
8832 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
8833 code = (code == LE ? GEU : LTU);
8834 break;
8836 default:
8837 return false;
8839 /* Swapping operands may cause a constant to appear as the first operand. */
8840 if (!nonimmediate_operand (op0, VOIDmode))
8842 if (no_new_pseudos)
8843 return false;
8844 op0 = force_reg (mode, op0);
8846 ix86_compare_op0 = op0;
8847 ix86_compare_op1 = op1;
8848 *pop = ix86_expand_compare (code, NULL, NULL);
8849 if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU)
8850 abort ();
8851 return true;
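/* Example of the conversions above (illustrative): "a != 0" becomes the
   unsigned "a >= 1" (GEU), so the caller can consume the carry flag of
   "cmpl $1, a" directly in an sbb/adc or cmov pattern.  */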
8855 ix86_expand_int_movcc (rtx operands[])
8857 enum rtx_code code = GET_CODE (operands[1]), compare_code;
8858 rtx compare_seq, compare_op;
8859 rtx second_test, bypass_test;
8860 enum machine_mode mode = GET_MODE (operands[0]);
8861 bool sign_bit_compare_p = false;
8863 start_sequence ();
8864 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
8865 compare_seq = get_insns ();
8866 end_sequence ();
8868 compare_code = GET_CODE (compare_op);
8870 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
8871 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
8872 sign_bit_compare_p = true;
8874 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
8875 HImode insns, we'd be swallowed in word prefix ops. */
8877 if ((mode != HImode || TARGET_FAST_PREFIX)
8878 && (mode != DImode || TARGET_64BIT)
8879 && GET_CODE (operands[2]) == CONST_INT
8880 && GET_CODE (operands[3]) == CONST_INT)
8882 rtx out = operands[0];
8883 HOST_WIDE_INT ct = INTVAL (operands[2]);
8884 HOST_WIDE_INT cf = INTVAL (operands[3]);
8885 HOST_WIDE_INT diff;
8887 diff = ct - cf;
8888 /* Sign bit compares are better done using shifts than using
8889 sbb. */
8890 if (sign_bit_compare_p
8891 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
8892 ix86_compare_op1, &compare_op))
8894 /* Detect overlap between destination and compare sources. */
8895 rtx tmp = out;
8897 if (!sign_bit_compare_p)
8899 bool fpcmp = false;
8901 compare_code = GET_CODE (compare_op);
8903 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
8904 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
8906 fpcmp = true;
8907 compare_code = ix86_fp_compare_code_to_integer (compare_code);
8910 /* To simplify the rest of the code, restrict to the GEU case. */
8911 if (compare_code == LTU)
8913 HOST_WIDE_INT tmp = ct;
8914 ct = cf;
8915 cf = tmp;
8916 compare_code = reverse_condition (compare_code);
8917 code = reverse_condition (code);
8919 else
8921 if (fpcmp)
8922 PUT_CODE (compare_op,
8923 reverse_condition_maybe_unordered
8924 (GET_CODE (compare_op)));
8925 else
8926 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
8928 diff = ct - cf;
8930 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
8931 || reg_overlap_mentioned_p (out, ix86_compare_op1))
8932 tmp = gen_reg_rtx (mode);
8934 if (mode == DImode)
8935 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
8936 else
8937 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
8939 else
8941 if (code == GT || code == GE)
8942 code = reverse_condition (code);
8943 else
8945 HOST_WIDE_INT tmp = ct;
8946 ct = cf;
8947 cf = tmp;
8948 diff = ct - cf;
8950 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
8951 ix86_compare_op1, VOIDmode, 0, -1);
8954 if (diff == 1)
8957 * cmpl op0,op1
8958 * sbbl dest,dest
8959 * [addl dest, ct]
8961 * Size 5 - 8.
8963 if (ct)
8964 tmp = expand_simple_binop (mode, PLUS,
8965 tmp, GEN_INT (ct),
8966 copy_rtx (tmp), 1, OPTAB_DIRECT);
8968 else if (cf == -1)
8971 * cmpl op0,op1
8972 * sbbl dest,dest
8973 * orl $ct, dest
8975 * Size 8.
8977 tmp = expand_simple_binop (mode, IOR,
8978 tmp, GEN_INT (ct),
8979 copy_rtx (tmp), 1, OPTAB_DIRECT);
8981 else if (diff == -1 && ct)
8984 * cmpl op0,op1
8985 * sbbl dest,dest
8986 * notl dest
8987 * [addl dest, cf]
8989 * Size 8 - 11.
8991 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
8992 if (cf)
8993 tmp = expand_simple_binop (mode, PLUS,
8994 copy_rtx (tmp), GEN_INT (cf),
8995 copy_rtx (tmp), 1, OPTAB_DIRECT);
8997 else
9000 * cmpl op0,op1
9001 * sbbl dest,dest
9002 * [notl dest]
9003 * andl cf - ct, dest
9004 * [addl dest, ct]
9006 * Size 8 - 11.
9009 if (cf == 0)
9011 cf = ct;
9012 ct = 0;
9013 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9016 tmp = expand_simple_binop (mode, AND,
9017 copy_rtx (tmp),
9018 gen_int_mode (cf - ct, mode),
9019 copy_rtx (tmp), 1, OPTAB_DIRECT);
9020 if (ct)
9021 tmp = expand_simple_binop (mode, PLUS,
9022 copy_rtx (tmp), GEN_INT (ct),
9023 copy_rtx (tmp), 1, OPTAB_DIRECT);
9026 if (!rtx_equal_p (tmp, out))
9027 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9029 return 1; /* DONE */
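/* A concrete reading of the diff == 1 sequence above (illustrative): the sbb
   materializes -1 when the carry is set and 0 otherwise, so the optional addl
   of ct leaves either ct - 1 (== cf) or ct in dest.  */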
9032 if (diff < 0)
9034 HOST_WIDE_INT tmp;
9035 tmp = ct, ct = cf, cf = tmp;
9036 diff = -diff;
9037 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9039 /* We may be reversing an unordered compare to a normal compare, which
9040 is not valid in general (we may convert a non-trapping condition
9041 to a trapping one); however, on i386 we currently emit all
9042 comparisons unordered. */
9043 compare_code = reverse_condition_maybe_unordered (compare_code);
9044 code = reverse_condition_maybe_unordered (code);
9046 else
9048 compare_code = reverse_condition (compare_code);
9049 code = reverse_condition (code);
9053 compare_code = UNKNOWN;
9054 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9055 && GET_CODE (ix86_compare_op1) == CONST_INT)
9057 if (ix86_compare_op1 == const0_rtx
9058 && (code == LT || code == GE))
9059 compare_code = code;
9060 else if (ix86_compare_op1 == constm1_rtx)
9062 if (code == LE)
9063 compare_code = LT;
9064 else if (code == GT)
9065 compare_code = GE;
9069 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9070 if (compare_code != UNKNOWN
9071 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9072 && (cf == -1 || ct == -1))
9074 /* If lea code below could be used, only optimize
9075 if it results in a 2 insn sequence. */
9077 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9078 || diff == 3 || diff == 5 || diff == 9)
9079 || (compare_code == LT && ct == -1)
9080 || (compare_code == GE && cf == -1))
9083 * notl op1 (if necessary)
9084 * sarl $31, op1
9085 * orl cf, op1
9087 if (ct != -1)
9089 cf = ct;
9090 ct = -1;
9091 code = reverse_condition (code);
9094 out = emit_store_flag (out, code, ix86_compare_op0,
9095 ix86_compare_op1, VOIDmode, 0, -1);
9097 out = expand_simple_binop (mode, IOR,
9098 out, GEN_INT (cf),
9099 out, 1, OPTAB_DIRECT);
9100 if (out != operands[0])
9101 emit_move_insn (operands[0], out);
9103 return 1; /* DONE */
9108 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9109 || diff == 3 || diff == 5 || diff == 9)
9110 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9111 && (mode != DImode
9112 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
9115 * xorl dest,dest
9116 * cmpl op1,op2
9117 * setcc dest
9118 * lea cf(dest*(ct-cf)),dest
9120 * Size 14.
9122 * This also catches the degenerate setcc-only case.
9125 rtx tmp;
9126 int nops;
9128 out = emit_store_flag (out, code, ix86_compare_op0,
9129 ix86_compare_op1, VOIDmode, 0, 1);
9131 nops = 0;
9132 /* On x86_64 the lea instruction operates on Pmode, so we need
9133 to get the arithmetic done in the proper mode to match. */
9134 if (diff == 1)
9135 tmp = copy_rtx (out);
9136 else
9138 rtx out1;
9139 out1 = copy_rtx (out);
9140 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9141 nops++;
9142 if (diff & 1)
9144 tmp = gen_rtx_PLUS (mode, tmp, out1);
9145 nops++;
9148 if (cf != 0)
9150 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9151 nops++;
9153 if (!rtx_equal_p (tmp, out))
9155 if (nops == 1)
9156 out = force_operand (tmp, copy_rtx (out));
9157 else
9158 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9160 if (!rtx_equal_p (out, operands[0]))
9161 emit_move_insn (operands[0], copy_rtx (out));
9163 return 1; /* DONE */
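/* Example of the lea form (illustrative): with diff == 4 and cf == 3 the
   setcc result of 0/1 is scaled as "leal 3(,%reg,4), %reg", yielding 3 or 7
   without a branch.  */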
9167 * General case: Jumpful:
9168 * xorl dest,dest cmpl op1, op2
9169 * cmpl op1, op2 movl ct, dest
9170 * setcc dest jcc 1f
9171 * decl dest movl cf, dest
9172 * andl (cf-ct),dest 1:
9173 * addl ct,dest
9175 * Size 20. Size 14.
9177 * This is reasonably steep, but branch mispredict costs are
9178 * high on modern cpus, so consider failing only if optimizing
9179 * for space.
9182 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9183 && BRANCH_COST >= 2)
9185 if (cf == 0)
9187 cf = ct;
9188 ct = 0;
9189 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9190 /* We may be reversing an unordered compare to a normal compare,
9191 which is not valid in general (we may convert a non-trapping
9192 condition to a trapping one); however, on i386 we currently
9193 emit all comparisons unordered. */
9194 code = reverse_condition_maybe_unordered (code);
9195 else
9197 code = reverse_condition (code);
9198 if (compare_code != UNKNOWN)
9199 compare_code = reverse_condition (compare_code);
9203 if (compare_code != UNKNOWN)
9205 /* notl op1 (if needed)
9206 sarl $31, op1
9207 andl (cf-ct), op1
9208 addl ct, op1
9210 For x < 0 (resp. x <= -1) there will be no notl,
9211 so if possible swap the constants to get rid of the
9212 complement.
9213 True/false will be -1/0 while code below (store flag
9214 followed by decrement) is 0/-1, so the constants need
9215 to be exchanged once more. */
9217 if (compare_code == GE || !cf)
9219 code = reverse_condition (code);
9220 compare_code = LT;
9222 else
9224 HOST_WIDE_INT tmp = cf;
9225 cf = ct;
9226 ct = tmp;
9229 out = emit_store_flag (out, code, ix86_compare_op0,
9230 ix86_compare_op1, VOIDmode, 0, -1);
9232 else
9234 out = emit_store_flag (out, code, ix86_compare_op0,
9235 ix86_compare_op1, VOIDmode, 0, 1);
9237 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
9238 copy_rtx (out), 1, OPTAB_DIRECT);
9241 out = expand_simple_binop (mode, AND, copy_rtx (out),
9242 gen_int_mode (cf - ct, mode),
9243 copy_rtx (out), 1, OPTAB_DIRECT);
9244 if (ct)
9245 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
9246 copy_rtx (out), 1, OPTAB_DIRECT);
9247 if (!rtx_equal_p (out, operands[0]))
9248 emit_move_insn (operands[0], copy_rtx (out));
9250 return 1; /* DONE */
9254 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9256 /* Try a few things more with specific constants and a variable. */
9258 optab op;
9259 rtx var, orig_out, out, tmp;
9261 if (BRANCH_COST <= 2)
9262 return 0; /* FAIL */
9264 /* If one of the two operands is an interesting constant, load a
9265 constant with the above and mask it in with a logical operation. */
9267 if (GET_CODE (operands[2]) == CONST_INT)
9269 var = operands[3];
9270 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
9271 operands[3] = constm1_rtx, op = and_optab;
9272 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
9273 operands[3] = const0_rtx, op = ior_optab;
9274 else
9275 return 0; /* FAIL */
9277 else if (GET_CODE (operands[3]) == CONST_INT)
9279 var = operands[2];
9280 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
9281 operands[2] = constm1_rtx, op = and_optab;
9282 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
9283 operands[2] = const0_rtx, op = ior_optab;
9284 else
9285 return 0; /* FAIL */
9287 else
9288 return 0; /* FAIL */
9290 orig_out = operands[0];
9291 tmp = gen_reg_rtx (mode);
9292 operands[0] = tmp;
9294 /* Recurse to get the constant loaded. */
9295 if (ix86_expand_int_movcc (operands) == 0)
9296 return 0; /* FAIL */
9298 /* Mask in the interesting variable. */
9299 out = expand_binop (mode, op, var, tmp, orig_out, 0,
9300 OPTAB_WIDEN);
9301 if (!rtx_equal_p (out, orig_out))
9302 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
9304 return 1; /* DONE */
9308 * For comparison with above,
9310 * movl cf,dest
9311 * movl ct,tmp
9312 * cmpl op1,op2
9313 * cmovcc tmp,dest
9315 * Size 15.
9318 if (! nonimmediate_operand (operands[2], mode))
9319 operands[2] = force_reg (mode, operands[2]);
9320 if (! nonimmediate_operand (operands[3], mode))
9321 operands[3] = force_reg (mode, operands[3]);
9323 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9325 rtx tmp = gen_reg_rtx (mode);
9326 emit_move_insn (tmp, operands[3]);
9327 operands[3] = tmp;
9329 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9331 rtx tmp = gen_reg_rtx (mode);
9332 emit_move_insn (tmp, operands[2]);
9333 operands[2] = tmp;
9336 if (! register_operand (operands[2], VOIDmode)
9337 && (mode == QImode
9338 || ! register_operand (operands[3], VOIDmode)))
9339 operands[2] = force_reg (mode, operands[2]);
9341 if (mode == QImode
9342 && ! register_operand (operands[3], VOIDmode))
9343 operands[3] = force_reg (mode, operands[3]);
9345 emit_insn (compare_seq);
9346 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9347 gen_rtx_IF_THEN_ELSE (mode,
9348 compare_op, operands[2],
9349 operands[3])));
9350 if (bypass_test)
9351 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9352 gen_rtx_IF_THEN_ELSE (mode,
9353 bypass_test,
9354 copy_rtx (operands[3]),
9355 copy_rtx (operands[0]))));
9356 if (second_test)
9357 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
9358 gen_rtx_IF_THEN_ELSE (mode,
9359 second_test,
9360 copy_rtx (operands[2]),
9361 copy_rtx (operands[0]))));
9363 return 1; /* DONE */
9367 ix86_expand_fp_movcc (rtx operands[])
9369 enum rtx_code code;
9370 rtx tmp;
9371 rtx compare_op, second_test, bypass_test;
9373 /* For SF/DFmode conditional moves based on comparisons
9374 in same mode, we may want to use SSE min/max instructions. */
9375 if (((TARGET_SSE_MATH && GET_MODE (operands[0]) == SFmode)
9376 || (TARGET_SSE2 && TARGET_SSE_MATH && GET_MODE (operands[0]) == DFmode))
9377 && GET_MODE (ix86_compare_op0) == GET_MODE (operands[0])
9378 /* The SSE comparisons do not support the LTGT/UNEQ pair. */
9379 && (!TARGET_IEEE_FP
9380 || (GET_CODE (operands[1]) != LTGT && GET_CODE (operands[1]) != UNEQ))
9381 /* We may be called from the post-reload splitter. */
9382 && (!REG_P (operands[0])
9383 || SSE_REG_P (operands[0])
9384 || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
9386 rtx op0 = ix86_compare_op0, op1 = ix86_compare_op1;
9387 code = GET_CODE (operands[1]);
9389 /* See if we have (cross) match between comparison operands and
9390 conditional move operands. */
9391 if (rtx_equal_p (operands[2], op1))
9393 rtx tmp = op0;
9394 op0 = op1;
9395 op1 = tmp;
9396 code = reverse_condition_maybe_unordered (code);
9398 if (rtx_equal_p (operands[2], op0) && rtx_equal_p (operands[3], op1))
9400 /* Check for min operation. */
9401 if (code == LT || code == UNLE)
9403 if (code == UNLE)
9405 rtx tmp = op0;
9406 op0 = op1;
9407 op1 = tmp;
9409 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
9410 if (memory_operand (op0, VOIDmode))
9411 op0 = force_reg (GET_MODE (operands[0]), op0);
9412 if (GET_MODE (operands[0]) == SFmode)
9413 emit_insn (gen_minsf3 (operands[0], op0, op1));
9414 else
9415 emit_insn (gen_mindf3 (operands[0], op0, op1));
9416 return 1;
9418 /* Check for max operation. */
9419 if (code == GT || code == UNGE)
9421 if (code == UNGE)
9423 rtx tmp = op0;
9424 op0 = op1;
9425 op1 = tmp;
9427 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
9428 if (memory_operand (op0, VOIDmode))
9429 op0 = force_reg (GET_MODE (operands[0]), op0);
9430 if (GET_MODE (operands[0]) == SFmode)
9431 emit_insn (gen_maxsf3 (operands[0], op0, op1));
9432 else
9433 emit_insn (gen_maxdf3 (operands[0], op0, op1));
9434 return 1;
9437 /* Massage the condition into a sse_comparison_operator. In case we are
9438 in non-IEEE mode, try to canonicalize the destination operand
9439 to be first in the comparison - this helps reload avoid extra
9440 moves. */
9441 if (!sse_comparison_operator (operands[1], VOIDmode)
9442 || (rtx_equal_p (operands[0], ix86_compare_op1) && !TARGET_IEEE_FP))
9444 rtx tmp = ix86_compare_op0;
9445 ix86_compare_op0 = ix86_compare_op1;
9446 ix86_compare_op1 = tmp;
9447 operands[1] = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])),
9448 VOIDmode, ix86_compare_op0,
9449 ix86_compare_op1);
9451 /* Similarly try to arrange for the result to be the first operand of the
9452 conditional move. We also don't support the NE comparison on SSE, so
9453 try to avoid it. */
9454 if ((rtx_equal_p (operands[0], operands[3])
9455 && (!TARGET_IEEE_FP || GET_CODE (operands[1]) != EQ))
9456 || (GET_CODE (operands[1]) == NE && TARGET_IEEE_FP))
9458 rtx tmp = operands[2];
9459 operands[2] = operands[3];
9460 operands[3] = tmp;
9461 operands[1] = gen_rtx_fmt_ee (reverse_condition_maybe_unordered
9462 (GET_CODE (operands[1])),
9463 VOIDmode, ix86_compare_op0,
9464 ix86_compare_op1);
9466 if (GET_MODE (operands[0]) == SFmode)
9467 emit_insn (gen_sse_movsfcc (operands[0], operands[1],
9468 operands[2], operands[3],
9469 ix86_compare_op0, ix86_compare_op1));
9470 else
9471 emit_insn (gen_sse_movdfcc (operands[0], operands[1],
9472 operands[2], operands[3],
9473 ix86_compare_op0, ix86_compare_op1));
9474 return 1;
9477 /* The floating point conditional move instructions don't directly
9478 support conditions resulting from a signed integer comparison. */
9480 code = GET_CODE (operands[1]);
9481 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9483 /* The floating point conditional move instructions don't directly
9484 support signed integer comparisons. */
9486 if (!fcmov_comparison_operator (compare_op, VOIDmode))
9488 if (second_test != NULL || bypass_test != NULL)
9489 abort ();
9490 tmp = gen_reg_rtx (QImode);
9491 ix86_expand_setcc (code, tmp);
9492 code = NE;
9493 ix86_compare_op0 = tmp;
9494 ix86_compare_op1 = const0_rtx;
9495 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9497 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
9499 tmp = gen_reg_rtx (GET_MODE (operands[0]));
9500 emit_move_insn (tmp, operands[3]);
9501 operands[3] = tmp;
9503 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
9505 tmp = gen_reg_rtx (GET_MODE (operands[0]));
9506 emit_move_insn (tmp, operands[2]);
9507 operands[2] = tmp;
9510 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9511 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9512 compare_op,
9513 operands[2],
9514 operands[3])));
9515 if (bypass_test)
9516 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9517 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9518 bypass_test,
9519 operands[3],
9520 operands[0])));
9521 if (second_test)
9522 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
9523 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
9524 second_test,
9525 operands[2],
9526 operands[0])));
9528 return 1;
9531 /* Expand conditional increment or decrement using adc/sbb instructions.
9532 The default case using setcc followed by the conditional move can be
9533 done by generic code. */
9535 ix86_expand_int_addcc (rtx operands[])
9537 enum rtx_code code = GET_CODE (operands[1]);
9538 rtx compare_op;
9539 rtx val = const0_rtx;
9540 bool fpcmp = false;
9541 enum machine_mode mode = GET_MODE (operands[0]);
9543 if (operands[3] != const1_rtx
9544 && operands[3] != constm1_rtx)
9545 return 0;
9546 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9547 ix86_compare_op1, &compare_op))
9548 return 0;
9549 code = GET_CODE (compare_op);
9551 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9552 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9554 fpcmp = true;
9555 code = ix86_fp_compare_code_to_integer (code);
9558 if (code != LTU)
9560 val = constm1_rtx;
9561 if (fpcmp)
9562 PUT_CODE (compare_op,
9563 reverse_condition_maybe_unordered
9564 (GET_CODE (compare_op)));
9565 else
9566 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9568 PUT_MODE (compare_op, mode);
9570 /* Construct either adc or sbb insn. */
9571 if ((code == LTU) == (operands[3] == constm1_rtx))
9573 switch (GET_MODE (operands[0]))
9575 case QImode:
9576 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
9577 break;
9578 case HImode:
9579 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
9580 break;
9581 case SImode:
9582 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
9583 break;
9584 case DImode:
9585 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9586 break;
9587 default:
9588 abort ();
9591 else
9593 switch (GET_MODE (operands[0]))
9595 case QImode:
9596 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
9597 break;
9598 case HImode:
9599 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
9600 break;
9601 case SImode:
9602 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
9603 break;
9604 case DImode:
9605 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
9606 break;
9607 default:
9608 abort ();
9611 return 1; /* DONE */
9615 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
9616 works for floating point parameters and non-offsettable memories.
9617 For pushes, it returns just stack offsets; the values will be saved
9618 in the right order. At most three parts are generated. */
9620 static int
9621 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
9623 int size;
9625 if (!TARGET_64BIT)
9626 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
9627 else
9628 size = (GET_MODE_SIZE (mode) + 4) / 8;
9630 if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand)))
9631 abort ();
9632 if (size < 2 || size > 3)
9633 abort ();
9635 /* Optimize constant pool references to immediates. This is used by fp
9636 moves, which force all constants to memory to allow combining. */
9637 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
9639 rtx tmp = maybe_get_pool_constant (operand);
9640 if (tmp)
9641 operand = tmp;
9644 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
9646 /* The only non-offsettable memories we handle are pushes. */
9647 if (! push_operand (operand, VOIDmode))
9648 abort ();
9650 operand = copy_rtx (operand);
9651 PUT_MODE (operand, Pmode);
9652 parts[0] = parts[1] = parts[2] = operand;
9654 else if (!TARGET_64BIT)
9656 if (mode == DImode)
9657 split_di (&operand, 1, &parts[0], &parts[1]);
9658 else
9660 if (REG_P (operand))
9662 if (!reload_completed)
9663 abort ();
9664 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
9665 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
9666 if (size == 3)
9667 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
9669 else if (offsettable_memref_p (operand))
9671 operand = adjust_address (operand, SImode, 0);
9672 parts[0] = operand;
9673 parts[1] = adjust_address (operand, SImode, 4);
9674 if (size == 3)
9675 parts[2] = adjust_address (operand, SImode, 8);
9677 else if (GET_CODE (operand) == CONST_DOUBLE)
9679 REAL_VALUE_TYPE r;
9680 long l[4];
9682 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
9683 switch (mode)
9685 case XFmode:
9686 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
9687 parts[2] = gen_int_mode (l[2], SImode);
9688 break;
9689 case DFmode:
9690 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
9691 break;
9692 default:
9693 abort ();
9695 parts[1] = gen_int_mode (l[1], SImode);
9696 parts[0] = gen_int_mode (l[0], SImode);
9698 else
9699 abort ();
9702 else
9704 if (mode == TImode)
9705 split_ti (&operand, 1, &parts[0], &parts[1]);
9706 if (mode == XFmode || mode == TFmode)
9708 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
9709 if (REG_P (operand))
9711 if (!reload_completed)
9712 abort ();
9713 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
9714 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
9716 else if (offsettable_memref_p (operand))
9718 operand = adjust_address (operand, DImode, 0);
9719 parts[0] = operand;
9720 parts[1] = adjust_address (operand, upper_mode, 8);
9722 else if (GET_CODE (operand) == CONST_DOUBLE)
9724 REAL_VALUE_TYPE r;
9725 long l[3];
9727 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
9728 real_to_target (l, &r, mode);
9729 /* Do not use shift by 32 to avoid warning on 32bit systems. */
9730 if (HOST_BITS_PER_WIDE_INT >= 64)
9731 parts[0]
9732 = gen_int_mode
9733 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
9734 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
9735 DImode);
9736 else
9737 parts[0] = immed_double_const (l[0], l[1], DImode);
9738 if (upper_mode == SImode)
9739 parts[1] = gen_int_mode (l[2], SImode);
9740 else if (HOST_BITS_PER_WIDE_INT >= 64)
9741 parts[1]
9742 = gen_int_mode
9743 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
9744 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
9745 DImode);
9746 else
9747 parts[1] = immed_double_const (l[2], l[3], DImode);
9749 else
9750 abort ();
9754 return size;
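/* Illustrative example: on ia32 a DFmode constant such as 1.0 is split into
   two SImode immediates, (const_int 0) for the low word and
   (const_int 0x3ff00000) for the high word, matching the IEEE double layout
   produced by REAL_VALUE_TO_TARGET_DOUBLE.  */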
9757 /* Emit insns to perform a move or push of DI, DF, and XF values.
9758 Return false when normal moves are needed; true when all required
9759 insns have been emitted. Operands 2-4 contain the input values
9760 in the correct order; operands 5-7 contain the output values. */
9762 void
9763 ix86_split_long_move (rtx operands[])
9765 rtx part[2][3];
9766 int nparts;
9767 int push = 0;
9768 int collisions = 0;
9769 enum machine_mode mode = GET_MODE (operands[0]);
9771 /* The DFmode expanders may ask us to move a double.
9772 For a 64-bit target this is a single move. By hiding the fact
9773 here we simplify the i386.md splitters. */
9774 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
9776 /* Optimize constant pool references to immediates. This is used by
9777 fp moves, which force all constants to memory to allow combining. */
9779 if (GET_CODE (operands[1]) == MEM
9780 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
9781 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
9782 operands[1] = get_pool_constant (XEXP (operands[1], 0));
9783 if (push_operand (operands[0], VOIDmode))
9785 operands[0] = copy_rtx (operands[0]);
9786 PUT_MODE (operands[0], Pmode);
9788 else
9789 operands[0] = gen_lowpart (DImode, operands[0]);
9790 operands[1] = gen_lowpart (DImode, operands[1]);
9791 emit_move_insn (operands[0], operands[1]);
9792 return;
9795 /* The only non-offsettable memory we handle is push. */
9796 if (push_operand (operands[0], VOIDmode))
9797 push = 1;
9798 else if (GET_CODE (operands[0]) == MEM
9799 && ! offsettable_memref_p (operands[0]))
9800 abort ();
9802 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
9803 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
9805 /* When emitting a push, take care of source operands on the stack. */
9806 if (push && GET_CODE (operands[1]) == MEM
9807 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
9809 if (nparts == 3)
9810 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
9811 XEXP (part[1][2], 0));
9812 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
9813 XEXP (part[1][1], 0));
9816 /* We need to do the copy in the right order in case an address register
9817 of the source overlaps the destination. */
9818 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
9820 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
9821 collisions++;
9822 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
9823 collisions++;
9824 if (nparts == 3
9825 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
9826 collisions++;
9828 /* Collision in the middle part can be handled by reordering. */
9829 if (collisions == 1 && nparts == 3
9830 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
9832 rtx tmp;
9833 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
9834 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
9837 /* If there are more collisions, we can't handle it by reordering.
9838 Do an lea to the last part and use only one colliding move. */
9839 else if (collisions > 1)
9841 rtx base;
9843 collisions = 1;
9845 base = part[0][nparts - 1];
9847 /* Handle the case when the last part isn't valid for lea.
9848 Happens in 64-bit mode storing the 12-byte XFmode. */
9849 if (GET_MODE (base) != Pmode)
9850 base = gen_rtx_REG (Pmode, REGNO (base));
9852 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
9853 part[1][0] = replace_equiv_address (part[1][0], base);
9854 part[1][1] = replace_equiv_address (part[1][1],
9855 plus_constant (base, UNITS_PER_WORD));
9856 if (nparts == 3)
9857 part[1][2] = replace_equiv_address (part[1][2],
9858 plus_constant (base, 8));
9862 if (push)
9864 if (!TARGET_64BIT)
9866 if (nparts == 3)
9868 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
9869 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
9870 emit_move_insn (part[0][2], part[1][2]);
9873 else
9875 /* In 64-bit mode we don't have a 32-bit push available. If this is a
9876 register, that is OK - we will just use the larger counterpart. We also
9877 retype memory - this comes from an attempt to avoid a REX prefix on
9878 moving the second half of a TFmode value. */
9879 if (GET_MODE (part[1][1]) == SImode)
9881 if (GET_CODE (part[1][1]) == MEM)
9882 part[1][1] = adjust_address (part[1][1], DImode, 0);
9883 else if (REG_P (part[1][1]))
9884 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
9885 else
9886 abort ();
9887 if (GET_MODE (part[1][0]) == SImode)
9888 part[1][0] = part[1][1];
9891 emit_move_insn (part[0][1], part[1][1]);
9892 emit_move_insn (part[0][0], part[1][0]);
9893 return;
9896 /* Choose correct order to not overwrite the source before it is copied. */
9897 if ((REG_P (part[0][0])
9898 && REG_P (part[1][1])
9899 && (REGNO (part[0][0]) == REGNO (part[1][1])
9900 || (nparts == 3
9901 && REGNO (part[0][0]) == REGNO (part[1][2]))))
9902 || (collisions > 0
9903 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
9905 if (nparts == 3)
9907 operands[2] = part[0][2];
9908 operands[3] = part[0][1];
9909 operands[4] = part[0][0];
9910 operands[5] = part[1][2];
9911 operands[6] = part[1][1];
9912 operands[7] = part[1][0];
9914 else
9916 operands[2] = part[0][1];
9917 operands[3] = part[0][0];
9918 operands[5] = part[1][1];
9919 operands[6] = part[1][0];
9922 else
9924 if (nparts == 3)
9926 operands[2] = part[0][0];
9927 operands[3] = part[0][1];
9928 operands[4] = part[0][2];
9929 operands[5] = part[1][0];
9930 operands[6] = part[1][1];
9931 operands[7] = part[1][2];
9933 else
9935 operands[2] = part[0][0];
9936 operands[3] = part[0][1];
9937 operands[5] = part[1][0];
9938 operands[6] = part[1][1];
9942 /* If optimizing for size, attempt to locally unCSE non-zero constants. */
9943 if (optimize_size)
9945 if (GET_CODE (operands[5]) == CONST_INT
9946 && operands[5] != const0_rtx
9947 && REG_P (operands[2]))
9949 if (GET_CODE (operands[6]) == CONST_INT
9950 && INTVAL (operands[6]) == INTVAL (operands[5]))
9951 operands[6] = operands[2];
9953 if (nparts == 3
9954 && GET_CODE (operands[7]) == CONST_INT
9955 && INTVAL (operands[7]) == INTVAL (operands[5]))
9956 operands[7] = operands[2];
9959 if (nparts == 3
9960 && GET_CODE (operands[6]) == CONST_INT
9961 && operands[6] != const0_rtx
9962 && REG_P (operands[3])
9963 && GET_CODE (operands[7]) == CONST_INT
9964 && INTVAL (operands[7]) == INTVAL (operands[6]))
9965 operands[7] = operands[3];
9968 emit_move_insn (operands[2], operands[5]);
9969 emit_move_insn (operands[3], operands[6]);
9970 if (nparts == 3)
9971 emit_move_insn (operands[4], operands[7]);
9973 return;
9976 /* Helper function of ix86_split_ashldi used to generate an SImode
9977 left shift by a constant, either using a single shift or
9978 a sequence of add instructions. */
9980 static void
9981 ix86_expand_ashlsi3_const (rtx operand, int count)
9983 if (count == 1)
9984 emit_insn (gen_addsi3 (operand, operand, operand));
9985 else if (!optimize_size
9986 && count * ix86_cost->add <= ix86_cost->shift_const)
9988 int i;
9989 for (i=0; i<count; i++)
9990 emit_insn (gen_addsi3 (operand, operand, operand));
9992 else
9993 emit_insn (gen_ashlsi3 (operand, operand, GEN_INT (count)));
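/* Illustrative sketch (not in the original source): on a CPU where adds are
   no more expensive than a constant shift, a shift left by 2 comes out as
       addl %eax, %eax
       addl %eax, %eax
   (each add doubles the value), while larger or size-optimized cases use a
   single "sall $count, %eax".  The register name is a placeholder only.  */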
9996 void
9997 ix86_split_ashldi (rtx *operands, rtx scratch)
9999 rtx low[2], high[2];
10000 int count;
10002 if (GET_CODE (operands[2]) == CONST_INT)
10004 split_di (operands, 2, low, high);
10005 count = INTVAL (operands[2]) & 63;
10007 if (count >= 32)
10009 emit_move_insn (high[0], low[1]);
10010 emit_move_insn (low[0], const0_rtx);
10012 if (count > 32)
10013 ix86_expand_ashlsi3_const (high[0], count - 32);
10015 else
10017 if (!rtx_equal_p (operands[0], operands[1]))
10018 emit_move_insn (operands[0], operands[1]);
10019 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
10020 ix86_expand_ashlsi3_const (low[0], count);
10022 return;
10025 split_di (operands, 1, low, high);
10027 if (operands[1] == const1_rtx)
10029 /* Assuming we've chosen QImode-capable registers, 1LL << N
10030 can be done with two 32-bit shifts, no branches, no cmoves. */
10031 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
10033 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
10035 ix86_expand_clear (low[0]);
10036 ix86_expand_clear (high[0]);
10037 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (32)));
10039 d = gen_lowpart (QImode, low[0]);
10040 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10041 s = gen_rtx_EQ (QImode, flags, const0_rtx);
10042 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10044 d = gen_lowpart (QImode, high[0]);
10045 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
10046 s = gen_rtx_NE (QImode, flags, const0_rtx);
10047 emit_insn (gen_rtx_SET (VOIDmode, d, s));
10050 /* Otherwise, we can get the same results by manually performing
10051 a bit extract operation on bit 5, and then performing the two
10052 shifts. The two methods of getting 0/1 into low/high are exactly
10053 the same size. Avoiding the shift in the bit extract case helps
10054 pentium4 a bit; no one else seems to care much either way. */
10055 else
10057 rtx x;
10059 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
10060 x = gen_rtx_ZERO_EXTEND (SImode, operands[2]);
10061 else
10062 x = gen_lowpart (SImode, operands[2]);
10063 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
10065 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (5)));
10066 emit_insn (gen_andsi3 (high[0], high[0], GEN_INT (1)));
10067 emit_move_insn (low[0], high[0]);
10068 emit_insn (gen_xorsi3 (low[0], low[0], GEN_INT (1)));
10071 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10072 emit_insn (gen_ashlsi3 (high[0], high[0], operands[2]));
10073 return;
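/* Illustrative sketch (not in the original source) of the 1LL << N case
   above, for the QImode-capable-register path, with the count in %cl:
       xorl  %eax, %eax        ; low  = 0
       xorl  %edx, %edx        ; high = 0
       testb $32, %cl
       sete  %al               ; low  = (count & 32) == 0
       setne %dl               ; high = (count & 32) != 0
       sall  %cl, %eax         ; the hardware masks the count to 5 bits,
       sall  %cl, %edx         ; so exactly one half becomes 1 << (count & 31)
   Register names are placeholders, not the compiler's actual choices.  */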
10076 if (operands[1] == constm1_rtx)
10078 /* For -1LL << N, we can avoid the shld instruction, because we
10079 know that we're shifting 0...31 ones into a -1. */
10080 emit_move_insn (low[0], constm1_rtx);
10081 if (optimize_size)
10082 emit_move_insn (high[0], low[0]);
10083 else
10084 emit_move_insn (high[0], constm1_rtx);
10086 else
10088 if (!rtx_equal_p (operands[0], operands[1]))
10089 emit_move_insn (operands[0], operands[1]);
10091 split_di (operands, 1, low, high);
10092 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
10095 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10097 if (TARGET_CMOVE && scratch)
10099 ix86_expand_clear (scratch);
10100 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2], scratch));
10102 else
10103 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
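/* Illustrative note (not in the original source): for a variable count the
   path above is a shldl %cl into the high half (skipped for the -1 special
   case) plus a sall %cl on the low half, followed by the shift_adj fixup,
   which tests bit 5 of the count and, when set, copies low into high and
   clears low - via cmov when a scratch register is available, otherwise via
   a short branch.  E.g. (-1LL) << 40: low = high = -1, the sall makes
   low = 0xffffff00, and the fixup then yields high = 0xffffff00, low = 0,
   i.e. 0xffffff0000000000.  */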
10106 void
10107 ix86_split_ashrdi (rtx *operands, rtx scratch)
10109 rtx low[2], high[2];
10110 int count;
10112 if (GET_CODE (operands[2]) == CONST_INT)
10114 split_di (operands, 2, low, high);
10115 count = INTVAL (operands[2]) & 63;
10117 if (count == 63)
10119 emit_move_insn (high[0], high[1]);
10120 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10121 emit_move_insn (low[0], high[0]);
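/* Illustrative note (not in the original source): an arithmetic right shift
   of a DImode value by 63 leaves only the sign, so the sequence above is
       movl  src_high, high
       sarl  $31, high          ; high = 0 or -1 (the sign mask)
       movl  high, low          ; low = the same mask
   with both result halves equal to the broadcast sign bit.  */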
10124 else if (count >= 32)
10126 emit_move_insn (low[0], high[1]);
10127 emit_move_insn (high[0], low[0]);
10128 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10129 if (count > 32)
10130 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
10132 else
10134 if (!rtx_equal_p (operands[0], operands[1]))
10135 emit_move_insn (operands[0], operands[1]);
10136 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10137 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
10140 else
10142 if (!rtx_equal_p (operands[0], operands[1]))
10143 emit_move_insn (operands[0], operands[1]);
10145 split_di (operands, 1, low, high);
10147 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10148 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
10150 if (TARGET_CMOVE && scratch)
10152 emit_move_insn (scratch, high[0]);
10153 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
10154 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10155 scratch));
10157 else
10158 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
10162 void
10163 ix86_split_lshrdi (rtx *operands, rtx scratch)
10165 rtx low[2], high[2];
10166 int count;
10168 if (GET_CODE (operands[2]) == CONST_INT)
10170 split_di (operands, 2, low, high);
10171 count = INTVAL (operands[2]) & 63;
10173 if (count >= 32)
10175 emit_move_insn (low[0], high[1]);
10176 ix86_expand_clear (high[0]);
10178 if (count > 32)
10179 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
10181 else
10183 if (!rtx_equal_p (operands[0], operands[1]))
10184 emit_move_insn (operands[0], operands[1]);
10185 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10186 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
10189 else
10191 if (!rtx_equal_p (operands[0], operands[1]))
10192 emit_move_insn (operands[0], operands[1]);
10194 split_di (operands, 1, low, high);
10196 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10197 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
10199 /* Heh. By reversing the arguments, we can reuse this pattern. */
10200 if (TARGET_CMOVE && scratch)
10202 ix86_expand_clear (scratch);
10203 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10204 scratch));
10206 else
10207 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
10211 /* Helper function for the string operations below. Test the bits of
10212 VARIABLE selected by VALUE; if they are all zero, jump to the label that is returned. */
10213 static rtx
10214 ix86_expand_aligntest (rtx variable, int value)
10216 rtx label = gen_label_rtx ();
10217 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
10218 if (GET_MODE (variable) == DImode)
10219 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
10220 else
10221 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
10222 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
10223 1, label);
10224 return label;
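/* Usage sketch (not in the original source), mirroring the callers further
   down in this file:
       rtx label = ix86_expand_aligntest (destreg, 1);
       ...emit the one-byte fixup for an odd destination...
       emit_label (label);
       LABEL_NUSES (label) = 1;
   The fixup code is skipped whenever the tested bits of VARIABLE are zero.  */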
10227 /* Decrease COUNTREG by VALUE. */
10228 static void
10229 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
10231 if (GET_MODE (countreg) == DImode)
10232 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
10233 else
10234 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
10237 /* Zero extend possibly SImode EXP to Pmode register. */
10239 ix86_zero_extend_to_Pmode (rtx exp)
10241 rtx r;
10242 if (GET_MODE (exp) == VOIDmode)
10243 return force_reg (Pmode, exp);
10244 if (GET_MODE (exp) == Pmode)
10245 return copy_to_mode_reg (Pmode, exp);
10246 r = gen_reg_rtx (Pmode);
10247 emit_insn (gen_zero_extendsidi2 (r, exp));
10248 return r;
10251 /* Expand string move (memcpy) operation. Use i386 string operations when
10252 profitable. expand_clrmem contains similar code. */
10254 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp)
10256 rtx srcreg, destreg, countreg, srcexp, destexp;
10257 enum machine_mode counter_mode;
10258 HOST_WIDE_INT align = 0;
10259 unsigned HOST_WIDE_INT count = 0;
10261 if (GET_CODE (align_exp) == CONST_INT)
10262 align = INTVAL (align_exp);
10264 /* Can't use any of this if the user has appropriated esi or edi. */
10265 if (global_regs[4] || global_regs[5])
10266 return 0;
10268 /* This simple hack avoids all inlining code and simplifies code below. */
10269 if (!TARGET_ALIGN_STRINGOPS)
10270 align = 64;
10272 if (GET_CODE (count_exp) == CONST_INT)
10274 count = INTVAL (count_exp);
10275 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10276 return 0;
10279 /* Figure out the proper mode for the counter. For 32-bit targets it is always SImode;
10280 for 64-bit targets use SImode when possible, otherwise DImode.
10281 Set count to the number of bytes copied when known at compile time. */
10282 if (!TARGET_64BIT
10283 || GET_MODE (count_exp) == SImode
10284 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10285 counter_mode = SImode;
10286 else
10287 counter_mode = DImode;
10289 if (counter_mode != SImode && counter_mode != DImode)
10290 abort ();
10292 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10293 if (destreg != XEXP (dst, 0))
10294 dst = replace_equiv_address_nv (dst, destreg);
10295 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
10296 if (srcreg != XEXP (src, 0))
10297 src = replace_equiv_address_nv (src, srcreg);
10299 /* When optimizing for size emit simple rep ; movsb instruction for
10300 counts not divisible by 4. */
10302 if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
10304 emit_insn (gen_cld ());
10305 countreg = ix86_zero_extend_to_Pmode (count_exp);
10306 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10307 srcexp = gen_rtx_PLUS (Pmode, srcreg, countreg);
10308 emit_insn (gen_rep_mov (destreg, dst, srcreg, src, countreg,
10309 destexp, srcexp));
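/* Illustrative note (not in the original source): this branch amounts to
       cld
       movl  count, %ecx        ; or reuse the count register, zero-extended
       rep movsb                ; copy %ecx bytes from (%esi) to (%edi)
   which is the shortest sequence when the count is unknown or not a
   multiple of four and we only care about size.  */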
10312 /* For constant aligned (or small unaligned) copies use rep movsl
10313 followed by code copying the rest. For PentiumPro ensure 8 byte
10314 alignment to allow rep movsl acceleration. */
10316 else if (count != 0
10317 && (align >= 8
10318 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10319 || optimize_size || count < (unsigned int) 64))
10321 unsigned HOST_WIDE_INT offset = 0;
10322 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10323 rtx srcmem, dstmem;
10325 emit_insn (gen_cld ());
10326 if (count & ~(size - 1))
10328 countreg = copy_to_mode_reg (counter_mode,
10329 GEN_INT ((count >> (size == 4 ? 2 : 3))
10330 & (TARGET_64BIT ? -1 : 0x3fffffff)));
10331 countreg = ix86_zero_extend_to_Pmode (countreg);
10333 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10334 GEN_INT (size == 4 ? 2 : 3));
10335 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10336 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10338 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10339 countreg, destexp, srcexp));
10340 offset = count & ~(size - 1);
10342 if (size == 8 && (count & 0x04))
10344 srcmem = adjust_automodify_address_nv (src, SImode, srcreg,
10345 offset);
10346 dstmem = adjust_automodify_address_nv (dst, SImode, destreg,
10347 offset);
10348 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10349 offset += 4;
10351 if (count & 0x02)
10353 srcmem = adjust_automodify_address_nv (src, HImode, srcreg,
10354 offset);
10355 dstmem = adjust_automodify_address_nv (dst, HImode, destreg,
10356 offset);
10357 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10358 offset += 2;
10360 if (count & 0x01)
10362 srcmem = adjust_automodify_address_nv (src, QImode, srcreg,
10363 offset);
10364 dstmem = adjust_automodify_address_nv (dst, QImode, destreg,
10365 offset);
10366 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
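/* Worked example (not in the original source): a 32-bit copy of 15 bytes
   with known good alignment goes through the code above as "rep movsl"
   with the count register set to 3 (12 bytes), then one HImode strmov for
   the (count & 2) tail and one QImode strmov for the final (count & 1) byte.  */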
10369 /* The generic code based on the glibc implementation:
10370 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
10371 allowing accelerated copying there)
10372 - copy the data using rep movsl
10373 - copy the rest. */
10374 else
10376 rtx countreg2;
10377 rtx label = NULL;
10378 rtx srcmem, dstmem;
10379 int desired_alignment = (TARGET_PENTIUMPRO
10380 && (count == 0 || count >= (unsigned int) 260)
10381 ? 8 : UNITS_PER_WORD);
10382 /* Get rid of MEM_OFFSETs, they won't be accurate. */
10383 dst = change_address (dst, BLKmode, destreg);
10384 src = change_address (src, BLKmode, srcreg);
10386 /* If we don't know anything about the alignment, default to the
10387 library version, since it is usually equally fast and results in
10388 shorter code.
10390 Also call the library when we know that the count is large and the call
10391 overhead will not be important. */
10392 if (!TARGET_INLINE_ALL_STRINGOPS
10393 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10394 return 0;
10396 if (TARGET_SINGLE_STRINGOP)
10397 emit_insn (gen_cld ());
10399 countreg2 = gen_reg_rtx (Pmode);
10400 countreg = copy_to_mode_reg (counter_mode, count_exp);
10402 /* We don't use loops to align the destination or to copy parts smaller
10403 than 4 bytes, because gcc is able to optimize such code better (when
10404 the destination or the count really is aligned, gcc is often
10405 able to predict the branches) and because it is friendlier to
10406 hardware branch prediction.
10408 Using loops would be beneficial for the generic case, because we could
10409 handle small counts with them. Many CPUs (such as the Athlon)
10410 have large REP prefix setup costs.
10412 This is quite costly. Maybe we can revisit this decision later or
10413 add some customizability to this code. */
10415 if (count == 0 && align < desired_alignment)
10417 label = gen_label_rtx ();
10418 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10419 LEU, 0, counter_mode, 1, label);
10421 if (align <= 1)
10423 rtx label = ix86_expand_aligntest (destreg, 1);
10424 srcmem = change_address (src, QImode, srcreg);
10425 dstmem = change_address (dst, QImode, destreg);
10426 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10427 ix86_adjust_counter (countreg, 1);
10428 emit_label (label);
10429 LABEL_NUSES (label) = 1;
10431 if (align <= 2)
10433 rtx label = ix86_expand_aligntest (destreg, 2);
10434 srcmem = change_address (src, HImode, srcreg);
10435 dstmem = change_address (dst, HImode, destreg);
10436 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10437 ix86_adjust_counter (countreg, 2);
10438 emit_label (label);
10439 LABEL_NUSES (label) = 1;
10441 if (align <= 4 && desired_alignment > 4)
10443 rtx label = ix86_expand_aligntest (destreg, 4);
10444 srcmem = change_address (src, SImode, srcreg);
10445 dstmem = change_address (dst, SImode, destreg);
10446 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10447 ix86_adjust_counter (countreg, 4);
10448 emit_label (label);
10449 LABEL_NUSES (label) = 1;
10452 if (label && desired_alignment > 4 && !TARGET_64BIT)
10454 emit_label (label);
10455 LABEL_NUSES (label) = 1;
10456 label = NULL_RTX;
10458 if (!TARGET_SINGLE_STRINGOP)
10459 emit_insn (gen_cld ());
10460 if (TARGET_64BIT)
10462 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10463 GEN_INT (3)));
10464 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10466 else
10468 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10469 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10471 srcexp = gen_rtx_PLUS (Pmode, destexp, srcreg);
10472 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10473 emit_insn (gen_rep_mov (destreg, dst, srcreg, src,
10474 countreg2, destexp, srcexp));
10476 if (label)
10478 emit_label (label);
10479 LABEL_NUSES (label) = 1;
10481 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10483 srcmem = change_address (src, SImode, srcreg);
10484 dstmem = change_address (dst, SImode, destreg);
10485 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10487 if ((align <= 4 || count == 0) && TARGET_64BIT)
10489 rtx label = ix86_expand_aligntest (countreg, 4);
10490 srcmem = change_address (src, SImode, srcreg);
10491 dstmem = change_address (dst, SImode, destreg);
10492 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10493 emit_label (label);
10494 LABEL_NUSES (label) = 1;
10496 if (align > 2 && count != 0 && (count & 2))
10498 srcmem = change_address (src, HImode, srcreg);
10499 dstmem = change_address (dst, HImode, destreg);
10500 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10502 if (align <= 2 || count == 0)
10504 rtx label = ix86_expand_aligntest (countreg, 2);
10505 srcmem = change_address (src, HImode, srcreg);
10506 dstmem = change_address (dst, HImode, destreg);
10507 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10508 emit_label (label);
10509 LABEL_NUSES (label) = 1;
10511 if (align > 1 && count != 0 && (count & 1))
10513 srcmem = change_address (src, QImode, srcreg);
10514 dstmem = change_address (dst, QImode, destreg);
10515 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10517 if (align <= 1 || count == 0)
10519 rtx label = ix86_expand_aligntest (countreg, 1);
10520 srcmem = change_address (src, QImode, srcreg);
10521 dstmem = change_address (dst, QImode, destreg);
10522 emit_insn (gen_strmov (destreg, dstmem, srcreg, srcmem));
10523 emit_label (label);
10524 LABEL_NUSES (label) = 1;
10528 return 1;
10531 /* Expand string clear operation (bzero). Use i386 string operations when
10532 profitable. expand_movmem contains similar code. */
10534 ix86_expand_clrmem (rtx dst, rtx count_exp, rtx align_exp)
10536 rtx destreg, zeroreg, countreg, destexp;
10537 enum machine_mode counter_mode;
10538 HOST_WIDE_INT align = 0;
10539 unsigned HOST_WIDE_INT count = 0;
10541 if (GET_CODE (align_exp) == CONST_INT)
10542 align = INTVAL (align_exp);
10544 /* Can't use any of this if the user has appropriated esi. */
10545 if (global_regs[4])
10546 return 0;
10548 /* This simple hack avoids all inlining code and simplifies code below. */
10549 if (!TARGET_ALIGN_STRINGOPS)
10550 align = 32;
10552 if (GET_CODE (count_exp) == CONST_INT)
10554 count = INTVAL (count_exp);
10555 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10556 return 0;
10558 /* Figure out the proper mode for the counter. For 32-bit targets it is always SImode;
10559 for 64-bit targets use SImode when possible, otherwise DImode.
10560 Set count to the number of bytes copied when known at compile time. */
10561 if (!TARGET_64BIT
10562 || GET_MODE (count_exp) == SImode
10563 || x86_64_zext_immediate_operand (count_exp, VOIDmode))
10564 counter_mode = SImode;
10565 else
10566 counter_mode = DImode;
10568 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10569 if (destreg != XEXP (dst, 0))
10570 dst = replace_equiv_address_nv (dst, destreg);
10573 /* When optimizing for size, emit a simple rep ; stosb instruction for
10574 counts not divisible by 4. The movl $N, %ecx; rep; stosb
10575 sequence is 7 bytes long, so if optimizing for size and the count is
10576 small enough that some stosl, stosw and stosb instructions without
10577 rep are shorter, fall through into the next if. */
10579 if ((!optimize || optimize_size)
10580 && (count == 0
10581 || ((count & 0x03)
10582 && (!optimize_size || (count & 0x03) + (count >> 2) > 7))))
10584 emit_insn (gen_cld ());
10586 countreg = ix86_zero_extend_to_Pmode (count_exp);
10587 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
10588 destexp = gen_rtx_PLUS (Pmode, destreg, countreg);
10589 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg, destexp));
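/* Worked example (not in the original source) of the size heuristic above:
   clearing 6 bytes gives (count & 3) + (count >> 2) = 2 + 1 = 3, which is
   not greater than 7, so with -Os we skip the 7-byte "movl $6, %ecx;
   rep stosb" form and fall through to the unrolled stosl + stosw below.  */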
10591 else if (count != 0
10592 && (align >= 8
10593 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
10594 || optimize_size || count < (unsigned int) 64))
10596 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
10597 unsigned HOST_WIDE_INT offset = 0;
10599 emit_insn (gen_cld ());
10601 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
10602 if (count & ~(size - 1))
10604 unsigned HOST_WIDE_INT repcount;
10605 unsigned int max_nonrep;
10607 repcount = count >> (size == 4 ? 2 : 3);
10608 if (!TARGET_64BIT)
10609 repcount &= 0x3fffffff;
10611 /* movl $N, %ecx; rep; stosl is 7 bytes, while N x stosl is N bytes.
10612 movl $N, %ecx; rep; stosq is 8 bytes, while N x stosq is 2xN
10613 bytes. In both cases the latter seems to be faster for small
10614 values of N. */
10615 max_nonrep = size == 4 ? 7 : 4;
10616 if (!optimize_size)
10617 switch (ix86_tune)
10619 case PROCESSOR_PENTIUM4:
10620 case PROCESSOR_NOCONA:
10621 max_nonrep = 3;
10622 break;
10623 default:
10624 break;
10627 if (repcount <= max_nonrep)
10628 while (repcount-- > 0)
10630 rtx mem = adjust_automodify_address_nv (dst,
10631 GET_MODE (zeroreg),
10632 destreg, offset);
10633 emit_insn (gen_strset (destreg, mem, zeroreg));
10634 offset += size;
10636 else
10638 countreg = copy_to_mode_reg (counter_mode, GEN_INT (repcount));
10639 countreg = ix86_zero_extend_to_Pmode (countreg);
10640 destexp = gen_rtx_ASHIFT (Pmode, countreg,
10641 GEN_INT (size == 4 ? 2 : 3));
10642 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10643 emit_insn (gen_rep_stos (destreg, countreg, dst, zeroreg,
10644 destexp));
10645 offset = count & ~(size - 1);
10648 if (size == 8 && (count & 0x04))
10650 rtx mem = adjust_automodify_address_nv (dst, SImode, destreg,
10651 offset);
10652 emit_insn (gen_strset (destreg, mem,
10653 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10654 offset += 4;
10656 if (count & 0x02)
10658 rtx mem = adjust_automodify_address_nv (dst, HImode, destreg,
10659 offset);
10660 emit_insn (gen_strset (destreg, mem,
10661 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10662 offset += 2;
10664 if (count & 0x01)
10666 rtx mem = adjust_automodify_address_nv (dst, QImode, destreg,
10667 offset);
10668 emit_insn (gen_strset (destreg, mem,
10669 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10672 else
10674 rtx countreg2;
10675 rtx label = NULL;
10676 /* Compute desired alignment of the string operation. */
10677 int desired_alignment = (TARGET_PENTIUMPRO
10678 && (count == 0 || count >= (unsigned int) 260)
10679 ? 8 : UNITS_PER_WORD);
10681 /* If we don't know anything about the alignment, default to the
10682 library version, since it is usually equally fast and results in
10683 shorter code.
10685 Also call the library when we know that the count is large and the call
10686 overhead will not be important. */
10687 if (!TARGET_INLINE_ALL_STRINGOPS
10688 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
10689 return 0;
10691 if (TARGET_SINGLE_STRINGOP)
10692 emit_insn (gen_cld ());
10694 countreg2 = gen_reg_rtx (Pmode);
10695 countreg = copy_to_mode_reg (counter_mode, count_exp);
10696 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
10697 /* Get rid of MEM_OFFSET, it won't be accurate. */
10698 dst = change_address (dst, BLKmode, destreg);
10700 if (count == 0 && align < desired_alignment)
10702 label = gen_label_rtx ();
10703 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
10704 LEU, 0, counter_mode, 1, label);
10706 if (align <= 1)
10708 rtx label = ix86_expand_aligntest (destreg, 1);
10709 emit_insn (gen_strset (destreg, dst,
10710 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10711 ix86_adjust_counter (countreg, 1);
10712 emit_label (label);
10713 LABEL_NUSES (label) = 1;
10715 if (align <= 2)
10717 rtx label = ix86_expand_aligntest (destreg, 2);
10718 emit_insn (gen_strset (destreg, dst,
10719 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10720 ix86_adjust_counter (countreg, 2);
10721 emit_label (label);
10722 LABEL_NUSES (label) = 1;
10724 if (align <= 4 && desired_alignment > 4)
10726 rtx label = ix86_expand_aligntest (destreg, 4);
10727 emit_insn (gen_strset (destreg, dst,
10728 (TARGET_64BIT
10729 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
10730 : zeroreg)));
10731 ix86_adjust_counter (countreg, 4);
10732 emit_label (label);
10733 LABEL_NUSES (label) = 1;
10736 if (label && desired_alignment > 4 && !TARGET_64BIT)
10738 emit_label (label);
10739 LABEL_NUSES (label) = 1;
10740 label = NULL_RTX;
10743 if (!TARGET_SINGLE_STRINGOP)
10744 emit_insn (gen_cld ());
10745 if (TARGET_64BIT)
10747 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
10748 GEN_INT (3)));
10749 destexp = gen_rtx_ASHIFT (Pmode, countreg2, GEN_INT (3));
10751 else
10753 emit_insn (gen_lshrsi3 (countreg2, countreg, const2_rtx));
10754 destexp = gen_rtx_ASHIFT (Pmode, countreg2, const2_rtx);
10756 destexp = gen_rtx_PLUS (Pmode, destexp, destreg);
10757 emit_insn (gen_rep_stos (destreg, countreg2, dst, zeroreg, destexp));
10759 if (label)
10761 emit_label (label);
10762 LABEL_NUSES (label) = 1;
10765 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
10766 emit_insn (gen_strset (destreg, dst,
10767 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10768 if (TARGET_64BIT && (align <= 4 || count == 0))
10770 rtx label = ix86_expand_aligntest (countreg, 4);
10771 emit_insn (gen_strset (destreg, dst,
10772 gen_rtx_SUBREG (SImode, zeroreg, 0)));
10773 emit_label (label);
10774 LABEL_NUSES (label) = 1;
10776 if (align > 2 && count != 0 && (count & 2))
10777 emit_insn (gen_strset (destreg, dst,
10778 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10779 if (align <= 2 || count == 0)
10781 rtx label = ix86_expand_aligntest (countreg, 2);
10782 emit_insn (gen_strset (destreg, dst,
10783 gen_rtx_SUBREG (HImode, zeroreg, 0)));
10784 emit_label (label);
10785 LABEL_NUSES (label) = 1;
10787 if (align > 1 && count != 0 && (count & 1))
10788 emit_insn (gen_strset (destreg, dst,
10789 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10790 if (align <= 1 || count == 0)
10792 rtx label = ix86_expand_aligntest (countreg, 1);
10793 emit_insn (gen_strset (destreg, dst,
10794 gen_rtx_SUBREG (QImode, zeroreg, 0)));
10795 emit_label (label);
10796 LABEL_NUSES (label) = 1;
10799 return 1;
10802 /* Expand strlen. */
10804 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
10806 rtx addr, scratch1, scratch2, scratch3, scratch4;
10808 /* The generic case of the strlen expander is long. Avoid expanding
10809 it unless TARGET_INLINE_ALL_STRINGOPS. */
10811 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
10812 && !TARGET_INLINE_ALL_STRINGOPS
10813 && !optimize_size
10814 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
10815 return 0;
10817 addr = force_reg (Pmode, XEXP (src, 0));
10818 scratch1 = gen_reg_rtx (Pmode);
10820 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
10821 && !optimize_size)
10823 /* Well, it seems that some optimizer does not combine a call like
10824 foo(strlen(bar), strlen(bar));
10825 when the move and the subtraction are done here. It does calculate
10826 the length just once when these instructions are done inside
10827 output_strlen_unroll(). But I think that since &bar[strlen(bar)] is
10828 often used and I use one fewer register for the lifetime of
10829 output_strlen_unroll(), this is better. */
10831 emit_move_insn (out, addr);
10833 ix86_expand_strlensi_unroll_1 (out, src, align);
10835 /* strlensi_unroll_1 returns the address of the zero at the end of
10836 the string, like memchr(), so compute the length by subtracting
10837 the start address. */
10838 if (TARGET_64BIT)
10839 emit_insn (gen_subdi3 (out, out, addr));
10840 else
10841 emit_insn (gen_subsi3 (out, out, addr));
10843 else
10845 rtx unspec;
10846 scratch2 = gen_reg_rtx (Pmode);
10847 scratch3 = gen_reg_rtx (Pmode);
10848 scratch4 = force_reg (Pmode, constm1_rtx);
10850 emit_move_insn (scratch3, addr);
10851 eoschar = force_reg (QImode, eoschar);
10853 emit_insn (gen_cld ());
10854 src = replace_equiv_address_nv (src, scratch3);
10856 /* If .md starts supporting :P, this can be done in .md. */
10857 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
10858 scratch4), UNSPEC_SCAS);
10859 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
10860 if (TARGET_64BIT)
10862 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
10863 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
10865 else
10867 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
10868 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
10871 return 1;
10874 /* Expand the appropriate insns for doing strlen if not just doing
10875 repnz; scasb
10877 out = result, initialized with the start address
10878 align_rtx = alignment of the address.
10879 scratch = scratch register, initialized with the start address when
10880 not aligned, otherwise undefined
10882 This is just the body. It needs the initializations mentioned above and
10883 some address computation at the end. These things are done in i386.md. */
10885 static void
10886 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
10888 int align;
10889 rtx tmp;
10890 rtx align_2_label = NULL_RTX;
10891 rtx align_3_label = NULL_RTX;
10892 rtx align_4_label = gen_label_rtx ();
10893 rtx end_0_label = gen_label_rtx ();
10894 rtx mem;
10895 rtx tmpreg = gen_reg_rtx (SImode);
10896 rtx scratch = gen_reg_rtx (SImode);
10897 rtx cmp;
10899 align = 0;
10900 if (GET_CODE (align_rtx) == CONST_INT)
10901 align = INTVAL (align_rtx);
10903 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
10905 /* Is there a known alignment and is it less than 4? */
10906 if (align < 4)
10908 rtx scratch1 = gen_reg_rtx (Pmode);
10909 emit_move_insn (scratch1, out);
10910 /* Is there a known alignment and is it not 2? */
10911 if (align != 2)
10913 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
10914 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
10916 /* Leave just the 3 lower bits. */
10917 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
10918 NULL_RTX, 0, OPTAB_WIDEN);
10920 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
10921 Pmode, 1, align_4_label);
10922 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
10923 Pmode, 1, align_2_label);
10924 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
10925 Pmode, 1, align_3_label);
10927 else
10929 /* Since the alignment is 2, we have to check 2 or 0 bytes;
10930 check whether it is aligned to a 4-byte boundary. */
10932 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
10933 NULL_RTX, 0, OPTAB_WIDEN);
10935 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
10936 Pmode, 1, align_4_label);
10939 mem = change_address (src, QImode, out);
10941 /* Now compare the bytes. */
10943 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
10944 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
10945 QImode, 1, end_0_label);
10947 /* Increment the address. */
10948 if (TARGET_64BIT)
10949 emit_insn (gen_adddi3 (out, out, const1_rtx));
10950 else
10951 emit_insn (gen_addsi3 (out, out, const1_rtx));
10953 /* Not needed with an alignment of 2 */
10954 if (align != 2)
10956 emit_label (align_2_label);
10958 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
10959 end_0_label);
10961 if (TARGET_64BIT)
10962 emit_insn (gen_adddi3 (out, out, const1_rtx));
10963 else
10964 emit_insn (gen_addsi3 (out, out, const1_rtx));
10966 emit_label (align_3_label);
10969 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
10970 end_0_label);
10972 if (TARGET_64BIT)
10973 emit_insn (gen_adddi3 (out, out, const1_rtx));
10974 else
10975 emit_insn (gen_addsi3 (out, out, const1_rtx));
10978 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
10979 align this loop: it only makes the program larger and does not help to
10980 speed it up. */
10981 emit_label (align_4_label);
10983 mem = change_address (src, SImode, out);
10984 emit_move_insn (scratch, mem);
10985 if (TARGET_64BIT)
10986 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
10987 else
10988 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
10990 /* This formula yields a nonzero result iff one of the bytes is zero.
10991 This saves three branches inside the loop and many cycles. */
10993 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
10994 emit_insn (gen_one_cmplsi2 (scratch, scratch));
10995 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
10996 emit_insn (gen_andsi3 (tmpreg, tmpreg,
10997 gen_int_mode (0x80808080, SImode)));
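/* Illustrative note (not in the original source): the three instructions
   above compute (word - 0x01010101) & ~word & 0x80808080, the classic
   "does this word contain a zero byte" test.  For word = 0x41004242:
   0x41004242 - 0x01010101 = 0x3fff4141, ~word = 0xbeffbdbd, and the final
   AND with 0x80808080 leaves 0x00800000, flagging the zero byte; a word
   with no zero byte produces 0 and the loop continues.  */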
10998 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
10999 align_4_label);
11001 if (TARGET_CMOVE)
11003 rtx reg = gen_reg_rtx (SImode);
11004 rtx reg2 = gen_reg_rtx (Pmode);
11005 emit_move_insn (reg, tmpreg);
11006 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
11008 /* If zero is not in the first two bytes, move two bytes forward. */
11009 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11010 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11011 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11012 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
11013 gen_rtx_IF_THEN_ELSE (SImode, tmp,
11014 reg,
11015 tmpreg)));
11016 /* Emit lea manually to avoid clobbering of flags. */
11017 emit_insn (gen_rtx_SET (SImode, reg2,
11018 gen_rtx_PLUS (Pmode, out, const2_rtx)));
11020 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11021 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11022 emit_insn (gen_rtx_SET (VOIDmode, out,
11023 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
11024 reg2,
11025 out)));
11028 else
11030 rtx end_2_label = gen_label_rtx ();
11031 /* Is zero in the first two bytes? */
11033 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11034 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11035 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
11036 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11037 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
11038 pc_rtx);
11039 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11040 JUMP_LABEL (tmp) = end_2_label;
11042 /* Not in the first two. Move two bytes forward. */
11043 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
11044 if (TARGET_64BIT)
11045 emit_insn (gen_adddi3 (out, out, const2_rtx));
11046 else
11047 emit_insn (gen_addsi3 (out, out, const2_rtx));
11049 emit_label (end_2_label);
11053 /* Avoid branch in fixing the byte. */
11054 tmpreg = gen_lowpart (QImode, tmpreg);
11055 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
11056 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
11057 if (TARGET_64BIT)
11058 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
11059 else
11060 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
11062 emit_label (end_0_label);
11065 void
11066 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
11067 rtx callarg2 ATTRIBUTE_UNUSED,
11068 rtx pop, int sibcall)
11070 rtx use = NULL, call;
11072 if (pop == const0_rtx)
11073 pop = NULL;
11074 if (TARGET_64BIT && pop)
11075 abort ();
11077 #if TARGET_MACHO
11078 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
11079 fnaddr = machopic_indirect_call_target (fnaddr);
11080 #else
11081 /* Static functions and indirect calls don't need the pic register. */
11082 if (! TARGET_64BIT && flag_pic
11083 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
11084 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
11085 use_reg (&use, pic_offset_table_rtx);
11087 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
11089 rtx al = gen_rtx_REG (QImode, 0);
11090 emit_move_insn (al, callarg2);
11091 use_reg (&use, al);
11093 #endif /* TARGET_MACHO */
11095 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
11097 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11098 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11100 if (sibcall && TARGET_64BIT
11101 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
11103 rtx addr;
11104 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11105 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
11106 emit_move_insn (fnaddr, addr);
11107 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11110 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
11111 if (retval)
11112 call = gen_rtx_SET (VOIDmode, retval, call);
11113 if (pop)
11115 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
11116 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
11117 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
11120 call = emit_call_insn (call);
11121 if (use)
11122 CALL_INSN_FUNCTION_USAGE (call) = use;
11126 /* Clear stack slot assignments remembered from previous functions.
11127 This is called from INIT_EXPANDERS once before RTL is emitted for each
11128 function. */
11130 static struct machine_function *
11131 ix86_init_machine_status (void)
11133 struct machine_function *f;
11135 f = ggc_alloc_cleared (sizeof (struct machine_function));
11136 f->use_fast_prologue_epilogue_nregs = -1;
11138 return f;
11141 /* Return a MEM corresponding to a stack slot with mode MODE.
11142 Allocate a new slot if necessary.
11144 The RTL for a function can have several slots available: N is
11145 which slot to use. */
11148 assign_386_stack_local (enum machine_mode mode, int n)
11150 struct stack_local_entry *s;
11152 if (n < 0 || n >= MAX_386_STACK_LOCALS)
11153 abort ();
11155 for (s = ix86_stack_locals; s; s = s->next)
11156 if (s->mode == mode && s->n == n)
11157 return s->rtl;
11159 s = (struct stack_local_entry *)
11160 ggc_alloc (sizeof (struct stack_local_entry));
11161 s->n = n;
11162 s->mode = mode;
11163 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11165 s->next = ix86_stack_locals;
11166 ix86_stack_locals = s;
11167 return s->rtl;
11170 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11172 static GTY(()) rtx ix86_tls_symbol;
11174 ix86_tls_get_addr (void)
11177 if (!ix86_tls_symbol)
11179 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
11180 (TARGET_GNU_TLS && !TARGET_64BIT)
11181 ? "___tls_get_addr"
11182 : "__tls_get_addr");
11185 return ix86_tls_symbol;
11188 /* Calculate the length of the memory address in the instruction
11189 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11192 memory_address_length (rtx addr)
11194 struct ix86_address parts;
11195 rtx base, index, disp;
11196 int len;
11198 if (GET_CODE (addr) == PRE_DEC
11199 || GET_CODE (addr) == POST_INC
11200 || GET_CODE (addr) == PRE_MODIFY
11201 || GET_CODE (addr) == POST_MODIFY)
11202 return 0;
11204 if (! ix86_decompose_address (addr, &parts))
11205 abort ();
11207 base = parts.base;
11208 index = parts.index;
11209 disp = parts.disp;
11210 len = 0;
11212 /* Rule of thumb:
11213 - esp as the base always wants an index,
11214 - ebp as the base always wants a displacement. */
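/* Illustrative note (not in the original source): a plain (%esp) operand
   cannot be expressed with a one-byte modrm - it needs a SIB byte - and the
   modrm encoding that would mean plain (%ebp) is taken by disp32-only
   addressing, so (%ebp) has to be emitted as 0(%ebp) with a one-byte
   displacement.  That is what the two special cases below account for.  */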
11216 /* Register Indirect. */
11217 if (base && !index && !disp)
11219 /* esp (for its index) and ebp (for its displacement) need
11220 the two-byte modrm form. */
11221 if (addr == stack_pointer_rtx
11222 || addr == arg_pointer_rtx
11223 || addr == frame_pointer_rtx
11224 || addr == hard_frame_pointer_rtx)
11225 len = 1;
11228 /* Direct Addressing. */
11229 else if (disp && !base && !index)
11230 len = 4;
11232 else
11234 /* Find the length of the displacement constant. */
11235 if (disp)
11237 if (GET_CODE (disp) == CONST_INT
11238 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
11239 && base)
11240 len = 1;
11241 else
11242 len = 4;
11244 /* ebp always wants a displacement. */
11245 else if (base == hard_frame_pointer_rtx)
11246 len = 1;
11248 /* An index requires the two-byte modrm form.... */
11249 if (index
11250 /* ...like esp, which always wants an index. */
11251 || base == stack_pointer_rtx
11252 || base == arg_pointer_rtx
11253 || base == frame_pointer_rtx)
11254 len += 1;
11257 return len;
11260 /* Compute default value for "length_immediate" attribute. When SHORTFORM
11261 is set, expect that the insn has an 8-bit immediate alternative. */
11263 ix86_attr_length_immediate_default (rtx insn, int shortform)
11265 int len = 0;
11266 int i;
11267 extract_insn_cached (insn);
11268 for (i = recog_data.n_operands - 1; i >= 0; --i)
11269 if (CONSTANT_P (recog_data.operand[i]))
11271 if (len)
11272 abort ();
11273 if (shortform
11274 && GET_CODE (recog_data.operand[i]) == CONST_INT
11275 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
11276 len = 1;
11277 else
11279 switch (get_attr_mode (insn))
11281 case MODE_QI:
11282 len+=1;
11283 break;
11284 case MODE_HI:
11285 len+=2;
11286 break;
11287 case MODE_SI:
11288 len+=4;
11289 break;
11290 /* Immediates for DImode instructions are encoded as 32bit sign extended values. */
11291 case MODE_DI:
11292 len+=4;
11293 break;
11294 default:
11295 fatal_insn ("unknown insn mode", insn);
11299 return len;
11301 /* Compute default value for "length_address" attribute. */
11303 ix86_attr_length_address_default (rtx insn)
11305 int i;
11307 if (get_attr_type (insn) == TYPE_LEA)
11309 rtx set = PATTERN (insn);
11310 if (GET_CODE (set) == SET)
11312 else if (GET_CODE (set) == PARALLEL
11313 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
11314 set = XVECEXP (set, 0, 0);
11315 else
11317 #ifdef ENABLE_CHECKING
11318 abort ();
11319 #endif
11320 return 0;
11323 return memory_address_length (SET_SRC (set));
11326 extract_insn_cached (insn);
11327 for (i = recog_data.n_operands - 1; i >= 0; --i)
11328 if (GET_CODE (recog_data.operand[i]) == MEM)
11330 return memory_address_length (XEXP (recog_data.operand[i], 0));
11331 break;
11333 return 0;
11336 /* Return the maximum number of instructions a cpu can issue. */
11338 static int
11339 ix86_issue_rate (void)
11341 switch (ix86_tune)
11343 case PROCESSOR_PENTIUM:
11344 case PROCESSOR_K6:
11345 return 2;
11347 case PROCESSOR_PENTIUMPRO:
11348 case PROCESSOR_PENTIUM4:
11349 case PROCESSOR_ATHLON:
11350 case PROCESSOR_K8:
11351 case PROCESSOR_NOCONA:
11352 return 3;
11354 default:
11355 return 1;
11359 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
11360 by DEP_INSN and nothing else set by DEP_INSN. */
11362 static int
11363 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11365 rtx set, set2;
11367 /* Simplify the test for uninteresting insns. */
11368 if (insn_type != TYPE_SETCC
11369 && insn_type != TYPE_ICMOV
11370 && insn_type != TYPE_FCMOV
11371 && insn_type != TYPE_IBR)
11372 return 0;
11374 if ((set = single_set (dep_insn)) != 0)
11376 set = SET_DEST (set);
11377 set2 = NULL_RTX;
11379 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
11380 && XVECLEN (PATTERN (dep_insn), 0) == 2
11381 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
11382 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
11384 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11385 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
11387 else
11388 return 0;
11390 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
11391 return 0;
11393 /* This test is true if the dependent insn reads the flags but
11394 not any other potentially set register. */
11395 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
11396 return 0;
11398 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
11399 return 0;
11401 return 1;
11404 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11405 address with operands set by DEP_INSN. */
11407 static int
11408 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11410 rtx addr;
11412 if (insn_type == TYPE_LEA
11413 && TARGET_PENTIUM)
11415 addr = PATTERN (insn);
11416 if (GET_CODE (addr) == SET)
11418 else if (GET_CODE (addr) == PARALLEL
11419 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
11420 addr = XVECEXP (addr, 0, 0);
11421 else
11422 abort ();
11423 addr = SET_SRC (addr);
11425 else
11427 int i;
11428 extract_insn_cached (insn);
11429 for (i = recog_data.n_operands - 1; i >= 0; --i)
11430 if (GET_CODE (recog_data.operand[i]) == MEM)
11432 addr = XEXP (recog_data.operand[i], 0);
11433 goto found;
11435 return 0;
11436 found:;
11439 return modified_in_p (addr, dep_insn);
11442 static int
11443 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
11445 enum attr_type insn_type, dep_insn_type;
11446 enum attr_memory memory;
11447 rtx set, set2;
11448 int dep_insn_code_number;
11450 /* Anti and output dependencies have zero cost on all CPUs. */
11451 if (REG_NOTE_KIND (link) != 0)
11452 return 0;
11454 dep_insn_code_number = recog_memoized (dep_insn);
11456 /* If we can't recognize the insns, we can't really do anything. */
11457 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
11458 return cost;
11460 insn_type = get_attr_type (insn);
11461 dep_insn_type = get_attr_type (dep_insn);
11463 switch (ix86_tune)
11465 case PROCESSOR_PENTIUM:
11466 /* Address Generation Interlock adds a cycle of latency. */
11467 if (ix86_agi_dependant (insn, dep_insn, insn_type))
11468 cost += 1;
11470 /* ??? Compares pair with jump/setcc. */
11471 if (ix86_flags_dependant (insn, dep_insn, insn_type))
11472 cost = 0;
11474 /* Floating point stores require value to be ready one cycle earlier. */
11475 if (insn_type == TYPE_FMOV
11476 && get_attr_memory (insn) == MEMORY_STORE
11477 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11478 cost += 1;
11479 break;
11481 case PROCESSOR_PENTIUMPRO:
11482 memory = get_attr_memory (insn);
11484 /* INT->FP conversion is expensive. */
11485 if (get_attr_fp_int_src (dep_insn))
11486 cost += 5;
11488 /* There is one cycle extra latency between an FP op and a store. */
11489 if (insn_type == TYPE_FMOV
11490 && (set = single_set (dep_insn)) != NULL_RTX
11491 && (set2 = single_set (insn)) != NULL_RTX
11492 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
11493 && GET_CODE (SET_DEST (set2)) == MEM)
11494 cost += 1;
11496 /* Show the ability of the reorder buffer to hide the latency of a load by
11497 executing it in parallel with the previous instruction when the
11498 previous instruction is not needed to compute the address. */
11499 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11500 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11502 /* Claim moves to take one cycle, as the core can issue one load
11503 at a time and the next load can start a cycle later. */
11504 if (dep_insn_type == TYPE_IMOV
11505 || dep_insn_type == TYPE_FMOV)
11506 cost = 1;
11507 else if (cost > 1)
11508 cost--;
11510 break;
11512 case PROCESSOR_K6:
11513 memory = get_attr_memory (insn);
11515 /* The esp dependency is resolved before the instruction is really
11516 finished. */
11517 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
11518 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
11519 return 1;
11521 /* INT->FP conversion is expensive. */
11522 if (get_attr_fp_int_src (dep_insn))
11523 cost += 5;
11525 /* Show the ability of the reorder buffer to hide the latency of a load by
11526 executing it in parallel with the previous instruction when the
11527 previous instruction is not needed to compute the address. */
11528 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11529 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11531 /* Claim moves to take one cycle, as the core can issue one load
11532 at a time and the next load can start a cycle later. */
11533 if (dep_insn_type == TYPE_IMOV
11534 || dep_insn_type == TYPE_FMOV)
11535 cost = 1;
11536 else if (cost > 2)
11537 cost -= 2;
11538 else
11539 cost = 1;
11541 break;
11543 case PROCESSOR_ATHLON:
11544 case PROCESSOR_K8:
11545 memory = get_attr_memory (insn);
11547 /* Show the ability of the reorder buffer to hide the latency of a load by
11548 executing it in parallel with the previous instruction when the
11549 previous instruction is not needed to compute the address. */
11550 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
11551 && !ix86_agi_dependant (insn, dep_insn, insn_type))
11553 enum attr_unit unit = get_attr_unit (insn);
11554 int loadcost = 3;
11556 /* Because of the difference between the length of integer and
11557 floating unit pipeline preparation stages, the memory operands
11558 for floating point are cheaper.
11560 ??? For the Athlon the difference is most probably 2. */
11561 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
11562 loadcost = 3;
11563 else
11564 loadcost = TARGET_ATHLON ? 2 : 0;
11566 if (cost >= loadcost)
11567 cost -= loadcost;
11568 else
11569 cost = 0;
11572 default:
11573 break;
11576 return cost;
11579 /* How many alternative schedules to try. This should be as wide as the
11580 scheduling freedom in the DFA, but no wider. Making this value too
11581 large results in extra work for the scheduler. */
11583 static int
11584 ia32_multipass_dfa_lookahead (void)
11586 if (ix86_tune == PROCESSOR_PENTIUM)
11587 return 2;
11589 if (ix86_tune == PROCESSOR_PENTIUMPRO
11590 || ix86_tune == PROCESSOR_K6)
11591 return 1;
11593 else
11594 return 0;
11598 /* Compute the alignment given to a constant that is being placed in memory.
11599 EXP is the constant and ALIGN is the alignment that the object would
11600 ordinarily have.
11601 The value of this function is used instead of that alignment to align
11602 the object. */
11605 ix86_constant_alignment (tree exp, int align)
11607 if (TREE_CODE (exp) == REAL_CST)
11609 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
11610 return 64;
11611 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
11612 return 128;
11614 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
11615 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
11616 return BITS_PER_WORD;
11618 return align;
11621 /* Compute the alignment for a static variable.
11622 TYPE is the data type, and ALIGN is the alignment that
11623 the object would ordinarily have. The value of this function is used
11624 instead of that alignment to align the object. */
11627 ix86_data_alignment (tree type, int align)
11629 if (AGGREGATE_TYPE_P (type)
11630 && TYPE_SIZE (type)
11631 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11632 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
11633 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
11634 return 256;
11636 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
11637 to a 16-byte boundary. */
11638 if (TARGET_64BIT)
11640 if (AGGREGATE_TYPE_P (type)
11641 && TYPE_SIZE (type)
11642 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11643 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
11644 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
11645 return 128;
11648 if (TREE_CODE (type) == ARRAY_TYPE)
11650 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
11651 return 64;
11652 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
11653 return 128;
11655 else if (TREE_CODE (type) == COMPLEX_TYPE)
11658 if (TYPE_MODE (type) == DCmode && align < 64)
11659 return 64;
11660 if (TYPE_MODE (type) == XCmode && align < 128)
11661 return 128;
11663 else if ((TREE_CODE (type) == RECORD_TYPE
11664 || TREE_CODE (type) == UNION_TYPE
11665 || TREE_CODE (type) == QUAL_UNION_TYPE)
11666 && TYPE_FIELDS (type))
11668 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
11669 return 64;
11670 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
11671 return 128;
11673 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
11674 || TREE_CODE (type) == INTEGER_TYPE)
11676 if (TYPE_MODE (type) == DFmode && align < 64)
11677 return 64;
11678 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
11679 return 128;
11682 return align;
11685 /* Compute the alignment for a local variable.
11686 TYPE is the data type, and ALIGN is the alignment that
11687 the object would ordinarily have. The value of this macro is used
11688 instead of that alignment to align the object. */
11691 ix86_local_alignment (tree type, int align)
11693 /* x86-64 ABI requires arrays greater than 16 bytes to be aligned
11694 to a 16-byte boundary. */
11695 if (TARGET_64BIT)
11697 if (AGGREGATE_TYPE_P (type)
11698 && TYPE_SIZE (type)
11699 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
11700 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
11701 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
11702 return 128;
11704 if (TREE_CODE (type) == ARRAY_TYPE)
11706 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
11707 return 64;
11708 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
11709 return 128;
11711 else if (TREE_CODE (type) == COMPLEX_TYPE)
11713 if (TYPE_MODE (type) == DCmode && align < 64)
11714 return 64;
11715 if (TYPE_MODE (type) == XCmode && align < 128)
11716 return 128;
11718 else if ((TREE_CODE (type) == RECORD_TYPE
11719 || TREE_CODE (type) == UNION_TYPE
11720 || TREE_CODE (type) == QUAL_UNION_TYPE)
11721 && TYPE_FIELDS (type))
11723 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
11724 return 64;
11725 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
11726 return 128;
11728 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
11729 || TREE_CODE (type) == INTEGER_TYPE)
11732 if (TYPE_MODE (type) == DFmode && align < 64)
11733 return 64;
11734 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
11735 return 128;
11737 return align;
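/* For illustration: this hook is presumably reached through the
   LOCAL_ALIGNMENT macro for variables on the stack, e.g.

       #define LOCAL_ALIGNMENT(TYPE, ALIGN) ix86_local_alignment ((TYPE), (ALIGN))

   A local scalar "double" takes the REAL_TYPE/DFmode case and gets 64,
   while a local variable of a 128-bit vector type (V4SFmode and
   friends) is raised to 128 by the ALIGN_MODE_128 test.  */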
11740 /* Emit RTL insns to initialize the variable parts of a trampoline.
11741 FNADDR is an RTX for the address of the function's pure code.
11742 CXT is an RTX for the static chain value for the function. */
11743 void
11744 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
11746 if (!TARGET_64BIT)
11748 /* Compute offset from the end of the jmp to the target function. */
11749 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
11750 plus_constant (tramp, 10),
11751 NULL_RTX, 1, OPTAB_DIRECT);
11752 emit_move_insn (gen_rtx_MEM (QImode, tramp),
11753 gen_int_mode (0xb9, QImode));
11754 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
11755 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
11756 gen_int_mode (0xe9, QImode));
11757 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
11759 else
11761 int offset = 0;
11762 /* Try to load the address using the shorter movl instead of movabs.
11763 We may want to support movq for kernel mode, but the kernel does not use
11764 trampolines at the moment. */
11765 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
11767 fnaddr = copy_to_mode_reg (DImode, fnaddr);
11768 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11769 gen_int_mode (0xbb41, HImode));
11770 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
11771 gen_lowpart (SImode, fnaddr));
11772 offset += 6;
11774 else
11776 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11777 gen_int_mode (0xbb49, HImode));
11778 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
11779 fnaddr);
11780 offset += 10;
11782 /* Load the static chain into r10 using movabs. */
11783 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11784 gen_int_mode (0xba49, HImode));
11785 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
11786 cxt);
11787 offset += 10;
11788 /* Jump to r11. */
11789 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
11790 gen_int_mode (0xff49, HImode));
11791 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
11792 gen_int_mode (0xe3, QImode));
11793 offset += 3;
11794 if (offset > TRAMPOLINE_SIZE)
11795 abort ();
11798 #ifdef ENABLE_EXECUTE_STACK
11799 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
11800 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
11801 #endif
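/* For illustration, the trampoline bytes emitted above decode as
   follows.  !TARGET_64BIT (10 bytes):

       b9 <cxt:4>          mov  $cxt, %ecx
       e9 <disp:4>         jmp  <fnaddr>      disp = fnaddr - (tramp + 10)

   TARGET_64BIT, using the short movl form when FNADDR fits in 32 bits:

       41 bb <fnaddr:4>    mov    $fnaddr, %r11d   (else 49 bb <fnaddr:8>)
       49 ba <cxt:8>       movabs $cxt, %r10
       49 ff e3            jmp    *%r11
*/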
11804 #define def_builtin(MASK, NAME, TYPE, CODE) \
11805 do { \
11806 if ((MASK) & target_flags \
11807 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
11808 lang_hooks.builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
11809 NULL, NULL_TREE); \
11810 } while (0)
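/* For illustration, a later call such as

       def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void,
                    IX86_BUILTIN_LFENCE);

   registers the builtin only when -msse2 is enabled, and the MASK_64BIT
   test keeps 64-bit-only builtins out of 32-bit compilations.  */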
11812 struct builtin_description
11814 const unsigned int mask;
11815 const enum insn_code icode;
11816 const char *const name;
11817 const enum ix86_builtins code;
11818 const enum rtx_code comparison;
11819 const unsigned int flag;
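/* For illustration, the first bdesc_comi entry below,

       { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq",
         IX86_BUILTIN_COMIEQSS, UNEQ, 0 },

   says: when SSE is enabled, register "__builtin_ia32_comieq" under the
   code IX86_BUILTIN_COMIEQSS and expand it through the sse_comi insn
   pattern with an UNEQ comparison.  The trailing flag appears to mean
   "swap the operands" where it is nonzero (the GT/GE compares in
   bdesc_2arg).  */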
11822 static const struct builtin_description bdesc_comi[] =
11824 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
11825 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
11826 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
11827 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
11828 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
11829 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
11830 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
11831 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
11832 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
11833 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
11834 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
11835 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
11836 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
11837 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
11838 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
11839 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
11840 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
11841 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
11842 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
11843 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
11844 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
11845 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
11846 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
11847 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
11850 static const struct builtin_description bdesc_2arg[] =
11852 /* SSE */
11853 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
11854 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
11855 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
11856 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
11857 { MASK_SSE, CODE_FOR_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
11858 { MASK_SSE, CODE_FOR_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
11859 { MASK_SSE, CODE_FOR_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
11860 { MASK_SSE, CODE_FOR_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
11862 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
11863 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
11864 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
11865 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, 1 },
11866 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, 1 },
11867 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
11868 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, EQ, 0 },
11869 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, LT, 0 },
11870 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, LE, 0 },
11871 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, LT, 1 },
11872 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, LE, 1 },
11873 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, UNORDERED, 0 },
11874 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
11875 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
11876 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
11877 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
11878 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, EQ, 0 },
11879 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, LT, 0 },
11880 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, LE, 0 },
11881 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
11883 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
11884 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
11885 { MASK_SSE, CODE_FOR_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
11886 { MASK_SSE, CODE_FOR_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
11888 { MASK_SSE, CODE_FOR_sse_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
11889 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
11890 { MASK_SSE, CODE_FOR_sse_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
11891 { MASK_SSE, CODE_FOR_sse_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
11893 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
11894 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
11895 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
11896 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
11897 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
11899 /* MMX */
11900 { MASK_MMX, CODE_FOR_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
11901 { MASK_MMX, CODE_FOR_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
11902 { MASK_MMX, CODE_FOR_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
11903 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
11904 { MASK_MMX, CODE_FOR_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
11905 { MASK_MMX, CODE_FOR_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
11906 { MASK_MMX, CODE_FOR_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
11907 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
11909 { MASK_MMX, CODE_FOR_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
11910 { MASK_MMX, CODE_FOR_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
11911 { MASK_MMX, CODE_FOR_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
11912 { MASK_MMX, CODE_FOR_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
11913 { MASK_MMX, CODE_FOR_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
11914 { MASK_MMX, CODE_FOR_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
11915 { MASK_MMX, CODE_FOR_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
11916 { MASK_MMX, CODE_FOR_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
11918 { MASK_MMX, CODE_FOR_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
11919 { MASK_MMX, CODE_FOR_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
11920 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
11922 { MASK_MMX, CODE_FOR_mmx_anddi3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
11923 { MASK_MMX, CODE_FOR_mmx_nanddi3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
11924 { MASK_MMX, CODE_FOR_mmx_iordi3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
11925 { MASK_MMX, CODE_FOR_mmx_xordi3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
11927 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
11928 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
11930 { MASK_MMX, CODE_FOR_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
11931 { MASK_MMX, CODE_FOR_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
11932 { MASK_MMX, CODE_FOR_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
11933 { MASK_MMX, CODE_FOR_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
11934 { MASK_MMX, CODE_FOR_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
11935 { MASK_MMX, CODE_FOR_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
11937 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
11938 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
11939 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
11940 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
11942 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
11943 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
11944 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
11945 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
11946 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
11947 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
11949 /* Special. */
11950 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
11951 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
11952 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
11954 { MASK_SSE, CODE_FOR_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
11955 { MASK_SSE, CODE_FOR_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
11956 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
11958 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
11959 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
11960 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
11961 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
11962 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
11963 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
11965 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
11966 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
11967 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
11968 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
11969 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
11970 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
11972 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
11973 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
11974 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
11975 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
11977 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
11978 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
11980 /* SSE2 */
11981 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
11982 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
11983 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
11984 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
11985 { MASK_SSE2, CODE_FOR_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
11986 { MASK_SSE2, CODE_FOR_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
11987 { MASK_SSE2, CODE_FOR_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
11988 { MASK_SSE2, CODE_FOR_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
11990 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
11991 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
11992 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
11993 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, 1 },
11994 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, 1 },
11995 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
11996 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, EQ, 0 },
11997 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, LT, 0 },
11998 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, LE, 0 },
11999 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, LT, 1 },
12000 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, LE, 1 },
12001 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, UNORDERED, 0 },
12002 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12003 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12004 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12005 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12006 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, EQ, 0 },
12007 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, LT, 0 },
12008 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, LE, 0 },
12009 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, UNORDERED, 0 },
12011 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12012 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12013 { MASK_SSE2, CODE_FOR_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12014 { MASK_SSE2, CODE_FOR_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12016 { MASK_SSE2, CODE_FOR_sse2_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12017 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12018 { MASK_SSE2, CODE_FOR_sse2_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12019 { MASK_SSE2, CODE_FOR_sse2_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12021 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12022 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12023 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12025 /* SSE2 MMX */
12026 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12027 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12028 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12029 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12030 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12031 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12032 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12033 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12035 { MASK_MMX, CODE_FOR_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12036 { MASK_MMX, CODE_FOR_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12037 { MASK_MMX, CODE_FOR_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12038 { MASK_MMX, CODE_FOR_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12039 { MASK_MMX, CODE_FOR_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12040 { MASK_MMX, CODE_FOR_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12041 { MASK_MMX, CODE_FOR_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12042 { MASK_MMX, CODE_FOR_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12044 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12045 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12047 { MASK_SSE2, CODE_FOR_sse2_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12048 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12049 { MASK_SSE2, CODE_FOR_sse2_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12050 { MASK_SSE2, CODE_FOR_sse2_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12052 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12053 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12055 { MASK_SSE2, CODE_FOR_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12056 { MASK_SSE2, CODE_FOR_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12057 { MASK_SSE2, CODE_FOR_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12058 { MASK_SSE2, CODE_FOR_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12059 { MASK_SSE2, CODE_FOR_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12060 { MASK_SSE2, CODE_FOR_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12062 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12063 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12064 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12065 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12067 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12068 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12069 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12070 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12071 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12072 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12073 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12074 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12076 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12077 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12078 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12080 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12081 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12083 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
12084 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
12086 { MASK_SSE2, CODE_FOR_ashlv8hi3_ti, 0, IX86_BUILTIN_PSLLW128, 0, 0 },
12087 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12088 { MASK_SSE2, CODE_FOR_ashlv4si3_ti, 0, IX86_BUILTIN_PSLLD128, 0, 0 },
12089 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12090 { MASK_SSE2, CODE_FOR_ashlv2di3_ti, 0, IX86_BUILTIN_PSLLQ128, 0, 0 },
12091 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12093 { MASK_SSE2, CODE_FOR_lshrv8hi3_ti, 0, IX86_BUILTIN_PSRLW128, 0, 0 },
12094 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12095 { MASK_SSE2, CODE_FOR_lshrv4si3_ti, 0, IX86_BUILTIN_PSRLD128, 0, 0 },
12096 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12097 { MASK_SSE2, CODE_FOR_lshrv2di3_ti, 0, IX86_BUILTIN_PSRLQ128, 0, 0 },
12098 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12100 { MASK_SSE2, CODE_FOR_ashrv8hi3_ti, 0, IX86_BUILTIN_PSRAW128, 0, 0 },
12101 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12102 { MASK_SSE2, CODE_FOR_ashrv4si3_ti, 0, IX86_BUILTIN_PSRAD128, 0, 0 },
12103 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12105 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12107 { MASK_SSE2, CODE_FOR_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12108 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12109 { MASK_SSE2, CODE_FOR_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12110 { MASK_SSE2, CODE_FOR_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12112 /* SSE3 */
12113 { MASK_SSE3, CODE_FOR_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12114 { MASK_SSE3, CODE_FOR_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12115 { MASK_SSE3, CODE_FOR_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12116 { MASK_SSE3, CODE_FOR_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12117 { MASK_SSE3, CODE_FOR_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12118 { MASK_SSE3, CODE_FOR_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
12121 static const struct builtin_description bdesc_1arg[] =
12123 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
12124 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
12126 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
12127 { MASK_SSE, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
12128 { MASK_SSE, CODE_FOR_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
12130 { MASK_SSE, CODE_FOR_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
12131 { MASK_SSE, CODE_FOR_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
12132 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
12133 { MASK_SSE, CODE_FOR_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
12134 { MASK_SSE, CODE_FOR_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
12135 { MASK_SSE | MASK_64BIT, CODE_FOR_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
12137 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
12138 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
12139 { MASK_SSE2, CODE_FOR_sse2_movq2dq, 0, IX86_BUILTIN_MOVQ2DQ, 0, 0 },
12140 { MASK_SSE2, CODE_FOR_sse2_movdq2q, 0, IX86_BUILTIN_MOVDQ2Q, 0, 0 },
12142 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
12144 { MASK_SSE2, CODE_FOR_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
12145 { MASK_SSE2, CODE_FOR_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
12147 { MASK_SSE2, CODE_FOR_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
12148 { MASK_SSE2, CODE_FOR_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
12149 { MASK_SSE2, CODE_FOR_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
12150 { MASK_SSE2, CODE_FOR_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
12151 { MASK_SSE2, CODE_FOR_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
12153 { MASK_SSE2, CODE_FOR_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
12155 { MASK_SSE2, CODE_FOR_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
12156 { MASK_SSE2, CODE_FOR_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
12157 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
12158 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
12160 { MASK_SSE2, CODE_FOR_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
12161 { MASK_SSE2, CODE_FOR_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
12162 { MASK_SSE2, CODE_FOR_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
12164 { MASK_SSE2, CODE_FOR_sse2_movq, 0, IX86_BUILTIN_MOVQ, 0, 0 },
12166 /* SSE3 */
12167 { MASK_SSE3, CODE_FOR_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
12168 { MASK_SSE3, CODE_FOR_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
12169 { MASK_SSE3, CODE_FOR_movddup, 0, IX86_BUILTIN_MOVDDUP, 0, 0 }
12172 void
12173 ix86_init_builtins (void)
12175 if (TARGET_MMX)
12176 ix86_init_mmx_sse_builtins ();
12179 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
12180 is zero. Otherwise, if TARGET_SSE is not set, only the MMX builtins
12181 are defined. */
12182 static void
12183 ix86_init_mmx_sse_builtins (void)
12185 const struct builtin_description * d;
12186 size_t i;
12188 tree V16QI_type_node = build_vector_type_for_mode (intQI_type_node, V16QImode);
12189 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
12190 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
12191 tree V2DI_type_node = build_vector_type_for_mode (intDI_type_node, V2DImode);
12192 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
12193 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
12194 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
12195 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
12196 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
12197 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
12199 tree pchar_type_node = build_pointer_type (char_type_node);
12200 tree pcchar_type_node = build_pointer_type (
12201 build_type_variant (char_type_node, 1, 0));
12202 tree pfloat_type_node = build_pointer_type (float_type_node);
12203 tree pcfloat_type_node = build_pointer_type (
12204 build_type_variant (float_type_node, 1, 0));
12205 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
12206 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
12207 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
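/* For illustration: build_type_variant (T, 1, 0) produces the
   const-qualified variant of T, so pcchar_type_node above corresponds
   to the C type "const char *" and pcfloat_type_node to
   "const float *", matching the pointer arguments of the load builtins
   defined further down.  */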
12209 /* Comparisons. */
12210 tree int_ftype_v4sf_v4sf
12211 = build_function_type_list (integer_type_node,
12212 V4SF_type_node, V4SF_type_node, NULL_TREE);
12213 tree v4si_ftype_v4sf_v4sf
12214 = build_function_type_list (V4SI_type_node,
12215 V4SF_type_node, V4SF_type_node, NULL_TREE);
12216 /* MMX/SSE/integer conversions. */
12217 tree int_ftype_v4sf
12218 = build_function_type_list (integer_type_node,
12219 V4SF_type_node, NULL_TREE);
12220 tree int64_ftype_v4sf
12221 = build_function_type_list (long_long_integer_type_node,
12222 V4SF_type_node, NULL_TREE);
12223 tree int_ftype_v8qi
12224 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
12225 tree v4sf_ftype_v4sf_int
12226 = build_function_type_list (V4SF_type_node,
12227 V4SF_type_node, integer_type_node, NULL_TREE);
12228 tree v4sf_ftype_v4sf_int64
12229 = build_function_type_list (V4SF_type_node,
12230 V4SF_type_node, long_long_integer_type_node,
12231 NULL_TREE);
12232 tree v4sf_ftype_v4sf_v2si
12233 = build_function_type_list (V4SF_type_node,
12234 V4SF_type_node, V2SI_type_node, NULL_TREE);
12235 tree int_ftype_v4hi_int
12236 = build_function_type_list (integer_type_node,
12237 V4HI_type_node, integer_type_node, NULL_TREE);
12238 tree v4hi_ftype_v4hi_int_int
12239 = build_function_type_list (V4HI_type_node, V4HI_type_node,
12240 integer_type_node, integer_type_node,
12241 NULL_TREE);
12242 /* Miscellaneous. */
12243 tree v8qi_ftype_v4hi_v4hi
12244 = build_function_type_list (V8QI_type_node,
12245 V4HI_type_node, V4HI_type_node, NULL_TREE);
12246 tree v4hi_ftype_v2si_v2si
12247 = build_function_type_list (V4HI_type_node,
12248 V2SI_type_node, V2SI_type_node, NULL_TREE);
12249 tree v4sf_ftype_v4sf_v4sf_int
12250 = build_function_type_list (V4SF_type_node,
12251 V4SF_type_node, V4SF_type_node,
12252 integer_type_node, NULL_TREE);
12253 tree v2si_ftype_v4hi_v4hi
12254 = build_function_type_list (V2SI_type_node,
12255 V4HI_type_node, V4HI_type_node, NULL_TREE);
12256 tree v4hi_ftype_v4hi_int
12257 = build_function_type_list (V4HI_type_node,
12258 V4HI_type_node, integer_type_node, NULL_TREE);
12259 tree v4hi_ftype_v4hi_di
12260 = build_function_type_list (V4HI_type_node,
12261 V4HI_type_node, long_long_unsigned_type_node,
12262 NULL_TREE);
12263 tree v2si_ftype_v2si_di
12264 = build_function_type_list (V2SI_type_node,
12265 V2SI_type_node, long_long_unsigned_type_node,
12266 NULL_TREE);
12267 tree void_ftype_void
12268 = build_function_type (void_type_node, void_list_node);
12269 tree void_ftype_unsigned
12270 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
12271 tree void_ftype_unsigned_unsigned
12272 = build_function_type_list (void_type_node, unsigned_type_node,
12273 unsigned_type_node, NULL_TREE);
12274 tree void_ftype_pcvoid_unsigned_unsigned
12275 = build_function_type_list (void_type_node, const_ptr_type_node,
12276 unsigned_type_node, unsigned_type_node,
12277 NULL_TREE);
12278 tree unsigned_ftype_void
12279 = build_function_type (unsigned_type_node, void_list_node);
12280 tree di_ftype_void
12281 = build_function_type (long_long_unsigned_type_node, void_list_node);
12282 tree v4sf_ftype_void
12283 = build_function_type (V4SF_type_node, void_list_node);
12284 tree v2si_ftype_v4sf
12285 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
12286 /* Loads/stores. */
12287 tree void_ftype_v8qi_v8qi_pchar
12288 = build_function_type_list (void_type_node,
12289 V8QI_type_node, V8QI_type_node,
12290 pchar_type_node, NULL_TREE);
12291 tree v4sf_ftype_pcfloat
12292 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
12293 /* @@@ the type is bogus */
12294 tree v4sf_ftype_v4sf_pv2si
12295 = build_function_type_list (V4SF_type_node,
12296 V4SF_type_node, pv2si_type_node, NULL_TREE);
12297 tree void_ftype_pv2si_v4sf
12298 = build_function_type_list (void_type_node,
12299 pv2si_type_node, V4SF_type_node, NULL_TREE);
12300 tree void_ftype_pfloat_v4sf
12301 = build_function_type_list (void_type_node,
12302 pfloat_type_node, V4SF_type_node, NULL_TREE);
12303 tree void_ftype_pdi_di
12304 = build_function_type_list (void_type_node,
12305 pdi_type_node, long_long_unsigned_type_node,
12306 NULL_TREE);
12307 tree void_ftype_pv2di_v2di
12308 = build_function_type_list (void_type_node,
12309 pv2di_type_node, V2DI_type_node, NULL_TREE);
12310 /* Normal vector unops. */
12311 tree v4sf_ftype_v4sf
12312 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
12314 /* Normal vector binops. */
12315 tree v4sf_ftype_v4sf_v4sf
12316 = build_function_type_list (V4SF_type_node,
12317 V4SF_type_node, V4SF_type_node, NULL_TREE);
12318 tree v8qi_ftype_v8qi_v8qi
12319 = build_function_type_list (V8QI_type_node,
12320 V8QI_type_node, V8QI_type_node, NULL_TREE);
12321 tree v4hi_ftype_v4hi_v4hi
12322 = build_function_type_list (V4HI_type_node,
12323 V4HI_type_node, V4HI_type_node, NULL_TREE);
12324 tree v2si_ftype_v2si_v2si
12325 = build_function_type_list (V2SI_type_node,
12326 V2SI_type_node, V2SI_type_node, NULL_TREE);
12327 tree di_ftype_di_di
12328 = build_function_type_list (long_long_unsigned_type_node,
12329 long_long_unsigned_type_node,
12330 long_long_unsigned_type_node, NULL_TREE);
12332 tree v2si_ftype_v2sf
12333 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
12334 tree v2sf_ftype_v2si
12335 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
12336 tree v2si_ftype_v2si
12337 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
12338 tree v2sf_ftype_v2sf
12339 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
12340 tree v2sf_ftype_v2sf_v2sf
12341 = build_function_type_list (V2SF_type_node,
12342 V2SF_type_node, V2SF_type_node, NULL_TREE);
12343 tree v2si_ftype_v2sf_v2sf
12344 = build_function_type_list (V2SI_type_node,
12345 V2SF_type_node, V2SF_type_node, NULL_TREE);
12346 tree pint_type_node = build_pointer_type (integer_type_node);
12347 tree pcint_type_node = build_pointer_type (
12348 build_type_variant (integer_type_node, 1, 0));
12349 tree pdouble_type_node = build_pointer_type (double_type_node);
12350 tree pcdouble_type_node = build_pointer_type (
12351 build_type_variant (double_type_node, 1, 0));
12352 tree int_ftype_v2df_v2df
12353 = build_function_type_list (integer_type_node,
12354 V2DF_type_node, V2DF_type_node, NULL_TREE);
12356 tree ti_ftype_void
12357 = build_function_type (intTI_type_node, void_list_node);
12358 tree v2di_ftype_void
12359 = build_function_type (V2DI_type_node, void_list_node);
12360 tree ti_ftype_ti_ti
12361 = build_function_type_list (intTI_type_node,
12362 intTI_type_node, intTI_type_node, NULL_TREE);
12363 tree void_ftype_pcvoid
12364 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
12365 tree v2di_ftype_di
12366 = build_function_type_list (V2DI_type_node,
12367 long_long_unsigned_type_node, NULL_TREE);
12368 tree di_ftype_v2di
12369 = build_function_type_list (long_long_unsigned_type_node,
12370 V2DI_type_node, NULL_TREE);
12371 tree v4sf_ftype_v4si
12372 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
12373 tree v4si_ftype_v4sf
12374 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
12375 tree v2df_ftype_v4si
12376 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
12377 tree v4si_ftype_v2df
12378 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
12379 tree v2si_ftype_v2df
12380 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
12381 tree v4sf_ftype_v2df
12382 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
12383 tree v2df_ftype_v2si
12384 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
12385 tree v2df_ftype_v4sf
12386 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
12387 tree int_ftype_v2df
12388 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
12389 tree int64_ftype_v2df
12390 = build_function_type_list (long_long_integer_type_node,
12391 V2DF_type_node, NULL_TREE);
12392 tree v2df_ftype_v2df_int
12393 = build_function_type_list (V2DF_type_node,
12394 V2DF_type_node, integer_type_node, NULL_TREE);
12395 tree v2df_ftype_v2df_int64
12396 = build_function_type_list (V2DF_type_node,
12397 V2DF_type_node, long_long_integer_type_node,
12398 NULL_TREE);
12399 tree v4sf_ftype_v4sf_v2df
12400 = build_function_type_list (V4SF_type_node,
12401 V4SF_type_node, V2DF_type_node, NULL_TREE);
12402 tree v2df_ftype_v2df_v4sf
12403 = build_function_type_list (V2DF_type_node,
12404 V2DF_type_node, V4SF_type_node, NULL_TREE);
12405 tree v2df_ftype_v2df_v2df_int
12406 = build_function_type_list (V2DF_type_node,
12407 V2DF_type_node, V2DF_type_node,
12408 integer_type_node,
12409 NULL_TREE);
12410 tree v2df_ftype_v2df_pv2si
12411 = build_function_type_list (V2DF_type_node,
12412 V2DF_type_node, pv2si_type_node, NULL_TREE);
12413 tree void_ftype_pv2si_v2df
12414 = build_function_type_list (void_type_node,
12415 pv2si_type_node, V2DF_type_node, NULL_TREE);
12416 tree void_ftype_pdouble_v2df
12417 = build_function_type_list (void_type_node,
12418 pdouble_type_node, V2DF_type_node, NULL_TREE);
12419 tree void_ftype_pint_int
12420 = build_function_type_list (void_type_node,
12421 pint_type_node, integer_type_node, NULL_TREE);
12422 tree void_ftype_v16qi_v16qi_pchar
12423 = build_function_type_list (void_type_node,
12424 V16QI_type_node, V16QI_type_node,
12425 pchar_type_node, NULL_TREE);
12426 tree v2df_ftype_pcdouble
12427 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
12428 tree v2df_ftype_v2df_v2df
12429 = build_function_type_list (V2DF_type_node,
12430 V2DF_type_node, V2DF_type_node, NULL_TREE);
12431 tree v16qi_ftype_v16qi_v16qi
12432 = build_function_type_list (V16QI_type_node,
12433 V16QI_type_node, V16QI_type_node, NULL_TREE);
12434 tree v8hi_ftype_v8hi_v8hi
12435 = build_function_type_list (V8HI_type_node,
12436 V8HI_type_node, V8HI_type_node, NULL_TREE);
12437 tree v4si_ftype_v4si_v4si
12438 = build_function_type_list (V4SI_type_node,
12439 V4SI_type_node, V4SI_type_node, NULL_TREE);
12440 tree v2di_ftype_v2di_v2di
12441 = build_function_type_list (V2DI_type_node,
12442 V2DI_type_node, V2DI_type_node, NULL_TREE);
12443 tree v2di_ftype_v2df_v2df
12444 = build_function_type_list (V2DI_type_node,
12445 V2DF_type_node, V2DF_type_node, NULL_TREE);
12446 tree v2df_ftype_v2df
12447 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
12448 tree v2df_ftype_double
12449 = build_function_type_list (V2DF_type_node, double_type_node, NULL_TREE);
12450 tree v2df_ftype_double_double
12451 = build_function_type_list (V2DF_type_node,
12452 double_type_node, double_type_node, NULL_TREE);
12453 tree int_ftype_v8hi_int
12454 = build_function_type_list (integer_type_node,
12455 V8HI_type_node, integer_type_node, NULL_TREE);
12456 tree v8hi_ftype_v8hi_int_int
12457 = build_function_type_list (V8HI_type_node,
12458 V8HI_type_node, integer_type_node,
12459 integer_type_node, NULL_TREE);
12460 tree v2di_ftype_v2di_int
12461 = build_function_type_list (V2DI_type_node,
12462 V2DI_type_node, integer_type_node, NULL_TREE);
12463 tree v4si_ftype_v4si_int
12464 = build_function_type_list (V4SI_type_node,
12465 V4SI_type_node, integer_type_node, NULL_TREE);
12466 tree v8hi_ftype_v8hi_int
12467 = build_function_type_list (V8HI_type_node,
12468 V8HI_type_node, integer_type_node, NULL_TREE);
12469 tree v8hi_ftype_v8hi_v2di
12470 = build_function_type_list (V8HI_type_node,
12471 V8HI_type_node, V2DI_type_node, NULL_TREE);
12472 tree v4si_ftype_v4si_v2di
12473 = build_function_type_list (V4SI_type_node,
12474 V4SI_type_node, V2DI_type_node, NULL_TREE);
12475 tree v4si_ftype_v8hi_v8hi
12476 = build_function_type_list (V4SI_type_node,
12477 V8HI_type_node, V8HI_type_node, NULL_TREE);
12478 tree di_ftype_v8qi_v8qi
12479 = build_function_type_list (long_long_unsigned_type_node,
12480 V8QI_type_node, V8QI_type_node, NULL_TREE);
12481 tree di_ftype_v2si_v2si
12482 = build_function_type_list (long_long_unsigned_type_node,
12483 V2SI_type_node, V2SI_type_node, NULL_TREE);
12484 tree v2di_ftype_v16qi_v16qi
12485 = build_function_type_list (V2DI_type_node,
12486 V16QI_type_node, V16QI_type_node, NULL_TREE);
12487 tree v2di_ftype_v4si_v4si
12488 = build_function_type_list (V2DI_type_node,
12489 V4SI_type_node, V4SI_type_node, NULL_TREE);
12490 tree int_ftype_v16qi
12491 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
12492 tree v16qi_ftype_pcchar
12493 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
12494 tree void_ftype_pchar_v16qi
12495 = build_function_type_list (void_type_node,
12496 pchar_type_node, V16QI_type_node, NULL_TREE);
12497 tree v4si_ftype_pcint
12498 = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
12499 tree void_ftype_pcint_v4si
12500 = build_function_type_list (void_type_node,
12501 pcint_type_node, V4SI_type_node, NULL_TREE);
12502 tree v2di_ftype_v2di
12503 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
12505 tree float80_type;
12506 tree float128_type;
12508 /* The __float80 type. */
12509 if (TYPE_MODE (long_double_type_node) == XFmode)
12510 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
12511 "__float80");
12512 else
12514 /* The __float80 type, when long double does not use XFmode. */
12515 float80_type = make_node (REAL_TYPE);
12516 TYPE_PRECISION (float80_type) = 80;
12517 layout_type (float80_type);
12518 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
12521 float128_type = make_node (REAL_TYPE);
12522 TYPE_PRECISION (float128_type) = 128;
12523 layout_type (float128_type);
12524 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
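/* For illustration, once registered these types should be usable
   directly in user code compiled for i386, e.g.

       __float80  e;      80-bit x87 extended precision
       __float128 q;      128-bit REAL_TYPE laid out above
*/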
12526 /* Add all builtins that are more or less simple operations on two
12527 operands. */
12528 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
12530 /* Use one of the operands; the target can have a different mode for
12531 mask-generating compares. */
12532 enum machine_mode mode;
12533 tree type;
12535 if (d->name == 0)
12536 continue;
12537 mode = insn_data[d->icode].operand[1].mode;
12539 switch (mode)
12541 case V16QImode:
12542 type = v16qi_ftype_v16qi_v16qi;
12543 break;
12544 case V8HImode:
12545 type = v8hi_ftype_v8hi_v8hi;
12546 break;
12547 case V4SImode:
12548 type = v4si_ftype_v4si_v4si;
12549 break;
12550 case V2DImode:
12551 type = v2di_ftype_v2di_v2di;
12552 break;
12553 case V2DFmode:
12554 type = v2df_ftype_v2df_v2df;
12555 break;
12556 case TImode:
12557 type = ti_ftype_ti_ti;
12558 break;
12559 case V4SFmode:
12560 type = v4sf_ftype_v4sf_v4sf;
12561 break;
12562 case V8QImode:
12563 type = v8qi_ftype_v8qi_v8qi;
12564 break;
12565 case V4HImode:
12566 type = v4hi_ftype_v4hi_v4hi;
12567 break;
12568 case V2SImode:
12569 type = v2si_ftype_v2si_v2si;
12570 break;
12571 case DImode:
12572 type = di_ftype_di_di;
12573 break;
12575 default:
12576 abort ();
12579 /* Override for comparisons. */
12580 if (d->icode == CODE_FOR_maskcmpv4sf3
12581 || d->icode == CODE_FOR_maskncmpv4sf3
12582 || d->icode == CODE_FOR_vmmaskcmpv4sf3
12583 || d->icode == CODE_FOR_vmmaskncmpv4sf3)
12584 type = v4si_ftype_v4sf_v4sf;
12586 if (d->icode == CODE_FOR_maskcmpv2df3
12587 || d->icode == CODE_FOR_maskncmpv2df3
12588 || d->icode == CODE_FOR_vmmaskcmpv2df3
12589 || d->icode == CODE_FOR_vmmaskncmpv2df3)
12590 type = v2di_ftype_v2df_v2df;
12592 def_builtin (d->mask, d->name, type, d->code);
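/* For illustration: for the bdesc_2arg entry naming
   "__builtin_ia32_addps", operand 1 of CODE_FOR_addv4sf3 has mode
   V4SFmode, so the switch above picks v4sf_ftype_v4sf_v4sf and the
   def_builtin call is equivalent to

       def_builtin (MASK_SSE, "__builtin_ia32_addps",
                    v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);
*/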
12595 /* Add the remaining MMX insns with somewhat more complicated types. */
12596 def_builtin (MASK_MMX, "__builtin_ia32_mmx_zero", di_ftype_void, IX86_BUILTIN_MMX_ZERO);
12597 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
12598 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
12599 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
12600 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
12602 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
12603 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
12604 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
12606 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
12607 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
12609 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
12610 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
12612 /* comi/ucomi insns. */
12613 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
12614 if (d->mask == MASK_SSE2)
12615 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
12616 else
12617 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
12619 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
12620 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
12621 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
12623 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
12624 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
12625 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
12626 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
12627 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
12628 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
12629 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
12630 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
12631 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
12632 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
12633 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
12635 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW);
12636 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW);
12638 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
12640 def_builtin (MASK_SSE, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS);
12641 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
12642 def_builtin (MASK_SSE, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS);
12643 def_builtin (MASK_SSE, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS);
12644 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
12645 def_builtin (MASK_SSE, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS);
12647 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
12648 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
12649 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
12650 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
12652 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
12653 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
12654 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
12655 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
12657 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
12659 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
12661 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
12662 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
12663 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
12664 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
12665 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
12666 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
12668 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
12670 /* Original 3DNow! */
12671 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
12672 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
12673 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
12674 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
12675 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
12676 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
12677 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
12678 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
12679 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
12680 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
12681 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
12682 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
12683 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
12684 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
12685 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
12686 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
12687 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
12688 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
12689 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
12690 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
12692 /* 3DNow! extension as used in the Athlon CPU. */
12693 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
12694 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
12695 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
12696 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
12697 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
12698 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
12700 def_builtin (MASK_SSE, "__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO);
12702 /* SSE2 */
12703 def_builtin (MASK_SSE2, "__builtin_ia32_pextrw128", int_ftype_v8hi_int, IX86_BUILTIN_PEXTRW128);
12704 def_builtin (MASK_SSE2, "__builtin_ia32_pinsrw128", v8hi_ftype_v8hi_int_int, IX86_BUILTIN_PINSRW128);
12706 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
12707 def_builtin (MASK_SSE2, "__builtin_ia32_movq2dq", v2di_ftype_di, IX86_BUILTIN_MOVQ2DQ);
12708 def_builtin (MASK_SSE2, "__builtin_ia32_movdq2q", di_ftype_v2di, IX86_BUILTIN_MOVDQ2Q);
12710 def_builtin (MASK_SSE2, "__builtin_ia32_loadapd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADAPD);
12711 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
12712 def_builtin (MASK_SSE2, "__builtin_ia32_loadsd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADSD);
12713 def_builtin (MASK_SSE2, "__builtin_ia32_storeapd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREAPD);
12714 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
12715 def_builtin (MASK_SSE2, "__builtin_ia32_storesd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORESD);
12717 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADHPD);
12718 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADLPD);
12719 def_builtin (MASK_SSE2, "__builtin_ia32_storehpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STOREHPD);
12720 def_builtin (MASK_SSE2, "__builtin_ia32_storelpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STORELPD);
12722 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
12723 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
12724 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
12725 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
12726 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
12728 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
12729 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
12730 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
12731 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
12733 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
12734 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
12736 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
12738 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
12739 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
12741 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
12742 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
12743 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
12744 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
12745 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
12747 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
12749 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
12750 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
12751 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
12752 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
12754 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
12755 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
12756 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
12758 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
12759 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
12760 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
12761 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
12763 def_builtin (MASK_SSE2, "__builtin_ia32_setpd1", v2df_ftype_double, IX86_BUILTIN_SETPD1);
12764 def_builtin (MASK_SSE2, "__builtin_ia32_setpd", v2df_ftype_double_double, IX86_BUILTIN_SETPD);
12765 def_builtin (MASK_SSE2, "__builtin_ia32_setzeropd", ti_ftype_void, IX86_BUILTIN_CLRPD);
12766 def_builtin (MASK_SSE2, "__builtin_ia32_loadpd1", v2df_ftype_pcdouble, IX86_BUILTIN_LOADPD1);
12767 def_builtin (MASK_SSE2, "__builtin_ia32_loadrpd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADRPD);
12768 def_builtin (MASK_SSE2, "__builtin_ia32_storepd1", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREPD1);
12769 def_builtin (MASK_SSE2, "__builtin_ia32_storerpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORERPD);
12771 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
12772 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
12773 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
12775 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqa", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQA);
12776 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
12777 def_builtin (MASK_SSE2, "__builtin_ia32_loadd", v4si_ftype_pcint, IX86_BUILTIN_LOADD);
12778 def_builtin (MASK_SSE2, "__builtin_ia32_storedqa", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQA);
12779 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
12780 def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED);
12781 def_builtin (MASK_SSE2, "__builtin_ia32_movq", v2di_ftype_v2di, IX86_BUILTIN_MOVQ);
12783 def_builtin (MASK_SSE, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI);
12785 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
12786 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
12788 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
12789 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
12790 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
12792 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
12793 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
12794 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
12796 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
12797 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
12799 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
12800 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
12801 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
12802 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
12804 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
12805 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
12806 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
12807 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
12809 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
12810 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
12812 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
12814 /* Prescott New Instructions. */
12815 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
12816 void_ftype_pcvoid_unsigned_unsigned,
12817 IX86_BUILTIN_MONITOR);
12818 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
12819 void_ftype_unsigned_unsigned,
12820 IX86_BUILTIN_MWAIT);
12821 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
12822 v4sf_ftype_v4sf,
12823 IX86_BUILTIN_MOVSHDUP);
12824 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
12825 v4sf_ftype_v4sf,
12826 IX86_BUILTIN_MOVSLDUP);
12827 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
12828 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
12829 def_builtin (MASK_SSE3, "__builtin_ia32_loadddup",
12830 v2df_ftype_pcdouble, IX86_BUILTIN_LOADDDUP);
12831 def_builtin (MASK_SSE3, "__builtin_ia32_movddup",
12832 v2df_ftype_v2df, IX86_BUILTIN_MOVDDUP);
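/* For illustration: the builtins registered above back the intrinsic
   wrappers in mmintrin.h, xmmintrin.h, emmintrin.h and pmmintrin.h, but
   they can also be called directly.  A minimal sketch, assuming -msse3:

     void
     cpu_idle_hint (unsigned eax, unsigned ecx)
     {
       __builtin_ia32_mwait (eax, ecx);
     }

   which matches the void_ftype_unsigned_unsigned signature used for
   IX86_BUILTIN_MWAIT above.  */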
12835 /* Errors in the source file can cause expand_expr to return const0_rtx
12836 where we expect a vector. To avoid crashing, use one of the vector
12837 clear instructions. */
12838 static rtx
12839 safe_vector_operand (rtx x, enum machine_mode mode)
12841 if (x != const0_rtx)
12842 return x;
12843 x = gen_reg_rtx (mode);
12845 if (VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode))
12846 emit_insn (gen_mmx_clrdi (mode == DImode ? x
12847 : gen_rtx_SUBREG (DImode, x, 0)));
12848 else
12849 emit_insn (gen_sse_clrv4sf (mode == V4SFmode ? x
12850 : gen_rtx_SUBREG (V4SFmode, x, 0),
12851 CONST0_RTX (V4SFmode)));
12852 return x;
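/* Substituting a cleared vector register here lets compilation continue
   after the erroneous operand has already been diagnosed, instead of
   crashing on the const0_rtx that expand_expr returned for it.  */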
12855 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
12857 static rtx
12858 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
12860 rtx pat;
12861 tree arg0 = TREE_VALUE (arglist);
12862 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12863 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12864 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12865 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12866 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12867 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12869 if (VECTOR_MODE_P (mode0))
12870 op0 = safe_vector_operand (op0, mode0);
12871 if (VECTOR_MODE_P (mode1))
12872 op1 = safe_vector_operand (op1, mode1);
12874 if (! target
12875 || GET_MODE (target) != tmode
12876 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12877 target = gen_reg_rtx (tmode);
12879 if (GET_MODE (op1) == SImode && mode1 == TImode)
12881 rtx x = gen_reg_rtx (V4SImode);
12882 emit_insn (gen_sse2_loadd (x, op1));
12883 op1 = gen_lowpart (TImode, x);
12886 /* In case the insn wants input operands in modes different from
12887 the result, abort. */
12888 if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
12889 || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
12890 abort ();
12892 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12893 op0 = copy_to_mode_reg (mode0, op0);
12894 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12895 op1 = copy_to_mode_reg (mode1, op1);
12897 /* In the commutative cases, both op0 and op1 are nonimmediate_operand,
12898 yet one of the two must not be a memory. This is normally enforced
12899 by expanders, but we didn't bother to create one here. */
12900 if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
12901 op0 = copy_to_mode_reg (mode0, op0);
12903 pat = GEN_FCN (icode) (target, op0, op1);
12904 if (! pat)
12905 return 0;
12906 emit_insn (pat);
12907 return target;
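/* For illustration: most two-operand builtins reach this helper through
   the bdesc_2arg descriptor table, e.g. __builtin_ia32_paddb (a, b)
   expands here with an icode such as CODE_FOR_addv8qi3, the operands
   being copied into registers as the insn predicates require.  */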
12910 /* Subroutine of ix86_expand_builtin to take care of stores. */
12912 static rtx
12913 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
12915 rtx pat;
12916 tree arg0 = TREE_VALUE (arglist);
12917 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
12918 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12919 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
12920 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12921 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12923 if (VECTOR_MODE_P (mode1))
12924 op1 = safe_vector_operand (op1, mode1);
12926 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12927 op1 = copy_to_mode_reg (mode1, op1);
12929 pat = GEN_FCN (icode) (op0, op1);
12930 if (pat)
12931 emit_insn (pat);
12932 return 0;
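/* For illustration: store builtins take the pointer first and the value
   second, e.g. __builtin_ia32_storeupd (double *, v2df); operand 0 of the
   insn becomes a MEM built from the pointer argument and operand 1 is the
   value copied into a register.  */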
12935 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
12937 static rtx
12938 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
12939 rtx target, int do_load)
12941 rtx pat;
12942 tree arg0 = TREE_VALUE (arglist);
12943 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12944 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12945 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12947 if (! target
12948 || GET_MODE (target) != tmode
12949 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12950 target = gen_reg_rtx (tmode);
12951 if (do_load)
12952 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12953 else
12955 if (VECTOR_MODE_P (mode0))
12956 op0 = safe_vector_operand (op0, mode0);
12958 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12959 op0 = copy_to_mode_reg (mode0, op0);
12962 pat = GEN_FCN (icode) (target, op0);
12963 if (! pat)
12964 return 0;
12965 emit_insn (pat);
12966 return target;
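/* For illustration: DO_LOAD distinguishes builtins whose single argument
   is a pointer (e.g. __builtin_ia32_loadupd, expanded with do_load == 1 so
   the operand becomes a MEM) from plain register unops such as
   __builtin_ia32_pf2id, which pass do_load == 0.  */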
12969 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
12970 sqrtss, rsqrtss, rcpss. */
12972 static rtx
12973 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
12975 rtx pat;
12976 tree arg0 = TREE_VALUE (arglist);
12977 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
12978 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12979 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12981 if (! target
12982 || GET_MODE (target) != tmode
12983 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12984 target = gen_reg_rtx (tmode);
12986 if (VECTOR_MODE_P (mode0))
12987 op0 = safe_vector_operand (op0, mode0);
12989 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12990 op0 = copy_to_mode_reg (mode0, op0);
12992 op1 = op0;
12993 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
12994 op1 = copy_to_mode_reg (mode0, op1);
12996 pat = GEN_FCN (icode) (target, op0, op1);
12997 if (! pat)
12998 return 0;
12999 emit_insn (pat);
13000 return target;
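/* The vm* scalar patterns take two vector inputs because instructions
   like sqrtss replace only element 0 and merge the remaining elements
   from the other operand; passing op0 twice keeps those elements
   unchanged.  */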
13003 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
13005 static rtx
13006 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13007 rtx target)
13009 rtx pat;
13010 tree arg0 = TREE_VALUE (arglist);
13011 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13012 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13013 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13014 rtx op2;
13015 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13016 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13017 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13018 enum rtx_code comparison = d->comparison;
13020 if (VECTOR_MODE_P (mode0))
13021 op0 = safe_vector_operand (op0, mode0);
13022 if (VECTOR_MODE_P (mode1))
13023 op1 = safe_vector_operand (op1, mode1);
13025 /* Swap operands if we have a comparison that isn't available in
13026 hardware. */
13027 if (d->flag)
13029 rtx tmp = gen_reg_rtx (mode1);
13030 emit_move_insn (tmp, op1);
13031 op1 = op0;
13032 op0 = tmp;
13035 if (! target
13036 || GET_MODE (target) != tmode
13037 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13038 target = gen_reg_rtx (tmode);
13040 if (! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13041 op0 = copy_to_mode_reg (mode0, op0);
13042 if (! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13043 op1 = copy_to_mode_reg (mode1, op1);
13045 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13046 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
13047 if (! pat)
13048 return 0;
13049 emit_insn (pat);
13050 return target;
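/* For illustration: d->flag marks comparisons the hardware cannot encode
   directly; e.g. __builtin_ia32_cmpgtps is expanded by swapping the
   operands and emitting the LT form that cmpps does provide.  */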
13053 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
13055 static rtx
13056 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13057 rtx target)
13059 rtx pat;
13060 tree arg0 = TREE_VALUE (arglist);
13061 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13062 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13063 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13064 rtx op2;
13065 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13066 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13067 enum rtx_code comparison = d->comparison;
13069 if (VECTOR_MODE_P (mode0))
13070 op0 = safe_vector_operand (op0, mode0);
13071 if (VECTOR_MODE_P (mode1))
13072 op1 = safe_vector_operand (op1, mode1);
13074 /* Swap operands if we have a comparison that isn't available in
13075 hardware. */
13076 if (d->flag)
13078 rtx tmp = op1;
13079 op1 = op0;
13080 op0 = tmp;
13083 target = gen_reg_rtx (SImode);
13084 emit_move_insn (target, const0_rtx);
13085 target = gen_rtx_SUBREG (QImode, target, 0);
13087 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13088 op0 = copy_to_mode_reg (mode0, op0);
13089 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13090 op1 = copy_to_mode_reg (mode1, op1);
13092 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13093 pat = GEN_FCN (d->icode) (op0, op1);
13094 if (! pat)
13095 return 0;
13096 emit_insn (pat);
13097 emit_insn (gen_rtx_SET (VOIDmode,
13098 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13099 gen_rtx_fmt_ee (comparison, QImode,
13100 SET_DEST (pat),
13101 const0_rtx)));
13103 return SUBREG_REG (target);
13106 /* Expand an expression EXP that calls a built-in function,
13107 with result going to TARGET if that's convenient
13108 (and in mode MODE if that's convenient).
13109 SUBTARGET may be used as the target for computing one of EXP's operands.
13110 IGNORE is nonzero if the value is to be ignored. */
13112 rtx
13113 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13114 enum machine_mode mode ATTRIBUTE_UNUSED,
13115 int ignore ATTRIBUTE_UNUSED)
13117 const struct builtin_description *d;
13118 size_t i;
13119 enum insn_code icode;
13120 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
13121 tree arglist = TREE_OPERAND (exp, 1);
13122 tree arg0, arg1, arg2;
13123 rtx op0, op1, op2, pat;
13124 enum machine_mode tmode, mode0, mode1, mode2;
13125 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
13127 switch (fcode)
13129 case IX86_BUILTIN_EMMS:
13130 emit_insn (gen_emms ());
13131 return 0;
13133 case IX86_BUILTIN_SFENCE:
13134 emit_insn (gen_sfence ());
13135 return 0;
13137 case IX86_BUILTIN_PEXTRW:
13138 case IX86_BUILTIN_PEXTRW128:
13139 icode = (fcode == IX86_BUILTIN_PEXTRW
13140 ? CODE_FOR_mmx_pextrw
13141 : CODE_FOR_sse2_pextrw);
13142 arg0 = TREE_VALUE (arglist);
13143 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13144 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13145 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13146 tmode = insn_data[icode].operand[0].mode;
13147 mode0 = insn_data[icode].operand[1].mode;
13148 mode1 = insn_data[icode].operand[2].mode;
13150 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13151 op0 = copy_to_mode_reg (mode0, op0);
13152 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13154 error ("selector must be an integer constant in the range 0..%i",
13155 fcode == IX86_BUILTIN_PEXTRW ? 3:7);
13156 return gen_reg_rtx (tmode);
13158 if (target == 0
13159 || GET_MODE (target) != tmode
13160 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13161 target = gen_reg_rtx (tmode);
13162 pat = GEN_FCN (icode) (target, op0, op1);
13163 if (! pat)
13164 return 0;
13165 emit_insn (pat);
13166 return target;
13168 case IX86_BUILTIN_PINSRW:
13169 case IX86_BUILTIN_PINSRW128:
13170 icode = (fcode == IX86_BUILTIN_PINSRW
13171 ? CODE_FOR_mmx_pinsrw
13172 : CODE_FOR_sse2_pinsrw);
13173 arg0 = TREE_VALUE (arglist);
13174 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13175 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13176 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13177 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13178 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13179 tmode = insn_data[icode].operand[0].mode;
13180 mode0 = insn_data[icode].operand[1].mode;
13181 mode1 = insn_data[icode].operand[2].mode;
13182 mode2 = insn_data[icode].operand[3].mode;
13184 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13185 op0 = copy_to_mode_reg (mode0, op0);
13186 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13187 op1 = copy_to_mode_reg (mode1, op1);
13188 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13190 error ("selector must be an integer constant in the range 0..%i",
13191 fcode == IX86_BUILTIN_PINSRW ? 15:255);
13192 return const0_rtx;
13194 if (target == 0
13195 || GET_MODE (target) != tmode
13196 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13197 target = gen_reg_rtx (tmode);
13198 pat = GEN_FCN (icode) (target, op0, op1, op2);
13199 if (! pat)
13200 return 0;
13201 emit_insn (pat);
13202 return target;
13204 case IX86_BUILTIN_MASKMOVQ:
13205 case IX86_BUILTIN_MASKMOVDQU:
13206 icode = (fcode == IX86_BUILTIN_MASKMOVQ
13207 ? (TARGET_64BIT ? CODE_FOR_mmx_maskmovq_rex : CODE_FOR_mmx_maskmovq)
13208 : (TARGET_64BIT ? CODE_FOR_sse2_maskmovdqu_rex64
13209 : CODE_FOR_sse2_maskmovdqu));
13210 /* Note the arg order is different from the operand order. */
13211 arg1 = TREE_VALUE (arglist);
13212 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
13213 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13214 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13215 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13216 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13217 mode0 = insn_data[icode].operand[0].mode;
13218 mode1 = insn_data[icode].operand[1].mode;
13219 mode2 = insn_data[icode].operand[2].mode;
13221 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13222 op0 = copy_to_mode_reg (mode0, op0);
13223 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13224 op1 = copy_to_mode_reg (mode1, op1);
13225 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
13226 op2 = copy_to_mode_reg (mode2, op2);
13227 pat = GEN_FCN (icode) (op0, op1, op2);
13228 if (! pat)
13229 return 0;
13230 emit_insn (pat);
13231 return 0;
13233 case IX86_BUILTIN_SQRTSS:
13234 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv4sf2, arglist, target);
13235 case IX86_BUILTIN_RSQRTSS:
13236 return ix86_expand_unop1_builtin (CODE_FOR_vmrsqrtv4sf2, arglist, target);
13237 case IX86_BUILTIN_RCPSS:
13238 return ix86_expand_unop1_builtin (CODE_FOR_vmrcpv4sf2, arglist, target);
13240 case IX86_BUILTIN_LOADAPS:
13241 return ix86_expand_unop_builtin (CODE_FOR_sse_movaps, arglist, target, 1);
13243 case IX86_BUILTIN_LOADUPS:
13244 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
13246 case IX86_BUILTIN_STOREAPS:
13247 return ix86_expand_store_builtin (CODE_FOR_sse_movaps, arglist);
13249 case IX86_BUILTIN_STOREUPS:
13250 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
13252 case IX86_BUILTIN_LOADSS:
13253 return ix86_expand_unop_builtin (CODE_FOR_sse_loadss, arglist, target, 1);
13255 case IX86_BUILTIN_STORESS:
13256 return ix86_expand_store_builtin (CODE_FOR_sse_storess, arglist);
13258 case IX86_BUILTIN_LOADHPS:
13259 case IX86_BUILTIN_LOADLPS:
13260 case IX86_BUILTIN_LOADHPD:
13261 case IX86_BUILTIN_LOADLPD:
13262 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_movhps
13263 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_movlps
13264 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_movhpd
13265 : CODE_FOR_sse2_movsd);
13266 arg0 = TREE_VALUE (arglist);
13267 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13268 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13269 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13270 tmode = insn_data[icode].operand[0].mode;
13271 mode0 = insn_data[icode].operand[1].mode;
13272 mode1 = insn_data[icode].operand[2].mode;
13274 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13275 op0 = copy_to_mode_reg (mode0, op0);
13276 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
13277 if (target == 0
13278 || GET_MODE (target) != tmode
13279 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13280 target = gen_reg_rtx (tmode);
13281 pat = GEN_FCN (icode) (target, op0, op1);
13282 if (! pat)
13283 return 0;
13284 emit_insn (pat);
13285 return target;
13287 case IX86_BUILTIN_STOREHPS:
13288 case IX86_BUILTIN_STORELPS:
13289 case IX86_BUILTIN_STOREHPD:
13290 case IX86_BUILTIN_STORELPD:
13291 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_movhps
13292 : fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_movlps
13293 : fcode == IX86_BUILTIN_STOREHPD ? CODE_FOR_sse2_movhpd
13294 : CODE_FOR_sse2_movsd);
13295 arg0 = TREE_VALUE (arglist);
13296 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13297 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13298 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13299 mode0 = insn_data[icode].operand[1].mode;
13300 mode1 = insn_data[icode].operand[2].mode;
13302 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13303 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13304 op1 = copy_to_mode_reg (mode1, op1);
13306 pat = GEN_FCN (icode) (op0, op0, op1);
13307 if (! pat)
13308 return 0;
13309 emit_insn (pat);
13310 return 0;
13312 case IX86_BUILTIN_MOVNTPS:
13313 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
13314 case IX86_BUILTIN_MOVNTQ:
13315 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
13317 case IX86_BUILTIN_LDMXCSR:
13318 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
13319 target = assign_386_stack_local (SImode, 0);
13320 emit_move_insn (target, op0);
13321 emit_insn (gen_ldmxcsr (target));
13322 return 0;
13324 case IX86_BUILTIN_STMXCSR:
13325 target = assign_386_stack_local (SImode, 0);
13326 emit_insn (gen_stmxcsr (target));
13327 return copy_to_mode_reg (SImode, target);
13329 case IX86_BUILTIN_SHUFPS:
13330 case IX86_BUILTIN_SHUFPD:
13331 icode = (fcode == IX86_BUILTIN_SHUFPS
13332 ? CODE_FOR_sse_shufps
13333 : CODE_FOR_sse2_shufpd);
13334 arg0 = TREE_VALUE (arglist);
13335 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13336 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13337 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13338 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13339 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13340 tmode = insn_data[icode].operand[0].mode;
13341 mode0 = insn_data[icode].operand[1].mode;
13342 mode1 = insn_data[icode].operand[2].mode;
13343 mode2 = insn_data[icode].operand[3].mode;
13345 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13346 op0 = copy_to_mode_reg (mode0, op0);
13347 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13348 op1 = copy_to_mode_reg (mode1, op1);
13349 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13351 /* @@@ better error message */
13352 error ("mask must be an immediate");
13353 return gen_reg_rtx (tmode);
13355 if (target == 0
13356 || GET_MODE (target) != tmode
13357 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13358 target = gen_reg_rtx (tmode);
13359 pat = GEN_FCN (icode) (target, op0, op1, op2);
13360 if (! pat)
13361 return 0;
13362 emit_insn (pat);
13363 return target;
13365 case IX86_BUILTIN_PSHUFW:
13366 case IX86_BUILTIN_PSHUFD:
13367 case IX86_BUILTIN_PSHUFHW:
13368 case IX86_BUILTIN_PSHUFLW:
13369 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
13370 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
13371 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
13372 : CODE_FOR_mmx_pshufw);
13373 arg0 = TREE_VALUE (arglist);
13374 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13375 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13376 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13377 tmode = insn_data[icode].operand[0].mode;
13378 mode1 = insn_data[icode].operand[1].mode;
13379 mode2 = insn_data[icode].operand[2].mode;
13381 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13382 op0 = copy_to_mode_reg (mode1, op0);
13383 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13385 /* @@@ better error message */
13386 error ("mask must be an immediate");
13387 return const0_rtx;
13389 if (target == 0
13390 || GET_MODE (target) != tmode
13391 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13392 target = gen_reg_rtx (tmode);
13393 pat = GEN_FCN (icode) (target, op0, op1);
13394 if (! pat)
13395 return 0;
13396 emit_insn (pat);
13397 return target;
13399 case IX86_BUILTIN_PSLLDQI128:
13400 case IX86_BUILTIN_PSRLDQI128:
13401 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
13402 : CODE_FOR_sse2_lshrti3);
13403 arg0 = TREE_VALUE (arglist);
13404 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13405 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13406 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13407 tmode = insn_data[icode].operand[0].mode;
13408 mode1 = insn_data[icode].operand[1].mode;
13409 mode2 = insn_data[icode].operand[2].mode;
13411 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13413 op0 = copy_to_reg (op0);
13414 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
13416 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13418 error ("shift must be an immediate");
13419 return const0_rtx;
13421 target = gen_reg_rtx (V2DImode);
13422 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
13423 if (! pat)
13424 return 0;
13425 emit_insn (pat);
13426 return target;
13428 case IX86_BUILTIN_FEMMS:
13429 emit_insn (gen_femms ());
13430 return NULL_RTX;
13432 case IX86_BUILTIN_PAVGUSB:
13433 return ix86_expand_binop_builtin (CODE_FOR_pavgusb, arglist, target);
13435 case IX86_BUILTIN_PF2ID:
13436 return ix86_expand_unop_builtin (CODE_FOR_pf2id, arglist, target, 0);
13438 case IX86_BUILTIN_PFACC:
13439 return ix86_expand_binop_builtin (CODE_FOR_pfacc, arglist, target);
13441 case IX86_BUILTIN_PFADD:
13442 return ix86_expand_binop_builtin (CODE_FOR_addv2sf3, arglist, target);
13444 case IX86_BUILTIN_PFCMPEQ:
13445 return ix86_expand_binop_builtin (CODE_FOR_eqv2sf3, arglist, target);
13447 case IX86_BUILTIN_PFCMPGE:
13448 return ix86_expand_binop_builtin (CODE_FOR_gev2sf3, arglist, target);
13450 case IX86_BUILTIN_PFCMPGT:
13451 return ix86_expand_binop_builtin (CODE_FOR_gtv2sf3, arglist, target);
13453 case IX86_BUILTIN_PFMAX:
13454 return ix86_expand_binop_builtin (CODE_FOR_pfmaxv2sf3, arglist, target);
13456 case IX86_BUILTIN_PFMIN:
13457 return ix86_expand_binop_builtin (CODE_FOR_pfminv2sf3, arglist, target);
13459 case IX86_BUILTIN_PFMUL:
13460 return ix86_expand_binop_builtin (CODE_FOR_mulv2sf3, arglist, target);
13462 case IX86_BUILTIN_PFRCP:
13463 return ix86_expand_unop_builtin (CODE_FOR_pfrcpv2sf2, arglist, target, 0);
13465 case IX86_BUILTIN_PFRCPIT1:
13466 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit1v2sf3, arglist, target);
13468 case IX86_BUILTIN_PFRCPIT2:
13469 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit2v2sf3, arglist, target);
13471 case IX86_BUILTIN_PFRSQIT1:
13472 return ix86_expand_binop_builtin (CODE_FOR_pfrsqit1v2sf3, arglist, target);
13474 case IX86_BUILTIN_PFRSQRT:
13475 return ix86_expand_unop_builtin (CODE_FOR_pfrsqrtv2sf2, arglist, target, 0);
13477 case IX86_BUILTIN_PFSUB:
13478 return ix86_expand_binop_builtin (CODE_FOR_subv2sf3, arglist, target);
13480 case IX86_BUILTIN_PFSUBR:
13481 return ix86_expand_binop_builtin (CODE_FOR_subrv2sf3, arglist, target);
13483 case IX86_BUILTIN_PI2FD:
13484 return ix86_expand_unop_builtin (CODE_FOR_floatv2si2, arglist, target, 0);
13486 case IX86_BUILTIN_PMULHRW:
13487 return ix86_expand_binop_builtin (CODE_FOR_pmulhrwv4hi3, arglist, target);
13489 case IX86_BUILTIN_PF2IW:
13490 return ix86_expand_unop_builtin (CODE_FOR_pf2iw, arglist, target, 0);
13492 case IX86_BUILTIN_PFNACC:
13493 return ix86_expand_binop_builtin (CODE_FOR_pfnacc, arglist, target);
13495 case IX86_BUILTIN_PFPNACC:
13496 return ix86_expand_binop_builtin (CODE_FOR_pfpnacc, arglist, target);
13498 case IX86_BUILTIN_PI2FW:
13499 return ix86_expand_unop_builtin (CODE_FOR_pi2fw, arglist, target, 0);
13501 case IX86_BUILTIN_PSWAPDSI:
13502 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2si2, arglist, target, 0);
13504 case IX86_BUILTIN_PSWAPDSF:
13505 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2sf2, arglist, target, 0);
13507 case IX86_BUILTIN_SSE_ZERO:
13508 target = gen_reg_rtx (V4SFmode);
13509 emit_insn (gen_sse_clrv4sf (target, CONST0_RTX (V4SFmode)));
13510 return target;
13512 case IX86_BUILTIN_MMX_ZERO:
13513 target = gen_reg_rtx (DImode);
13514 emit_insn (gen_mmx_clrdi (target));
13515 return target;
13517 case IX86_BUILTIN_CLRTI:
13518 target = gen_reg_rtx (V2DImode);
13519 emit_insn (gen_sse2_clrti (simplify_gen_subreg (TImode, target, V2DImode, 0)));
13520 return target;
13523 case IX86_BUILTIN_SQRTSD:
13524 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv2df2, arglist, target);
13525 case IX86_BUILTIN_LOADAPD:
13526 return ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist, target, 1);
13527 case IX86_BUILTIN_LOADUPD:
13528 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
13530 case IX86_BUILTIN_STOREAPD:
13531 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13532 case IX86_BUILTIN_STOREUPD:
13533 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
13535 case IX86_BUILTIN_LOADSD:
13536 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, target, 1);
13538 case IX86_BUILTIN_STORESD:
13539 return ix86_expand_store_builtin (CODE_FOR_sse2_storesd, arglist);
13541 case IX86_BUILTIN_SETPD1:
13542 target = assign_386_stack_local (DFmode, 0);
13543 arg0 = TREE_VALUE (arglist);
13544 emit_move_insn (adjust_address (target, DFmode, 0),
13545 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
13546 op0 = gen_reg_rtx (V2DFmode);
13547 emit_insn (gen_sse2_loadsd (op0, adjust_address (target, V2DFmode, 0)));
13548 emit_insn (gen_sse2_shufpd (op0, op0, op0, const0_rtx));
13549 return op0;
13551 case IX86_BUILTIN_SETPD:
13552 target = assign_386_stack_local (V2DFmode, 0);
13553 arg0 = TREE_VALUE (arglist);
13554 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13555 emit_move_insn (adjust_address (target, DFmode, 0),
13556 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
13557 emit_move_insn (adjust_address (target, DFmode, 8),
13558 expand_expr (arg1, NULL_RTX, VOIDmode, 0));
13559 op0 = gen_reg_rtx (V2DFmode);
13560 emit_insn (gen_sse2_movapd (op0, target));
13561 return op0;
13563 case IX86_BUILTIN_LOADRPD:
13564 target = ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist,
13565 gen_reg_rtx (V2DFmode), 1);
13566 emit_insn (gen_sse2_shufpd (target, target, target, const1_rtx));
13567 return target;
13569 case IX86_BUILTIN_LOADPD1:
13570 target = ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist,
13571 gen_reg_rtx (V2DFmode), 1);
13572 emit_insn (gen_sse2_shufpd (target, target, target, const0_rtx));
13573 return target;
13575 case IX86_BUILTIN_STOREPD1:
13576 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13577 case IX86_BUILTIN_STORERPD:
13578 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
13580 case IX86_BUILTIN_CLRPD:
13581 target = gen_reg_rtx (V2DFmode);
13582 emit_insn (gen_sse_clrv2df (target));
13583 return target;
13585 case IX86_BUILTIN_MFENCE:
13586 emit_insn (gen_sse2_mfence ());
13587 return 0;
13588 case IX86_BUILTIN_LFENCE:
13589 emit_insn (gen_sse2_lfence ());
13590 return 0;
13592 case IX86_BUILTIN_CLFLUSH:
13593 arg0 = TREE_VALUE (arglist);
13594 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13595 icode = CODE_FOR_sse2_clflush;
13596 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
13597 op0 = copy_to_mode_reg (Pmode, op0);
13599 emit_insn (gen_sse2_clflush (op0));
13600 return 0;
13602 case IX86_BUILTIN_MOVNTPD:
13603 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
13604 case IX86_BUILTIN_MOVNTDQ:
13605 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
13606 case IX86_BUILTIN_MOVNTI:
13607 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
13609 case IX86_BUILTIN_LOADDQA:
13610 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqa, arglist, target, 1);
13611 case IX86_BUILTIN_LOADDQU:
13612 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
13613 case IX86_BUILTIN_LOADD:
13614 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadd, arglist, target, 1);
13616 case IX86_BUILTIN_STOREDQA:
13617 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqa, arglist);
13618 case IX86_BUILTIN_STOREDQU:
13619 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
13620 case IX86_BUILTIN_STORED:
13621 return ix86_expand_store_builtin (CODE_FOR_sse2_stored, arglist);
13623 case IX86_BUILTIN_MONITOR:
13624 arg0 = TREE_VALUE (arglist);
13625 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13626 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
13627 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13628 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13629 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
13630 if (!REG_P (op0))
13631 op0 = copy_to_mode_reg (SImode, op0);
13632 if (!REG_P (op1))
13633 op1 = copy_to_mode_reg (SImode, op1);
13634 if (!REG_P (op2))
13635 op2 = copy_to_mode_reg (SImode, op2);
13636 emit_insn (gen_monitor (op0, op1, op2));
13637 return 0;
13639 case IX86_BUILTIN_MWAIT:
13640 arg0 = TREE_VALUE (arglist);
13641 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13642 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13643 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13644 if (!REG_P (op0))
13645 op0 = copy_to_mode_reg (SImode, op0);
13646 if (!REG_P (op1))
13647 op1 = copy_to_mode_reg (SImode, op1);
13648 emit_insn (gen_mwait (op0, op1));
13649 return 0;
13651 case IX86_BUILTIN_LOADDDUP:
13652 return ix86_expand_unop_builtin (CODE_FOR_loadddup, arglist, target, 1);
13654 case IX86_BUILTIN_LDDQU:
13655 return ix86_expand_unop_builtin (CODE_FOR_lddqu, arglist, target,
13656 1);
13658 default:
13659 break;
13662 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13663 if (d->code == fcode)
13665 /* Compares are treated specially. */
13666 if (d->icode == CODE_FOR_maskcmpv4sf3
13667 || d->icode == CODE_FOR_vmmaskcmpv4sf3
13668 || d->icode == CODE_FOR_maskncmpv4sf3
13669 || d->icode == CODE_FOR_vmmaskncmpv4sf3
13670 || d->icode == CODE_FOR_maskcmpv2df3
13671 || d->icode == CODE_FOR_vmmaskcmpv2df3
13672 || d->icode == CODE_FOR_maskncmpv2df3
13673 || d->icode == CODE_FOR_vmmaskncmpv2df3)
13674 return ix86_expand_sse_compare (d, arglist, target);
13676 return ix86_expand_binop_builtin (d->icode, arglist, target);
13679 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13680 if (d->code == fcode)
13681 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
13683 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13684 if (d->code == fcode)
13685 return ix86_expand_sse_comi (d, arglist, target);
13687 /* @@@ Should really do something sensible here. */
13688 return 0;
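/* Builtins not handled by the switch above are dispatched through the
   descriptor tables in order: bdesc_2arg (with the mask-compare icodes
   routed to ix86_expand_sse_compare), then bdesc_1arg, then bdesc_comi;
   an unknown code falls through and returns 0.  */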
13691 /* Store OPERAND to the memory after reload is completed. This means
13692 that we can't easily use assign_stack_local. */
13693 rtx
13694 ix86_force_to_memory (enum machine_mode mode, rtx operand)
13696 rtx result;
13697 if (!reload_completed)
13698 abort ();
13699 if (TARGET_RED_ZONE)
13701 result = gen_rtx_MEM (mode,
13702 gen_rtx_PLUS (Pmode,
13703 stack_pointer_rtx,
13704 GEN_INT (-RED_ZONE_SIZE)));
13705 emit_move_insn (result, operand);
13707 else if (!TARGET_RED_ZONE && TARGET_64BIT)
13709 switch (mode)
13711 case HImode:
13712 case SImode:
13713 operand = gen_lowpart (DImode, operand);
13714 /* FALLTHRU */
13715 case DImode:
13716 emit_insn (
13717 gen_rtx_SET (VOIDmode,
13718 gen_rtx_MEM (DImode,
13719 gen_rtx_PRE_DEC (DImode,
13720 stack_pointer_rtx)),
13721 operand));
13722 break;
13723 default:
13724 abort ();
13726 result = gen_rtx_MEM (mode, stack_pointer_rtx);
13728 else
13730 switch (mode)
13732 case DImode:
13734 rtx operands[2];
13735 split_di (&operand, 1, operands, operands + 1);
13736 emit_insn (
13737 gen_rtx_SET (VOIDmode,
13738 gen_rtx_MEM (SImode,
13739 gen_rtx_PRE_DEC (Pmode,
13740 stack_pointer_rtx)),
13741 operands[1]));
13742 emit_insn (
13743 gen_rtx_SET (VOIDmode,
13744 gen_rtx_MEM (SImode,
13745 gen_rtx_PRE_DEC (Pmode,
13746 stack_pointer_rtx)),
13747 operands[0]));
13749 break;
13750 case HImode:
13751 /* It is better to store HImodes as SImodes. */
13752 if (!TARGET_PARTIAL_REG_STALL)
13753 operand = gen_lowpart (SImode, operand);
13754 /* FALLTHRU */
13755 case SImode:
13756 emit_insn (
13757 gen_rtx_SET (VOIDmode,
13758 gen_rtx_MEM (GET_MODE (operand),
13759 gen_rtx_PRE_DEC (SImode,
13760 stack_pointer_rtx)),
13761 operand));
13762 break;
13763 default:
13764 abort ();
13766 result = gen_rtx_MEM (mode, stack_pointer_rtx);
13768 return result;
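/* For illustration: on 64-bit targets with a red zone the operand is
   simply stored below the stack pointer; otherwise it is pushed (HImode
   and SImode widened to DImode on 64-bit, DImode split into two SImode
   pushes on 32-bit) and the returned MEM addresses the new top of
   stack.  */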
13771 /* Free operand from the memory. */
13772 void
13773 ix86_free_from_memory (enum machine_mode mode)
13775 if (!TARGET_RED_ZONE)
13777 int size;
13779 if (mode == DImode || TARGET_64BIT)
13780 size = 8;
13781 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
13782 size = 2;
13783 else
13784 size = 4;
13785 /* Use LEA to deallocate stack space. In peephole2 it will be converted
13786 to a pop or add instruction if registers are available. */
13787 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
13788 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
13789 GEN_INT (size))));
13793 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
13794 QImode must go into class Q_REGS.
13795 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
13796 movdf to do mem-to-mem moves through integer regs. */
13797 enum reg_class
13798 ix86_preferred_reload_class (rtx x, enum reg_class class)
13800 if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
13801 return NO_REGS;
13802 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
13804 /* SSE can't load any constant directly yet. */
13805 if (SSE_CLASS_P (class))
13806 return NO_REGS;
13807 /* Floats can load 0 and 1. */
13808 if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
13810 /* Limit class to non-SSE. Use GENERAL_REGS if possible. */
13811 if (MAYBE_SSE_CLASS_P (class))
13812 return (reg_class_subset_p (class, GENERAL_REGS)
13813 ? GENERAL_REGS : FLOAT_REGS);
13814 else
13815 return class;
13817 /* General regs can load everything. */
13818 if (reg_class_subset_p (class, GENERAL_REGS))
13819 return GENERAL_REGS;
13820 /* In case we haven't resolved FLOAT or SSE yet, give up. */
13821 if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
13822 return NO_REGS;
13824 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
13825 return NO_REGS;
13826 if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
13827 return Q_REGS;
13828 return class;
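/* For illustration: a DFmode CONST_DOUBLE headed for an SSE class is
   rejected (NO_REGS) and ends up being reloaded from memory, while
   constants such as 0.0 and 1.0 recognized by standard_80387_constant_p
   may stay in x87 registers.  */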
13831 /* If we are copying between general and FP registers, we need a memory
13832 location. The same is true for SSE and MMX registers.
13834 The macro can't work reliably when one of the CLASSES is a class containing
13835 registers from multiple units (SSE, MMX, integer). We avoid this by never
13836 combining those units in a single alternative in the machine description.
13837 Ensure that this constraint holds to avoid unexpected surprises.
13839 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
13840 enforce these sanity checks. */
13841 int
13842 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
13843 enum machine_mode mode, int strict)
13845 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
13846 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
13847 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
13848 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
13849 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
13850 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
13852 if (strict)
13853 abort ();
13854 else
13855 return 1;
13857 return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
13858 || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
13859 || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
13860 && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
13861 || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
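/* For illustration: on a 32-bit target a DImode value cannot be moved
   directly between an MMX register and integer registers, so this
   function returns nonzero and the copy goes through a stack
   temporary.  */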
13863 /* Return the cost of moving data from a register in class CLASS1 to
13864 one in class CLASS2.
13866 It is not required that the cost always equal 2 when FROM is the same as TO;
13867 on some machines it is expensive to move between registers if they are not
13868 general registers. */
13869 int
13870 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
13871 enum reg_class class2)
13873 /* In case we require secondary memory, compute the cost of the store followed
13874 by a load. In order to avoid bad register allocation choices, we need
13875 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
13877 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
13879 int cost = 1;
13881 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
13882 MEMORY_MOVE_COST (mode, class1, 1));
13883 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
13884 MEMORY_MOVE_COST (mode, class2, 1));
13886 /* When copying from a general purpose register we may emit multiple
13887 stores followed by a single load, causing a memory size mismatch stall.
13888 Count this as an arbitrarily high cost of 20. */
13889 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
13890 cost += 20;
13892 /* In the case of FP/MMX moves, the registers actually overlap, and we
13893 have to switch modes in order to treat them differently. */
13894 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
13895 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
13896 cost += 20;
13898 return cost;
13901 /* Moves between SSE/MMX and integer unit are expensive. */
13902 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
13903 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
13904 return ix86_cost->mmxsse_to_integer;
13905 if (MAYBE_FLOAT_CLASS_P (class1))
13906 return ix86_cost->fp_move;
13907 if (MAYBE_SSE_CLASS_P (class1))
13908 return ix86_cost->sse_move;
13909 if (MAYBE_MMX_CLASS_P (class1))
13910 return ix86_cost->mmx_move;
13911 return 2;
13914 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
13915 int
13916 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
13919 /* Flags, and only flags, can hold CCmode values. */
13919 if (CC_REGNO_P (regno))
13920 return GET_MODE_CLASS (mode) == MODE_CC;
13921 if (GET_MODE_CLASS (mode) == MODE_CC
13922 || GET_MODE_CLASS (mode) == MODE_RANDOM
13923 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
13924 return 0;
13925 if (FP_REGNO_P (regno))
13926 return VALID_FP_MODE_P (mode);
13927 if (SSE_REGNO_P (regno))
13928 return (TARGET_SSE ? VALID_SSE_REG_MODE (mode) : 0);
13929 if (MMX_REGNO_P (regno))
13930 return (TARGET_MMX
13931 ? VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode) : 0);
13932 /* We handle both integers and floats in the general purpose registers.
13933 In the future we should be able to handle vector modes as well. */
13934 if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
13935 return 0;
13936 /* Take care with QImode values - they can be in non-QI regs, but then
13937 they do cause partial register stalls. */
13938 if (regno < 4 || mode != QImode || TARGET_64BIT)
13939 return 1;
13940 return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
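/* For illustration: the final test allows QImode values in registers
   other than %eax..%ebx on 32-bit targets only during or after reload,
   or when partial register stalls are not being avoided.  */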
13943 /* Return the cost of moving data of mode M between a
13944 register and memory. A value of 2 is the default; this cost is
13945 relative to those in `REGISTER_MOVE_COST'.
13947 If moving between registers and memory is more expensive than
13948 between two registers, you should define this macro to express the
13949 relative cost.
13951 Also model the increased cost of moving QImode registers in non
13952 Q_REGS classes. */
13954 int
13955 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
13957 if (FLOAT_CLASS_P (class))
13959 int index;
13960 switch (mode)
13962 case SFmode:
13963 index = 0;
13964 break;
13965 case DFmode:
13966 index = 1;
13967 break;
13968 case XFmode:
13969 index = 2;
13970 break;
13971 default:
13972 return 100;
13974 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
13976 if (SSE_CLASS_P (class))
13978 int index;
13979 switch (GET_MODE_SIZE (mode))
13981 case 4:
13982 index = 0;
13983 break;
13984 case 8:
13985 index = 1;
13986 break;
13987 case 16:
13988 index = 2;
13989 break;
13990 default:
13991 return 100;
13993 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
13995 if (MMX_CLASS_P (class))
13997 int index;
13998 switch (GET_MODE_SIZE (mode))
14000 case 4:
14001 index = 0;
14002 break;
14003 case 8:
14004 index = 1;
14005 break;
14006 default:
14007 return 100;
14009 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
14011 switch (GET_MODE_SIZE (mode))
14013 case 1:
14014 if (in)
14015 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
14016 : ix86_cost->movzbl_load);
14017 else
14018 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
14019 : ix86_cost->int_store[0] + 4);
14020 break;
14021 case 2:
14022 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
14023 default:
14024 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
14025 if (mode == TFmode)
14026 mode = XFmode;
14027 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
14028 * (((int) GET_MODE_SIZE (mode)
14029 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
14033 /* Compute a (partial) cost for rtx X. Return true if the complete
14034 cost has been computed, and false if subexpressions should be
14035 scanned. In either case, *TOTAL contains the cost result. */
14037 static bool
14038 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
14040 enum machine_mode mode = GET_MODE (x);
14042 switch (code)
14044 case CONST_INT:
14045 case CONST:
14046 case LABEL_REF:
14047 case SYMBOL_REF:
14048 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
14049 *total = 3;
14050 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
14051 *total = 2;
14052 else if (flag_pic && SYMBOLIC_CONST (x)
14053 && (!TARGET_64BIT
14054 || (GET_CODE (x) != LABEL_REF
14055 && (GET_CODE (x) != SYMBOL_REF
14056 || !SYMBOL_REF_LOCAL_P (x)))))
14057 *total = 1;
14058 else
14059 *total = 0;
14060 return true;
14062 case CONST_DOUBLE:
14063 if (mode == VOIDmode)
14064 *total = 0;
14065 else
14066 switch (standard_80387_constant_p (x))
14068 case 1: /* 0.0 */
14069 *total = 1;
14070 break;
14071 default: /* Other constants */
14072 *total = 2;
14073 break;
14074 case 0:
14075 case -1:
14076 /* Start with (MEM (SYMBOL_REF)), since that's where
14077 it'll probably end up. Add a penalty for size. */
14078 *total = (COSTS_N_INSNS (1)
14079 + (flag_pic != 0 && !TARGET_64BIT)
14080 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
14081 break;
14083 return true;
14085 case ZERO_EXTEND:
14086 /* The zero extension is often completely free on x86_64, so make
14087 it as cheap as possible. */
14088 if (TARGET_64BIT && mode == DImode
14089 && GET_MODE (XEXP (x, 0)) == SImode)
14090 *total = 1;
14091 else if (TARGET_ZERO_EXTEND_WITH_AND)
14092 *total = COSTS_N_INSNS (ix86_cost->add);
14093 else
14094 *total = COSTS_N_INSNS (ix86_cost->movzx);
14095 return false;
14097 case SIGN_EXTEND:
14098 *total = COSTS_N_INSNS (ix86_cost->movsx);
14099 return false;
14101 case ASHIFT:
14102 if (GET_CODE (XEXP (x, 1)) == CONST_INT
14103 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
14105 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14106 if (value == 1)
14108 *total = COSTS_N_INSNS (ix86_cost->add);
14109 return false;
14111 if ((value == 2 || value == 3)
14112 && ix86_cost->lea <= ix86_cost->shift_const)
14114 *total = COSTS_N_INSNS (ix86_cost->lea);
14115 return false;
14118 /* FALLTHRU */
14120 case ROTATE:
14121 case ASHIFTRT:
14122 case LSHIFTRT:
14123 case ROTATERT:
14124 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
14126 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14128 if (INTVAL (XEXP (x, 1)) > 32)
14129 *total = COSTS_N_INSNS (ix86_cost->shift_const + 2);
14130 else
14131 *total = COSTS_N_INSNS (ix86_cost->shift_const * 2);
14133 else
14135 if (GET_CODE (XEXP (x, 1)) == AND)
14136 *total = COSTS_N_INSNS (ix86_cost->shift_var * 2);
14137 else
14138 *total = COSTS_N_INSNS (ix86_cost->shift_var * 6 + 2);
14141 else
14143 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14144 *total = COSTS_N_INSNS (ix86_cost->shift_const);
14145 else
14146 *total = COSTS_N_INSNS (ix86_cost->shift_var);
14148 return false;
14150 case MULT:
14151 if (FLOAT_MODE_P (mode))
14153 *total = COSTS_N_INSNS (ix86_cost->fmul);
14154 return false;
14156 else
14158 rtx op0 = XEXP (x, 0);
14159 rtx op1 = XEXP (x, 1);
14160 int nbits;
14161 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14163 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14164 for (nbits = 0; value != 0; value &= value - 1)
14165 nbits++;
14167 else
14168 /* This is arbitrary. */
14169 nbits = 7;
14171 /* Compute costs correctly for widening multiplication. */
14172 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op1) == ZERO_EXTEND)
14173 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
14174 == GET_MODE_SIZE (mode))
14176 int is_mulwiden = 0;
14177 enum machine_mode inner_mode = GET_MODE (op0);
14179 if (GET_CODE (op0) == GET_CODE (op1))
14180 is_mulwiden = 1, op1 = XEXP (op1, 0);
14181 else if (GET_CODE (op1) == CONST_INT)
14183 if (GET_CODE (op0) == SIGN_EXTEND)
14184 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
14185 == INTVAL (op1);
14186 else
14187 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
14190 if (is_mulwiden)
14191 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
14194 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
14195 + nbits * ix86_cost->mult_bit)
14196 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code);
14198 return true;
14201 case DIV:
14202 case UDIV:
14203 case MOD:
14204 case UMOD:
14205 if (FLOAT_MODE_P (mode))
14206 *total = COSTS_N_INSNS (ix86_cost->fdiv);
14207 else
14208 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
14209 return false;
14211 case PLUS:
14212 if (FLOAT_MODE_P (mode))
14213 *total = COSTS_N_INSNS (ix86_cost->fadd);
14214 else if (GET_MODE_CLASS (mode) == MODE_INT
14215 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
14217 if (GET_CODE (XEXP (x, 0)) == PLUS
14218 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
14219 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
14220 && CONSTANT_P (XEXP (x, 1)))
14222 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
14223 if (val == 2 || val == 4 || val == 8)
14225 *total = COSTS_N_INSNS (ix86_cost->lea);
14226 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
14227 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
14228 outer_code);
14229 *total += rtx_cost (XEXP (x, 1), outer_code);
14230 return true;
14233 else if (GET_CODE (XEXP (x, 0)) == MULT
14234 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
14236 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
14237 if (val == 2 || val == 4 || val == 8)
14239 *total = COSTS_N_INSNS (ix86_cost->lea);
14240 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
14241 *total += rtx_cost (XEXP (x, 1), outer_code);
14242 return true;
14245 else if (GET_CODE (XEXP (x, 0)) == PLUS)
14247 *total = COSTS_N_INSNS (ix86_cost->lea);
14248 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
14249 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
14250 *total += rtx_cost (XEXP (x, 1), outer_code);
14251 return true;
14254 /* FALLTHRU */
14256 case MINUS:
14257 if (FLOAT_MODE_P (mode))
14259 *total = COSTS_N_INSNS (ix86_cost->fadd);
14260 return false;
14262 /* FALLTHRU */
14264 case AND:
14265 case IOR:
14266 case XOR:
14267 if (!TARGET_64BIT && mode == DImode)
14269 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
14270 + (rtx_cost (XEXP (x, 0), outer_code)
14271 << (GET_MODE (XEXP (x, 0)) != DImode))
14272 + (rtx_cost (XEXP (x, 1), outer_code)
14273 << (GET_MODE (XEXP (x, 1)) != DImode)));
14274 return true;
14276 /* FALLTHRU */
14278 case NEG:
14279 if (FLOAT_MODE_P (mode))
14281 *total = COSTS_N_INSNS (ix86_cost->fchs);
14282 return false;
14284 /* FALLTHRU */
14286 case NOT:
14287 if (!TARGET_64BIT && mode == DImode)
14288 *total = COSTS_N_INSNS (ix86_cost->add * 2);
14289 else
14290 *total = COSTS_N_INSNS (ix86_cost->add);
14291 return false;
14293 case FLOAT_EXTEND:
14294 if (!TARGET_SSE_MATH || !VALID_SSE_REG_MODE (mode))
14295 *total = 0;
14296 return false;
14298 case ABS:
14299 if (FLOAT_MODE_P (mode))
14300 *total = COSTS_N_INSNS (ix86_cost->fabs);
14301 return false;
14303 case SQRT:
14304 if (FLOAT_MODE_P (mode))
14305 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
14306 return false;
14308 case UNSPEC:
14309 if (XINT (x, 1) == UNSPEC_TP)
14310 *total = 0;
14311 return false;
14313 default:
14314 return false;
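/* For illustration: the PLUS handling above prices address-style
   expressions as a single lea, e.g. (plus (mult reg 4) reg) or
   (plus (plus (mult reg 8) reg) (const_int disp)) cost ix86_cost->lea
   rather than a shift and one or two adds.  */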
14318 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
14319 static void
14320 ix86_svr3_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
14322 init_section ();
14323 fputs ("\tpushl $", asm_out_file);
14324 assemble_name (asm_out_file, XSTR (symbol, 0));
14325 fputc ('\n', asm_out_file);
14327 #endif
14329 #if TARGET_MACHO
14331 static int current_machopic_label_num;
14333 /* Given a symbol name and its associated stub, write out the
14334 definition of the stub. */
14336 void
14337 machopic_output_stub (FILE *file, const char *symb, const char *stub)
14339 unsigned int length;
14340 char *binder_name, *symbol_name, lazy_ptr_name[32];
14341 int label = ++current_machopic_label_num;
14343 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
14344 symb = (*targetm.strip_name_encoding) (symb);
14346 length = strlen (stub);
14347 binder_name = alloca (length + 32);
14348 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
14350 length = strlen (symb);
14351 symbol_name = alloca (length + 32);
14352 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
14354 sprintf (lazy_ptr_name, "L%d$lz", label);
14356 if (MACHOPIC_PURE)
14357 machopic_picsymbol_stub_section ();
14358 else
14359 machopic_symbol_stub_section ();
14361 fprintf (file, "%s:\n", stub);
14362 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
14364 if (MACHOPIC_PURE)
14366 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
14367 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
14368 fprintf (file, "\tjmp %%edx\n");
14370 else
14371 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
14373 fprintf (file, "%s:\n", binder_name);
14375 if (MACHOPIC_PURE)
14377 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
14378 fprintf (file, "\tpushl %%eax\n");
14380 else
14381 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
14383 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
14385 machopic_lazy_symbol_ptr_section ();
14386 fprintf (file, "%s:\n", lazy_ptr_name);
14387 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
14388 fprintf (file, "\t.long %s\n", binder_name);
14390 #endif /* TARGET_MACHO */
14392 /* Order the registers for the register allocator. */
14394 void
14395 x86_order_regs_for_local_alloc (void)
14397 int pos = 0;
14398 int i;
14400 /* First allocate the local general purpose registers. */
14401 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14402 if (GENERAL_REGNO_P (i) && call_used_regs[i])
14403 reg_alloc_order [pos++] = i;
14405 /* Global general purpose registers. */
14406 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
14407 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
14408 reg_alloc_order [pos++] = i;
14410 /* x87 registers come first in case we are doing FP math
14411 using them. */
14412 if (!TARGET_SSE_MATH)
14413 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
14414 reg_alloc_order [pos++] = i;
14416 /* SSE registers. */
14417 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
14418 reg_alloc_order [pos++] = i;
14419 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
14420 reg_alloc_order [pos++] = i;
14422 /* x87 registers. */
14423 if (TARGET_SSE_MATH)
14424 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
14425 reg_alloc_order [pos++] = i;
14427 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
14428 reg_alloc_order [pos++] = i;
14430 /* Initialize the rest of the array, as some registers are never
14431 allocated at all. */
14432 while (pos < FIRST_PSEUDO_REGISTER)
14433 reg_alloc_order [pos++] = 0;
14436 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
14437 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
14438 #endif
14440 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
14441 struct attribute_spec.handler. */
14442 static tree
14443 ix86_handle_struct_attribute (tree *node, tree name,
14444 tree args ATTRIBUTE_UNUSED,
14445 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
14447 tree *type = NULL;
14448 if (DECL_P (*node))
14450 if (TREE_CODE (*node) == TYPE_DECL)
14451 type = &TREE_TYPE (*node);
14453 else
14454 type = node;
14456 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
14457 || TREE_CODE (*type) == UNION_TYPE)))
14459 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
14460 *no_add_attrs = true;
14463 else if ((is_attribute_p ("ms_struct", name)
14464 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
14465 || ((is_attribute_p ("gcc_struct", name)
14466 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
14468 warning ("`%s' incompatible attribute ignored",
14469 IDENTIFIER_POINTER (name));
14470 *no_add_attrs = true;
14473 return NULL_TREE;
14476 static bool
14477 ix86_ms_bitfield_layout_p (tree record_type)
14479 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
14480 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
14481 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
14484 /* Returns an expression indicating where the this parameter is
14485 located on entry to the FUNCTION. */
14487 static rtx
14488 x86_this_parameter (tree function)
14490 tree type = TREE_TYPE (function);
14492 if (TARGET_64BIT)
14494 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
14495 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
14498 if (ix86_function_regparm (type, function) > 0)
14500 tree parm;
14502 parm = TYPE_ARG_TYPES (type);
14503 /* Figure out whether or not the function has a variable number of
14504 arguments. */
14505 for (; parm; parm = TREE_CHAIN (parm))
14506 if (TREE_VALUE (parm) == void_type_node)
14507 break;
14508 /* If not, the this parameter is passed in the first argument register. */
14509 if (parm)
14511 int regno = 0;
14512 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
14513 regno = 2;
14514 return gen_rtx_REG (SImode, regno);
14518 if (aggregate_value_p (TREE_TYPE (type), type))
14519 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
14520 else
14521 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
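/* In the 32-bit stack case just above: with no register parameters, "this"
   is the first stack argument, found at 4(%esp) on entry, or at 8(%esp)
   when a hidden aggregate-return pointer occupies the first slot.  */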
14524 /* Determine whether x86_output_mi_thunk can succeed. */
14526 static bool
14527 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
14528 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
14529 HOST_WIDE_INT vcall_offset, tree function)
14531 /* 64-bit can handle anything. */
14532 if (TARGET_64BIT)
14533 return true;
14535 /* For 32-bit, everything's fine if we have one free register. */
14536 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
14537 return true;
14539 /* Need a free register for vcall_offset. */
14540 if (vcall_offset)
14541 return false;
14543 /* Need a free register for GOT references. */
14544 if (flag_pic && !(*targetm.binds_local_p) (function))
14545 return false;
14547 /* Otherwise ok. */
14548 return true;
14551 /* Output the assembler code for a thunk function. THUNK_DECL is the
14552 declaration for the thunk function itself, FUNCTION is the decl for
14553 the target function. DELTA is an immediate constant offset to be
14554 added to THIS. If VCALL_OFFSET is nonzero, the word at
14555 *(*this + vcall_offset) should be added to THIS. */
14557 static void
14558 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
14559 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
14560 HOST_WIDE_INT vcall_offset, tree function)
14562 rtx xops[3];
14563 rtx this = x86_this_parameter (function);
14564 rtx this_reg, tmp;
14566 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
14567 pull it in now and let DELTA benefit. */
14568 if (REG_P (this))
14569 this_reg = this;
14570 else if (vcall_offset)
14572 /* Put the this parameter into %eax. */
14573 xops[0] = this;
14574 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
14575 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14577 else
14578 this_reg = NULL_RTX;
14580 /* Adjust the this parameter by a fixed constant. */
14581 if (delta)
14583 xops[0] = GEN_INT (delta);
14584 xops[1] = this_reg ? this_reg : this;
14585 if (TARGET_64BIT)
14587 if (!x86_64_general_operand (xops[0], DImode))
14589 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
14590 xops[1] = tmp;
14591 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
14592 xops[0] = tmp;
14593 xops[1] = this;
14595 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
14597 else
14598 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
14601 /* Adjust the this parameter by a value stored in the vtable. */
14602 if (vcall_offset)
14604 if (TARGET_64BIT)
14605 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
14606 else
14608 int tmp_regno = 2 /* ECX */;
14609 if (lookup_attribute ("fastcall",
14610 TYPE_ATTRIBUTES (TREE_TYPE (function))))
14611 tmp_regno = 0 /* EAX */;
14612 tmp = gen_rtx_REG (SImode, tmp_regno);
14615 xops[0] = gen_rtx_MEM (Pmode, this_reg);
14616 xops[1] = tmp;
14617 if (TARGET_64BIT)
14618 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
14619 else
14620 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14622 /* Adjust the this parameter. */
14623 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
14624 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
14626 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
14627 xops[0] = GEN_INT (vcall_offset);
14628 xops[1] = tmp2;
14629 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
14630 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
14632 xops[1] = this_reg;
14633 if (TARGET_64BIT)
14634 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
14635 else
14636 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
14639 /* If necessary, drop THIS back to its stack slot. */
14640 if (this_reg && this_reg != this)
14642 xops[0] = this_reg;
14643 xops[1] = this;
14644 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
14647 xops[0] = XEXP (DECL_RTL (function), 0);
14648 if (TARGET_64BIT)
14650 if (!flag_pic || (*targetm.binds_local_p) (function))
14651 output_asm_insn ("jmp\t%P0", xops);
14652 else
14654 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
14655 tmp = gen_rtx_CONST (Pmode, tmp);
14656 tmp = gen_rtx_MEM (QImode, tmp);
14657 xops[0] = tmp;
14658 output_asm_insn ("jmp\t%A0", xops);
14661 else
14663 if (!flag_pic || (*targetm.binds_local_p) (function))
14664 output_asm_insn ("jmp\t%P0", xops);
14665 else
14666 #if TARGET_MACHO
14667 if (TARGET_MACHO)
14669 rtx sym_ref = XEXP (DECL_RTL (function), 0);
14670 tmp = (gen_rtx_SYMBOL_REF
14671 (Pmode,
14672 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
14673 tmp = gen_rtx_MEM (QImode, tmp);
14674 xops[0] = tmp;
14675 output_asm_insn ("jmp\t%0", xops);
14677 else
14678 #endif /* TARGET_MACHO */
14680 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
14681 output_set_got (tmp);
14683 xops[1] = tmp;
14684 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
14685 output_asm_insn ("jmp\t{*}%1", xops);
14690 static void
14691 x86_file_start (void)
14693 default_file_start ();
14694 if (X86_FILE_START_VERSION_DIRECTIVE)
14695 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
14696 if (X86_FILE_START_FLTUSED)
14697 fputs ("\t.global\t__fltused\n", asm_out_file);
14698 if (ix86_asm_dialect == ASM_INTEL)
14699 fputs ("\t.intel_syntax\n", asm_out_file);
14703 x86_field_alignment (tree field, int computed)
14705 enum machine_mode mode;
14706 tree type = TREE_TYPE (field);
14708 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
14709 return computed;
14710 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
14711 ? get_inner_array_type (type) : type);
14712 if (mode == DFmode || mode == DCmode
14713 || GET_MODE_CLASS (mode) == MODE_INT
14714 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
14715 return MIN (32, computed);
14716 return computed;
14719 /* Output assembler code to FILE to increment profiler label # LABELNO
14720 for profiling a function entry. */
14721 void
14722 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
14724 if (TARGET_64BIT)
14725 if (flag_pic)
14727 #ifndef NO_PROFILE_COUNTERS
14728 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
14729 #endif
14730 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
14732 else
14734 #ifndef NO_PROFILE_COUNTERS
14735 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
14736 #endif
14737 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
14739 else if (flag_pic)
14741 #ifndef NO_PROFILE_COUNTERS
14742 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
14743 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
14744 #endif
14745 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
14747 else
14749 #ifndef NO_PROFILE_COUNTERS
14750 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
14751 PROFILE_COUNT_REGISTER);
14752 #endif
14753 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
14757 /* We don't have exact information about the insn sizes, but we may safely
14758 assume that we know about all 1-byte insns and memory
14759 address sizes. This is enough to eliminate unnecessary padding in
14760 99% of cases. */
14762 static int
14763 min_insn_size (rtx insn)
14765 int l = 0;
14767 if (!INSN_P (insn) || !active_insn_p (insn))
14768 return 0;
14770 /* Discard the alignments we've emitted and jump table data. */
14771 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
14772 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
14773 return 0;
14774 if (GET_CODE (insn) == JUMP_INSN
14775 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
14776 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
14777 return 0;
14779 /* Important case: calls are always 5 bytes.
14780 It is common to have many calls in a row. */
14781 if (GET_CODE (insn) == CALL_INSN
14782 && symbolic_reference_mentioned_p (PATTERN (insn))
14783 && !SIBLING_CALL_P (insn))
14784 return 5;
14785 if (get_attr_length (insn) <= 1)
14786 return 1;
14788 /* For normal instructions we may rely on the sizes of addresses
14789 and on the presence of a symbol to require 4 bytes of encoding.
14790 This is not the case for jumps, whose references are PC-relative. */
14791 if (GET_CODE (insn) != JUMP_INSN)
14793 l = get_attr_length_address (insn);
14794 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
14795 l = 4;
14797 if (l)
14798 return 1+l;
14799 else
14800 return 2;
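/* These are only estimates used by the padding passes below: a direct call
   to a symbol counts as 5 bytes (opcode plus rel32), a known 1-byte insn as
   1 byte, and anything else as one opcode byte plus the address bytes we
   know about, or 2 bytes when no address size is known.  */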
14803 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
14804 16-byte window. */
14806 static void
14807 ix86_avoid_jump_misspredicts (void)
14809 rtx insn, start = get_insns ();
14810 int nbytes = 0, njumps = 0;
14811 int isjump = 0;
14813 /* Look for all minimal intervals of instructions containing 4 jumps.
14814 The intervals are bounded by START and INSN. NBYTES is the total
14815 size of the instructions in the interval, including INSN and not including
14816 START. When NBYTES is smaller than 16 bytes, it is possible
14817 that the ends of START and INSN fall in the same 16-byte page.
14819 The smallest offset in the page at which INSN can start is the case where
14820 START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
14821 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN). */
14823 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14826 nbytes += min_insn_size (insn);
14827 if (dump_file)
14828 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
14829 INSN_UID (insn), min_insn_size (insn));
14830 if ((GET_CODE (insn) == JUMP_INSN
14831 && GET_CODE (PATTERN (insn)) != ADDR_VEC
14832 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
14833 || GET_CODE (insn) == CALL_INSN)
14834 njumps++;
14835 else
14836 continue;
14838 while (njumps > 3)
14840 start = NEXT_INSN (start);
14841 if ((GET_CODE (start) == JUMP_INSN
14842 && GET_CODE (PATTERN (start)) != ADDR_VEC
14843 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
14844 || GET_CODE (start) == CALL_INSN)
14845 njumps--, isjump = 1;
14846 else
14847 isjump = 0;
14848 nbytes -= min_insn_size (start);
14850 if (njumps < 0)
14851 abort ();
14852 if (dump_file)
14853 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
14854 INSN_UID (start), INSN_UID (insn), nbytes);
14856 if (njumps == 3 && isjump && nbytes < 16)
14858 int padsize = 15 - nbytes + min_insn_size (insn);
14860 if (dump_file)
14861 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
14862 INSN_UID (insn), padsize);
14863 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
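/* A worked example of the computation above: if the interval holding the
   four jumps measures nbytes = 12 and the final jump is 2 bytes long, then
   padsize = 15 - 12 + 2 = 5, and an alignment insn with that maximum skip
   is emitted before INSN.  */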
14868 /* The AMD Athlon works faster
14869 when RET is neither the destination of a conditional jump nor directly
14870 preceded by another jump instruction. We avoid the penalty by inserting a
14871 NOP just before such RET instructions. */
14872 static void
14873 ix86_pad_returns (void)
14875 edge e;
14877 for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
14879 basic_block bb = e->src;
14880 rtx ret = BB_END (bb);
14881 rtx prev;
14882 bool replace = false;
14884 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
14885 || !maybe_hot_bb_p (bb))
14886 continue;
14887 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
14888 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
14889 break;
14890 if (prev && GET_CODE (prev) == CODE_LABEL)
14892 edge e;
14893 for (e = bb->pred; e; e = e->pred_next)
14894 if (EDGE_FREQUENCY (e) && e->src->index >= 0
14895 && !(e->flags & EDGE_FALLTHRU))
14896 replace = true;
14898 if (!replace)
14900 prev = prev_active_insn (ret);
14901 if (prev
14902 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
14903 || GET_CODE (prev) == CALL_INSN))
14904 replace = true;
14905 /* Empty functions get a branch mispredict even when the jump destination
14906 is not visible to us. */
14907 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
14908 replace = true;
14910 if (replace)
14912 emit_insn_before (gen_return_internal_long (), ret);
14913 delete_insn (ret);
14918 /* Implement machine-specific optimizations. We implement padding of returns
14919 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
14920 static void
14921 ix86_reorg (void)
14923 if (TARGET_ATHLON_K8 && optimize && !optimize_size)
14924 ix86_pad_returns ();
14925 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
14926 ix86_avoid_jump_misspredicts ();
14929 /* Return nonzero when a QImode register that must be represented via a REX
14930 prefix is used. */
14931 bool
14932 x86_extended_QIreg_mentioned_p (rtx insn)
14934 int i;
14935 extract_insn_cached (insn);
14936 for (i = 0; i < recog_data.n_operands; i++)
14937 if (REG_P (recog_data.operand[i])
14938 && REGNO (recog_data.operand[i]) >= 4)
14939 return true;
14940 return false;
14943 /* Return nonzero when P points to a register encoded via a REX prefix.
14944 Called via for_each_rtx. */
14945 static int
14946 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
14948 unsigned int regno;
14949 if (!REG_P (*p))
14950 return 0;
14951 regno = REGNO (*p);
14952 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
14955 /* Return true when INSN mentions a register that must be encoded using a
14956 REX prefix. */
14957 bool
14958 x86_extended_reg_mentioned_p (rtx insn)
14960 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
14963 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
14964 optabs would emit if we didn't have TFmode patterns. */
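/* A rough C equivalent of the expansion below, using a hypothetical helper
   purely for illustration: nonnegative inputs convert directly; otherwise
   the value is halved while the low bit is kept as a sticky bit, converted,
   and then doubled.

     double u64_to_double (unsigned long long x)
     {
       if ((long long) x >= 0)
         return (double) (long long) x;
       unsigned long long half = (x >> 1) | (x & 1);
       double f = (double) (long long) half;
       return f + f;
     }
*/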
14966 void
14967 x86_emit_floatuns (rtx operands[2])
14969 rtx neglab, donelab, i0, i1, f0, in, out;
14970 enum machine_mode mode, inmode;
14972 inmode = GET_MODE (operands[1]);
14973 if (inmode != SImode
14974 && inmode != DImode)
14975 abort ();
14977 out = operands[0];
14978 in = force_reg (inmode, operands[1]);
14979 mode = GET_MODE (out);
14980 neglab = gen_label_rtx ();
14981 donelab = gen_label_rtx ();
14982 i1 = gen_reg_rtx (Pmode);
14983 f0 = gen_reg_rtx (mode);
14985 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
14987 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
14988 emit_jump_insn (gen_jump (donelab));
14989 emit_barrier ();
14991 emit_label (neglab);
14993 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
14994 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
14995 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
14996 expand_float (f0, i0, 0);
14997 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
14999 emit_label (donelab);
15002 /* Initialize vector TARGET via VALS. */
15003 void
15004 ix86_expand_vector_init (rtx target, rtx vals)
15006 enum machine_mode mode = GET_MODE (target);
15007 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
15008 int n_elts = (GET_MODE_SIZE (mode) / elt_size);
15009 int i;
15011 for (i = n_elts - 1; i >= 0; i--)
15012 if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT
15013 && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE)
15014 break;
15016 /* A few special cases first...
15017 ... constants are best loaded from the constant pool. */
15018 if (i < 0)
15020 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15021 return;
15024 /* ... values where only the first field is non-constant are best loaded
15025 from the pool, with the non-constant field overwritten via a move later. */
15026 if (!i)
15028 rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0),
15029 GET_MODE_INNER (mode), 0);
15031 op = force_reg (mode, op);
15032 XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode));
15033 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15034 switch (GET_MODE (target))
15036 case V2DFmode:
15037 emit_insn (gen_sse2_movsd (target, target, op));
15038 break;
15039 case V4SFmode:
15040 emit_insn (gen_sse_movss (target, target, op));
15041 break;
15042 default:
15043 break;
15045 return;
15048 /* Otherwise, the general sequence built from interleave (unpck) instructions. */
15049 switch (GET_MODE (target))
15051 case V2DFmode:
15053 rtx vecop0 =
15054 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0);
15055 rtx vecop1 =
15056 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0);
15058 vecop0 = force_reg (V2DFmode, vecop0);
15059 vecop1 = force_reg (V2DFmode, vecop1);
15060 emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1));
15062 break;
15063 case V4SFmode:
15065 rtx vecop0 =
15066 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0);
15067 rtx vecop1 =
15068 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0);
15069 rtx vecop2 =
15070 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0);
15071 rtx vecop3 =
15072 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0);
15073 rtx tmp1 = gen_reg_rtx (V4SFmode);
15074 rtx tmp2 = gen_reg_rtx (V4SFmode);
15076 vecop0 = force_reg (V4SFmode, vecop0);
15077 vecop1 = force_reg (V4SFmode, vecop1);
15078 vecop2 = force_reg (V4SFmode, vecop2);
15079 vecop3 = force_reg (V4SFmode, vecop3);
15080 emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3));
15081 emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2));
15082 emit_insn (gen_sse_unpcklps (target, tmp2, tmp1));
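/* With scalar elements {a, b, c, d}, the interleaves above produce
   tmp1 = {b, d, x, x} and tmp2 = {a, c, x, x}, so the final unpcklps
   yields target = {a, b, c, d} (x marks a don't-care upper element).  */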
15084 break;
15085 default:
15086 abort ();
15090 /* Implements target hook vector_mode_supported_p. */
15091 static bool
15092 ix86_vector_mode_supported_p (enum machine_mode mode)
15094 if (TARGET_SSE
15095 && VALID_SSE_REG_MODE (mode))
15096 return true;
15098 else if (TARGET_MMX
15099 && VALID_MMX_REG_MODE (mode))
15100 return true;
15102 else if (TARGET_3DNOW
15103 && VALID_MMX_REG_MODE_3DNOW (mode))
15104 return true;
15106 else
15107 return false;
15110 /* Worker function for TARGET_MD_ASM_CLOBBERS.
15112 We do this in the new i386 backend to maintain source compatibility
15113 with the old cc0-based compiler. */
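/* Every asm is thus given three implicit clobbers: the condition codes
   ("flags"), the FP status word ("fpsr") and the direction flag ("dirflag"),
   matching what the old compiler assumed even when the asm did not declare
   them.  */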
15115 static tree
15116 ix86_md_asm_clobbers (tree clobbers)
15118 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
15119 clobbers);
15120 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
15121 clobbers);
15122 clobbers = tree_cons (NULL_TREE, build_string (7, "dirflag"),
15123 clobbers);
15124 return clobbers;
15127 /* Worker function for REVERSE_CONDITION. */
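/* Note that a plain reverse_condition is not valid for IEEE comparisons when
   NaNs are possible, so the CCFPmode/CCFPUmode cases use the
   unordered-aware reversal instead.  */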
15129 enum rtx_code
15130 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
15132 return (mode != CCFPmode && mode != CCFPUmode
15133 ? reverse_condition (code)
15134 : reverse_condition_maybe_unordered (code));
15137 /* Output code to perform an x87 FP register move, from OPERANDS[1]
15138 to OPERANDS[0]. */
15140 const char *
15141 output_387_reg_move (rtx insn, rtx *operands)
15143 if (REG_P (operands[1])
15144 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
15146 if (REGNO (operands[0]) == FIRST_STACK_REG
15147 && TARGET_USE_FFREEP)
15148 return "ffreep\t%y0";
15149 return "fstp\t%y0";
15151 if (STACK_TOP_P (operands[0]))
15152 return "fld%z1\t%y1";
15153 return "fst\t%y0";
15156 /* Output code to perform a conditional jump to LABEL if the C2 flag in
15157 the FP status register is set. */
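/* After fnstsw, the C2 flag sits in bit 2 of the high byte of the status
   word, hence the 0x04 test used when SAHF is unavailable; with SAHF the
   high byte is copied into EFLAGS and the branch tests the resulting
   unordered condition.  */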
15159 void
15160 ix86_emit_fp_unordered_jump (rtx label)
15162 rtx reg = gen_reg_rtx (HImode);
15163 rtx temp;
15165 emit_insn (gen_x86_fnstsw_1 (reg));
15167 if (TARGET_USE_SAHF)
15169 emit_insn (gen_x86_sahf_1 (reg));
15171 temp = gen_rtx_REG (CCmode, FLAGS_REG);
15172 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
15174 else
15176 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
15178 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
15179 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
15182 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
15183 gen_rtx_LABEL_REF (VOIDmode, label),
15184 pc_rtx);
15185 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
15186 emit_jump_insn (temp);
15189 /* Output code to perform a log1p XFmode calculation. */
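/* The x87 fyl2xp1 instruction computes y * log2 (x + 1) but is specified
   only for |x| < 1 - sqrt(2)/2, which is the 0.2928... threshold tested
   below; larger arguments fall back to fyl2x on 1 + x.  Both paths load
   fldln2 so that ln (1 + x) = ln 2 * log2 (1 + x).  */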
15191 void ix86_emit_i387_log1p (rtx op0, rtx op1)
15193 rtx label1 = gen_label_rtx ();
15194 rtx label2 = gen_label_rtx ();
15196 rtx tmp = gen_reg_rtx (XFmode);
15197 rtx tmp2 = gen_reg_rtx (XFmode);
15199 emit_insn (gen_absxf2 (tmp, op1));
15200 emit_insn (gen_cmpxf (tmp,
15201 CONST_DOUBLE_FROM_REAL_VALUE (
15202 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
15203 XFmode)));
15204 emit_jump_insn (gen_bge (label1));
15206 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
15207 emit_insn (gen_fyl2xp1_xf3 (op0, tmp2, op1));
15208 emit_jump (label2);
15210 emit_label (label1);
15211 emit_move_insn (tmp, CONST1_RTX (XFmode));
15212 emit_insn (gen_addxf3 (tmp, op1, tmp));
15213 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
15214 emit_insn (gen_fyl2x_xf3 (op0, tmp2, tmp));
15216 emit_label (label2);
15219 #include "gt-i386.h"