gcc/config/i386/i386.c
1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to
19 the Free Software Foundation, 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-attr.h"
36 #include "flags.h"
37 #include "except.h"
38 #include "function.h"
39 #include "recog.h"
40 #include "expr.h"
41 #include "optabs.h"
42 #include "toplev.h"
43 #include "basic-block.h"
44 #include "ggc.h"
45 #include "target.h"
46 #include "target-def.h"
47 #include "langhooks.h"
48 #include "cgraph.h"
50 #ifndef CHECK_STACK_LIMIT
51 #define CHECK_STACK_LIMIT (-1)
52 #endif
54 /* Return index of given mode in mult and division cost tables. */
55 #define MODE_INDEX(mode) \
56 ((mode) == QImode ? 0 \
57 : (mode) == HImode ? 1 \
58 : (mode) == SImode ? 2 \
59 : (mode) == DImode ? 3 \
60 : 4)
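/* Illustrative sketch of how MODE_INDEX is used: it picks the column of the
   five-entry per-mode cost arrays in the tables below (e.g. the "cost of
   starting a multiply" and "cost of a divide/mod" rows), so the SImode entry
   is always the third element.  */
#if 0
int qi_col = MODE_INDEX (QImode);	/* 0 */
int si_col = MODE_INDEX (SImode);	/* 2 */
int di_col = MODE_INDEX (DImode);	/* 3 */
#endif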
62 /* Processor costs (relative to an add) */
63 static const
 64 struct processor_costs size_cost = { /* costs for tuning for size */
65 2, /* cost of an add instruction */
66 3, /* cost of a lea instruction */
67 2, /* variable shift costs */
68 3, /* constant shift costs */
69 {3, 3, 3, 3, 5}, /* cost of starting a multiply */
70 0, /* cost of multiply per each bit set */
71 {3, 3, 3, 3, 5}, /* cost of a divide/mod */
72 3, /* cost of movsx */
73 3, /* cost of movzx */
74 0, /* "large" insn */
75 2, /* MOVE_RATIO */
76 2, /* cost for loading QImode using movzbl */
77 {2, 2, 2}, /* cost of loading integer registers
78 in QImode, HImode and SImode.
79 Relative to reg-reg move (2). */
80 {2, 2, 2}, /* cost of storing integer registers */
81 2, /* cost of reg,reg fld/fst */
82 {2, 2, 2}, /* cost of loading fp registers
83 in SFmode, DFmode and XFmode */
 84 {2, 2, 2}, /* cost of storing fp registers */
85 3, /* cost of moving MMX register */
86 {3, 3}, /* cost of loading MMX registers
87 in SImode and DImode */
88 {3, 3}, /* cost of storing MMX registers
89 in SImode and DImode */
90 3, /* cost of moving SSE register */
91 {3, 3, 3}, /* cost of loading SSE registers
92 in SImode, DImode and TImode */
93 {3, 3, 3}, /* cost of storing SSE registers
94 in SImode, DImode and TImode */
95 3, /* MMX or SSE register to integer */
96 0, /* size of prefetch block */
97 0, /* number of parallel prefetches */
98 1, /* Branch cost */
99 2, /* cost of FADD and FSUB insns. */
100 2, /* cost of FMUL instruction. */
101 2, /* cost of FDIV instruction. */
102 2, /* cost of FABS instruction. */
103 2, /* cost of FCHS instruction. */
104 2, /* cost of FSQRT instruction. */
107 /* Processor costs (relative to an add) */
108 static const
109 struct processor_costs i386_cost = { /* 386 specific costs */
110 1, /* cost of an add instruction */
111 1, /* cost of a lea instruction */
112 3, /* variable shift costs */
113 2, /* constant shift costs */
114 {6, 6, 6, 6, 6}, /* cost of starting a multiply */
115 1, /* cost of multiply per each bit set */
116 {23, 23, 23, 23, 23}, /* cost of a divide/mod */
117 3, /* cost of movsx */
118 2, /* cost of movzx */
119 15, /* "large" insn */
120 3, /* MOVE_RATIO */
121 4, /* cost for loading QImode using movzbl */
122 {2, 4, 2}, /* cost of loading integer registers
123 in QImode, HImode and SImode.
124 Relative to reg-reg move (2). */
125 {2, 4, 2}, /* cost of storing integer registers */
126 2, /* cost of reg,reg fld/fst */
127 {8, 8, 8}, /* cost of loading fp registers
128 in SFmode, DFmode and XFmode */
 129 {8, 8, 8}, /* cost of storing fp registers */
130 2, /* cost of moving MMX register */
131 {4, 8}, /* cost of loading MMX registers
132 in SImode and DImode */
133 {4, 8}, /* cost of storing MMX registers
134 in SImode and DImode */
135 2, /* cost of moving SSE register */
136 {4, 8, 16}, /* cost of loading SSE registers
137 in SImode, DImode and TImode */
138 {4, 8, 16}, /* cost of storing SSE registers
139 in SImode, DImode and TImode */
140 3, /* MMX or SSE register to integer */
141 0, /* size of prefetch block */
142 0, /* number of parallel prefetches */
143 1, /* Branch cost */
144 23, /* cost of FADD and FSUB insns. */
145 27, /* cost of FMUL instruction. */
146 88, /* cost of FDIV instruction. */
147 22, /* cost of FABS instruction. */
148 24, /* cost of FCHS instruction. */
149 122, /* cost of FSQRT instruction. */
152 static const
153 struct processor_costs i486_cost = { /* 486 specific costs */
154 1, /* cost of an add instruction */
155 1, /* cost of a lea instruction */
156 3, /* variable shift costs */
157 2, /* constant shift costs */
158 {12, 12, 12, 12, 12}, /* cost of starting a multiply */
159 1, /* cost of multiply per each bit set */
160 {40, 40, 40, 40, 40}, /* cost of a divide/mod */
161 3, /* cost of movsx */
162 2, /* cost of movzx */
163 15, /* "large" insn */
164 3, /* MOVE_RATIO */
165 4, /* cost for loading QImode using movzbl */
166 {2, 4, 2}, /* cost of loading integer registers
167 in QImode, HImode and SImode.
168 Relative to reg-reg move (2). */
169 {2, 4, 2}, /* cost of storing integer registers */
170 2, /* cost of reg,reg fld/fst */
171 {8, 8, 8}, /* cost of loading fp registers
172 in SFmode, DFmode and XFmode */
 173 {8, 8, 8}, /* cost of storing fp registers */
174 2, /* cost of moving MMX register */
175 {4, 8}, /* cost of loading MMX registers
176 in SImode and DImode */
177 {4, 8}, /* cost of storing MMX registers
178 in SImode and DImode */
179 2, /* cost of moving SSE register */
180 {4, 8, 16}, /* cost of loading SSE registers
181 in SImode, DImode and TImode */
182 {4, 8, 16}, /* cost of storing SSE registers
183 in SImode, DImode and TImode */
184 3, /* MMX or SSE register to integer */
185 0, /* size of prefetch block */
186 0, /* number of parallel prefetches */
187 1, /* Branch cost */
188 8, /* cost of FADD and FSUB insns. */
189 16, /* cost of FMUL instruction. */
190 73, /* cost of FDIV instruction. */
191 3, /* cost of FABS instruction. */
192 3, /* cost of FCHS instruction. */
193 83, /* cost of FSQRT instruction. */
196 static const
197 struct processor_costs pentium_cost = {
198 1, /* cost of an add instruction */
199 1, /* cost of a lea instruction */
200 4, /* variable shift costs */
201 1, /* constant shift costs */
202 {11, 11, 11, 11, 11}, /* cost of starting a multiply */
203 0, /* cost of multiply per each bit set */
204 {25, 25, 25, 25, 25}, /* cost of a divide/mod */
205 3, /* cost of movsx */
206 2, /* cost of movzx */
207 8, /* "large" insn */
208 6, /* MOVE_RATIO */
209 6, /* cost for loading QImode using movzbl */
210 {2, 4, 2}, /* cost of loading integer registers
211 in QImode, HImode and SImode.
212 Relative to reg-reg move (2). */
213 {2, 4, 2}, /* cost of storing integer registers */
214 2, /* cost of reg,reg fld/fst */
215 {2, 2, 6}, /* cost of loading fp registers
216 in SFmode, DFmode and XFmode */
 217 {4, 4, 6}, /* cost of storing fp registers */
218 8, /* cost of moving MMX register */
219 {8, 8}, /* cost of loading MMX registers
220 in SImode and DImode */
221 {8, 8}, /* cost of storing MMX registers
222 in SImode and DImode */
223 2, /* cost of moving SSE register */
224 {4, 8, 16}, /* cost of loading SSE registers
225 in SImode, DImode and TImode */
226 {4, 8, 16}, /* cost of storing SSE registers
227 in SImode, DImode and TImode */
228 3, /* MMX or SSE register to integer */
229 0, /* size of prefetch block */
230 0, /* number of parallel prefetches */
231 2, /* Branch cost */
232 3, /* cost of FADD and FSUB insns. */
233 3, /* cost of FMUL instruction. */
234 39, /* cost of FDIV instruction. */
235 1, /* cost of FABS instruction. */
236 1, /* cost of FCHS instruction. */
237 70, /* cost of FSQRT instruction. */
240 static const
241 struct processor_costs pentiumpro_cost = {
242 1, /* cost of an add instruction */
243 1, /* cost of a lea instruction */
244 1, /* variable shift costs */
245 1, /* constant shift costs */
246 {4, 4, 4, 4, 4}, /* cost of starting a multiply */
247 0, /* cost of multiply per each bit set */
248 {17, 17, 17, 17, 17}, /* cost of a divide/mod */
249 1, /* cost of movsx */
250 1, /* cost of movzx */
251 8, /* "large" insn */
252 6, /* MOVE_RATIO */
253 2, /* cost for loading QImode using movzbl */
254 {4, 4, 4}, /* cost of loading integer registers
255 in QImode, HImode and SImode.
256 Relative to reg-reg move (2). */
257 {2, 2, 2}, /* cost of storing integer registers */
258 2, /* cost of reg,reg fld/fst */
259 {2, 2, 6}, /* cost of loading fp registers
260 in SFmode, DFmode and XFmode */
 261 {4, 4, 6}, /* cost of storing fp registers */
262 2, /* cost of moving MMX register */
263 {2, 2}, /* cost of loading MMX registers
264 in SImode and DImode */
265 {2, 2}, /* cost of storing MMX registers
266 in SImode and DImode */
267 2, /* cost of moving SSE register */
268 {2, 2, 8}, /* cost of loading SSE registers
269 in SImode, DImode and TImode */
270 {2, 2, 8}, /* cost of storing SSE registers
271 in SImode, DImode and TImode */
272 3, /* MMX or SSE register to integer */
273 32, /* size of prefetch block */
274 6, /* number of parallel prefetches */
275 2, /* Branch cost */
276 3, /* cost of FADD and FSUB insns. */
277 5, /* cost of FMUL instruction. */
278 56, /* cost of FDIV instruction. */
279 2, /* cost of FABS instruction. */
280 2, /* cost of FCHS instruction. */
281 56, /* cost of FSQRT instruction. */
284 static const
285 struct processor_costs k6_cost = {
286 1, /* cost of an add instruction */
287 2, /* cost of a lea instruction */
288 1, /* variable shift costs */
289 1, /* constant shift costs */
290 {3, 3, 3, 3, 3}, /* cost of starting a multiply */
291 0, /* cost of multiply per each bit set */
292 {18, 18, 18, 18, 18}, /* cost of a divide/mod */
293 2, /* cost of movsx */
294 2, /* cost of movzx */
295 8, /* "large" insn */
296 4, /* MOVE_RATIO */
297 3, /* cost for loading QImode using movzbl */
298 {4, 5, 4}, /* cost of loading integer registers
299 in QImode, HImode and SImode.
300 Relative to reg-reg move (2). */
301 {2, 3, 2}, /* cost of storing integer registers */
302 4, /* cost of reg,reg fld/fst */
303 {6, 6, 6}, /* cost of loading fp registers
304 in SFmode, DFmode and XFmode */
 305 {4, 4, 4}, /* cost of storing fp registers */
306 2, /* cost of moving MMX register */
307 {2, 2}, /* cost of loading MMX registers
308 in SImode and DImode */
309 {2, 2}, /* cost of storing MMX registers
310 in SImode and DImode */
311 2, /* cost of moving SSE register */
312 {2, 2, 8}, /* cost of loading SSE registers
313 in SImode, DImode and TImode */
314 {2, 2, 8}, /* cost of storing SSE registers
315 in SImode, DImode and TImode */
316 6, /* MMX or SSE register to integer */
317 32, /* size of prefetch block */
318 1, /* number of parallel prefetches */
319 1, /* Branch cost */
320 2, /* cost of FADD and FSUB insns. */
321 2, /* cost of FMUL instruction. */
322 56, /* cost of FDIV instruction. */
323 2, /* cost of FABS instruction. */
324 2, /* cost of FCHS instruction. */
325 56, /* cost of FSQRT instruction. */
328 static const
329 struct processor_costs athlon_cost = {
330 1, /* cost of an add instruction */
331 2, /* cost of a lea instruction */
332 1, /* variable shift costs */
333 1, /* constant shift costs */
334 {5, 5, 5, 5, 5}, /* cost of starting a multiply */
335 0, /* cost of multiply per each bit set */
336 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
337 1, /* cost of movsx */
338 1, /* cost of movzx */
339 8, /* "large" insn */
340 9, /* MOVE_RATIO */
341 4, /* cost for loading QImode using movzbl */
342 {3, 4, 3}, /* cost of loading integer registers
343 in QImode, HImode and SImode.
344 Relative to reg-reg move (2). */
345 {3, 4, 3}, /* cost of storing integer registers */
346 4, /* cost of reg,reg fld/fst */
347 {4, 4, 12}, /* cost of loading fp registers
348 in SFmode, DFmode and XFmode */
 349 {6, 6, 8}, /* cost of storing fp registers */
350 2, /* cost of moving MMX register */
351 {4, 4}, /* cost of loading MMX registers
352 in SImode and DImode */
353 {4, 4}, /* cost of storing MMX registers
354 in SImode and DImode */
355 2, /* cost of moving SSE register */
356 {4, 4, 6}, /* cost of loading SSE registers
357 in SImode, DImode and TImode */
358 {4, 4, 5}, /* cost of storing SSE registers
359 in SImode, DImode and TImode */
360 5, /* MMX or SSE register to integer */
361 64, /* size of prefetch block */
362 6, /* number of parallel prefetches */
363 2, /* Branch cost */
364 4, /* cost of FADD and FSUB insns. */
365 4, /* cost of FMUL instruction. */
366 24, /* cost of FDIV instruction. */
367 2, /* cost of FABS instruction. */
368 2, /* cost of FCHS instruction. */
369 35, /* cost of FSQRT instruction. */
372 static const
373 struct processor_costs k8_cost = {
374 1, /* cost of an add instruction */
375 2, /* cost of a lea instruction */
376 1, /* variable shift costs */
377 1, /* constant shift costs */
378 {3, 4, 3, 4, 5}, /* cost of starting a multiply */
379 0, /* cost of multiply per each bit set */
380 {18, 26, 42, 74, 74}, /* cost of a divide/mod */
381 1, /* cost of movsx */
382 1, /* cost of movzx */
383 8, /* "large" insn */
384 9, /* MOVE_RATIO */
385 4, /* cost for loading QImode using movzbl */
386 {3, 4, 3}, /* cost of loading integer registers
387 in QImode, HImode and SImode.
388 Relative to reg-reg move (2). */
389 {3, 4, 3}, /* cost of storing integer registers */
390 4, /* cost of reg,reg fld/fst */
391 {4, 4, 12}, /* cost of loading fp registers
392 in SFmode, DFmode and XFmode */
 393 {6, 6, 8}, /* cost of storing fp registers */
394 2, /* cost of moving MMX register */
395 {3, 3}, /* cost of loading MMX registers
396 in SImode and DImode */
397 {4, 4}, /* cost of storing MMX registers
398 in SImode and DImode */
399 2, /* cost of moving SSE register */
400 {4, 3, 6}, /* cost of loading SSE registers
401 in SImode, DImode and TImode */
402 {4, 4, 5}, /* cost of storing SSE registers
403 in SImode, DImode and TImode */
404 5, /* MMX or SSE register to integer */
405 64, /* size of prefetch block */
406 6, /* number of parallel prefetches */
407 2, /* Branch cost */
408 4, /* cost of FADD and FSUB insns. */
409 4, /* cost of FMUL instruction. */
410 19, /* cost of FDIV instruction. */
411 2, /* cost of FABS instruction. */
412 2, /* cost of FCHS instruction. */
413 35, /* cost of FSQRT instruction. */
416 static const
417 struct processor_costs pentium4_cost = {
418 1, /* cost of an add instruction */
419 1, /* cost of a lea instruction */
420 4, /* variable shift costs */
421 4, /* constant shift costs */
422 {15, 15, 15, 15, 15}, /* cost of starting a multiply */
423 0, /* cost of multiply per each bit set */
424 {56, 56, 56, 56, 56}, /* cost of a divide/mod */
425 1, /* cost of movsx */
426 1, /* cost of movzx */
427 16, /* "large" insn */
428 6, /* MOVE_RATIO */
429 2, /* cost for loading QImode using movzbl */
430 {4, 5, 4}, /* cost of loading integer registers
431 in QImode, HImode and SImode.
432 Relative to reg-reg move (2). */
433 {2, 3, 2}, /* cost of storing integer registers */
434 2, /* cost of reg,reg fld/fst */
435 {2, 2, 6}, /* cost of loading fp registers
436 in SFmode, DFmode and XFmode */
 437 {4, 4, 6}, /* cost of storing fp registers */
438 2, /* cost of moving MMX register */
439 {2, 2}, /* cost of loading MMX registers
440 in SImode and DImode */
441 {2, 2}, /* cost of storing MMX registers
442 in SImode and DImode */
443 12, /* cost of moving SSE register */
444 {12, 12, 12}, /* cost of loading SSE registers
445 in SImode, DImode and TImode */
446 {2, 2, 8}, /* cost of storing SSE registers
447 in SImode, DImode and TImode */
448 10, /* MMX or SSE register to integer */
449 64, /* size of prefetch block */
450 6, /* number of parallel prefetches */
451 2, /* Branch cost */
452 5, /* cost of FADD and FSUB insns. */
453 7, /* cost of FMUL instruction. */
454 43, /* cost of FDIV instruction. */
455 2, /* cost of FABS instruction. */
456 2, /* cost of FCHS instruction. */
457 43, /* cost of FSQRT instruction. */
460 const struct processor_costs *ix86_cost = &pentium_cost;
462 /* Processor feature/optimization bitmasks. */
463 #define m_386 (1<<PROCESSOR_I386)
464 #define m_486 (1<<PROCESSOR_I486)
465 #define m_PENT (1<<PROCESSOR_PENTIUM)
466 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
467 #define m_K6 (1<<PROCESSOR_K6)
468 #define m_ATHLON (1<<PROCESSOR_ATHLON)
469 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
470 #define m_K8 (1<<PROCESSOR_K8)
471 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
473 const int x86_use_leave = m_386 | m_K6 | m_ATHLON_K8;
474 const int x86_push_memory = m_386 | m_K6 | m_ATHLON_K8 | m_PENT4;
475 const int x86_zero_extend_with_and = m_486 | m_PENT;
476 const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 /* m_386 | m_K6 */;
477 const int x86_double_with_add = ~m_386;
478 const int x86_use_bit_test = m_386;
479 const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6;
480 const int x86_cmove = m_PPRO | m_ATHLON_K8 | m_PENT4;
481 const int x86_3dnow_a = m_ATHLON_K8;
482 const int x86_deep_branch = m_PPRO | m_K6 | m_ATHLON_K8 | m_PENT4;
483 const int x86_branch_hints = m_PENT4;
484 const int x86_use_sahf = m_PPRO | m_K6 | m_PENT4;
485 const int x86_partial_reg_stall = m_PPRO;
486 const int x86_use_loop = m_K6;
487 const int x86_use_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT);
488 const int x86_use_mov0 = m_K6;
489 const int x86_use_cltd = ~(m_PENT | m_K6);
490 const int x86_read_modify_write = ~m_PENT;
491 const int x86_read_modify = ~(m_PENT | m_PPRO);
492 const int x86_split_long_moves = m_PPRO;
493 const int x86_promote_QImode = m_K6 | m_PENT | m_386 | m_486 | m_ATHLON_K8;
494 const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
495 const int x86_single_stringop = m_386 | m_PENT4;
496 const int x86_qimode_math = ~(0);
497 const int x86_promote_qi_regs = 0;
498 const int x86_himode_math = ~(m_PPRO);
499 const int x86_promote_hi_regs = m_PPRO;
500 const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4;
501 const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4;
502 const int x86_add_esp_4 = m_ATHLON_K8 | m_K6 | m_PENT4;
503 const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6 | m_386 | m_486 | m_PENT4;
504 const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_PPRO);
505 const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4;
506 const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4;
507 const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_PPRO;
508 const int x86_prologue_using_move = m_ATHLON_K8 | m_PENT4 | m_PPRO;
509 const int x86_epilogue_using_move = m_ATHLON_K8 | m_PENT4 | m_PPRO;
510 const int x86_decompose_lea = m_PENT4;
511 const int x86_shift1 = ~m_486;
512 const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4;
513 const int x86_sse_partial_reg_dependency = m_PENT4 | m_PPRO;
 514 /* Set for machines where the type and dependencies are resolved on SSE register
 515 parts instead of whole registers, so we may maintain just the lower part of
 516 scalar values in the proper format, leaving the upper part undefined. */
517 const int x86_sse_partial_regs = m_ATHLON_K8;
518 /* Athlon optimizes partial-register FPS special case, thus avoiding the
519 need for extra instructions beforehand */
520 const int x86_sse_partial_regs_for_cvtsd2ss = 0;
521 const int x86_sse_typeless_stores = m_ATHLON_K8;
522 const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4;
523 const int x86_use_ffreep = m_ATHLON_K8;
524 const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6;
525 const int x86_inter_unit_moves = ~(m_ATHLON_K8);
526 const int x86_ext_80387_constants = m_K6 | m_ATHLON | m_PENT4 | m_PPRO;
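/* Usage sketch: each feature word above is a bitmask over the PROCESSOR_*
   bits defined just before it; a feature is tested by ANDing the mask with
   the bit of the processor selected by -march (ix86_arch) or -mtune
   (ix86_tune), as override_options does further down, e.g.:  */
#if 0
  if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
    /* the selected architecture always has a 387 available */ ;
#endif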
 528 /* In case the average insn count for a single function invocation is
 529 lower than this constant, emit fast (but longer) prologue and
 530 epilogue code. */
531 #define FAST_PROLOGUE_INSN_COUNT 20
 533 /* Names for the 8-bit (low), 8-bit (high), and 16-bit registers, respectively. */
534 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
535 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
536 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
538 /* Array of the smallest class containing reg number REGNO, indexed by
539 REGNO. Used by REGNO_REG_CLASS in i386.h. */
541 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
543 /* ax, dx, cx, bx */
544 AREG, DREG, CREG, BREG,
545 /* si, di, bp, sp */
546 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
547 /* FP registers */
548 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
549 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
550 /* arg pointer */
551 NON_Q_REGS,
552 /* flags, fpsr, dirflag, frame */
553 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
554 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
555 SSE_REGS, SSE_REGS,
556 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
557 MMX_REGS, MMX_REGS,
558 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
559 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
560 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
561 SSE_REGS, SSE_REGS,
564 /* The "default" register map used in 32bit mode. */
566 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
568 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
569 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
570 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
571 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
572 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
573 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
574 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
577 static int const x86_64_int_parameter_registers[6] =
579 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
580 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
583 static int const x86_64_int_return_registers[4] =
 585 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
588 /* The "default" register map used in 64bit mode. */
589 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
591 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
592 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
593 -1, -1, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
594 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
595 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
596 8,9,10,11,12,13,14,15, /* extended integer registers */
597 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
600 /* Define the register numbers to be used in Dwarf debugging information.
601 The SVR4 reference port C compiler uses the following register numbers
602 in its Dwarf output code:
603 0 for %eax (gcc regno = 0)
604 1 for %ecx (gcc regno = 2)
605 2 for %edx (gcc regno = 1)
606 3 for %ebx (gcc regno = 3)
607 4 for %esp (gcc regno = 7)
608 5 for %ebp (gcc regno = 6)
609 6 for %esi (gcc regno = 4)
610 7 for %edi (gcc regno = 5)
611 The following three DWARF register numbers are never generated by
612 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
613 believes these numbers have these meanings.
614 8 for %eip (no gcc equivalent)
615 9 for %eflags (gcc regno = 17)
616 10 for %trapno (no gcc equivalent)
617 It is not at all clear how we should number the FP stack registers
618 for the x86 architecture. If the version of SDB on x86/svr4 were
619 a bit less brain dead with respect to floating-point then we would
620 have a precedent to follow with respect to DWARF register numbers
621 for x86 FP registers, but the SDB on x86/svr4 is so completely
622 broken with respect to FP registers that it is hardly worth thinking
623 of it as something to strive for compatibility with.
624 The version of x86/svr4 SDB I have at the moment does (partially)
625 seem to believe that DWARF register number 11 is associated with
626 the x86 register %st(0), but that's about all. Higher DWARF
627 register numbers don't seem to be associated with anything in
628 particular, and even for DWARF regno 11, SDB only seems to under-
629 stand that it should say that a variable lives in %st(0) (when
630 asked via an `=' command) if we said it was in DWARF regno 11,
631 but SDB still prints garbage when asked for the value of the
632 variable in question (via a `/' command).
633 (Also note that the labels SDB prints for various FP stack regs
634 when doing an `x' command are all wrong.)
635 Note that these problems generally don't affect the native SVR4
636 C compiler because it doesn't allow the use of -O with -g and
637 because when it is *not* optimizing, it allocates a memory
638 location for each floating-point variable, and the memory
639 location is what gets described in the DWARF AT_location
640 attribute for the variable in question.
641 Regardless of the severe mental illness of the x86/svr4 SDB, we
642 do something sensible here and we use the following DWARF
643 register numbers. Note that these are all stack-top-relative
644 numbers.
645 11 for %st(0) (gcc regno = 8)
646 12 for %st(1) (gcc regno = 9)
647 13 for %st(2) (gcc regno = 10)
648 14 for %st(3) (gcc regno = 11)
649 15 for %st(4) (gcc regno = 12)
650 16 for %st(5) (gcc regno = 13)
651 17 for %st(6) (gcc regno = 14)
652 18 for %st(7) (gcc regno = 15)
654 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
656 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
657 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
658 -1, 9, -1, -1, -1, /* arg, flags, fpsr, dir, frame */
659 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
660 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
661 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
662 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
665 /* Test and compare insns in i386.md store the information needed to
666 generate branch and scc insns here. */
668 rtx ix86_compare_op0 = NULL_RTX;
669 rtx ix86_compare_op1 = NULL_RTX;
671 #define MAX_386_STACK_LOCALS 3
672 /* Size of the register save area. */
673 #define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
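/* Worked example, assuming the 64-bit values REGPARM_MAX == 6,
   UNITS_PER_WORD == 8 and SSE_REGPARM_MAX == 8 from i386.h: the register
   save area is 6 * 8 + 8 * 16 = 176 bytes.  */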
675 /* Define the structure for the machine field in struct function. */
677 struct stack_local_entry GTY(())
679 unsigned short mode;
680 unsigned short n;
681 rtx rtl;
682 struct stack_local_entry *next;
685 /* Structure describing stack frame layout.
686 Stack grows downward:
688 [arguments]
689 <- ARG_POINTER
690 saved pc
692 saved frame pointer if frame_pointer_needed
693 <- HARD_FRAME_POINTER
694 [saved regs]
696 [padding1] \
698 [va_arg registers] (
699 > to_allocate <- FRAME_POINTER
700 [frame] (
702 [padding2] /
704 struct ix86_frame
706 int nregs;
707 int padding1;
708 int va_arg_size;
709 HOST_WIDE_INT frame;
710 int padding2;
711 int outgoing_arguments_size;
712 int red_zone_size;
714 HOST_WIDE_INT to_allocate;
715 /* The offsets relative to ARG_POINTER. */
716 HOST_WIDE_INT frame_pointer_offset;
717 HOST_WIDE_INT hard_frame_pointer_offset;
718 HOST_WIDE_INT stack_pointer_offset;
720 /* When save_regs_using_mov is set, emit prologue using
721 move instead of push instructions. */
722 bool save_regs_using_mov;
725 /* Used to enable/disable debugging features. */
726 const char *ix86_debug_arg_string, *ix86_debug_addr_string;
727 /* Code model option as passed by user. */
728 const char *ix86_cmodel_string;
729 /* Parsed value. */
730 enum cmodel ix86_cmodel;
731 /* Asm dialect. */
732 const char *ix86_asm_string;
733 enum asm_dialect ix86_asm_dialect = ASM_ATT;
 734 /* TLS dialect. */
735 const char *ix86_tls_dialect_string;
736 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
738 /* Which unit we are generating floating point math for. */
739 enum fpmath_unit ix86_fpmath;
 741 /* Which CPU we are scheduling for. */
742 enum processor_type ix86_tune;
743 /* Which instruction set architecture to use. */
744 enum processor_type ix86_arch;
746 /* Strings to hold which cpu and instruction set architecture to use. */
747 const char *ix86_tune_string; /* for -mtune=<xxx> */
748 const char *ix86_arch_string; /* for -march=<xxx> */
749 const char *ix86_fpmath_string; /* for -mfpmath=<xxx> */
751 /* # of registers to use to pass arguments. */
752 const char *ix86_regparm_string;
 754 /* True if the SSE prefetch instruction is not a NOP. */
755 int x86_prefetch_sse;
757 /* ix86_regparm_string as a number */
758 int ix86_regparm;
760 /* Alignment to use for loops and jumps: */
762 /* Power of two alignment for loops. */
763 const char *ix86_align_loops_string;
765 /* Power of two alignment for non-loop jumps. */
766 const char *ix86_align_jumps_string;
768 /* Power of two alignment for stack boundary in bytes. */
769 const char *ix86_preferred_stack_boundary_string;
771 /* Preferred alignment for stack boundary in bits. */
772 int ix86_preferred_stack_boundary;
774 /* Values 1-5: see jump.c */
775 int ix86_branch_cost;
776 const char *ix86_branch_cost_string;
778 /* Power of two alignment for functions. */
779 const char *ix86_align_funcs_string;
781 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
782 static char internal_label_prefix[16];
783 static int internal_label_prefix_len;
785 static int local_symbolic_operand (rtx, enum machine_mode);
786 static int tls_symbolic_operand_1 (rtx, enum tls_model);
787 static void output_pic_addr_const (FILE *, rtx, int);
788 static void put_condition_code (enum rtx_code, enum machine_mode,
789 int, int, FILE *);
790 static const char *get_some_local_dynamic_name (void);
791 static int get_some_local_dynamic_name_1 (rtx *, void *);
792 static rtx maybe_get_pool_constant (rtx);
793 static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
794 static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
795 rtx *);
796 static rtx get_thread_pointer (int);
797 static rtx legitimize_tls_address (rtx, enum tls_model, int);
798 static void get_pc_thunk_name (char [32], unsigned int);
799 static rtx gen_push (rtx);
800 static int memory_address_length (rtx addr);
801 static int ix86_flags_dependant (rtx, rtx, enum attr_type);
802 static int ix86_agi_dependant (rtx, rtx, enum attr_type);
803 static enum attr_ppro_uops ix86_safe_ppro_uops (rtx);
804 static void ix86_dump_ppro_packet (FILE *);
805 static void ix86_reorder_insn (rtx *, rtx *);
806 static struct machine_function * ix86_init_machine_status (void);
807 static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
808 static int ix86_nsaved_regs (void);
809 static void ix86_emit_save_regs (void);
810 static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
811 static void ix86_emit_restore_regs_using_mov (rtx, int, int);
812 static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
813 static void ix86_set_move_mem_attrs_1 (rtx, rtx, rtx, rtx, rtx);
814 static void ix86_sched_reorder_ppro (rtx *, rtx *);
815 static HOST_WIDE_INT ix86_GOT_alias_set (void);
816 static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
817 static rtx ix86_expand_aligntest (rtx, int);
818 static void ix86_expand_strlensi_unroll_1 (rtx, rtx);
819 static int ix86_issue_rate (void);
820 static int ix86_adjust_cost (rtx, rtx, rtx, int);
821 static void ix86_sched_init (FILE *, int, int);
822 static int ix86_sched_reorder (FILE *, int, rtx *, int *, int);
823 static int ix86_variable_issue (FILE *, int, rtx, int);
824 static int ia32_use_dfa_pipeline_interface (void);
825 static int ia32_multipass_dfa_lookahead (void);
826 static void ix86_init_mmx_sse_builtins (void);
827 static rtx x86_this_parameter (tree);
828 static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
829 HOST_WIDE_INT, tree);
830 static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
831 static void x86_file_start (void);
832 static void ix86_reorg (void);
833 static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
834 static tree ix86_build_builtin_va_list (void);
836 struct ix86_address
838 rtx base, index, disp;
839 HOST_WIDE_INT scale;
840 enum ix86_address_seg { SEG_DEFAULT, SEG_FS, SEG_GS } seg;
843 static int ix86_decompose_address (rtx, struct ix86_address *);
844 static int ix86_address_cost (rtx);
845 static bool ix86_cannot_force_const_mem (rtx);
846 static rtx ix86_delegitimize_address (rtx);
848 struct builtin_description;
849 static rtx ix86_expand_sse_comi (const struct builtin_description *,
850 tree, rtx);
851 static rtx ix86_expand_sse_compare (const struct builtin_description *,
852 tree, rtx);
853 static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
854 static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
855 static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
856 static rtx ix86_expand_store_builtin (enum insn_code, tree);
857 static rtx safe_vector_operand (rtx, enum machine_mode);
858 static enum rtx_code ix86_fp_compare_code_to_integer (enum rtx_code);
859 static void ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *,
860 enum rtx_code *, enum rtx_code *);
861 static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
862 static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
863 static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
864 static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
865 static int ix86_fp_comparison_cost (enum rtx_code code);
866 static unsigned int ix86_select_alt_pic_regnum (void);
867 static int ix86_save_reg (unsigned int, int);
868 static void ix86_compute_frame_layout (struct ix86_frame *);
869 static int ix86_comp_type_attributes (tree, tree);
870 static int ix86_function_regparm (tree, tree);
871 const struct attribute_spec ix86_attribute_table[];
872 static bool ix86_function_ok_for_sibcall (tree, tree);
873 static tree ix86_handle_cdecl_attribute (tree *, tree, tree, int, bool *);
874 static tree ix86_handle_regparm_attribute (tree *, tree, tree, int, bool *);
875 static int ix86_value_regno (enum machine_mode);
876 static bool contains_128bit_aligned_vector_p (tree);
877 static bool ix86_ms_bitfield_layout_p (tree);
878 static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
879 static int extended_reg_mentioned_1 (rtx *, void *);
880 static bool ix86_rtx_costs (rtx, int, int, int *);
881 static int min_insn_size (rtx);
882 static void k8_avoid_jump_misspredicts (void);
884 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
885 static void ix86_svr3_asm_out_constructor (rtx, int);
886 #endif
 888 /* Register class used for passing a given 64-bit part of the argument.
 889 These represent classes as documented by the psABI, with the exception of
 890 the SSESF and SSEDF classes, which are basically the SSE class; gcc just uses
 891 SFmode or DFmode moves instead of DImode moves to avoid reformatting penalties.
 893 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
 894 whenever possible (the upper half then contains only padding).
896 enum x86_64_reg_class
898 X86_64_NO_CLASS,
899 X86_64_INTEGER_CLASS,
900 X86_64_INTEGERSI_CLASS,
901 X86_64_SSE_CLASS,
902 X86_64_SSESF_CLASS,
903 X86_64_SSEDF_CLASS,
904 X86_64_SSEUP_CLASS,
905 X86_64_X87_CLASS,
906 X86_64_X87UP_CLASS,
907 X86_64_MEMORY_CLASS
909 static const char * const x86_64_reg_class_name[] =
910 {"no", "integer", "integerSI", "sse", "sseSF", "sseDF", "sseup", "x87", "x87up", "no"};
912 #define MAX_CLASSES 4
913 static int classify_argument (enum machine_mode, tree,
914 enum x86_64_reg_class [MAX_CLASSES], int);
915 static int examine_argument (enum machine_mode, tree, int, int *, int *);
916 static rtx construct_container (enum machine_mode, tree, int, int, int,
917 const int *, int);
918 static enum x86_64_reg_class merge_classes (enum x86_64_reg_class,
919 enum x86_64_reg_class);
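/* Worked example of the classification these routines implement (a sketch
   following the x86-64 psABI, not code from this file): a 16-byte struct of
   two 64-bit integers is split into two eightbytes, each classified
   X86_64_INTEGER_CLASS, so construct_container passes it in two general
   registers; a lone double is classified X86_64_SSEDF_CLASS and passed in an
   SSE register.  */
#if 0
struct two_longs { long a; long b; };	/* classes[0] == classes[1] == X86_64_INTEGER_CLASS */
#endif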
921 /* Table of constants used by fldpi, fldln2, etc.... */
922 static REAL_VALUE_TYPE ext_80387_constants_table [5];
923 static bool ext_80387_constants_init = 0;
924 static void init_ext_80387_constants (void);
926 /* Initialize the GCC target structure. */
927 #undef TARGET_ATTRIBUTE_TABLE
928 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
929 #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
930 # undef TARGET_MERGE_DECL_ATTRIBUTES
931 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
932 #endif
934 #undef TARGET_COMP_TYPE_ATTRIBUTES
935 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
937 #undef TARGET_INIT_BUILTINS
938 #define TARGET_INIT_BUILTINS ix86_init_builtins
940 #undef TARGET_EXPAND_BUILTIN
941 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
943 #undef TARGET_ASM_FUNCTION_EPILOGUE
944 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
946 #undef TARGET_ASM_OPEN_PAREN
947 #define TARGET_ASM_OPEN_PAREN ""
948 #undef TARGET_ASM_CLOSE_PAREN
949 #define TARGET_ASM_CLOSE_PAREN ""
951 #undef TARGET_ASM_ALIGNED_HI_OP
952 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
953 #undef TARGET_ASM_ALIGNED_SI_OP
954 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
955 #ifdef ASM_QUAD
956 #undef TARGET_ASM_ALIGNED_DI_OP
957 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
958 #endif
960 #undef TARGET_ASM_UNALIGNED_HI_OP
961 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
962 #undef TARGET_ASM_UNALIGNED_SI_OP
963 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
964 #undef TARGET_ASM_UNALIGNED_DI_OP
965 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
967 #undef TARGET_SCHED_ADJUST_COST
968 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
969 #undef TARGET_SCHED_ISSUE_RATE
970 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
971 #undef TARGET_SCHED_VARIABLE_ISSUE
972 #define TARGET_SCHED_VARIABLE_ISSUE ix86_variable_issue
973 #undef TARGET_SCHED_INIT
974 #define TARGET_SCHED_INIT ix86_sched_init
975 #undef TARGET_SCHED_REORDER
976 #define TARGET_SCHED_REORDER ix86_sched_reorder
977 #undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
978 #define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE \
979 ia32_use_dfa_pipeline_interface
980 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
981 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
982 ia32_multipass_dfa_lookahead
984 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
985 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
987 #ifdef HAVE_AS_TLS
988 #undef TARGET_HAVE_TLS
989 #define TARGET_HAVE_TLS true
990 #endif
991 #undef TARGET_CANNOT_FORCE_CONST_MEM
992 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
994 #undef TARGET_DELEGITIMIZE_ADDRESS
995 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
997 #undef TARGET_MS_BITFIELD_LAYOUT_P
998 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1000 #undef TARGET_ASM_OUTPUT_MI_THUNK
1001 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1002 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1003 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1005 #undef TARGET_ASM_FILE_START
1006 #define TARGET_ASM_FILE_START x86_file_start
1008 #undef TARGET_RTX_COSTS
1009 #define TARGET_RTX_COSTS ix86_rtx_costs
1010 #undef TARGET_ADDRESS_COST
1011 #define TARGET_ADDRESS_COST ix86_address_cost
1013 #undef TARGET_MACHINE_DEPENDENT_REORG
1014 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1016 #undef TARGET_BUILD_BUILTIN_VA_LIST
1017 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1019 struct gcc_target targetm = TARGET_INITIALIZER;
1021 /* The svr4 ABI for the i386 says that records and unions are returned
1022 in memory. */
1023 #ifndef DEFAULT_PCC_STRUCT_RETURN
1024 #define DEFAULT_PCC_STRUCT_RETURN 1
1025 #endif
1027 /* Sometimes certain combinations of command options do not make
1028 sense on a particular target machine. You can define a macro
1029 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1030 defined, is executed once just after all the command options have
1031 been parsed.
1033 Don't use this macro to turn on various extra optimizations for
1034 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1036 void
1037 override_options (void)
1039 int i;
1040 /* Comes from final.c -- no real reason to change it. */
1041 #define MAX_CODE_ALIGN 16
1043 static struct ptt
1045 const struct processor_costs *cost; /* Processor costs */
1046 const int target_enable; /* Target flags to enable. */
1047 const int target_disable; /* Target flags to disable. */
1048 const int align_loop; /* Default alignments. */
1049 const int align_loop_max_skip;
1050 const int align_jump;
1051 const int align_jump_max_skip;
1052 const int align_func;
1054 const processor_target_table[PROCESSOR_max] =
1056 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1057 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1058 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1059 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1060 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1061 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1062 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1063 {&k8_cost, 0, 0, 16, 7, 16, 7, 16}
1066 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1067 static struct pta
1069 const char *const name; /* processor name or nickname. */
1070 const enum processor_type processor;
1071 const enum pta_flags
1073 PTA_SSE = 1,
1074 PTA_SSE2 = 2,
1075 PTA_MMX = 4,
1076 PTA_PREFETCH_SSE = 8,
1077 PTA_3DNOW = 16,
1078 PTA_3DNOW_A = 64,
1079 PTA_64BIT = 128
1080 } flags;
1082 const processor_alias_table[] =
1084 {"i386", PROCESSOR_I386, 0},
1085 {"i486", PROCESSOR_I486, 0},
1086 {"i586", PROCESSOR_PENTIUM, 0},
1087 {"pentium", PROCESSOR_PENTIUM, 0},
1088 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1089 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1090 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1091 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1092 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1093 {"i686", PROCESSOR_PENTIUMPRO, 0},
1094 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1095 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1096 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1097 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2 |
1098 PTA_MMX | PTA_PREFETCH_SSE},
1099 {"k6", PROCESSOR_K6, PTA_MMX},
1100 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1101 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1102 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1103 | PTA_3DNOW_A},
1104 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1105 | PTA_3DNOW | PTA_3DNOW_A},
1106 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1107 | PTA_3DNOW_A | PTA_SSE},
1108 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1109 | PTA_3DNOW_A | PTA_SSE},
1110 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1111 | PTA_3DNOW_A | PTA_SSE},
1112 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1113 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1116 int const pta_size = ARRAY_SIZE (processor_alias_table);
1118 /* Set the default values for switches whose default depends on TARGET_64BIT
1119 in case they weren't overwritten by command line options. */
1120 if (TARGET_64BIT)
1122 if (flag_omit_frame_pointer == 2)
1123 flag_omit_frame_pointer = 1;
1124 if (flag_asynchronous_unwind_tables == 2)
1125 flag_asynchronous_unwind_tables = 1;
1126 if (flag_pcc_struct_return == 2)
1127 flag_pcc_struct_return = 0;
1129 else
1131 if (flag_omit_frame_pointer == 2)
1132 flag_omit_frame_pointer = 0;
1133 if (flag_asynchronous_unwind_tables == 2)
1134 flag_asynchronous_unwind_tables = 0;
1135 if (flag_pcc_struct_return == 2)
1136 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1139 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1140 SUBTARGET_OVERRIDE_OPTIONS;
1141 #endif
1143 if (!ix86_tune_string && ix86_arch_string)
1144 ix86_tune_string = ix86_arch_string;
1145 if (!ix86_tune_string)
1146 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1147 if (!ix86_arch_string)
1148 ix86_arch_string = TARGET_64BIT ? "k8" : "i386";
1150 if (ix86_cmodel_string != 0)
1152 if (!strcmp (ix86_cmodel_string, "small"))
1153 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1154 else if (flag_pic)
1155 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1156 else if (!strcmp (ix86_cmodel_string, "32"))
1157 ix86_cmodel = CM_32;
1158 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1159 ix86_cmodel = CM_KERNEL;
1160 else if (!strcmp (ix86_cmodel_string, "medium") && !flag_pic)
1161 ix86_cmodel = CM_MEDIUM;
1162 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1163 ix86_cmodel = CM_LARGE;
1164 else
1165 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1167 else
1169 ix86_cmodel = CM_32;
1170 if (TARGET_64BIT)
1171 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
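/* Worked example of the logic above: without -mcmodel= the model defaults to
   CM_32 for -m32 and CM_SMALL (CM_SMALL_PIC under -fpic) for -m64;
   -mcmodel=kernel, medium and large are only accepted without -fpic, and
   CM_LARGE is still rejected with a sorry () just below.  */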
1173 if (ix86_asm_string != 0)
1175 if (!strcmp (ix86_asm_string, "intel"))
1176 ix86_asm_dialect = ASM_INTEL;
1177 else if (!strcmp (ix86_asm_string, "att"))
1178 ix86_asm_dialect = ASM_ATT;
1179 else
1180 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1182 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1183 error ("code model `%s' not supported in the %s bit mode",
1184 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1185 if (ix86_cmodel == CM_LARGE)
1186 sorry ("code model `large' not supported yet");
1187 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1188 sorry ("%i-bit mode not compiled in",
1189 (target_flags & MASK_64BIT) ? 64 : 32);
1191 for (i = 0; i < pta_size; i++)
1192 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1194 ix86_arch = processor_alias_table[i].processor;
1195 /* Default cpu tuning to the architecture. */
1196 ix86_tune = ix86_arch;
1197 if (processor_alias_table[i].flags & PTA_MMX
1198 && !(target_flags_explicit & MASK_MMX))
1199 target_flags |= MASK_MMX;
1200 if (processor_alias_table[i].flags & PTA_3DNOW
1201 && !(target_flags_explicit & MASK_3DNOW))
1202 target_flags |= MASK_3DNOW;
1203 if (processor_alias_table[i].flags & PTA_3DNOW_A
1204 && !(target_flags_explicit & MASK_3DNOW_A))
1205 target_flags |= MASK_3DNOW_A;
1206 if (processor_alias_table[i].flags & PTA_SSE
1207 && !(target_flags_explicit & MASK_SSE))
1208 target_flags |= MASK_SSE;
1209 if (processor_alias_table[i].flags & PTA_SSE2
1210 && !(target_flags_explicit & MASK_SSE2))
1211 target_flags |= MASK_SSE2;
1212 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1213 x86_prefetch_sse = true;
1214 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1215 error ("CPU you selected does not support x86-64 instruction set");
1216 break;
1219 if (i == pta_size)
1220 error ("bad value (%s) for -march= switch", ix86_arch_string);
1222 for (i = 0; i < pta_size; i++)
1223 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1225 ix86_tune = processor_alias_table[i].processor;
1226 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1227 error ("CPU you selected does not support x86-64 instruction set");
1228 break;
1230 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1231 x86_prefetch_sse = true;
1232 if (i == pta_size)
1233 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1235 if (optimize_size)
1236 ix86_cost = &size_cost;
1237 else
1238 ix86_cost = processor_target_table[ix86_tune].cost;
1239 target_flags |= processor_target_table[ix86_tune].target_enable;
1240 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1242 /* Arrange to set up i386_stack_locals for all functions. */
1243 init_machine_status = ix86_init_machine_status;
1245 /* Validate -mregparm= value. */
1246 if (ix86_regparm_string)
1248 i = atoi (ix86_regparm_string);
1249 if (i < 0 || i > REGPARM_MAX)
1250 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1251 else
1252 ix86_regparm = i;
1254 else
1255 if (TARGET_64BIT)
1256 ix86_regparm = REGPARM_MAX;
1258 /* If the user has provided any of the -malign-* options,
1259 warn and use that value only if -falign-* is not set.
1260 Remove this code in GCC 3.2 or later. */
1261 if (ix86_align_loops_string)
1263 warning ("-malign-loops is obsolete, use -falign-loops");
1264 if (align_loops == 0)
1266 i = atoi (ix86_align_loops_string);
1267 if (i < 0 || i > MAX_CODE_ALIGN)
1268 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1269 else
1270 align_loops = 1 << i;
1274 if (ix86_align_jumps_string)
1276 warning ("-malign-jumps is obsolete, use -falign-jumps");
1277 if (align_jumps == 0)
1279 i = atoi (ix86_align_jumps_string);
1280 if (i < 0 || i > MAX_CODE_ALIGN)
 1281 error ("-malign-jumps=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1282 else
1283 align_jumps = 1 << i;
1287 if (ix86_align_funcs_string)
1289 warning ("-malign-functions is obsolete, use -falign-functions");
1290 if (align_functions == 0)
1292 i = atoi (ix86_align_funcs_string);
1293 if (i < 0 || i > MAX_CODE_ALIGN)
 1294 error ("-malign-functions=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1295 else
1296 align_functions = 1 << i;
1300 /* Default align_* from the processor table. */
1301 if (align_loops == 0)
1303 align_loops = processor_target_table[ix86_tune].align_loop;
1304 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
1306 if (align_jumps == 0)
1308 align_jumps = processor_target_table[ix86_tune].align_jump;
1309 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
1311 if (align_functions == 0)
1313 align_functions = processor_target_table[ix86_tune].align_func;
1316 /* Validate -mpreferred-stack-boundary= value, or provide default.
1317 The default of 128 bits is for Pentium III's SSE __m128, but we
1318 don't want additional code to keep the stack aligned when
1319 optimizing for code size. */
1320 ix86_preferred_stack_boundary = (optimize_size
1321 ? TARGET_64BIT ? 128 : 32
1322 : 128);
1323 if (ix86_preferred_stack_boundary_string)
1325 i = atoi (ix86_preferred_stack_boundary_string);
1326 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
1327 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
1328 TARGET_64BIT ? 4 : 2);
1329 else
1330 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
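/* Worked example: -mpreferred-stack-boundary=4 requests a 2**4 == 16 byte
   boundary, i.e. ix86_preferred_stack_boundary == 128 bits, matching the
   __m128 default described above.  */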
1333 /* Validate -mbranch-cost= value, or provide default. */
1334 ix86_branch_cost = processor_target_table[ix86_tune].cost->branch_cost;
1335 if (ix86_branch_cost_string)
1337 i = atoi (ix86_branch_cost_string);
1338 if (i < 0 || i > 5)
1339 error ("-mbranch-cost=%d is not between 0 and 5", i);
1340 else
1341 ix86_branch_cost = i;
1344 if (ix86_tls_dialect_string)
1346 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
1347 ix86_tls_dialect = TLS_DIALECT_GNU;
1348 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
1349 ix86_tls_dialect = TLS_DIALECT_SUN;
1350 else
1351 error ("bad value (%s) for -mtls-dialect= switch",
1352 ix86_tls_dialect_string);
1355 /* Keep nonleaf frame pointers. */
1356 if (TARGET_OMIT_LEAF_FRAME_POINTER)
1357 flag_omit_frame_pointer = 1;
1359 /* If we're doing fast math, we don't care about comparison order
1360 wrt NaNs. This lets us use a shorter comparison sequence. */
1361 if (flag_unsafe_math_optimizations)
1362 target_flags &= ~MASK_IEEE_FP;
1364 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
1365 since the insns won't need emulation. */
1366 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
1367 target_flags &= ~MASK_NO_FANCY_MATH_387;
1369 /* Turn on SSE2 builtins for -mpni. */
1370 if (TARGET_PNI)
1371 target_flags |= MASK_SSE2;
1373 /* Turn on SSE builtins for -msse2. */
1374 if (TARGET_SSE2)
1375 target_flags |= MASK_SSE;
1377 if (TARGET_64BIT)
1379 if (TARGET_ALIGN_DOUBLE)
1380 error ("-malign-double makes no sense in the 64bit mode");
1381 if (TARGET_RTD)
1382 error ("-mrtd calling convention not supported in the 64bit mode");
1383 /* Enable by default the SSE and MMX builtins. */
1384 target_flags |= (MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE);
1385 ix86_fpmath = FPMATH_SSE;
1387 else
1389 ix86_fpmath = FPMATH_387;
 1390 /* The i386 ABI does not specify a red zone. It still makes sense to use it
 1391 when the programmer takes care to keep the stack from being destroyed. */
1392 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
1393 target_flags |= MASK_NO_RED_ZONE;
1396 if (ix86_fpmath_string != 0)
1398 if (! strcmp (ix86_fpmath_string, "387"))
1399 ix86_fpmath = FPMATH_387;
1400 else if (! strcmp (ix86_fpmath_string, "sse"))
1402 if (!TARGET_SSE)
1404 warning ("SSE instruction set disabled, using 387 arithmetics");
1405 ix86_fpmath = FPMATH_387;
1407 else
1408 ix86_fpmath = FPMATH_SSE;
1410 else if (! strcmp (ix86_fpmath_string, "387,sse")
1411 || ! strcmp (ix86_fpmath_string, "sse,387"))
1413 if (!TARGET_SSE)
1415 warning ("SSE instruction set disabled, using 387 arithmetics");
1416 ix86_fpmath = FPMATH_387;
1418 else if (!TARGET_80387)
1420 warning ("387 instruction set disabled, using SSE arithmetics");
1421 ix86_fpmath = FPMATH_SSE;
1423 else
1424 ix86_fpmath = FPMATH_SSE | FPMATH_387;
1426 else
1427 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
1430 /* It makes no sense to ask for just SSE builtins, so MMX is also turned
1431 on by -msse. */
1432 if (TARGET_SSE)
1434 target_flags |= MASK_MMX;
1435 x86_prefetch_sse = true;
1438 /* If it has 3DNow! it also has MMX so MMX is also turned on by -m3dnow */
1439 if (TARGET_3DNOW)
1441 target_flags |= MASK_MMX;
1442 /* If we are targeting the Athlon architecture, enable the 3Dnow/MMX
1443 extensions it adds. */
1444 if (x86_3dnow_a & (1 << ix86_arch))
1445 target_flags |= MASK_3DNOW_A;
1447 if ((x86_accumulate_outgoing_args & TUNEMASK)
1448 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
1449 && !optimize_size)
1450 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
1452 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
1454 char *p;
1455 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
1456 p = strchr (internal_label_prefix, 'X');
1457 internal_label_prefix_len = p - internal_label_prefix;
1458 *p = '\0';
1462 void
1463 optimization_options (int level, int size ATTRIBUTE_UNUSED)
1465 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
1466 make the problem with not enough registers even worse. */
1467 #ifdef INSN_SCHEDULING
1468 if (level > 1)
1469 flag_schedule_insns = 0;
1470 #endif
 1472 /* The default values of these switches depend on TARGET_64BIT,
 1473 which is not known at this moment. Mark these values with 2 and
 1474 let the user override them. In case there is no command line option
 1475 specifying them, we will set the defaults in override_options. */
1476 if (optimize >= 1)
1477 flag_omit_frame_pointer = 2;
1478 flag_pcc_struct_return = 2;
1479 flag_asynchronous_unwind_tables = 2;
1482 /* Table of valid machine attributes. */
1483 const struct attribute_spec ix86_attribute_table[] =
1485 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
1486 /* Stdcall attribute says callee is responsible for popping arguments
1487 if they are not variable. */
1488 { "stdcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1489 /* Fastcall attribute says callee is responsible for popping arguments
1490 if they are not variable. */
1491 { "fastcall", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1492 /* Cdecl attribute says the callee is a normal C declaration */
1493 { "cdecl", 0, 0, false, true, true, ix86_handle_cdecl_attribute },
1494 /* Regparm attribute specifies how many integer arguments are to be
1495 passed in registers. */
1496 { "regparm", 1, 1, false, true, true, ix86_handle_regparm_attribute },
1497 #ifdef TARGET_DLLIMPORT_DECL_ATTRIBUTES
1498 { "dllimport", 0, 0, false, false, false, ix86_handle_dll_attribute },
1499 { "dllexport", 0, 0, false, false, false, ix86_handle_dll_attribute },
1500 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
1501 #endif
1502 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1503 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
1504 { NULL, 0, 0, false, false, false, NULL }
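/* Usage sketch (user-level code, not part of this table): these attributes
   are written on function declarations, e.g.  */
#if 0
int __attribute__ ((regparm (2))) add2 (int a, int b);	/* a and b arrive in registers */
int __attribute__ ((stdcall)) callback (int a);		/* callee pops its arguments */
#endif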
1507 /* Decide whether we can make a sibling call to a function. DECL is the
1508 declaration of the function being targeted by the call and EXP is the
1509 CALL_EXPR representing the call. */
1511 static bool
1512 ix86_function_ok_for_sibcall (tree decl, tree exp)
1514 /* If we are generating position-independent code, we cannot sibcall
1515 optimize any indirect call, or a direct call to a global function,
1516 as the PLT requires %ebx be live. */
1517 if (!TARGET_64BIT && flag_pic && (!decl || TREE_PUBLIC (decl)))
1518 return false;
1520 /* If we are returning floats on the 80387 register stack, we cannot
1521 make a sibcall from a function that doesn't return a float to a
1522 function that does or, conversely, from a function that does return
1523 a float to a function that doesn't; the necessary stack adjustment
1524 would not be executed. */
1525 if (STACK_REG_P (ix86_function_value (TREE_TYPE (exp)))
1526 != STACK_REG_P (ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)))))
1527 return false;
1529 /* If this call is indirect, we'll need to be able to use a call-clobbered
1530 register for the address of the target function. Make sure that all
1531 such registers are not used for passing parameters. */
1532 if (!decl && !TARGET_64BIT)
1534 tree type;
1536 /* We're looking at the CALL_EXPR, we need the type of the function. */
1537 type = TREE_OPERAND (exp, 0); /* pointer expression */
1538 type = TREE_TYPE (type); /* pointer type */
1539 type = TREE_TYPE (type); /* function type */
1541 if (ix86_function_regparm (type, NULL) >= 3)
1543 /* ??? Need to count the actual number of registers to be used,
1544 not the possible number of registers. Fix later. */
1545 return false;
1549 /* Otherwise okay. That also includes certain types of indirect calls. */
1550 return true;
1553 /* Handle a "cdecl", "stdcall", or "fastcall" attribute;
1554 arguments as in struct attribute_spec.handler. */
1555 static tree
1556 ix86_handle_cdecl_attribute (tree *node, tree name,
1557 tree args ATTRIBUTE_UNUSED,
1558 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1560 if (TREE_CODE (*node) != FUNCTION_TYPE
1561 && TREE_CODE (*node) != METHOD_TYPE
1562 && TREE_CODE (*node) != FIELD_DECL
1563 && TREE_CODE (*node) != TYPE_DECL)
1565 warning ("`%s' attribute only applies to functions",
1566 IDENTIFIER_POINTER (name));
1567 *no_add_attrs = true;
1569 else
1571 if (is_attribute_p ("fastcall", name))
1573 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
1575 error ("fastcall and stdcall attributes are not compatible");
1577 else if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
1579 error ("fastcall and regparm attributes are not compatible");
1582 else if (is_attribute_p ("stdcall", name))
1584 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1586 error ("fastcall and stdcall attributes are not compatible");
1591 if (TARGET_64BIT)
1593 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
1594 *no_add_attrs = true;
1597 return NULL_TREE;
1600 /* Handle a "regparm" attribute;
1601 arguments as in struct attribute_spec.handler. */
1602 static tree
1603 ix86_handle_regparm_attribute (tree *node, tree name, tree args,
1604 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
1606 if (TREE_CODE (*node) != FUNCTION_TYPE
1607 && TREE_CODE (*node) != METHOD_TYPE
1608 && TREE_CODE (*node) != FIELD_DECL
1609 && TREE_CODE (*node) != TYPE_DECL)
1611 warning ("`%s' attribute only applies to functions",
1612 IDENTIFIER_POINTER (name));
1613 *no_add_attrs = true;
1615 else
1617 tree cst;
1619 cst = TREE_VALUE (args);
1620 if (TREE_CODE (cst) != INTEGER_CST)
1622 warning ("`%s' attribute requires an integer constant argument",
1623 IDENTIFIER_POINTER (name));
1624 *no_add_attrs = true;
1626 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
1628 warning ("argument to `%s' attribute larger than %d",
1629 IDENTIFIER_POINTER (name), REGPARM_MAX);
1630 *no_add_attrs = true;
1633 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
1635 error ("fastcall and regparm attributes are not compatible");
1639 return NULL_TREE;
1642 /* Return 0 if the attributes for two types are incompatible, 1 if they
1643 are compatible, and 2 if they are nearly compatible (which causes a
1644 warning to be generated). */
1646 static int
1647 ix86_comp_type_attributes (tree type1, tree type2)
1649 /* Check for mismatch of non-default calling convention. */
1650 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
1652 if (TREE_CODE (type1) != FUNCTION_TYPE)
1653 return 1;
1655 /* Check for mismatched fastcall types */
1656 if (!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
1657 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
1658 return 0;
1660 /* Check for mismatched calling conventions (cdecl vs stdcall). */
1661 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
1662 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
1663 return 0;
1664 return 1;
1667 /* Return the regparm value for a function with the indicated TYPE and DECL.
1668 DECL may be NULL when calling function indirectly
1669 or considering a libcall. */
1671 static int
1672 ix86_function_regparm (tree type, tree decl)
1674 tree attr;
1675 int regparm = ix86_regparm;
1676 bool user_convention = false;
1678 if (!TARGET_64BIT)
1680 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
1681 if (attr)
1683 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
1684 user_convention = true;
1687 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
1689 regparm = 2;
1690 user_convention = true;
1693 /* Use register calling convention for local functions when possible. */
1694 if (!TARGET_64BIT && !user_convention && decl
1695 && flag_unit_at_a_time && !profile_flag)
1697 struct cgraph_local_info *i = cgraph_local_info (decl);
1698 if (i && i->local)
1700 /* We can't use regparm(3) for nested functions as these use
1701 the static chain pointer in the third argument. */
1702 if (DECL_CONTEXT (decl) && !DECL_NO_STATIC_CHAIN (decl))
1703 regparm = 2;
1704 else
1705 regparm = 3;
1709 return regparm;
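/* Rough sketch of the local-function case handled above (the function
   name is hypothetical; assumes -funit-at-a-time and no profiling):

     static int sum3 (int a, int b, int c) { return a + b + c; }

   If cgraph proves sum3 is only called from this unit, it is compiled as
   if it carried regparm(3), so A, B and C arrive in EAX, EDX and ECX.  A
   nested function is limited to regparm(2) because the static chain
   occupies the third register.  */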
1712 /* Return true if EAX is live at the start of the function. Used by
1713 ix86_expand_prologue to determine if we need special help before
1714 calling allocate_stack_worker. */
1716 static bool
1717 ix86_eax_live_at_start_p (void)
1719 /* Cheat. Don't bother working forward from ix86_function_regparm
1720 to the function type to whether an actual argument is located in
1721 eax. Instead just look at cfg info, which is still close enough
1722 to correct at this point. This gives false positives for broken
1723 functions that might use uninitialized data that happens to be
1724 allocated in eax, but who cares? */
1725 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->global_live_at_end, 0);
1728 /* Value is the number of bytes of arguments automatically
1729 popped when returning from a subroutine call.
1730 FUNDECL is the declaration node of the function (as a tree),
1731 FUNTYPE is the data type of the function (as a tree),
1732 or for a library call it is an identifier node for the subroutine name.
1733 SIZE is the number of bytes of arguments passed on the stack.
1735 On the 80386, the RTD insn may be used to pop them if the number
1736 of args is fixed, but if the number is variable then the caller
1737 must pop them all. RTD can't be used for library calls now
1738 because the library is compiled with the Unix compiler.
1739 Use of RTD is a selectable option, since it is incompatible with
1740 standard Unix calling sequences. If the option is not selected,
1741 the caller must always pop the args.
1743 The attribute stdcall is equivalent to RTD on a per module basis. */
1746 ix86_return_pops_args (tree fundecl, tree funtype, int size)
1748 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
1750 /* Cdecl functions override -mrtd, and never pop the stack. */
1751 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
1753 /* Stdcall and fastcall functions will pop the stack if not
1754 variable args. */
1755 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
1756 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
1757 rtd = 1;
1759 if (rtd
1760 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
1761 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
1762 == void_type_node)))
1763 return size;
1766 /* Lose any fake structure return argument if it is passed on the stack. */
1767 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
1768 && !TARGET_64BIT)
1770 int nregs = ix86_function_regparm (funtype, fundecl);
1772 if (!nregs)
1773 return GET_MODE_SIZE (Pmode);
1776 return 0;
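/* Illustration of the rules above (hypothetical 32-bit declarations):

     int cd (int a, int b);                            caller pops, return 0
     __attribute__((stdcall)) int sc (int a, int b);   callee pops, return 8
     __attribute__((stdcall)) int vs (int a, ...);     varargs, caller pops,
                                                       return 0

   With -mrtd the plain declaration CD behaves like SC unless it carries
   the cdecl attribute.  */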
1779 /* Argument support functions. */
1781 /* Return true when register may be used to pass function parameters. */
1782 bool
1783 ix86_function_arg_regno_p (int regno)
1785 int i;
1786 if (!TARGET_64BIT)
1787 return (regno < REGPARM_MAX
1788 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
1789 if (SSE_REGNO_P (regno) && TARGET_SSE)
1790 return true;
1791 /* RAX is used as hidden argument to va_arg functions. */
1792 if (!regno)
1793 return true;
1794 for (i = 0; i < REGPARM_MAX; i++)
1795 if (regno == x86_64_int_parameter_registers[i])
1796 return true;
1797 return false;
1800 /* Initialize a variable CUM of type CUMULATIVE_ARGS
1801 for a call to a function whose data type is FNTYPE.
1802 For a library call, FNTYPE is 0. */
1804 void
1805 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
1806 tree fntype, /* tree ptr for function decl */
1807 rtx libname, /* SYMBOL_REF of library name or 0 */
1808 tree fndecl)
1810 static CUMULATIVE_ARGS zero_cum;
1811 tree param, next_param;
1813 if (TARGET_DEBUG_ARG)
1815 fprintf (stderr, "\ninit_cumulative_args (");
1816 if (fntype)
1817 fprintf (stderr, "fntype code = %s, ret code = %s",
1818 tree_code_name[(int) TREE_CODE (fntype)],
1819 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
1820 else
1821 fprintf (stderr, "no fntype");
1823 if (libname)
1824 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
1827 *cum = zero_cum;
1829 /* Set up the number of registers to use for passing arguments. */
1830 if (fntype)
1831 cum->nregs = ix86_function_regparm (fntype, fndecl);
1832 else
1833 cum->nregs = ix86_regparm;
1834 cum->sse_nregs = SSE_REGPARM_MAX;
1835 cum->mmx_nregs = MMX_REGPARM_MAX;
1836 cum->maybe_vaarg = false;
1838 /* Use ecx and edx registers if function has fastcall attribute */
1839 if (fntype && !TARGET_64BIT)
1841 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
1843 cum->nregs = 2;
1844 cum->fastcall = 1;
1849 /* Determine if this function has variable arguments. This is
1850 indicated by the last argument being 'void_type_node' if there
1851 are no variable arguments. If there are variable arguments, then
1852 we won't pass anything in registers */
1854 if (cum->nregs)
1856 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
1857 param != 0; param = next_param)
1859 next_param = TREE_CHAIN (param);
1860 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
1862 if (!TARGET_64BIT)
1864 cum->nregs = 0;
1865 cum->fastcall = 0;
1867 cum->maybe_vaarg = true;
1871 if ((!fntype && !libname)
1872 || (fntype && !TYPE_ARG_TYPES (fntype)))
1873 cum->maybe_vaarg = 1;
1875 if (TARGET_DEBUG_ARG)
1876 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
1878 return;
1881 /* x86-64 register passing implementation. See the x86-64 ABI for details. The goal
1882 of this code is to classify each eightbyte of the incoming argument by register
1883 class and assign registers accordingly. */
1885 /* Return the union class of CLASS1 and CLASS2.
1886 See the x86-64 PS ABI for details. */
1888 static enum x86_64_reg_class
1889 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
1891 /* Rule #1: If both classes are equal, this is the resulting class. */
1892 if (class1 == class2)
1893 return class1;
1895 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
1896 the other class. */
1897 if (class1 == X86_64_NO_CLASS)
1898 return class2;
1899 if (class2 == X86_64_NO_CLASS)
1900 return class1;
1902 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
1903 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
1904 return X86_64_MEMORY_CLASS;
1906 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
1907 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
1908 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
1909 return X86_64_INTEGERSI_CLASS;
1910 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
1911 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
1912 return X86_64_INTEGER_CLASS;
1914 /* Rule #5: If one of the classes is X87 or X87UP class, MEMORY is used. */
1915 if (class1 == X86_64_X87_CLASS || class1 == X86_64_X87UP_CLASS
1916 || class2 == X86_64_X87_CLASS || class2 == X86_64_X87UP_CLASS)
1917 return X86_64_MEMORY_CLASS;
1919 /* Rule #6: Otherwise class SSE is used. */
1920 return X86_64_SSE_CLASS;
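/* A few sample applications of the rules above (illustrative only):

     merge_classes (X86_64_NO_CLASS, X86_64_SSESF_CLASS)        -> SSESF
     merge_classes (X86_64_INTEGERSI_CLASS, X86_64_SSESF_CLASS) -> INTEGERSI
     merge_classes (X86_64_INTEGER_CLASS, X86_64_SSE_CLASS)     -> INTEGER
     merge_classes (X86_64_X87_CLASS, X86_64_SSE_CLASS)         -> MEMORY  */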
1923 /* Classify the argument of type TYPE and mode MODE.
1924 CLASSES will be filled by the register class used to pass each word
1925 of the operand. The number of words is returned. In case the parameter
1926 should be passed in memory, 0 is returned. As a special case for zero
1927 sized containers, classes[0] will be NO_CLASS and 1 is returned.
1929 BIT_OFFSET is used internally for handling records and specifies the
1930 offset in bits modulo 256 to avoid overflow cases.
1932 See the x86-64 PS ABI for details.
1935 static int
1936 classify_argument (enum machine_mode mode, tree type,
1937 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
1939 HOST_WIDE_INT bytes =
1940 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
1941 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1943 /* Variable sized entities are always passed/returned in memory. */
1944 if (bytes < 0)
1945 return 0;
1947 if (mode != VOIDmode
1948 && MUST_PASS_IN_STACK (mode, type))
1949 return 0;
1951 if (type && AGGREGATE_TYPE_P (type))
1953 int i;
1954 tree field;
1955 enum x86_64_reg_class subclasses[MAX_CLASSES];
1957 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
1958 if (bytes > 16)
1959 return 0;
1961 for (i = 0; i < words; i++)
1962 classes[i] = X86_64_NO_CLASS;
1964 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
1965 signal the memory class, so handle it as a special case. */
1966 if (!words)
1968 classes[0] = X86_64_NO_CLASS;
1969 return 1;
1972 /* Classify each field of record and merge classes. */
1973 if (TREE_CODE (type) == RECORD_TYPE)
1975 /* For classes, first merge in the fields of the base classes. */
1976 if (TYPE_BINFO (type) != NULL && TYPE_BINFO_BASETYPES (type) != NULL)
1978 tree bases = TYPE_BINFO_BASETYPES (type);
1979 int n_bases = TREE_VEC_LENGTH (bases);
1980 int i;
1982 for (i = 0; i < n_bases; ++i)
1984 tree binfo = TREE_VEC_ELT (bases, i);
1985 int num;
1986 int offset = tree_low_cst (BINFO_OFFSET (binfo), 0) * 8;
1987 tree type = BINFO_TYPE (binfo);
1989 num = classify_argument (TYPE_MODE (type),
1990 type, subclasses,
1991 (offset + bit_offset) % 256);
1992 if (!num)
1993 return 0;
1994 for (i = 0; i < num; i++)
1996 int pos = (offset + (bit_offset % 64)) / 8 / 8;
1997 classes[i + pos] =
1998 merge_classes (subclasses[i], classes[i + pos]);
2002 /* And now merge the fields of the structure. */
2003 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2005 if (TREE_CODE (field) == FIELD_DECL)
2007 int num;
2009 /* Bitfields are always classified as integer. Handle them
2010 early, since later code would consider them to be
2011 misaligned integers. */
2012 if (DECL_BIT_FIELD (field))
2014 for (i = int_bit_position (field) / 8 / 8;
2015 i < (int_bit_position (field)
2016 + tree_low_cst (DECL_SIZE (field), 0)
2017 + 63) / 8 / 8; i++)
2018 classes[i] =
2019 merge_classes (X86_64_INTEGER_CLASS,
2020 classes[i]);
2022 else
2024 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2025 TREE_TYPE (field), subclasses,
2026 (int_bit_position (field)
2027 + bit_offset) % 256);
2028 if (!num)
2029 return 0;
2030 for (i = 0; i < num; i++)
2032 int pos =
2033 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
2034 classes[i + pos] =
2035 merge_classes (subclasses[i], classes[i + pos]);
2041 /* Arrays are handled as small records. */
2042 else if (TREE_CODE (type) == ARRAY_TYPE)
2044 int num;
2045 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
2046 TREE_TYPE (type), subclasses, bit_offset);
2047 if (!num)
2048 return 0;
2050 /* The partial classes are now full classes. */
2051 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
2052 subclasses[0] = X86_64_SSE_CLASS;
2053 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
2054 subclasses[0] = X86_64_INTEGER_CLASS;
2056 for (i = 0; i < words; i++)
2057 classes[i] = subclasses[i % num];
2059 /* Unions are similar to RECORD_TYPE but offset is always 0. */
2060 else if (TREE_CODE (type) == UNION_TYPE
2061 || TREE_CODE (type) == QUAL_UNION_TYPE)
2063 /* For classes, first merge in the fields of the base classes. */
2064 if (TYPE_BINFO (type) != NULL && TYPE_BINFO_BASETYPES (type) != NULL)
2066 tree bases = TYPE_BINFO_BASETYPES (type);
2067 int n_bases = TREE_VEC_LENGTH (bases);
2068 int i;
2070 for (i = 0; i < n_bases; ++i)
2072 tree binfo = TREE_VEC_ELT (bases, i);
2073 int num;
2074 int offset = tree_low_cst (BINFO_OFFSET (binfo), 0) * 8;
2075 tree type = BINFO_TYPE (binfo);
2077 num = classify_argument (TYPE_MODE (type),
2078 type, subclasses,
2079 (offset + (bit_offset % 64)) % 256);
2080 if (!num)
2081 return 0;
2082 for (i = 0; i < num; i++)
2084 int pos = (offset + (bit_offset % 64)) / 8 / 8;
2085 classes[i + pos] =
2086 merge_classes (subclasses[i], classes[i + pos]);
2090 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2092 if (TREE_CODE (field) == FIELD_DECL)
2094 int num;
2095 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
2096 TREE_TYPE (field), subclasses,
2097 bit_offset);
2098 if (!num)
2099 return 0;
2100 for (i = 0; i < num; i++)
2101 classes[i] = merge_classes (subclasses[i], classes[i]);
2105 else if (TREE_CODE (type) == SET_TYPE)
2107 if (bytes <= 4)
2109 classes[0] = X86_64_INTEGERSI_CLASS;
2110 return 1;
2112 else if (bytes <= 8)
2114 classes[0] = X86_64_INTEGER_CLASS;
2115 return 1;
2117 else if (bytes <= 12)
2119 classes[0] = X86_64_INTEGER_CLASS;
2120 classes[1] = X86_64_INTEGERSI_CLASS;
2121 return 2;
2123 else
2125 classes[0] = X86_64_INTEGER_CLASS;
2126 classes[1] = X86_64_INTEGER_CLASS;
2127 return 2;
2130 else
2131 abort ();
2133 /* Final merger cleanup. */
2134 for (i = 0; i < words; i++)
2136 /* If one class is MEMORY, everything should be passed in
2137 memory. */
2138 if (classes[i] == X86_64_MEMORY_CLASS)
2139 return 0;
2141 /* The X86_64_SSEUP_CLASS should always be preceded by
2142 X86_64_SSE_CLASS. */
2143 if (classes[i] == X86_64_SSEUP_CLASS
2144 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
2145 classes[i] = X86_64_SSE_CLASS;
2147 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
2148 if (classes[i] == X86_64_X87UP_CLASS
2149 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
2150 classes[i] = X86_64_SSE_CLASS;
2152 return words;
2155 /* Compute alignment needed. We align all types to natural boundaries with
2156 the exception of XFmode, which is aligned to 64 bits. */
2157 if (mode != VOIDmode && mode != BLKmode)
2159 int mode_alignment = GET_MODE_BITSIZE (mode);
2161 if (mode == XFmode)
2162 mode_alignment = 128;
2163 else if (mode == XCmode)
2164 mode_alignment = 256;
2165 /* Misaligned fields are always returned in memory. */
2166 if (bit_offset % mode_alignment)
2167 return 0;
2170 /* Classification of atomic types. */
2171 switch (mode)
2173 case DImode:
2174 case SImode:
2175 case HImode:
2176 case QImode:
2177 case CSImode:
2178 case CHImode:
2179 case CQImode:
2180 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
2181 classes[0] = X86_64_INTEGERSI_CLASS;
2182 else
2183 classes[0] = X86_64_INTEGER_CLASS;
2184 return 1;
2185 case CDImode:
2186 case TImode:
2187 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2188 return 2;
2189 case CTImode:
2190 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
2191 classes[2] = classes[3] = X86_64_INTEGER_CLASS;
2192 return 4;
2193 case SFmode:
2194 if (!(bit_offset % 64))
2195 classes[0] = X86_64_SSESF_CLASS;
2196 else
2197 classes[0] = X86_64_SSE_CLASS;
2198 return 1;
2199 case DFmode:
2200 classes[0] = X86_64_SSEDF_CLASS;
2201 return 1;
2202 case XFmode:
2203 classes[0] = X86_64_X87_CLASS;
2204 classes[1] = X86_64_X87UP_CLASS;
2205 return 2;
2206 case TFmode:
2207 case TCmode:
2208 return 0;
2209 case XCmode:
2210 classes[0] = X86_64_X87_CLASS;
2211 classes[1] = X86_64_X87UP_CLASS;
2212 classes[2] = X86_64_X87_CLASS;
2213 classes[3] = X86_64_X87UP_CLASS;
2214 return 4;
2215 case DCmode:
2216 classes[0] = X86_64_SSEDF_CLASS;
2217 classes[1] = X86_64_SSEDF_CLASS;
2218 return 2;
2219 case SCmode:
2220 classes[0] = X86_64_SSE_CLASS;
2221 return 1;
2222 case V4SFmode:
2223 case V4SImode:
2224 case V16QImode:
2225 case V8HImode:
2226 case V2DFmode:
2227 case V2DImode:
2228 classes[0] = X86_64_SSE_CLASS;
2229 classes[1] = X86_64_SSEUP_CLASS;
2230 return 2;
2231 case V2SFmode:
2232 case V2SImode:
2233 case V4HImode:
2234 case V8QImode:
2235 return 0;
2236 case BLKmode:
2237 case VOIDmode:
2238 return 0;
2239 default:
2240 abort ();
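/* Worked examples of the classification above (illustrative types, 64-bit
   ABI):

     struct { int a; int b; }      -> 1 word:  INTEGER
     struct { double d; int i; }   -> 2 words: SSEDF, INTEGER
     struct { double d[3]; }       -> memory (larger than 16 bytes)
     union  { long l; double d; }  -> 1 word:  INTEGER (rule #4 wins)  */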
2244 /* Examine the argument and set the number of registers required in each
2245 class. Return 0 iff the parameter should be passed in memory. */
2246 static int
2247 examine_argument (enum machine_mode mode, tree type, int in_return,
2248 int *int_nregs, int *sse_nregs)
2250 enum x86_64_reg_class class[MAX_CLASSES];
2251 int n = classify_argument (mode, type, class, 0);
2253 *int_nregs = 0;
2254 *sse_nregs = 0;
2255 if (!n)
2256 return 0;
2257 for (n--; n >= 0; n--)
2258 switch (class[n])
2260 case X86_64_INTEGER_CLASS:
2261 case X86_64_INTEGERSI_CLASS:
2262 (*int_nregs)++;
2263 break;
2264 case X86_64_SSE_CLASS:
2265 case X86_64_SSESF_CLASS:
2266 case X86_64_SSEDF_CLASS:
2267 (*sse_nregs)++;
2268 break;
2269 case X86_64_NO_CLASS:
2270 case X86_64_SSEUP_CLASS:
2271 break;
2272 case X86_64_X87_CLASS:
2273 case X86_64_X87UP_CLASS:
2274 if (!in_return)
2275 return 0;
2276 break;
2277 case X86_64_MEMORY_CLASS:
2278 abort ();
2280 return 1;
2282 /* Construct container for the argument used by GCC interface. See
2283 FUNCTION_ARG for the detailed description. */
2284 static rtx
2285 construct_container (enum machine_mode mode, tree type, int in_return,
2286 int nintregs, int nsseregs, const int * intreg,
2287 int sse_regno)
2289 enum machine_mode tmpmode;
2290 int bytes =
2291 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2292 enum x86_64_reg_class class[MAX_CLASSES];
2293 int n;
2294 int i;
2295 int nexps = 0;
2296 int needed_sseregs, needed_intregs;
2297 rtx exp[MAX_CLASSES];
2298 rtx ret;
2300 n = classify_argument (mode, type, class, 0);
2301 if (TARGET_DEBUG_ARG)
2303 if (!n)
2304 fprintf (stderr, "Memory class\n");
2305 else
2307 fprintf (stderr, "Classes:");
2308 for (i = 0; i < n; i++)
2310 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
2312 fprintf (stderr, "\n");
2315 if (!n)
2316 return NULL;
2317 if (!examine_argument (mode, type, in_return, &needed_intregs, &needed_sseregs))
2318 return NULL;
2319 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
2320 return NULL;
2322 /* First construct simple cases. Avoid SCmode, since we want to use
2323 a single register to pass this type. */
2324 if (n == 1 && mode != SCmode)
2325 switch (class[0])
2327 case X86_64_INTEGER_CLASS:
2328 case X86_64_INTEGERSI_CLASS:
2329 return gen_rtx_REG (mode, intreg[0]);
2330 case X86_64_SSE_CLASS:
2331 case X86_64_SSESF_CLASS:
2332 case X86_64_SSEDF_CLASS:
2333 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2334 case X86_64_X87_CLASS:
2335 return gen_rtx_REG (mode, FIRST_STACK_REG);
2336 case X86_64_NO_CLASS:
2337 /* Zero sized array, struct or class. */
2338 return NULL;
2339 default:
2340 abort ();
2342 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS)
2343 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
2344 if (n == 2
2345 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
2346 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
2347 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
2348 && class[1] == X86_64_INTEGER_CLASS
2349 && (mode == CDImode || mode == TImode || mode == TFmode)
2350 && intreg[0] + 1 == intreg[1])
2351 return gen_rtx_REG (mode, intreg[0]);
2352 if (n == 4
2353 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS
2354 && class[2] == X86_64_X87_CLASS && class[3] == X86_64_X87UP_CLASS)
2355 return gen_rtx_REG (XCmode, FIRST_STACK_REG);
2357 /* Otherwise figure out the entries of the PARALLEL. */
2358 for (i = 0; i < n; i++)
2360 switch (class[i])
2362 case X86_64_NO_CLASS:
2363 break;
2364 case X86_64_INTEGER_CLASS:
2365 case X86_64_INTEGERSI_CLASS:
2366 /* Merge TImodes on aligned occasions here too. */
2367 if (i * 8 + 8 > bytes)
2368 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
2369 else if (class[i] == X86_64_INTEGERSI_CLASS)
2370 tmpmode = SImode;
2371 else
2372 tmpmode = DImode;
2373 /* We've requested 24 bytes we don't have mode for. Use DImode. */
2374 if (tmpmode == BLKmode)
2375 tmpmode = DImode;
2376 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2377 gen_rtx_REG (tmpmode, *intreg),
2378 GEN_INT (i*8));
2379 intreg++;
2380 break;
2381 case X86_64_SSESF_CLASS:
2382 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2383 gen_rtx_REG (SFmode,
2384 SSE_REGNO (sse_regno)),
2385 GEN_INT (i*8));
2386 sse_regno++;
2387 break;
2388 case X86_64_SSEDF_CLASS:
2389 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2390 gen_rtx_REG (DFmode,
2391 SSE_REGNO (sse_regno)),
2392 GEN_INT (i*8));
2393 sse_regno++;
2394 break;
2395 case X86_64_SSE_CLASS:
2396 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
2397 tmpmode = TImode;
2398 else
2399 tmpmode = DImode;
2400 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
2401 gen_rtx_REG (tmpmode,
2402 SSE_REGNO (sse_regno)),
2403 GEN_INT (i*8));
2404 if (tmpmode == TImode)
2405 i++;
2406 sse_regno++;
2407 break;
2408 default:
2409 abort ();
2412 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
2413 for (i = 0; i < nexps; i++)
2414 XVECEXP (ret, 0, i) = exp [i];
2415 return ret;
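/* As a sketch, for the struct { double d; int i; } example above, with the
   first integer and SSE argument registers still free, the container built
   here looks roughly like:

     (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                (expr_list (reg:DI di)   (const_int 8))])

   i.e. the first eightbyte travels in %xmm0 and the second in %rdi.  */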
2418 /* Update the data in CUM to advance over an argument
2419 of mode MODE and data type TYPE.
2420 (TYPE is null for libcalls where that information may not be available.) */
2422 void
2423 function_arg_advance (CUMULATIVE_ARGS *cum, /* current arg information */
2424 enum machine_mode mode, /* current arg mode */
2425 tree type, /* type of the argument or 0 if lib support */
2426 int named) /* whether or not the argument was named */
2428 int bytes =
2429 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2430 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2432 if (TARGET_DEBUG_ARG)
2433 fprintf (stderr,
2434 "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, mode=%s, named=%d)\n\n",
2435 words, cum->words, cum->nregs, cum->sse_nregs, GET_MODE_NAME (mode), named);
2436 if (TARGET_64BIT)
2438 int int_nregs, sse_nregs;
2439 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
2440 cum->words += words;
2441 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
2443 cum->nregs -= int_nregs;
2444 cum->sse_nregs -= sse_nregs;
2445 cum->regno += int_nregs;
2446 cum->sse_regno += sse_nregs;
2448 else
2449 cum->words += words;
2451 else
2453 if (TARGET_SSE && SSE_REG_MODE_P (mode)
2454 && (!type || !AGGREGATE_TYPE_P (type)))
2456 cum->sse_words += words;
2457 cum->sse_nregs -= 1;
2458 cum->sse_regno += 1;
2459 if (cum->sse_nregs <= 0)
2461 cum->sse_nregs = 0;
2462 cum->sse_regno = 0;
2465 else if (TARGET_MMX && MMX_REG_MODE_P (mode)
2466 && (!type || !AGGREGATE_TYPE_P (type)))
2468 cum->mmx_words += words;
2469 cum->mmx_nregs -= 1;
2470 cum->mmx_regno += 1;
2471 if (cum->mmx_nregs <= 0)
2473 cum->mmx_nregs = 0;
2474 cum->mmx_regno = 0;
2477 else
2479 cum->words += words;
2480 cum->nregs -= words;
2481 cum->regno += words;
2483 if (cum->nregs <= 0)
2485 cum->nregs = 0;
2486 cum->regno = 0;
2490 return;
2493 /* Define where to put the arguments to a function.
2494 Value is zero to push the argument on the stack,
2495 or a hard register in which to store the argument.
2497 MODE is the argument's machine mode.
2498 TYPE is the data type of the argument (as a tree).
2499 This is null for libcalls where that information may
2500 not be available.
2501 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2502 the preceding args and about the function being called.
2503 NAMED is nonzero if this argument is a named parameter
2504 (otherwise it is an extra parameter matching an ellipsis). */
2507 function_arg (CUMULATIVE_ARGS *cum, /* current arg information */
2508 enum machine_mode mode, /* current arg mode */
2509 tree type, /* type of the argument or 0 if lib support */
2510 int named) /* != 0 for normal args, == 0 for ... args */
2512 rtx ret = NULL_RTX;
2513 int bytes =
2514 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
2515 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2516 static bool warnedsse, warnedmmx;
2518 /* Handle a hidden AL argument containing number of registers for varargs
2519 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
2520 any AL settings. */
2521 if (mode == VOIDmode)
2523 if (TARGET_64BIT)
2524 return GEN_INT (cum->maybe_vaarg
2525 ? (cum->sse_nregs < 0
2526 ? SSE_REGPARM_MAX
2527 : cum->sse_regno)
2528 : -1);
2529 else
2530 return constm1_rtx;
2532 if (TARGET_64BIT)
2533 ret = construct_container (mode, type, 0, cum->nregs, cum->sse_nregs,
2534 &x86_64_int_parameter_registers [cum->regno],
2535 cum->sse_regno);
2536 else
2537 switch (mode)
2539 /* For now, pass fp/complex values on the stack. */
2540 default:
2541 break;
2543 case BLKmode:
2544 if (bytes < 0)
2545 break;
2546 /* FALLTHRU */
2547 case DImode:
2548 case SImode:
2549 case HImode:
2550 case QImode:
2551 if (words <= cum->nregs)
2553 int regno = cum->regno;
2555 /* Fastcall allocates the first two DWORD (SImode) or
2556 smaller arguments to ECX and EDX. */
2557 if (cum->fastcall)
2559 if (mode == BLKmode || mode == DImode)
2560 break;
2562 /* ECX not EAX is the first allocated register. */
2563 if (regno == 0)
2564 regno = 2;
2566 ret = gen_rtx_REG (mode, regno);
2568 break;
2569 case TImode:
2570 case V16QImode:
2571 case V8HImode:
2572 case V4SImode:
2573 case V2DImode:
2574 case V4SFmode:
2575 case V2DFmode:
2576 if (!type || !AGGREGATE_TYPE_P (type))
2578 if (!TARGET_SSE && !warnedsse)
2580 warnedsse = true;
2581 warning ("SSE vector argument without SSE enabled "
2582 "changes the ABI");
2584 if (cum->sse_nregs)
2585 ret = gen_rtx_REG (mode, cum->sse_regno + FIRST_SSE_REG);
2587 break;
2588 case V8QImode:
2589 case V4HImode:
2590 case V2SImode:
2591 case V2SFmode:
2592 if (!type || !AGGREGATE_TYPE_P (type))
2594 if (!TARGET_MMX && !warnedmmx)
2596 warnedmmx = true;
2597 warning ("MMX vector argument without MMX enabled "
2598 "changes the ABI");
2600 if (cum->mmx_nregs)
2601 ret = gen_rtx_REG (mode, cum->mmx_regno + FIRST_MMX_REG);
2603 break;
2606 if (TARGET_DEBUG_ARG)
2608 fprintf (stderr,
2609 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
2610 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
2612 if (ret)
2613 print_simple_rtl (stderr, ret);
2614 else
2615 fprintf (stderr, ", stack");
2617 fprintf (stderr, " )\n");
2620 return ret;
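/* Illustrative outcomes on the 32-bit side (hypothetical declarations):

     __attribute__((fastcall)) int f (int a, int b, int c);
         a -> ECX, b -> EDX, c -> stack

     __attribute__((regparm(2))) int g (int a, int b, int c);
         a -> EAX, b -> EDX, c -> stack

     void h (__m128 v);
         with SSE enabled V goes in the first free %xmm register;
         without it the one-time ABI warning above is issued.  */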
2623 /* A C expression that indicates when an argument must be passed by
2624 reference. If nonzero for an argument, a copy of that argument is
2625 made in memory and a pointer to the argument is passed instead of
2626 the argument itself. The pointer is passed in whatever way is
2627 appropriate for passing a pointer to that type. */
2630 function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2631 enum machine_mode mode ATTRIBUTE_UNUSED,
2632 tree type, int named ATTRIBUTE_UNUSED)
2634 if (!TARGET_64BIT)
2635 return 0;
2637 if (type && int_size_in_bytes (type) == -1)
2639 if (TARGET_DEBUG_ARG)
2640 fprintf (stderr, "function_arg_pass_by_reference\n");
2641 return 1;
2644 return 0;
2647 /* Return true when TYPE should be 128-bit aligned for the 32-bit argument
2648 passing ABI. */
2649 static bool
2650 contains_128bit_aligned_vector_p (tree type)
2652 enum machine_mode mode = TYPE_MODE (type);
2653 if (SSE_REG_MODE_P (mode)
2654 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
2655 return true;
2656 if (TYPE_ALIGN (type) < 128)
2657 return false;
2659 if (AGGREGATE_TYPE_P (type))
2661 /* Walk the aggregates recursively. */
2662 if (TREE_CODE (type) == RECORD_TYPE
2663 || TREE_CODE (type) == UNION_TYPE
2664 || TREE_CODE (type) == QUAL_UNION_TYPE)
2666 tree field;
2668 if (TYPE_BINFO (type) != NULL
2669 && TYPE_BINFO_BASETYPES (type) != NULL)
2671 tree bases = TYPE_BINFO_BASETYPES (type);
2672 int n_bases = TREE_VEC_LENGTH (bases);
2673 int i;
2675 for (i = 0; i < n_bases; ++i)
2677 tree binfo = TREE_VEC_ELT (bases, i);
2678 tree type = BINFO_TYPE (binfo);
2680 if (contains_128bit_aligned_vector_p (type))
2681 return true;
2684 /* And now merge the fields of the structure. */
2685 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
2687 if (TREE_CODE (field) == FIELD_DECL
2688 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
2689 return true;
2692 /* Just for use if some languages pass arrays by value. */
2693 else if (TREE_CODE (type) == ARRAY_TYPE)
2695 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
2696 return true;
2698 else
2699 abort ();
2701 return false;
2704 /* Gives the alignment boundary, in bits, of an argument with the
2705 specified mode and type. */
2708 ix86_function_arg_boundary (enum machine_mode mode, tree type)
2710 int align;
2711 if (type)
2712 align = TYPE_ALIGN (type);
2713 else
2714 align = GET_MODE_ALIGNMENT (mode);
2715 if (align < PARM_BOUNDARY)
2716 align = PARM_BOUNDARY;
2717 if (!TARGET_64BIT)
2719 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
2720 make an exception for SSE modes since these require 128bit
2721 alignment.
2723 The handling here differs from field_alignment. ICC aligns MMX
2724 arguments to 4 byte boundaries, while structure fields are aligned
2725 to 8 byte boundaries. */
2726 if (!type)
2728 if (!SSE_REG_MODE_P (mode))
2729 align = PARM_BOUNDARY;
2731 else
2733 if (!contains_128bit_aligned_vector_p (type))
2734 align = PARM_BOUNDARY;
2737 if (align > 128)
2738 align = 128;
2739 return align;
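/* For instance, with the 32-bit ABI (illustrative types):

     int, double, struct { int i; }   -> 32-bit boundary (PARM_BOUNDARY)
     __m128, struct { __m128 v; }     -> 128-bit boundary

   The result is never larger than 128 bits.  */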
2742 /* Return true if N is a possible register number of function value. */
2743 bool
2744 ix86_function_value_regno_p (int regno)
2746 if (!TARGET_64BIT)
2748 return ((regno) == 0
2749 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
2750 || ((regno) == FIRST_SSE_REG && TARGET_SSE));
2752 return ((regno) == 0 || (regno) == FIRST_FLOAT_REG
2753 || ((regno) == FIRST_SSE_REG && TARGET_SSE)
2754 || ((regno) == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387));
2757 /* Define how to find the value returned by a function.
2758 VALTYPE is the data type of the value (as a tree).
2759 If the precise function being called is known, FUNC is its FUNCTION_DECL;
2760 otherwise, FUNC is 0. */
2762 ix86_function_value (tree valtype)
2764 if (TARGET_64BIT)
2766 rtx ret = construct_container (TYPE_MODE (valtype), valtype, 1,
2767 REGPARM_MAX, SSE_REGPARM_MAX,
2768 x86_64_int_return_registers, 0);
2769 /* For zero sized structures, construct_container returns NULL, but we need
2770 to keep the rest of the compiler happy by returning a meaningful value. */
2771 if (!ret)
2772 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
2773 return ret;
2775 else
2776 return gen_rtx_REG (TYPE_MODE (valtype),
2777 ix86_value_regno (TYPE_MODE (valtype)));
2780 /* Return nonzero iff type is returned in memory. */
2782 ix86_return_in_memory (tree type)
2784 int needed_intregs, needed_sseregs, size;
2785 enum machine_mode mode = TYPE_MODE (type);
2787 if (TARGET_64BIT)
2788 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
2790 if (mode == BLKmode)
2791 return 1;
2793 size = int_size_in_bytes (type);
2795 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
2796 return 0;
2798 if (VECTOR_MODE_P (mode) || mode == TImode)
2800 /* User-created vectors small enough to fit in EAX. */
2801 if (size < 8)
2802 return 0;
2804 /* MMX/3dNow values are returned on the stack, since we've
2805 got to EMMS/FEMMS before returning. */
2806 if (size == 8)
2807 return 1;
2809 /* SSE values are returned in XMM0. */
2810 /* ??? Except when it doesn't exist? We have a choice of
2811 either (1) being abi incompatible with a -march switch,
2812 or (2) generating an error here. Given no good solution,
2813 I think the safest thing is one warning. The user won't
2814 be able to use -Werror, but.... */
2815 if (size == 16)
2817 static bool warned;
2819 if (TARGET_SSE)
2820 return 0;
2822 if (!warned)
2824 warned = true;
2825 warning ("SSE vector return without SSE enabled "
2826 "changes the ABI");
2828 return 1;
2832 if (mode == XFmode)
2833 return 0;
2835 if (size > 12)
2836 return 1;
2837 return 0;
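/* Illustrative 32-bit outcomes (hypothetical types):

     __m64  (8-byte vector)   -> memory, since EMMS must run before return
     __m128 (16-byte vector)  -> %xmm0 when SSE is enabled, otherwise
                                 memory plus the one-time ABI warning
     long double (XFmode)     -> %st(0), never memory  */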
2840 /* Define how to find the value returned by a library function
2841 assuming the value has mode MODE. */
2843 ix86_libcall_value (enum machine_mode mode)
2845 if (TARGET_64BIT)
2847 switch (mode)
2849 case SFmode:
2850 case SCmode:
2851 case DFmode:
2852 case DCmode:
2853 return gen_rtx_REG (mode, FIRST_SSE_REG);
2854 case XFmode:
2855 case XCmode:
2856 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
2857 case TFmode:
2858 case TCmode:
2859 return NULL;
2860 default:
2861 return gen_rtx_REG (mode, 0);
2864 else
2865 return gen_rtx_REG (mode, ix86_value_regno (mode));
2868 /* Given a mode, return the register to use for a return value. */
2870 static int
2871 ix86_value_regno (enum machine_mode mode)
2873 /* Floating point return values in %st(0). */
2874 if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_FLOAT_RETURNS_IN_80387)
2875 return FIRST_FLOAT_REG;
2876 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
2877 we prevent this case when sse is not available. */
2878 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
2879 return FIRST_SSE_REG;
2880 /* Everything else in %eax. */
2881 return 0;
2884 /* Create the va_list data type. */
2886 static tree
2887 ix86_build_builtin_va_list (void)
2889 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
2891 /* For i386 we use plain pointer to argument area. */
2892 if (!TARGET_64BIT)
2893 return build_pointer_type (char_type_node);
2895 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
2896 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
2898 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
2899 unsigned_type_node);
2900 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
2901 unsigned_type_node);
2902 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
2903 ptr_type_node);
2904 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
2905 ptr_type_node);
2907 DECL_FIELD_CONTEXT (f_gpr) = record;
2908 DECL_FIELD_CONTEXT (f_fpr) = record;
2909 DECL_FIELD_CONTEXT (f_ovf) = record;
2910 DECL_FIELD_CONTEXT (f_sav) = record;
2912 TREE_CHAIN (record) = type_decl;
2913 TYPE_NAME (record) = type_decl;
2914 TYPE_FIELDS (record) = f_gpr;
2915 TREE_CHAIN (f_gpr) = f_fpr;
2916 TREE_CHAIN (f_fpr) = f_ovf;
2917 TREE_CHAIN (f_ovf) = f_sav;
2919 layout_type (record);
2921 /* The correct type is an array type of one element. */
2922 return build_array_type (record, build_index_type (size_zero_node));
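/* In C terms the record built above corresponds to the usual x86-64
   va_list layout, roughly:

     typedef struct {
       unsigned int gp_offset;        bytes into reg_save_area for GP regs
       unsigned int fp_offset;        bytes into reg_save_area for SSE regs
       void *overflow_arg_area;       next argument passed on the stack
       void *reg_save_area;           base of the register save block
     } __va_list_tag;

     typedef __va_list_tag va_list[1];    an array type of one element  */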
2925 /* Perform any actions needed for a function that is receiving a
2926 variable number of arguments.
2928 CUM is as above.
2930 MODE and TYPE are the mode and type of the current parameter.
2932 PRETEND_SIZE is a variable that should be set to the amount of stack
2933 that must be pushed by the prologue to pretend that our caller pushed it.
2936 Normally, this macro will push all remaining incoming registers on the
2937 stack and set PRETEND_SIZE to the length of the registers pushed. */
2939 void
2940 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2941 tree type, int *pretend_size ATTRIBUTE_UNUSED,
2942 int no_rtl)
2944 CUMULATIVE_ARGS next_cum;
2945 rtx save_area = NULL_RTX, mem;
2946 rtx label;
2947 rtx label_ref;
2948 rtx tmp_reg;
2949 rtx nsse_reg;
2950 int set;
2951 tree fntype;
2952 int stdarg_p;
2953 int i;
2955 if (!TARGET_64BIT)
2956 return;
2958 /* Indicate to allocate space on the stack for varargs save area. */
2959 ix86_save_varrargs_registers = 1;
2961 cfun->stack_alignment_needed = 128;
2963 fntype = TREE_TYPE (current_function_decl);
2964 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
2965 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
2966 != void_type_node));
2968 /* For varargs, we do not want to skip the dummy va_dcl argument.
2969 For stdargs, we do want to skip the last named argument. */
2970 next_cum = *cum;
2971 if (stdarg_p)
2972 function_arg_advance (&next_cum, mode, type, 1);
2974 if (!no_rtl)
2975 save_area = frame_pointer_rtx;
2977 set = get_varargs_alias_set ();
2979 for (i = next_cum.regno; i < ix86_regparm; i++)
2981 mem = gen_rtx_MEM (Pmode,
2982 plus_constant (save_area, i * UNITS_PER_WORD));
2983 set_mem_alias_set (mem, set);
2984 emit_move_insn (mem, gen_rtx_REG (Pmode,
2985 x86_64_int_parameter_registers[i]));
2988 if (next_cum.sse_nregs)
2990 /* Now emit code to save SSE registers. The AX parameter contains the number
2991 of SSE parameter registers used to call this function. We use the
2992 sse_prologue_save insn template that produces a computed jump across
2993 the SSE saves. We need some preparation work to get this working. */
2995 label = gen_label_rtx ();
2996 label_ref = gen_rtx_LABEL_REF (Pmode, label);
2998 /* Compute address to jump to :
2999 label - 5*eax + nnamed_sse_arguments*5 */
3000 tmp_reg = gen_reg_rtx (Pmode);
3001 nsse_reg = gen_reg_rtx (Pmode);
3002 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
3003 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3004 gen_rtx_MULT (Pmode, nsse_reg,
3005 GEN_INT (4))));
3006 if (next_cum.sse_regno)
3007 emit_move_insn
3008 (nsse_reg,
3009 gen_rtx_CONST (DImode,
3010 gen_rtx_PLUS (DImode,
3011 label_ref,
3012 GEN_INT (next_cum.sse_regno * 4))));
3013 else
3014 emit_move_insn (nsse_reg, label_ref);
3015 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
3017 /* Compute the address of the memory block we save into. We always use a
3018 pointer pointing 127 bytes after the first byte to store - this is needed
3019 to keep the instruction size limited to 4 bytes. */
3020 tmp_reg = gen_reg_rtx (Pmode);
3021 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
3022 plus_constant (save_area,
3023 8 * REGPARM_MAX + 127)));
3024 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
3025 set_mem_alias_set (mem, set);
3026 set_mem_align (mem, BITS_PER_WORD);
3028 /* And finally do the dirty job! */
3029 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
3030 GEN_INT (next_cum.sse_regno), label));
3035 /* Implement va_start. */
3037 void
3038 ix86_va_start (tree valist, rtx nextarg)
3040 HOST_WIDE_INT words, n_gpr, n_fpr;
3041 tree f_gpr, f_fpr, f_ovf, f_sav;
3042 tree gpr, fpr, ovf, sav, t;
3044 /* Only 64bit target needs something special. */
3045 if (!TARGET_64BIT)
3047 std_expand_builtin_va_start (valist, nextarg);
3048 return;
3051 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3052 f_fpr = TREE_CHAIN (f_gpr);
3053 f_ovf = TREE_CHAIN (f_fpr);
3054 f_sav = TREE_CHAIN (f_ovf);
3056 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3057 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr);
3058 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr);
3059 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf);
3060 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav);
3062 /* Count number of gp and fp argument registers used. */
3063 words = current_function_args_info.words;
3064 n_gpr = current_function_args_info.regno;
3065 n_fpr = current_function_args_info.sse_regno;
3067 if (TARGET_DEBUG_ARG)
3068 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
3069 (int) words, (int) n_gpr, (int) n_fpr);
3071 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
3072 build_int_2 (n_gpr * 8, 0));
3073 TREE_SIDE_EFFECTS (t) = 1;
3074 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3076 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
3077 build_int_2 (n_fpr * 16 + 8*REGPARM_MAX, 0));
3078 TREE_SIDE_EFFECTS (t) = 1;
3079 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3081 /* Find the overflow area. */
3082 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
3083 if (words != 0)
3084 t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
3085 build_int_2 (words * UNITS_PER_WORD, 0));
3086 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3087 TREE_SIDE_EFFECTS (t) = 1;
3088 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3090 /* Find the register save area.
3091 The prologue of the function saves it right above the stack frame. */
3092 t = make_tree (TREE_TYPE (sav), frame_pointer_rtx);
3093 t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
3094 TREE_SIDE_EFFECTS (t) = 1;
3095 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
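/* So after va_start in a function whose named arguments consumed, say, two
   integer and one SSE register, the fields read roughly:

     gp_offset         = 2 * 8          = 16
     fp_offset         = 6 * 8 + 1 * 16 = 64   (REGPARM_MAX * 8 skips the
                                                integer part of the save area)
     overflow_arg_area = first stack-passed argument
     reg_save_area     = frame pointer, where the prologue dumped the regs  */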
3098 /* Implement va_arg. */
3100 ix86_va_arg (tree valist, tree type)
3102 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
3103 tree f_gpr, f_fpr, f_ovf, f_sav;
3104 tree gpr, fpr, ovf, sav, t;
3105 int size, rsize;
3106 rtx lab_false, lab_over = NULL_RTX;
3107 rtx addr_rtx, r;
3108 rtx container;
3109 int indirect_p = 0;
3111 /* Only 64bit target needs something special. */
3112 if (!TARGET_64BIT)
3114 return std_expand_builtin_va_arg (valist, type);
3117 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
3118 f_fpr = TREE_CHAIN (f_gpr);
3119 f_ovf = TREE_CHAIN (f_fpr);
3120 f_sav = TREE_CHAIN (f_ovf);
3122 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
3123 gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr);
3124 fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr);
3125 ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf);
3126 sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav);
3128 size = int_size_in_bytes (type);
3129 if (size == -1)
3131 /* Passed by reference. */
3132 indirect_p = 1;
3133 type = build_pointer_type (type);
3134 size = int_size_in_bytes (type);
3136 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3138 container = construct_container (TYPE_MODE (type), type, 0,
3139 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
3141 /* Pull the value out of the saved registers ...  */
3144 addr_rtx = gen_reg_rtx (Pmode);
3146 if (container)
3148 rtx int_addr_rtx, sse_addr_rtx;
3149 int needed_intregs, needed_sseregs;
3150 int need_temp;
3152 lab_over = gen_label_rtx ();
3153 lab_false = gen_label_rtx ();
3155 examine_argument (TYPE_MODE (type), type, 0,
3156 &needed_intregs, &needed_sseregs);
3159 need_temp = ((needed_intregs && TYPE_ALIGN (type) > 64)
3160 || TYPE_ALIGN (type) > 128);
3162 /* In case we are passing a structure, verify that it is a consecutive block
3163 on the register save area. If not, we need to do moves. */
3164 if (!need_temp && !REG_P (container))
3166 /* Verify that all registers are strictly consecutive */
3167 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
3169 int i;
3171 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3173 rtx slot = XVECEXP (container, 0, i);
3174 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
3175 || INTVAL (XEXP (slot, 1)) != i * 16)
3176 need_temp = 1;
3179 else
3181 int i;
3183 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
3185 rtx slot = XVECEXP (container, 0, i);
3186 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
3187 || INTVAL (XEXP (slot, 1)) != i * 8)
3188 need_temp = 1;
3192 if (!need_temp)
3194 int_addr_rtx = addr_rtx;
3195 sse_addr_rtx = addr_rtx;
3197 else
3199 int_addr_rtx = gen_reg_rtx (Pmode);
3200 sse_addr_rtx = gen_reg_rtx (Pmode);
3202 /* First ensure that we fit completely in registers. */
3203 if (needed_intregs)
3205 emit_cmp_and_jump_insns (expand_expr
3206 (gpr, NULL_RTX, SImode, EXPAND_NORMAL),
3207 GEN_INT ((REGPARM_MAX - needed_intregs +
3208 1) * 8), GE, const1_rtx, SImode,
3209 1, lab_false);
3211 if (needed_sseregs)
3213 emit_cmp_and_jump_insns (expand_expr
3214 (fpr, NULL_RTX, SImode, EXPAND_NORMAL),
3215 GEN_INT ((SSE_REGPARM_MAX -
3216 needed_sseregs + 1) * 16 +
3217 REGPARM_MAX * 8), GE, const1_rtx,
3218 SImode, 1, lab_false);
3221 /* Compute index to start of area used for integer regs. */
3222 if (needed_intregs)
3224 t = build (PLUS_EXPR, ptr_type_node, sav, gpr);
3225 r = expand_expr (t, int_addr_rtx, Pmode, EXPAND_NORMAL);
3226 if (r != int_addr_rtx)
3227 emit_move_insn (int_addr_rtx, r);
3229 if (needed_sseregs)
3231 t = build (PLUS_EXPR, ptr_type_node, sav, fpr);
3232 r = expand_expr (t, sse_addr_rtx, Pmode, EXPAND_NORMAL);
3233 if (r != sse_addr_rtx)
3234 emit_move_insn (sse_addr_rtx, r);
3236 if (need_temp)
3238 int i;
3239 rtx mem;
3240 rtx x;
3242 /* Never use the memory itself, as it has the alias set. */
3243 x = XEXP (assign_temp (type, 0, 1, 0), 0);
3244 mem = gen_rtx_MEM (BLKmode, x);
3245 force_operand (x, addr_rtx);
3246 set_mem_alias_set (mem, get_varargs_alias_set ());
3247 set_mem_align (mem, BITS_PER_UNIT);
3249 for (i = 0; i < XVECLEN (container, 0); i++)
3251 rtx slot = XVECEXP (container, 0, i);
3252 rtx reg = XEXP (slot, 0);
3253 enum machine_mode mode = GET_MODE (reg);
3254 rtx src_addr;
3255 rtx src_mem;
3256 int src_offset;
3257 rtx dest_mem;
3259 if (SSE_REGNO_P (REGNO (reg)))
3261 src_addr = sse_addr_rtx;
3262 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
3264 else
3266 src_addr = int_addr_rtx;
3267 src_offset = REGNO (reg) * 8;
3269 src_mem = gen_rtx_MEM (mode, src_addr);
3270 set_mem_alias_set (src_mem, get_varargs_alias_set ());
3271 src_mem = adjust_address (src_mem, mode, src_offset);
3272 dest_mem = adjust_address (mem, mode, INTVAL (XEXP (slot, 1)));
3273 emit_move_insn (dest_mem, src_mem);
3277 if (needed_intregs)
3280 build (PLUS_EXPR, TREE_TYPE (gpr), gpr,
3281 build_int_2 (needed_intregs * 8, 0));
3282 t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr, t);
3283 TREE_SIDE_EFFECTS (t) = 1;
3284 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3286 if (needed_sseregs)
3289 build (PLUS_EXPR, TREE_TYPE (fpr), fpr,
3290 build_int_2 (needed_sseregs * 16, 0));
3291 t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr, t);
3292 TREE_SIDE_EFFECTS (t) = 1;
3293 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3296 emit_jump_insn (gen_jump (lab_over));
3297 emit_barrier ();
3298 emit_label (lab_false);
3301 /* ... otherwise out of the overflow area. */
3303 /* Care for on-stack alignment if needed. */
3304 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64)
3305 t = ovf;
3306 else
3308 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
3309 t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf, build_int_2 (align - 1, 0));
3310 t = build (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-align, -1));
3312 t = save_expr (t);
3314 r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL);
3315 if (r != addr_rtx)
3316 emit_move_insn (addr_rtx, r);
3319 build (PLUS_EXPR, TREE_TYPE (t), t,
3320 build_int_2 (rsize * UNITS_PER_WORD, 0));
3321 t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
3322 TREE_SIDE_EFFECTS (t) = 1;
3323 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3325 if (container)
3326 emit_label (lab_over);
3328 if (indirect_p)
3330 r = gen_rtx_MEM (Pmode, addr_rtx);
3331 set_mem_alias_set (r, get_varargs_alias_set ());
3332 emit_move_insn (addr_rtx, r);
3335 return addr_rtx;
3338 /* Return nonzero if OP is either an i387 or SSE fp register. */
3340 any_fp_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3342 return ANY_FP_REG_P (op);
3345 /* Return nonzero if OP is an i387 fp register. */
3347 fp_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3349 return FP_REG_P (op);
3352 /* Return nonzero if OP is a non-fp register_operand. */
3354 register_and_not_any_fp_reg_operand (rtx op, enum machine_mode mode)
3356 return register_operand (op, mode) && !ANY_FP_REG_P (op);
3359 /* Return nonzero if OP is a register operand other than an
3360 i387 fp register. */
3362 register_and_not_fp_reg_operand (rtx op, enum machine_mode mode)
3364 return register_operand (op, mode) && !FP_REG_P (op);
3367 /* Return nonzero if OP is general operand representable on x86_64. */
3370 x86_64_general_operand (rtx op, enum machine_mode mode)
3372 if (!TARGET_64BIT)
3373 return general_operand (op, mode);
3374 if (nonimmediate_operand (op, mode))
3375 return 1;
3376 return x86_64_sign_extended_value (op);
3379 /* Return nonzero if OP is general operand representable on x86_64
3380 as either sign extended or zero extended constant. */
3383 x86_64_szext_general_operand (rtx op, enum machine_mode mode)
3385 if (!TARGET_64BIT)
3386 return general_operand (op, mode);
3387 if (nonimmediate_operand (op, mode))
3388 return 1;
3389 return x86_64_sign_extended_value (op) || x86_64_zero_extended_value (op);
3392 /* Return nonzero if OP is nonmemory operand representable on x86_64. */
3395 x86_64_nonmemory_operand (rtx op, enum machine_mode mode)
3397 if (!TARGET_64BIT)
3398 return nonmemory_operand (op, mode);
3399 if (register_operand (op, mode))
3400 return 1;
3401 return x86_64_sign_extended_value (op);
3404 /* Return nonzero if OP is nonmemory operand acceptable by movabs patterns. */
3407 x86_64_movabs_operand (rtx op, enum machine_mode mode)
3409 if (!TARGET_64BIT || !flag_pic)
3410 return nonmemory_operand (op, mode);
3411 if (register_operand (op, mode) || x86_64_sign_extended_value (op))
3412 return 1;
3413 if (CONSTANT_P (op) && !symbolic_reference_mentioned_p (op))
3414 return 1;
3415 return 0;
3418 /* Return nonzero if OPNUM's MEM should be matched
3419 in movabs* patterns. */
3422 ix86_check_movabs (rtx insn, int opnum)
3424 rtx set, mem;
3426 set = PATTERN (insn);
3427 if (GET_CODE (set) == PARALLEL)
3428 set = XVECEXP (set, 0, 0);
3429 if (GET_CODE (set) != SET)
3430 abort ();
3431 mem = XEXP (set, opnum);
3432 while (GET_CODE (mem) == SUBREG)
3433 mem = SUBREG_REG (mem);
3434 if (GET_CODE (mem) != MEM)
3435 abort ();
3436 return (volatile_ok || !MEM_VOLATILE_P (mem));
3439 /* Return nonzero if OP is nonmemory operand representable on x86_64. */
3442 x86_64_szext_nonmemory_operand (rtx op, enum machine_mode mode)
3444 if (!TARGET_64BIT)
3445 return nonmemory_operand (op, mode);
3446 if (register_operand (op, mode))
3447 return 1;
3448 return x86_64_sign_extended_value (op) || x86_64_zero_extended_value (op);
3451 /* Return nonzero if OP is immediate operand representable on x86_64. */
3454 x86_64_immediate_operand (rtx op, enum machine_mode mode)
3456 if (!TARGET_64BIT)
3457 return immediate_operand (op, mode);
3458 return x86_64_sign_extended_value (op);
3461 /* Return nonzero if OP is immediate operand representable on x86_64. */
3464 x86_64_zext_immediate_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3466 return x86_64_zero_extended_value (op);
3469 /* Return nonzero if OP is CONST_INT >= 1 and <= 31 (a valid operand
3470 for shift & compare patterns, as shifting by 0 does not change flags),
3471 else return zero. */
3474 const_int_1_31_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3476 return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 1 && INTVAL (op) <= 31);
3479 /* Returns 1 if OP is either a symbol reference or a sum of a symbol
3480 reference and a constant. */
3483 symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3485 switch (GET_CODE (op))
3487 case SYMBOL_REF:
3488 case LABEL_REF:
3489 return 1;
3491 case CONST:
3492 op = XEXP (op, 0);
3493 if (GET_CODE (op) == SYMBOL_REF
3494 || GET_CODE (op) == LABEL_REF
3495 || (GET_CODE (op) == UNSPEC
3496 && (XINT (op, 1) == UNSPEC_GOT
3497 || XINT (op, 1) == UNSPEC_GOTOFF
3498 || XINT (op, 1) == UNSPEC_GOTPCREL)))
3499 return 1;
3500 if (GET_CODE (op) != PLUS
3501 || GET_CODE (XEXP (op, 1)) != CONST_INT)
3502 return 0;
3504 op = XEXP (op, 0);
3505 if (GET_CODE (op) == SYMBOL_REF
3506 || GET_CODE (op) == LABEL_REF)
3507 return 1;
3508 /* Only @GOTOFF gets offsets. */
3509 if (GET_CODE (op) != UNSPEC
3510 || XINT (op, 1) != UNSPEC_GOTOFF)
3511 return 0;
3513 op = XVECEXP (op, 0, 0);
3514 if (GET_CODE (op) == SYMBOL_REF
3515 || GET_CODE (op) == LABEL_REF)
3516 return 1;
3517 return 0;
3519 default:
3520 return 0;
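/* Forms accepted above include, for example:

     (symbol_ref "foo")
     (const (plus (symbol_ref "foo") (const_int 8)))
     (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF))

   whereas (const (plus (unspec ... UNSPEC_GOT) (const_int 8))) is rejected,
   since only @GOTOFF references may carry an offset.  */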
3524 /* Return true if the operand contains a @GOT or @GOTOFF reference. */
3527 pic_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3529 if (GET_CODE (op) != CONST)
3530 return 0;
3531 op = XEXP (op, 0);
3532 if (TARGET_64BIT)
3534 if (GET_CODE (op) == UNSPEC
3535 && XINT (op, 1) == UNSPEC_GOTPCREL)
3536 return 1;
3537 if (GET_CODE (op) == PLUS
3538 && GET_CODE (XEXP (op, 0)) == UNSPEC
3539 && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL)
3540 return 1;
3542 else
3544 if (GET_CODE (op) == UNSPEC)
3545 return 1;
3546 if (GET_CODE (op) != PLUS
3547 || GET_CODE (XEXP (op, 1)) != CONST_INT)
3548 return 0;
3549 op = XEXP (op, 0);
3550 if (GET_CODE (op) == UNSPEC)
3551 return 1;
3553 return 0;
3556 /* Return true if OP is a symbolic operand that resolves locally. */
3558 static int
3559 local_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3561 if (GET_CODE (op) == CONST
3562 && GET_CODE (XEXP (op, 0)) == PLUS
3563 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
3564 op = XEXP (XEXP (op, 0), 0);
3566 if (GET_CODE (op) == LABEL_REF)
3567 return 1;
3569 if (GET_CODE (op) != SYMBOL_REF)
3570 return 0;
3572 if (SYMBOL_REF_LOCAL_P (op))
3573 return 1;
3575 /* There is, however, a not insubstantial body of code in the rest of
3576 the compiler that assumes it can just stick the results of
3577 ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done. */
3578 /* ??? This is a hack. Should update the body of the compiler to
3579 always create a DECL and invoke targetm.encode_section_info. */
3580 if (strncmp (XSTR (op, 0), internal_label_prefix,
3581 internal_label_prefix_len) == 0)
3582 return 1;
3584 return 0;
3587 /* Test for various thread-local symbols. */
3590 tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3592 if (GET_CODE (op) != SYMBOL_REF)
3593 return 0;
3594 return SYMBOL_REF_TLS_MODEL (op);
3597 static inline int
3598 tls_symbolic_operand_1 (rtx op, enum tls_model kind)
3600 if (GET_CODE (op) != SYMBOL_REF)
3601 return 0;
3602 return SYMBOL_REF_TLS_MODEL (op) == kind;
3606 global_dynamic_symbolic_operand (rtx op,
3607 enum machine_mode mode ATTRIBUTE_UNUSED)
3609 return tls_symbolic_operand_1 (op, TLS_MODEL_GLOBAL_DYNAMIC);
3613 local_dynamic_symbolic_operand (rtx op,
3614 enum machine_mode mode ATTRIBUTE_UNUSED)
3616 return tls_symbolic_operand_1 (op, TLS_MODEL_LOCAL_DYNAMIC);
3620 initial_exec_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3622 return tls_symbolic_operand_1 (op, TLS_MODEL_INITIAL_EXEC);
3626 local_exec_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3628 return tls_symbolic_operand_1 (op, TLS_MODEL_LOCAL_EXEC);
3631 /* Test for a valid operand for a call instruction. Don't allow the
3632 arg pointer register or virtual regs since they may decay into
3633 reg + const, which the patterns can't handle. */
3636 call_insn_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3638 /* Disallow indirect through a virtual register. This leads to
3639 compiler aborts when trying to eliminate them. */
3640 if (GET_CODE (op) == REG
3641 && (op == arg_pointer_rtx
3642 || op == frame_pointer_rtx
3643 || (REGNO (op) >= FIRST_PSEUDO_REGISTER
3644 && REGNO (op) <= LAST_VIRTUAL_REGISTER)))
3645 return 0;
3647 /* Disallow `call 1234'. Due to varying assembler lameness this
3648 gets either rejected or translated to `call .+1234'. */
3649 if (GET_CODE (op) == CONST_INT)
3650 return 0;
3652 /* Explicitly allow SYMBOL_REF even if pic. */
3653 if (GET_CODE (op) == SYMBOL_REF)
3654 return 1;
3656 /* Otherwise we can allow any general_operand in the address. */
3657 return general_operand (op, Pmode);
3660 /* Test for a valid operand for a call instruction. Don't allow the
3661 arg pointer register or virtual regs since they may decay into
3662 reg + const, which the patterns can't handle. */
3665 sibcall_insn_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3667 /* Disallow indirect through a virtual register. This leads to
3668 compiler aborts when trying to eliminate them. */
3669 if (GET_CODE (op) == REG
3670 && (op == arg_pointer_rtx
3671 || op == frame_pointer_rtx
3672 || (REGNO (op) >= FIRST_PSEUDO_REGISTER
3673 && REGNO (op) <= LAST_VIRTUAL_REGISTER)))
3674 return 0;
3676 /* Explicitly allow SYMBOL_REF even if pic. */
3677 if (GET_CODE (op) == SYMBOL_REF)
3678 return 1;
3680 /* Otherwise we can only allow register operands. */
3681 return register_operand (op, Pmode);
3685 constant_call_address_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3687 if (GET_CODE (op) == CONST
3688 && GET_CODE (XEXP (op, 0)) == PLUS
3689 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
3690 op = XEXP (XEXP (op, 0), 0);
3691 return GET_CODE (op) == SYMBOL_REF;
3694 /* Match exactly zero and one. */
3697 const0_operand (rtx op, enum machine_mode mode)
3699 return op == CONST0_RTX (mode);
3703 const1_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3705 return op == const1_rtx;
3708 /* Match 2, 4, or 8. Used for leal multiplicands. */
3711 const248_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3713 return (GET_CODE (op) == CONST_INT
3714 && (INTVAL (op) == 2 || INTVAL (op) == 4 || INTVAL (op) == 8));
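/* For illustration: these are the non-trivial scale factors the SIB byte can
   encode, e.g. `leal (%ebx,%ecx,4), %eax' computes ebx + ecx*4 in one insn.  */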
3718 const_0_to_3_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3720 return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 4);
3724 const_0_to_7_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3726 return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 8);
3730 const_0_to_15_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3732 return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 16);
3736 const_0_to_255_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3738 return (GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 256);
3742 /* True if this is a constant appropriate for an increment or decrement. */
3745 incdec_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3747 /* On the Pentium 4, the inc and dec operations cause an extra dependency on the
3748 flags register, since the carry flag is not updated. */
3749 if (TARGET_PENTIUM4 && !optimize_size)
3750 return 0;
3751 return op == const1_rtx || op == constm1_rtx;
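/* For illustration: `incl %eax' leaves CF unchanged, so the Pentium 4 must
   merge the new flags with the old CF value, creating a false dependency;
   `addl $1, %eax' writes all the flags and avoids that stall.  */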
3754 /* Return nonzero if OP is acceptable as operand of DImode shift
3755 expander. */
3758 shiftdi_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3760 if (TARGET_64BIT)
3761 return nonimmediate_operand (op, mode);
3762 else
3763 return register_operand (op, mode);
3766 /* Return false if this is the stack pointer, or any other fake
3767 register eliminable to the stack pointer. Otherwise, this is
3768 a register operand.
3770 This is used to prevent esp from being used as an index reg,
3771 which would only happen in pathological cases. */
3774 reg_no_sp_operand (rtx op, enum machine_mode mode)
3776 rtx t = op;
3777 if (GET_CODE (t) == SUBREG)
3778 t = SUBREG_REG (t);
3779 if (t == stack_pointer_rtx || t == arg_pointer_rtx || t == frame_pointer_rtx)
3780 return 0;
3782 return register_operand (op, mode);
3786 mmx_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3788 return MMX_REG_P (op);
3791 /* Return false if this is any eliminable register. Otherwise
3792 general_operand. */
3795 general_no_elim_operand (rtx op, enum machine_mode mode)
3797 rtx t = op;
3798 if (GET_CODE (t) == SUBREG)
3799 t = SUBREG_REG (t);
3800 if (t == arg_pointer_rtx || t == frame_pointer_rtx
3801 || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx
3802 || t == virtual_stack_dynamic_rtx)
3803 return 0;
3804 if (REG_P (t)
3805 && REGNO (t) >= FIRST_VIRTUAL_REGISTER
3806 && REGNO (t) <= LAST_VIRTUAL_REGISTER)
3807 return 0;
3809 return general_operand (op, mode);
3812 /* Return false if this is any eliminable register. Otherwise
3813 register_operand or const_int. */
3816 nonmemory_no_elim_operand (rtx op, enum machine_mode mode)
3818 rtx t = op;
3819 if (GET_CODE (t) == SUBREG)
3820 t = SUBREG_REG (t);
3821 if (t == arg_pointer_rtx || t == frame_pointer_rtx
3822 || t == virtual_incoming_args_rtx || t == virtual_stack_vars_rtx
3823 || t == virtual_stack_dynamic_rtx)
3824 return 0;
3826 return GET_CODE (op) == CONST_INT || register_operand (op, mode);
3829 /* Return false if this is any eliminable register or stack register,
3830 otherwise work like register_operand. */
3833 index_register_operand (rtx op, enum machine_mode mode)
3835 rtx t = op;
3836 if (GET_CODE (t) == SUBREG)
3837 t = SUBREG_REG (t);
3838 if (!REG_P (t))
3839 return 0;
3840 if (t == arg_pointer_rtx
3841 || t == frame_pointer_rtx
3842 || t == virtual_incoming_args_rtx
3843 || t == virtual_stack_vars_rtx
3844 || t == virtual_stack_dynamic_rtx
3845 || REGNO (t) == STACK_POINTER_REGNUM)
3846 return 0;
3848 return general_operand (op, mode);
3851 /* Return true if op is a Q_REGS class register. */
3854 q_regs_operand (rtx op, enum machine_mode mode)
3856 if (mode != VOIDmode && GET_MODE (op) != mode)
3857 return 0;
3858 if (GET_CODE (op) == SUBREG)
3859 op = SUBREG_REG (op);
3860 return ANY_QI_REG_P (op);
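/* Note: in 32-bit mode Q_REGS is just %eax, %ebx, %ecx and %edx, the
   registers whose low bytes (%al, %bl, %cl, %dl) can be addressed directly.  */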
3863 /* Return true if op is a flags register. */
3866 flags_reg_operand (rtx op, enum machine_mode mode)
3868 if (mode != VOIDmode && GET_MODE (op) != mode)
3869 return 0;
3870 return REG_P (op) && REGNO (op) == FLAGS_REG && GET_MODE (op) != VOIDmode;
3873 /* Return true if op is a NON_Q_REGS class register. */
3876 non_q_regs_operand (rtx op, enum machine_mode mode)
3878 if (mode != VOIDmode && GET_MODE (op) != mode)
3879 return 0;
3880 if (GET_CODE (op) == SUBREG)
3881 op = SUBREG_REG (op);
3882 return NON_QI_REG_P (op);
3886 zero_extended_scalar_load_operand (rtx op,
3887 enum machine_mode mode ATTRIBUTE_UNUSED)
3889 unsigned n_elts;
3890 if (GET_CODE (op) != MEM)
3891 return 0;
3892 op = maybe_get_pool_constant (op);
3893 if (!op)
3894 return 0;
3895 if (GET_CODE (op) != CONST_VECTOR)
3896 return 0;
3897 n_elts =
3898 (GET_MODE_SIZE (GET_MODE (op)) /
3899 GET_MODE_SIZE (GET_MODE_INNER (GET_MODE (op))));
3900 for (n_elts--; n_elts > 0; n_elts--)
3902 rtx elt = CONST_VECTOR_ELT (op, n_elts);
3903 if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
3904 return 0;
3906 return 1;
3909 /* Return 1 when OP is an operand acceptable for a standard SSE move. */
3911 vector_move_operand (rtx op, enum machine_mode mode)
3913 if (nonimmediate_operand (op, mode))
3914 return 1;
3915 if (GET_MODE (op) != mode && mode != VOIDmode)
3916 return 0;
3917 return (op == CONST0_RTX (GET_MODE (op)));
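/* The all-zeros constant is accepted above because it never needs a memory
   load; the SSE move expanders can materialize it with pxor/xorps.  */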
3920 /* Return true if OP is a valid address and does not contain
3921 a segment override. */
3924 no_seg_address_operand (rtx op, enum machine_mode mode)
3926 struct ix86_address parts;
3928 if (! address_operand (op, mode))
3929 return 0;
3931 if (! ix86_decompose_address (op, &parts))
3932 abort ();
3934 return parts.seg == SEG_DEFAULT;
3937 /* Return 1 if OP is a comparison that can be used in the CMPSS/CMPPS
3938 insns. */
3940 sse_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
3942 enum rtx_code code = GET_CODE (op);
3943 switch (code)
3945 /* Operations supported directly. */
3946 case EQ:
3947 case LT:
3948 case LE:
3949 case UNORDERED:
3950 case NE:
3951 case UNGE:
3952 case UNGT:
3953 case ORDERED:
3954 return 1;
3955 /* These are equivalent to ones above in non-IEEE comparisons. */
3956 case UNEQ:
3957 case UNLT:
3958 case UNLE:
3959 case LTGT:
3960 case GE:
3961 case GT:
3962 return !TARGET_IEEE_FP;
3963 default:
3964 return 0;
3967 /* Return 1 if OP is a valid comparison operator in valid mode. */
3969 ix86_comparison_operator (rtx op, enum machine_mode mode)
3971 enum machine_mode inmode;
3972 enum rtx_code code = GET_CODE (op);
3973 if (mode != VOIDmode && GET_MODE (op) != mode)
3974 return 0;
3975 if (GET_RTX_CLASS (code) != '<')
3976 return 0;
3977 inmode = GET_MODE (XEXP (op, 0));
3979 if (inmode == CCFPmode || inmode == CCFPUmode)
3981 enum rtx_code second_code, bypass_code;
3982 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
3983 return (bypass_code == NIL && second_code == NIL);
3985 switch (code)
3987 case EQ: case NE:
3988 return 1;
3989 case LT: case GE:
3990 if (inmode == CCmode || inmode == CCGCmode
3991 || inmode == CCGOCmode || inmode == CCNOmode)
3992 return 1;
3993 return 0;
3994 case LTU: case GTU: case LEU: case ORDERED: case UNORDERED: case GEU:
3995 if (inmode == CCmode)
3996 return 1;
3997 return 0;
3998 case GT: case LE:
3999 if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
4000 return 1;
4001 return 0;
4002 default:
4003 return 0;
4007 /* Return 1 if OP is a valid comparison operator testing carry flag
4008 to be set. */
4010 ix86_carry_flag_operator (rtx op, enum machine_mode mode)
4012 enum machine_mode inmode;
4013 enum rtx_code code = GET_CODE (op);
4015 if (mode != VOIDmode && GET_MODE (op) != mode)
4016 return 0;
4017 if (GET_RTX_CLASS (code) != '<')
4018 return 0;
4019 inmode = GET_MODE (XEXP (op, 0));
4020 if (GET_CODE (XEXP (op, 0)) != REG
4021 || REGNO (XEXP (op, 0)) != 17
4022 || XEXP (op, 1) != const0_rtx)
4023 return 0;
4025 if (inmode == CCFPmode || inmode == CCFPUmode)
4027 enum rtx_code second_code, bypass_code;
4029 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
4030 if (bypass_code != NIL || second_code != NIL)
4031 return 0;
4032 code = ix86_fp_compare_code_to_integer (code);
4034 else if (inmode != CCmode)
4035 return 0;
4036 return code == LTU;
4039 /* Return 1 if OP is a comparison operator that can be issued by fcmov. */
4042 fcmov_comparison_operator (rtx op, enum machine_mode mode)
4044 enum machine_mode inmode;
4045 enum rtx_code code = GET_CODE (op);
4047 if (mode != VOIDmode && GET_MODE (op) != mode)
4048 return 0;
4049 if (GET_RTX_CLASS (code) != '<')
4050 return 0;
4051 inmode = GET_MODE (XEXP (op, 0));
4052 if (inmode == CCFPmode || inmode == CCFPUmode)
4054 enum rtx_code second_code, bypass_code;
4056 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
4057 if (bypass_code != NIL || second_code != NIL)
4058 return 0;
4059 code = ix86_fp_compare_code_to_integer (code);
4061 /* The i387 supports only a limited set of condition codes. */
4062 switch (code)
4064 case LTU: case GTU: case LEU: case GEU:
4065 if (inmode == CCmode || inmode == CCFPmode || inmode == CCFPUmode)
4066 return 1;
4067 return 0;
4068 case ORDERED: case UNORDERED:
4069 case EQ: case NE:
4070 return 1;
4071 default:
4072 return 0;
4076 /* Return 1 if OP is a binary operator that can be promoted to wider mode. */
4079 promotable_binary_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4081 switch (GET_CODE (op))
4083 case MULT:
4084 /* Modern CPUs have the same latency for HImode and SImode multiplies,
4085 but the 386 and 486 do HImode multiplies faster. */
4086 return ix86_tune > PROCESSOR_I486;
4087 case PLUS:
4088 case AND:
4089 case IOR:
4090 case XOR:
4091 case ASHIFT:
4092 return 1;
4093 default:
4094 return 0;
4098 /* Nearly general operand, but accept any const_double, since we wish
4099 to be able to drop them into memory rather than have them get pulled
4100 into registers. */
4103 cmp_fp_expander_operand (rtx op, enum machine_mode mode)
4105 if (mode != VOIDmode && mode != GET_MODE (op))
4106 return 0;
4107 if (GET_CODE (op) == CONST_DOUBLE)
4108 return 1;
4109 return general_operand (op, mode);
4112 /* Match an SI or HImode register for a zero_extract. */
4115 ext_register_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4117 int regno;
4118 if ((!TARGET_64BIT || GET_MODE (op) != DImode)
4119 && GET_MODE (op) != SImode && GET_MODE (op) != HImode)
4120 return 0;
4122 if (!register_operand (op, VOIDmode))
4123 return 0;
4125 /* Be careful to accept only registers having upper parts. */
4126 regno = REG_P (op) ? REGNO (op) : REGNO (SUBREG_REG (op));
4127 return (regno > LAST_VIRTUAL_REGISTER || regno < 4);
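/* Hard registers 0..3 (%eax, %edx, %ecx, %ebx) are the only ones with an
   addressable high byte (%ah etc.), which the zero_extract patterns need;
   anything above LAST_VIRTUAL_REGISTER is a pseudo that may still end up
   in one of them.  */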
4130 /* Return 1 if this is a valid binary floating-point operation.
4131 OP is the expression matched, and MODE is its mode. */
4134 binary_fp_operator (rtx op, enum machine_mode mode)
4136 if (mode != VOIDmode && mode != GET_MODE (op))
4137 return 0;
4139 switch (GET_CODE (op))
4141 case PLUS:
4142 case MINUS:
4143 case MULT:
4144 case DIV:
4145 return GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT;
4147 default:
4148 return 0;
4153 mult_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4155 return GET_CODE (op) == MULT;
4159 div_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
4161 return GET_CODE (op) == DIV;
4165 arith_or_logical_operator (rtx op, enum machine_mode mode)
4167 return ((mode == VOIDmode || GET_MODE (op) == mode)
4168 && (GET_RTX_CLASS (GET_CODE (op)) == 'c'
4169 || GET_RTX_CLASS (GET_CODE (op)) == '2'));
4172 /* Returns 1 if OP is a memory operand with a displacement. */
4175 memory_displacement_operand (rtx op, enum machine_mode mode)
4177 struct ix86_address parts;
4179 if (! memory_operand (op, mode))
4180 return 0;
4182 if (! ix86_decompose_address (XEXP (op, 0), &parts))
4183 abort ();
4185 return parts.disp != NULL_RTX;
4188 /* To avoid problems when jump re-emits comparisons like testqi_ext_ccno_0,
4189 re-recognize the operand to avoid a copy_to_mode_reg that will fail.
4191 ??? It seems likely that this will only work because cmpsi is an
4192 expander, and no actual insns use this. */
4195 cmpsi_operand (rtx op, enum machine_mode mode)
4197 if (nonimmediate_operand (op, mode))
4198 return 1;
4200 if (GET_CODE (op) == AND
4201 && GET_MODE (op) == SImode
4202 && GET_CODE (XEXP (op, 0)) == ZERO_EXTRACT
4203 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT
4204 && GET_CODE (XEXP (XEXP (op, 0), 2)) == CONST_INT
4205 && INTVAL (XEXP (XEXP (op, 0), 1)) == 8
4206 && INTVAL (XEXP (XEXP (op, 0), 2)) == 8
4207 && GET_CODE (XEXP (op, 1)) == CONST_INT)
4208 return 1;
4210 return 0;
4213 /* Returns 1 if OP is a memory operand that cannot be represented by the
4214 modRM array. */
4217 long_memory_operand (rtx op, enum machine_mode mode)
4219 if (! memory_operand (op, mode))
4220 return 0;
4222 return memory_address_length (op) != 0;
4225 /* Return nonzero if the rtx is known aligned. */
4228 aligned_operand (rtx op, enum machine_mode mode)
4230 struct ix86_address parts;
4232 if (!general_operand (op, mode))
4233 return 0;
4235 /* Registers and immediate operands are always "aligned". */
4236 if (GET_CODE (op) != MEM)
4237 return 1;
4239 /* Don't even try to do any aligned optimizations with volatiles. */
4240 if (MEM_VOLATILE_P (op))
4241 return 0;
4243 op = XEXP (op, 0);
4245 /* Pushes and pops are only valid on the stack pointer. */
4246 if (GET_CODE (op) == PRE_DEC
4247 || GET_CODE (op) == POST_INC)
4248 return 1;
4250 /* Decode the address. */
4251 if (! ix86_decompose_address (op, &parts))
4252 abort ();
4254 if (parts.base && GET_CODE (parts.base) == SUBREG)
4255 parts.base = SUBREG_REG (parts.base);
4256 if (parts.index && GET_CODE (parts.index) == SUBREG)
4257 parts.index = SUBREG_REG (parts.index);
4259 /* Look for some component that isn't known to be aligned. */
4260 if (parts.index)
4262 if (parts.scale < 4
4263 && REGNO_POINTER_ALIGN (REGNO (parts.index)) < 32)
4264 return 0;
4266 if (parts.base)
4268 if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
4269 return 0;
4271 if (parts.disp)
4273 if (GET_CODE (parts.disp) != CONST_INT
4274 || (INTVAL (parts.disp) & 3) != 0)
4275 return 0;
4278 /* Didn't find one -- this must be an aligned address. */
4279 return 1;
4282 /* Initialize the table of extra 80387 mathematical constants. */
4284 static void
4285 init_ext_80387_constants (void)
4287 static const char * cst[5] =
4289 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4290 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4291 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4292 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4293 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4295 int i;
4297 for (i = 0; i < 5; i++)
4299 real_from_string (&ext_80387_constants_table[i], cst[i]);
4300 /* Ensure each constant is rounded to XFmode precision. */
4301 real_convert (&ext_80387_constants_table[i],
4302 XFmode, &ext_80387_constants_table[i]);
4305 ext_80387_constants_init = 1;
4308 /* Return true if the constant is something that can be loaded with
4309 a special instruction. */
4312 standard_80387_constant_p (rtx x)
4314 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4315 return -1;
4317 if (x == CONST0_RTX (GET_MODE (x)))
4318 return 1;
4319 if (x == CONST1_RTX (GET_MODE (x)))
4320 return 2;
4322 /* For XFmode constants, try to find a special 80387 instruction on
4323 those CPUs that benefit from them. */
4324 if (GET_MODE (x) == XFmode
4325 && x86_ext_80387_constants & TUNEMASK)
4327 REAL_VALUE_TYPE r;
4328 int i;
4330 if (! ext_80387_constants_init)
4331 init_ext_80387_constants ();
4333 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4334 for (i = 0; i < 5; i++)
4335 if (real_identical (&r, &ext_80387_constants_table[i]))
4336 return i + 3;
4339 return 0;
4342 /* Return the opcode of the special instruction to be used to load
4343 the constant X. */
4345 const char *
4346 standard_80387_constant_opcode (rtx x)
4348 switch (standard_80387_constant_p (x))
4350 case 1:
4351 return "fldz";
4352 case 2:
4353 return "fld1";
4354 case 3:
4355 return "fldlg2";
4356 case 4:
4357 return "fldln2";
4358 case 5:
4359 return "fldl2e";
4360 case 6:
4361 return "fldl2t";
4362 case 7:
4363 return "fldpi";
4365 abort ();
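/* For example, a CONST_DOUBLE of value 1.0 makes standard_80387_constant_p
   return 2, so it is loaded with a single `fld1' rather than from memory.  */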
4368 /* Return the CONST_DOUBLE representing the 80387 constant that is
4369 loaded by the specified special instruction. The argument IDX
4370 matches the return value from standard_80387_constant_p. */
4373 standard_80387_constant_rtx (int idx)
4375 int i;
4377 if (! ext_80387_constants_init)
4378 init_ext_80387_constants ();
4380 switch (idx)
4382 case 3:
4383 case 4:
4384 case 5:
4385 case 6:
4386 case 7:
4387 i = idx - 3;
4388 break;
4390 default:
4391 abort ();
4394 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4395 XFmode);
4398 /* Return 1 if X is an FP constant we can load into an SSE register without using memory. */
4401 standard_sse_constant_p (rtx x)
4403 if (x == const0_rtx)
4404 return 1;
4405 return (x == CONST0_RTX (GET_MODE (x)));
4408 /* Returns 1 if OP contains a symbol reference. */
4411 symbolic_reference_mentioned_p (rtx op)
4413 const char *fmt;
4414 int i;
4416 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4417 return 1;
4419 fmt = GET_RTX_FORMAT (GET_CODE (op));
4420 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4422 if (fmt[i] == 'E')
4424 int j;
4426 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4427 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4428 return 1;
4431 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4432 return 1;
4435 return 0;
4438 /* Return 1 if it is appropriate to emit `ret' instructions in the
4439 body of a function. Do this only if the epilogue is simple, needing a
4440 couple of insns. Prior to reloading, we can't tell how many registers
4441 must be saved, so return 0 then. Return 0 if there is no frame
4442 marker to de-allocate.
4444 If NON_SAVING_SETJMP is defined and true, then it is not possible
4445 for the epilogue to be simple, so return 0. This is a special case
4446 since NON_SAVING_SETJMP will not cause regs_ever_live to change
4447 until final, but jump_optimize may need to know sooner if a
4448 `return' is OK. */
4451 ix86_can_use_return_insn_p (void)
4453 struct ix86_frame frame;
4455 #ifdef NON_SAVING_SETJMP
4456 if (NON_SAVING_SETJMP && current_function_calls_setjmp)
4457 return 0;
4458 #endif
4460 if (! reload_completed || frame_pointer_needed)
4461 return 0;
4463 /* Don't allow popping more than 32K bytes of arguments, since that's all
4464 we can do with one instruction. */
4465 if (current_function_pops_args
4466 && current_function_args_size >= 32768)
4467 return 0;
4469 ix86_compute_frame_layout (&frame);
4470 return frame.to_allocate == 0 && frame.nregs == 0;
4473 /* Return 1 if VALUE can be stored in the sign extended immediate field. */
4475 x86_64_sign_extended_value (rtx value)
4477 switch (GET_CODE (value))
4479 /* CONST_DOUBLEs never match, since HOST_BITS_PER_WIDE_INT is known
4480 to be at least 32, and thus all acceptable constants are
4481 represented as CONST_INT. */
4482 case CONST_INT:
4483 if (HOST_BITS_PER_WIDE_INT == 32)
4484 return 1;
4485 else
4487 HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (value), DImode);
4488 return trunc_int_for_mode (val, SImode) == val;
4490 break;
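/* I.e. the value must fit in a sign-extended 32-bit immediate: for example
   0x7fffffff is accepted while 0x80000000 is not.  */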
4492 /* For certain code models, the symbolic references are known to fit;
4493 in the CM_SMALL_PIC model we know a reference fits if it is local to the
4494 shared library. Don't count TLS SYMBOL_REFs here, since they should fit
4495 only when inside of an UNSPEC, handled below. */
4496 case SYMBOL_REF:
4497 /* TLS symbols are not constant. */
4498 if (tls_symbolic_operand (value, Pmode))
4499 return false;
4500 return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL);
4502 /* For certain code models, the code is near as well. */
4503 case LABEL_REF:
4504 return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
4505 || ix86_cmodel == CM_KERNEL);
4507 /* We may also accept offsetted memory references in certain special
4508 cases. */
4509 case CONST:
4510 if (GET_CODE (XEXP (value, 0)) == UNSPEC)
4511 switch (XINT (XEXP (value, 0), 1))
4513 case UNSPEC_GOTPCREL:
4514 case UNSPEC_DTPOFF:
4515 case UNSPEC_GOTNTPOFF:
4516 case UNSPEC_NTPOFF:
4517 return 1;
4518 default:
4519 break;
4521 if (GET_CODE (XEXP (value, 0)) == PLUS)
4523 rtx op1 = XEXP (XEXP (value, 0), 0);
4524 rtx op2 = XEXP (XEXP (value, 0), 1);
4525 HOST_WIDE_INT offset;
4527 if (ix86_cmodel == CM_LARGE)
4528 return 0;
4529 if (GET_CODE (op2) != CONST_INT)
4530 return 0;
4531 offset = trunc_int_for_mode (INTVAL (op2), DImode);
4532 switch (GET_CODE (op1))
4534 case SYMBOL_REF:
4535 /* For CM_SMALL assume that the highest object ends at least 16MB below
4536 the 31-bit boundary. We may also accept pretty
4537 large negative constants, knowing that all objects are
4538 in the positive half of the address space. */
4539 if (ix86_cmodel == CM_SMALL
4540 && offset < 16*1024*1024
4541 && trunc_int_for_mode (offset, SImode) == offset)
4542 return 1;
4543 /* For CM_KERNEL we know that all objects reside in the
4544 negative half of the 32-bit address space. We must not
4545 accept negative offsets, since they may push an address just out of
4546 range, but we may accept pretty large positive ones. */
4547 if (ix86_cmodel == CM_KERNEL
4548 && offset > 0
4549 && trunc_int_for_mode (offset, SImode) == offset)
4550 return 1;
4551 break;
4552 case LABEL_REF:
4553 /* These conditions are similar to SYMBOL_REF ones, just the
4554 constraints for code models differ. */
4555 if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
4556 && offset < 16*1024*1024
4557 && trunc_int_for_mode (offset, SImode) == offset)
4558 return 1;
4559 if (ix86_cmodel == CM_KERNEL
4560 && offset > 0
4561 && trunc_int_for_mode (offset, SImode) == offset)
4562 return 1;
4563 break;
4564 case UNSPEC:
4565 switch (XINT (op1, 1))
4567 case UNSPEC_DTPOFF:
4568 case UNSPEC_NTPOFF:
4569 if (offset > 0
4570 && trunc_int_for_mode (offset, SImode) == offset)
4571 return 1;
4573 break;
4574 default:
4575 return 0;
4578 return 0;
4579 default:
4580 return 0;
4584 /* Return 1 if VALUE can be stored in the zero extended immediate field. */
4586 x86_64_zero_extended_value (rtx value)
4588 switch (GET_CODE (value))
4590 case CONST_DOUBLE:
4591 if (HOST_BITS_PER_WIDE_INT == 32)
4592 return (GET_MODE (value) == VOIDmode
4593 && !CONST_DOUBLE_HIGH (value));
4594 else
4595 return 0;
4596 case CONST_INT:
4597 if (HOST_BITS_PER_WIDE_INT == 32)
4598 return INTVAL (value) >= 0;
4599 else
4600 return !(INTVAL (value) & ~(HOST_WIDE_INT) 0xffffffff);
4601 break;
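/* I.e. the value must fit in an unsigned 32-bit immediate: for example
   0xffffffff is accepted here even though the sign-extending test above
   rejects it.  */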
4603 /* For certain code models, the symbolic references are known to fit. */
4604 case SYMBOL_REF:
4605 /* TLS symbols are not constant. */
4606 if (tls_symbolic_operand (value, Pmode))
4607 return false;
4608 return ix86_cmodel == CM_SMALL;
4610 /* For certain code models, the code is near as well. */
4611 case LABEL_REF:
4612 return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;
4614 /* We may also accept offsetted memory references in certain special
4615 cases. */
4616 case CONST:
4617 if (GET_CODE (XEXP (value, 0)) == PLUS)
4619 rtx op1 = XEXP (XEXP (value, 0), 0);
4620 rtx op2 = XEXP (XEXP (value, 0), 1);
4622 if (ix86_cmodel == CM_LARGE)
4623 return 0;
4624 switch (GET_CODE (op1))
4626 case SYMBOL_REF:
4627 return 0;
4628 /* For the small code model we may accept pretty large positive
4629 offsets, since one bit is available for free. Negative
4630 offsets are limited by the size of the NULL-pointer area
4631 specified by the ABI. */
4632 if (ix86_cmodel == CM_SMALL
4633 && GET_CODE (op2) == CONST_INT
4634 && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
4635 && (trunc_int_for_mode (INTVAL (op2), SImode)
4636 == INTVAL (op2)))
4637 return 1;
4638 /* ??? For the kernel, we may accept adjustment of
4639 -0x10000000, since we know that it will just convert
4640 negative address space to positive, but perhaps this
4641 is not worthwhile. */
4642 break;
4643 case LABEL_REF:
4644 /* These conditions are similar to SYMBOL_REF ones, just the
4645 constraints for code models differ. */
4646 if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
4647 && GET_CODE (op2) == CONST_INT
4648 && trunc_int_for_mode (INTVAL (op2), DImode) > -0x10000
4649 && (trunc_int_for_mode (INTVAL (op2), SImode)
4650 == INTVAL (op2)))
4651 return 1;
4652 break;
4653 default:
4654 return 0;
4657 return 0;
4658 default:
4659 return 0;
4663 /* Value should be nonzero if functions must have frame pointers.
4664 Zero means the frame pointer need not be set up (and parms may
4665 be accessed via the stack pointer) in functions that seem suitable. */
4668 ix86_frame_pointer_required (void)
4670 /* If we accessed previous frames, then the generated code expects
4671 to be able to access the saved ebp value in our frame. */
4672 if (cfun->machine->accesses_prev_frame)
4673 return 1;
4675 /* Several x86 OSes need a frame pointer for other reasons,
4676 usually pertaining to setjmp. */
4677 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4678 return 1;
4680 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4681 the frame pointer by default. Turn it back on now if we've not
4682 got a leaf function. */
4683 if (TARGET_OMIT_LEAF_FRAME_POINTER
4684 && (!current_function_is_leaf))
4685 return 1;
4687 if (current_function_profile)
4688 return 1;
4690 return 0;
4693 /* Record that the current function accesses previous call frames. */
4695 void
4696 ix86_setup_frame_addresses (void)
4698 cfun->machine->accesses_prev_frame = 1;
4701 #if defined(HAVE_GAS_HIDDEN) && defined(SUPPORTS_ONE_ONLY)
4702 # define USE_HIDDEN_LINKONCE 1
4703 #else
4704 # define USE_HIDDEN_LINKONCE 0
4705 #endif
4707 static int pic_labels_used;
4709 /* Fills in the label name that should be used for a pc thunk for
4710 the given register. */
4712 static void
4713 get_pc_thunk_name (char name[32], unsigned int regno)
4715 if (USE_HIDDEN_LINKONCE)
4716 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
4717 else
4718 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
4722 /* This function generates code for -fpic that loads %ebx with
4723 the return address of the caller and then returns. */
4725 void
4726 ix86_file_end (void)
4728 rtx xops[2];
4729 int regno;
4731 for (regno = 0; regno < 8; ++regno)
4733 char name[32];
4735 if (! ((pic_labels_used >> regno) & 1))
4736 continue;
4738 get_pc_thunk_name (name, regno);
4740 if (USE_HIDDEN_LINKONCE)
4742 tree decl;
4744 decl = build_decl (FUNCTION_DECL, get_identifier (name),
4745 error_mark_node);
4746 TREE_PUBLIC (decl) = 1;
4747 TREE_STATIC (decl) = 1;
4748 DECL_ONE_ONLY (decl) = 1;
4750 (*targetm.asm_out.unique_section) (decl, 0);
4751 named_section (decl, NULL, 0);
4753 (*targetm.asm_out.globalize_label) (asm_out_file, name);
4754 fputs ("\t.hidden\t", asm_out_file);
4755 assemble_name (asm_out_file, name);
4756 fputc ('\n', asm_out_file);
4757 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
4759 else
4761 text_section ();
4762 ASM_OUTPUT_LABEL (asm_out_file, name);
4765 xops[0] = gen_rtx_REG (SImode, regno);
4766 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
4767 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
4768 output_asm_insn ("ret", xops);
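/* The thunk body emitted above is just `movl (%esp), %reg; ret': it copies
   the thunk's own return address, i.e. the address of the instruction
   following the caller's call, into the chosen register.  */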
4771 if (NEED_INDICATE_EXEC_STACK)
4772 file_end_indicate_exec_stack ();
4775 /* Emit code for the SET_GOT patterns. */
4777 const char *
4778 output_set_got (rtx dest)
4780 rtx xops[3];
4782 xops[0] = dest;
4783 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
4785 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
4787 xops[2] = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
4789 if (!flag_pic)
4790 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
4791 else
4792 output_asm_insn ("call\t%a2", xops);
4794 #if TARGET_MACHO
4795 /* Output the "canonical" label name ("Lxx$pb") here too. This
4796 is what will be referred to by the Mach-O PIC subsystem. */
4797 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
4798 #endif
4799 (*targetm.asm_out.internal_label) (asm_out_file, "L",
4800 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
4802 if (flag_pic)
4803 output_asm_insn ("pop{l}\t%0", xops);
4805 else
4807 char name[32];
4808 get_pc_thunk_name (name, REGNO (dest));
4809 pic_labels_used |= 1 << REGNO (dest);
4811 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
4812 xops[2] = gen_rtx_MEM (QImode, xops[2]);
4813 output_asm_insn ("call\t%X2", xops);
4816 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
4817 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
4818 else if (!TARGET_MACHO)
4819 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %a1+(.-%a2)}", xops);
4821 return "";
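/* For the -fpic case the code emitted above is, roughly:
   without deep branch prediction:
       call 1f
   1:  popl %reg
       addl $_GLOBAL_OFFSET_TABLE_+[.-1b], %reg
   with deep branch prediction the call/pop pair is replaced by a call to the
   matching get_pc_thunk, which keeps the return-stack predictor balanced,
   followed by `addl $_GLOBAL_OFFSET_TABLE_, %reg'.  */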
4824 /* Generate a "push" pattern for input ARG. */
4826 static rtx
4827 gen_push (rtx arg)
4829 return gen_rtx_SET (VOIDmode,
4830 gen_rtx_MEM (Pmode,
4831 gen_rtx_PRE_DEC (Pmode,
4832 stack_pointer_rtx)),
4833 arg);
4836 /* Return >= 0 if there is an unused call-clobbered register available
4837 for the entire function. */
4839 static unsigned int
4840 ix86_select_alt_pic_regnum (void)
4842 if (current_function_is_leaf && !current_function_profile)
4844 int i;
4845 for (i = 2; i >= 0; --i)
4846 if (!regs_ever_live[i])
4847 return i;
4850 return INVALID_REGNUM;
4853 /* Return 1 if we need to save REGNO. */
4854 static int
4855 ix86_save_reg (unsigned int regno, int maybe_eh_return)
4857 if (pic_offset_table_rtx
4858 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
4859 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
4860 || current_function_profile
4861 || current_function_calls_eh_return
4862 || current_function_uses_const_pool))
4864 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
4865 return 0;
4866 return 1;
4869 if (current_function_calls_eh_return && maybe_eh_return)
4871 unsigned i;
4872 for (i = 0; ; i++)
4874 unsigned test = EH_RETURN_DATA_REGNO (i);
4875 if (test == INVALID_REGNUM)
4876 break;
4877 if (test == regno)
4878 return 1;
4882 return (regs_ever_live[regno]
4883 && !call_used_regs[regno]
4884 && !fixed_regs[regno]
4885 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
4888 /* Return number of registers to be saved on the stack. */
4890 static int
4891 ix86_nsaved_regs (void)
4893 int nregs = 0;
4894 int regno;
4896 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
4897 if (ix86_save_reg (regno, true))
4898 nregs++;
4899 return nregs;
4902 /* Return the offset between two registers, one to be eliminated, and the other
4903 its replacement, at the start of a routine. */
4905 HOST_WIDE_INT
4906 ix86_initial_elimination_offset (int from, int to)
4908 struct ix86_frame frame;
4909 ix86_compute_frame_layout (&frame);
4911 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
4912 return frame.hard_frame_pointer_offset;
4913 else if (from == FRAME_POINTER_REGNUM
4914 && to == HARD_FRAME_POINTER_REGNUM)
4915 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
4916 else
4918 if (to != STACK_POINTER_REGNUM)
4919 abort ();
4920 else if (from == ARG_POINTER_REGNUM)
4921 return frame.stack_pointer_offset;
4922 else if (from != FRAME_POINTER_REGNUM)
4923 abort ();
4924 else
4925 return frame.stack_pointer_offset - frame.frame_pointer_offset;
4929 /* Fill the ix86_frame structure describing the frame of the current function. */
4931 static void
4932 ix86_compute_frame_layout (struct ix86_frame *frame)
4934 HOST_WIDE_INT total_size;
4935 int stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
4936 HOST_WIDE_INT offset;
4937 int preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
4938 HOST_WIDE_INT size = get_frame_size ();
4940 frame->nregs = ix86_nsaved_regs ();
4941 total_size = size;
4943 /* During reload iterations the number of registers saved can change.
4944 Recompute the value as needed. Do not recompute when the number of registers
4945 didn't change, as reload makes multiple calls to this function and does not
4946 expect the decision to change within a single iteration. */
4947 if (!optimize_size
4948 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
4950 int count = frame->nregs;
4952 cfun->machine->use_fast_prologue_epilogue_nregs = count;
4953 /* The fast prologue uses moves instead of pushes to save registers. This
4954 is significantly longer, but it also executes faster, as modern hardware
4955 can execute the moves in parallel but can't do that for push/pop.
4957 Be careful about choosing which prologue to emit: when the function takes
4958 many instructions to execute, we may as well use the slow version, as we may
4959 when the function is known to be outside any hot spot (this is known only
4960 with profile feedback). Weight the size of the function by the number of
4961 registers to save, as it is cheap to use one or two push instructions but
4962 very slow to use many of them. */
4963 if (count)
4964 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
4965 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
4966 || (flag_branch_probabilities
4967 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
4968 cfun->machine->use_fast_prologue_epilogue = false;
4969 else
4970 cfun->machine->use_fast_prologue_epilogue
4971 = !expensive_function_p (count);
4973 if (TARGET_PROLOGUE_USING_MOVE
4974 && cfun->machine->use_fast_prologue_epilogue)
4975 frame->save_regs_using_mov = true;
4976 else
4977 frame->save_regs_using_mov = false;
4980 /* Skip return address and saved base pointer. */
4981 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
4983 frame->hard_frame_pointer_offset = offset;
4985 /* Do some sanity checking of stack_alignment_needed and
4986 preferred_alignment, since the i386 port is the only one using these
4987 features, and they may break easily. */
4989 if (size && !stack_alignment_needed)
4990 abort ();
4991 if (preferred_alignment < STACK_BOUNDARY / BITS_PER_UNIT)
4992 abort ();
4993 if (preferred_alignment > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4994 abort ();
4995 if (stack_alignment_needed > PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)
4996 abort ();
4998 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
4999 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5001 /* Register save area */
5002 offset += frame->nregs * UNITS_PER_WORD;
5004 /* Va-arg area */
5005 if (ix86_save_varrargs_registers)
5007 offset += X86_64_VARARGS_SIZE;
5008 frame->va_arg_size = X86_64_VARARGS_SIZE;
5010 else
5011 frame->va_arg_size = 0;
5013 /* Align start of frame for local function. */
5014 frame->padding1 = ((offset + stack_alignment_needed - 1)
5015 & -stack_alignment_needed) - offset;
5017 offset += frame->padding1;
5019 /* Frame pointer points here. */
5020 frame->frame_pointer_offset = offset;
5022 offset += size;
5024 /* Add outgoing arguments area. Can be skipped if we eliminated
5025 all the function calls as dead code. */
5026 if (ACCUMULATE_OUTGOING_ARGS && !current_function_is_leaf)
5028 offset += current_function_outgoing_args_size;
5029 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5031 else
5032 frame->outgoing_arguments_size = 0;
5034 /* Align stack boundary. Only needed if we're calling another function
5035 or using alloca. */
5036 if (!current_function_is_leaf || current_function_calls_alloca)
5037 frame->padding2 = ((offset + preferred_alignment - 1)
5038 & -preferred_alignment) - offset;
5039 else
5040 frame->padding2 = 0;
5042 offset += frame->padding2;
5044 /* We've reached end of stack frame. */
5045 frame->stack_pointer_offset = offset;
5047 /* Size prologue needs to allocate. */
5048 frame->to_allocate =
5049 (size + frame->padding1 + frame->padding2
5050 + frame->outgoing_arguments_size + frame->va_arg_size);
5052 if ((!frame->to_allocate && frame->nregs <= 1)
5053 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5054 frame->save_regs_using_mov = false;
5056 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5057 && current_function_is_leaf)
5059 frame->red_zone_size = frame->to_allocate;
5060 if (frame->save_regs_using_mov)
5061 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5062 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5063 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5065 else
5066 frame->red_zone_size = 0;
5067 frame->to_allocate -= frame->red_zone_size;
5068 frame->stack_pointer_offset -= frame->red_zone_size;
5069 #if 0
5070 fprintf (stderr, "nregs: %i\n", frame->nregs);
5071 fprintf (stderr, "size: %i\n", size);
5072 fprintf (stderr, "alignment1: %i\n", stack_alignment_needed);
5073 fprintf (stderr, "padding1: %i\n", frame->padding1);
5074 fprintf (stderr, "va_arg: %i\n", frame->va_arg_size);
5075 fprintf (stderr, "padding2: %i\n", frame->padding2);
5076 fprintf (stderr, "to_allocate: %i\n", frame->to_allocate);
5077 fprintf (stderr, "red_zone_size: %i\n", frame->red_zone_size);
5078 fprintf (stderr, "frame_pointer_offset: %i\n", frame->frame_pointer_offset);
5079 fprintf (stderr, "hard_frame_pointer_offset: %i\n",
5080 frame->hard_frame_pointer_offset);
5081 fprintf (stderr, "stack_pointer_offset: %i\n", frame->stack_pointer_offset);
5082 #endif
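/* A rough sketch of the resulting layout, from higher to lower addresses:
       incoming arguments
       return address
       saved %ebp (if frame_pointer_needed)   <- hard_frame_pointer_offset
       saved registers
       va-arg save area
       padding1                               <- frame_pointer_offset
       local variables
       outgoing arguments
       padding2                               <- stack_pointer_offset
   (the red zone adjustment above can move the final offsets).  */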
5085 /* Emit code to save registers in the prologue. */
5087 static void
5088 ix86_emit_save_regs (void)
5090 int regno;
5091 rtx insn;
5093 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5094 if (ix86_save_reg (regno, true))
5096 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5097 RTX_FRAME_RELATED_P (insn) = 1;
5101 /* Emit code to save registers using MOV insns. The first register
5102 is stored at POINTER + OFFSET. */
5103 static void
5104 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5106 int regno;
5107 rtx insn;
5109 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5110 if (ix86_save_reg (regno, true))
5112 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5113 Pmode, offset),
5114 gen_rtx_REG (Pmode, regno));
5115 RTX_FRAME_RELATED_P (insn) = 1;
5116 offset += UNITS_PER_WORD;
5120 /* Expand prologue or epilogue stack adjustment.
5121 The pattern exists to put a dependency on all ebp-based memory accesses.
5122 STYLE should be negative if instructions should be marked as frame related,
5123 zero if the %r11 register is live and cannot be freely used, and positive
5124 otherwise. */
5126 static void
5127 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5129 rtx insn;
5131 if (! TARGET_64BIT)
5132 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5133 else if (x86_64_immediate_operand (offset, DImode))
5134 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5135 else
5137 rtx r11;
5138 /* r11 is used by indirect sibcall return as well, set before the
5139 epilogue and used after the epilogue. ATM indirect sibcall
5140 shouldn't be used together with huge frame sizes in one
5141 function because of the frame_size check in sibcall.c. */
5142 if (style == 0)
5143 abort ();
5144 r11 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
5145 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5146 if (style < 0)
5147 RTX_FRAME_RELATED_P (insn) = 1;
5148 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5149 offset));
5151 if (style < 0)
5152 RTX_FRAME_RELATED_P (insn) = 1;
5155 /* Expand the prologue into a bunch of separate insns. */
5157 void
5158 ix86_expand_prologue (void)
5160 rtx insn;
5161 bool pic_reg_used;
5162 struct ix86_frame frame;
5163 HOST_WIDE_INT allocate;
5165 ix86_compute_frame_layout (&frame);
5167 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5168 slower on all targets. Also sdb doesn't like it. */
5170 if (frame_pointer_needed)
5172 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5173 RTX_FRAME_RELATED_P (insn) = 1;
5175 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5176 RTX_FRAME_RELATED_P (insn) = 1;
5179 allocate = frame.to_allocate;
5181 if (!frame.save_regs_using_mov)
5182 ix86_emit_save_regs ();
5183 else
5184 allocate += frame.nregs * UNITS_PER_WORD;
5186 /* When using the red zone we may start saving registers before allocating
5187 the stack frame, saving one cycle of the prologue. */
5188 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5189 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5190 : stack_pointer_rtx,
5191 -frame.nregs * UNITS_PER_WORD);
5193 if (allocate == 0)
5195 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5196 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5197 GEN_INT (-allocate), -1);
5198 else
5200 /* Only valid for Win32. */
5201 rtx eax = gen_rtx_REG (SImode, 0);
5202 bool eax_live = ix86_eax_live_at_start_p ();
5204 if (TARGET_64BIT)
5205 abort ();
5207 if (eax_live)
5209 emit_insn (gen_push (eax));
5210 allocate -= 4;
5213 insn = emit_move_insn (eax, GEN_INT (allocate));
5214 RTX_FRAME_RELATED_P (insn) = 1;
5216 insn = emit_insn (gen_allocate_stack_worker (eax));
5217 RTX_FRAME_RELATED_P (insn) = 1;
5219 if (eax_live)
5221 rtx t = plus_constant (stack_pointer_rtx, allocate);
5222 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5226 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5228 if (!frame_pointer_needed || !frame.to_allocate)
5229 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5230 else
5231 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5232 -frame.nregs * UNITS_PER_WORD);
5235 pic_reg_used = false;
5236 if (pic_offset_table_rtx
5237 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5238 || current_function_profile))
5240 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5242 if (alt_pic_reg_used != INVALID_REGNUM)
5243 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5245 pic_reg_used = true;
5248 if (pic_reg_used)
5250 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5252 /* Even with accurate pre-reload life analysis, we can wind up
5253 deleting all references to the pic register after reload.
5254 Consider the case where cross-jumping unifies two sides of a branch
5255 controlled by a comparison against the only read from a global.
5256 In that case, allow the set_got to be deleted, though we're
5257 too late to do anything about the ebx save in the prologue. */
5258 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5261 /* Prevent function calls from being scheduled before the call to mcount.
5262 In the pic_reg_used case, make sure that the got load isn't deleted. */
5263 if (current_function_profile)
5264 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
5267 /* Emit code to restore saved registers using MOV insns. First register
5268 is restored from POINTER + OFFSET. */
5269 static void
5270 ix86_emit_restore_regs_using_mov (rtx pointer, int offset, int maybe_eh_return)
5272 int regno;
5274 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5275 if (ix86_save_reg (regno, maybe_eh_return))
5277 emit_move_insn (gen_rtx_REG (Pmode, regno),
5278 adjust_address (gen_rtx_MEM (Pmode, pointer),
5279 Pmode, offset));
5280 offset += UNITS_PER_WORD;
5284 /* Restore function stack, frame, and registers. */
5286 void
5287 ix86_expand_epilogue (int style)
5289 int regno;
5290 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5291 struct ix86_frame frame;
5292 HOST_WIDE_INT offset;
5294 ix86_compute_frame_layout (&frame);
5296 /* Calculate start of saved registers relative to ebp. Special care
5297 must be taken for the normal return case of a function using
5298 eh_return: the eax and edx registers are marked as saved, but not
5299 restored along this path. */
5300 offset = frame.nregs;
5301 if (current_function_calls_eh_return && style != 2)
5302 offset -= 2;
5303 offset *= -UNITS_PER_WORD;
5305 /* If we're only restoring one register and sp is not valid, then
5306 use a move instruction to restore the register, since it's
5307 less work than reloading sp and popping the register.
5309 The default code results in a stack adjustment using an add/lea instruction,
5310 while this code results in a LEAVE instruction (or discrete equivalent),
5311 so it is profitable in some other cases as well, especially when there
5312 are no registers to restore. We also use this code when TARGET_USE_LEAVE
5313 and there is exactly one register to pop. This heuristic may need some
5314 tuning in the future. */
5315 if ((!sp_valid && frame.nregs <= 1)
5316 || (TARGET_EPILOGUE_USING_MOVE
5317 && cfun->machine->use_fast_prologue_epilogue
5318 && (frame.nregs > 1 || frame.to_allocate))
5319 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5320 || (frame_pointer_needed && TARGET_USE_LEAVE
5321 && cfun->machine->use_fast_prologue_epilogue
5322 && frame.nregs == 1)
5323 || current_function_calls_eh_return)
5325 /* Restore registers. We can use ebp or esp to address the memory
5326 locations. If both are available, default to ebp, since offsets
5327 are known to be small. The only exception is esp pointing directly to the
5328 end of the block of saved registers, where we may simplify the addressing
5329 mode. */
5331 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5332 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5333 frame.to_allocate, style == 2);
5334 else
5335 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5336 offset, style == 2);
5338 /* eh_return epilogues need %ecx added to the stack pointer. */
5339 if (style == 2)
5341 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5343 if (frame_pointer_needed)
5345 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5346 tmp = plus_constant (tmp, UNITS_PER_WORD);
5347 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5349 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5350 emit_move_insn (hard_frame_pointer_rtx, tmp);
5352 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5353 const0_rtx, style);
5355 else
5357 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5358 tmp = plus_constant (tmp, (frame.to_allocate
5359 + frame.nregs * UNITS_PER_WORD));
5360 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5363 else if (!frame_pointer_needed)
5364 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5365 GEN_INT (frame.to_allocate
5366 + frame.nregs * UNITS_PER_WORD),
5367 style);
5368 /* If not an i386, mov & pop is faster than "leave". */
5369 else if (TARGET_USE_LEAVE || optimize_size
5370 || !cfun->machine->use_fast_prologue_epilogue)
5371 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5372 else
5374 pro_epilogue_adjust_stack (stack_pointer_rtx,
5375 hard_frame_pointer_rtx,
5376 const0_rtx, style);
5377 if (TARGET_64BIT)
5378 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5379 else
5380 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5383 else
5385 /* First step is to deallocate the stack frame so that we can
5386 pop the registers. */
5387 if (!sp_valid)
5389 if (!frame_pointer_needed)
5390 abort ();
5391 pro_epilogue_adjust_stack (stack_pointer_rtx,
5392 hard_frame_pointer_rtx,
5393 GEN_INT (offset), style);
5395 else if (frame.to_allocate)
5396 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5397 GEN_INT (frame.to_allocate), style);
5399 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5400 if (ix86_save_reg (regno, false))
5402 if (TARGET_64BIT)
5403 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5404 else
5405 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5407 if (frame_pointer_needed)
5409 /* A `leave' results in shorter dependency chains on CPUs that are
5410 able to grok it fast. */
5411 if (TARGET_USE_LEAVE)
5412 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5413 else if (TARGET_64BIT)
5414 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5415 else
5416 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5420 /* Sibcall epilogues don't want a return instruction. */
5421 if (style == 0)
5422 return;
5424 if (current_function_pops_args && current_function_args_size)
5426 rtx popc = GEN_INT (current_function_pops_args);
5428 /* The i386 can only pop 64K bytes in one `ret'. If asked to pop more, pop
5429 the return address, do an explicit add, and jump indirectly to the
5430 caller. */
5432 if (current_function_pops_args >= 65536)
5434 rtx ecx = gen_rtx_REG (SImode, 2);
5436 /* There is no "pascal" calling convention in the 64-bit ABI. */
5437 if (TARGET_64BIT)
5438 abort ();
5440 emit_insn (gen_popsi1 (ecx));
5441 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5442 emit_jump_insn (gen_return_indirect_internal (ecx));
5444 else
5445 emit_jump_insn (gen_return_pop_internal (popc));
5447 else
5448 emit_jump_insn (gen_return_internal ());
5451 /* Reset from the function's potential modifications. */
5453 static void
5454 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5455 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5457 if (pic_offset_table_rtx)
5458 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5461 /* Extract the parts of an RTL expression that is a valid memory address
5462 for an instruction. Return 0 if the structure of the address is
5463 grossly off. Return -1 if the address contains an ASHIFT, so it is not
5464 strictly valid, but is still used for computing the length of a lea instruction. */
5466 static int
5467 ix86_decompose_address (rtx addr, struct ix86_address *out)
5469 rtx base = NULL_RTX;
5470 rtx index = NULL_RTX;
5471 rtx disp = NULL_RTX;
5472 HOST_WIDE_INT scale = 1;
5473 rtx scale_rtx = NULL_RTX;
5474 int retval = 1;
5475 enum ix86_address_seg seg = SEG_DEFAULT;
5477 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
5478 base = addr;
5479 else if (GET_CODE (addr) == PLUS)
5481 rtx addends[4], op;
5482 int n = 0, i;
5484 op = addr;
5487 if (n >= 4)
5488 return 0;
5489 addends[n++] = XEXP (op, 1);
5490 op = XEXP (op, 0);
5492 while (GET_CODE (op) == PLUS);
5493 if (n >= 4)
5494 return 0;
5495 addends[n] = op;
5497 for (i = n; i >= 0; --i)
5499 op = addends[i];
5500 switch (GET_CODE (op))
5502 case MULT:
5503 if (index)
5504 return 0;
5505 index = XEXP (op, 0);
5506 scale_rtx = XEXP (op, 1);
5507 break;
5509 case UNSPEC:
5510 if (XINT (op, 1) == UNSPEC_TP
5511 && TARGET_TLS_DIRECT_SEG_REFS
5512 && seg == SEG_DEFAULT)
5513 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5514 else
5515 return 0;
5516 break;
5518 case REG:
5519 case SUBREG:
5520 if (!base)
5521 base = op;
5522 else if (!index)
5523 index = op;
5524 else
5525 return 0;
5526 break;
5528 case CONST:
5529 case CONST_INT:
5530 case SYMBOL_REF:
5531 case LABEL_REF:
5532 if (disp)
5533 return 0;
5534 disp = op;
5535 break;
5537 default:
5538 return 0;
5542 else if (GET_CODE (addr) == MULT)
5544 index = XEXP (addr, 0); /* index*scale */
5545 scale_rtx = XEXP (addr, 1);
5547 else if (GET_CODE (addr) == ASHIFT)
5549 rtx tmp;
5551 /* We're called for lea too, which implements ashift on occasion. */
5552 index = XEXP (addr, 0);
5553 tmp = XEXP (addr, 1);
5554 if (GET_CODE (tmp) != CONST_INT)
5555 return 0;
5556 scale = INTVAL (tmp);
5557 if ((unsigned HOST_WIDE_INT) scale > 3)
5558 return 0;
5559 scale = 1 << scale;
5560 retval = -1;
5562 else
5563 disp = addr; /* displacement */
5565 /* Extract the integral value of scale. */
5566 if (scale_rtx)
5568 if (GET_CODE (scale_rtx) != CONST_INT)
5569 return 0;
5570 scale = INTVAL (scale_rtx);
5573 /* Allow the arg pointer and stack pointer as an index if there is no scaling. */
5574 if (base && index && scale == 1
5575 && (index == arg_pointer_rtx
5576 || index == frame_pointer_rtx
5577 || (REG_P (index) && REGNO (index) == STACK_POINTER_REGNUM)))
5579 rtx tmp = base;
5580 base = index;
5581 index = tmp;
5584 /* Special case: %ebp cannot be encoded as a base without a displacement. */
5585 if ((base == hard_frame_pointer_rtx
5586 || base == frame_pointer_rtx
5587 || base == arg_pointer_rtx) && !disp)
5588 disp = const0_rtx;
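/* In the ModR/M encoding, mod=00 with base %ebp actually means "disp32, no
   base", so a plain (%ebp) must be emitted as 0(%ebp) with a byte-sized
   displacement.  */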
5590 /* Special case: on the K6, [%esi] forces the instruction to be vector decoded.
5591 Avoid this by transforming it to [%esi+0]. */
5592 if (ix86_tune == PROCESSOR_K6 && !optimize_size
5593 && base && !index && !disp
5594 && REG_P (base)
5595 && REGNO_REG_CLASS (REGNO (base)) == SIREG)
5596 disp = const0_rtx;
5598 /* Special case: encode reg+reg instead of reg*2. */
5599 if (!base && index && scale && scale == 2)
5600 base = index, scale = 1;
5602 /* Special case: scaling cannot be encoded without base or displacement. */
5603 if (!base && !disp && index && scale != 1)
5604 disp = const0_rtx;
5606 out->base = base;
5607 out->index = index;
5608 out->disp = disp;
5609 out->scale = scale;
5610 out->seg = seg;
5612 return retval;
5615 /* Return cost of the memory address x.
5616 For i386, it is better to use a complex address than let gcc copy
5617 the address into a reg and make a new pseudo. But not if the address
5618 requires two regs - that would mean more pseudos with longer
5619 lifetimes. */
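/* For example, with pseudo registers and ignoring the K6 penalty below,
   a displacement-plus-register address like sym(reg) gets cost 1, while
   an address needing two distinct pseudos such as (reg1,reg2) gets cost 3,
   so the former is preferred. */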
5620 static int
5621 ix86_address_cost (rtx x)
5623 struct ix86_address parts;
5624 int cost = 1;
5626 if (!ix86_decompose_address (x, &parts))
5627 abort ();
5629 if (parts.base && GET_CODE (parts.base) == SUBREG)
5630 parts.base = SUBREG_REG (parts.base);
5631 if (parts.index && GET_CODE (parts.index) == SUBREG)
5632 parts.index = SUBREG_REG (parts.index);
5634 /* More complex memory references are better. */
5635 if (parts.disp && parts.disp != const0_rtx)
5636 cost--;
5637 if (parts.seg != SEG_DEFAULT)
5638 cost--;
5640 /* Attempt to minimize the number of registers in the address. */
5641 if ((parts.base
5642 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
5643 || (parts.index
5644 && (!REG_P (parts.index)
5645 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
5646 cost++;
5648 if (parts.base
5649 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
5650 && parts.index
5651 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
5652 && parts.base != parts.index)
5653 cost++;
5655 /* The AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
5656 since its predecode logic can't detect the length of such instructions
5657 and they degenerate to vector decoding. Increase the cost of such
5658 addresses here. The penalty is at least 2 cycles. It may be worthwhile
5659 to split such addresses or even refuse such addresses at all.
5661 The following addressing modes are affected:
5662 [base+scale*index]
5663 [scale*index+disp]
5664 [base+index]
5666 The first and last cases may be avoidable by explicitly coding the zero
5667 into the memory address, but I don't have an AMD-K6 machine handy to
5668 check this theory. */
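/* For example, (%ebx,%ecx,2), 4(,%ecx,2) and (%ebx,%ecx) all use the
   mod=00, rm=100 encoding with a SIB byte; rewriting the first and last
   as 0(%ebx,%ecx,2) and 0(%ebx,%ecx) would switch them to the mod=01
   form at the price of a displacement byte. */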
5670 if (TARGET_K6
5671 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
5672 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
5673 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
5674 cost += 10;
5676 return cost;
5679 /* If X is a machine specific address (i.e. a symbol or label being
5680 referenced as a displacement from the GOT implemented using an
5681 UNSPEC), then return the base term. Otherwise return X. */
5684 ix86_find_base_term (rtx x)
5686 rtx term;
5688 if (TARGET_64BIT)
5690 if (GET_CODE (x) != CONST)
5691 return x;
5692 term = XEXP (x, 0);
5693 if (GET_CODE (term) == PLUS
5694 && (GET_CODE (XEXP (term, 1)) == CONST_INT
5695 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
5696 term = XEXP (term, 0);
5697 if (GET_CODE (term) != UNSPEC
5698 || XINT (term, 1) != UNSPEC_GOTPCREL)
5699 return x;
5701 term = XVECEXP (term, 0, 0);
5703 if (GET_CODE (term) != SYMBOL_REF
5704 && GET_CODE (term) != LABEL_REF)
5705 return x;
5707 return term;
5710 term = ix86_delegitimize_address (x);
5712 if (GET_CODE (term) != SYMBOL_REF
5713 && GET_CODE (term) != LABEL_REF)
5714 return x;
5716 return term;
5719 /* Determine if a given RTX is a valid constant. We already know this
5720 satisfies CONSTANT_P. */
5722 bool
5723 legitimate_constant_p (rtx x)
5725 rtx inner;
5727 switch (GET_CODE (x))
5729 case SYMBOL_REF:
5730 /* TLS symbols are not constant. */
5731 if (tls_symbolic_operand (x, Pmode))
5732 return false;
5733 break;
5735 case CONST:
5736 inner = XEXP (x, 0);
5738 /* Offsets of TLS symbols are never valid.
5739 Discourage CSE from creating them. */
5740 if (GET_CODE (inner) == PLUS
5741 && tls_symbolic_operand (XEXP (inner, 0), Pmode))
5742 return false;
5744 if (GET_CODE (inner) == PLUS)
5746 if (GET_CODE (XEXP (inner, 1)) != CONST_INT)
5747 return false;
5748 inner = XEXP (inner, 0);
5751 /* Only some unspecs are valid as "constants". */
5752 if (GET_CODE (inner) == UNSPEC)
5753 switch (XINT (inner, 1))
5755 case UNSPEC_TPOFF:
5756 case UNSPEC_NTPOFF:
5757 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5758 case UNSPEC_DTPOFF:
5759 return local_dynamic_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5760 default:
5761 return false;
5763 break;
5765 default:
5766 break;
5769 /* Otherwise we handle everything else in the move patterns. */
5770 return true;
5773 /* Determine if it's legal to put X into the constant pool. This
5774 is not possible for the address of thread-local symbols, which
5775 is checked above. */
5777 static bool
5778 ix86_cannot_force_const_mem (rtx x)
5780 return !legitimate_constant_p (x);
5783 /* Determine if a given RTX is a valid constant address. */
5785 bool
5786 constant_address_p (rtx x)
5788 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
5791 /* Nonzero if the constant value X is a legitimate general operand
5792 when generating PIC code. It is given that flag_pic is on and
5793 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
5795 bool
5796 legitimate_pic_operand_p (rtx x)
5798 rtx inner;
5800 switch (GET_CODE (x))
5802 case CONST:
5803 inner = XEXP (x, 0);
5805 /* Only some unspecs are valid as "constants". */
5806 if (GET_CODE (inner) == UNSPEC)
5807 switch (XINT (inner, 1))
5809 case UNSPEC_TPOFF:
5810 return local_exec_symbolic_operand (XVECEXP (inner, 0, 0), Pmode);
5811 default:
5812 return false;
5814 /* FALLTHRU */
5816 case SYMBOL_REF:
5817 case LABEL_REF:
5818 return legitimate_pic_address_disp_p (x);
5820 default:
5821 return true;
5825 /* Determine if a given CONST RTX is a valid memory displacement
5826 in PIC mode. */
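/* For example, (const (unspec [sym] UNSPEC_GOTOFF)) is a valid PIC
   displacement when `sym' is local, whereas a bare SYMBOL_REF is not,
   apart from the 64-bit small-PIC cases handled first below. */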
5829 legitimate_pic_address_disp_p (rtx disp)
5831 bool saw_plus;
5833 /* In 64bit mode we can allow direct addresses of symbols and labels
5834 when they are not dynamic symbols. */
5835 if (TARGET_64BIT)
5837 /* TLS references should always be enclosed in UNSPEC. */
5838 if (tls_symbolic_operand (disp, GET_MODE (disp)))
5839 return 0;
5840 if (GET_CODE (disp) == SYMBOL_REF
5841 && ix86_cmodel == CM_SMALL_PIC
5842 && SYMBOL_REF_LOCAL_P (disp))
5843 return 1;
5844 if (GET_CODE (disp) == LABEL_REF)
5845 return 1;
5846 if (GET_CODE (disp) == CONST
5847 && GET_CODE (XEXP (disp, 0)) == PLUS)
5849 rtx op0 = XEXP (XEXP (disp, 0), 0);
5850 rtx op1 = XEXP (XEXP (disp, 0), 1);
5852 /* TLS references should always be enclosed in UNSPEC. */
5853 if (tls_symbolic_operand (op0, GET_MODE (op0)))
5854 return 0;
5855 if (((GET_CODE (op0) == SYMBOL_REF
5856 && ix86_cmodel == CM_SMALL_PIC
5857 && SYMBOL_REF_LOCAL_P (op0))
5858 || GET_CODE (op0) == LABEL_REF)
5859 && GET_CODE (op1) == CONST_INT
5860 && INTVAL (op1) < 16*1024*1024
5861 && INTVAL (op1) >= -16*1024*1024)
5862 return 1;
5865 if (GET_CODE (disp) != CONST)
5866 return 0;
5867 disp = XEXP (disp, 0);
5869 if (TARGET_64BIT)
5871 /* It is unsafe to allow PLUS expressions here; this limits the allowed
5872 distance of GOT-relative references. We should not need these anyway. */
5873 if (GET_CODE (disp) != UNSPEC
5874 || XINT (disp, 1) != UNSPEC_GOTPCREL)
5875 return 0;
5877 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
5878 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
5879 return 0;
5880 return 1;
5883 saw_plus = false;
5884 if (GET_CODE (disp) == PLUS)
5886 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
5887 return 0;
5888 disp = XEXP (disp, 0);
5889 saw_plus = true;
5892 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O. */
5893 if (TARGET_MACHO && GET_CODE (disp) == MINUS)
5895 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
5896 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
5897 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
5899 const char *sym_name = XSTR (XEXP (disp, 1), 0);
5900 if (! strcmp (sym_name, "<pic base>"))
5901 return 1;
5905 if (GET_CODE (disp) != UNSPEC)
5906 return 0;
5908 switch (XINT (disp, 1))
5910 case UNSPEC_GOT:
5911 if (saw_plus)
5912 return false;
5913 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
5914 case UNSPEC_GOTOFF:
5915 if (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
5916 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
5917 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5918 return false;
5919 case UNSPEC_GOTTPOFF:
5920 case UNSPEC_GOTNTPOFF:
5921 case UNSPEC_INDNTPOFF:
5922 if (saw_plus)
5923 return false;
5924 return initial_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5925 case UNSPEC_NTPOFF:
5926 return local_exec_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5927 case UNSPEC_DTPOFF:
5928 return local_dynamic_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
5931 return 0;
5934 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
5935 memory address for an instruction. The MODE argument is the machine mode
5936 for the MEM expression that wants to use this address.
5938 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
5939 convert common non-canonical forms to canonical form so that they will
5940 be recognized. */
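/* For example, (plus (mult (reg) (const_int 4)) (reg)) is the canonical
   scaled-index form accepted here; the equivalent
   (plus (reg) (ashift (reg) (const_int 2))) fails the decomposition and
   must first be rewritten by LEGITIMIZE_ADDRESS. */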
5943 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
5945 struct ix86_address parts;
5946 rtx base, index, disp;
5947 HOST_WIDE_INT scale;
5948 const char *reason = NULL;
5949 rtx reason_rtx = NULL_RTX;
5951 if (TARGET_DEBUG_ADDR)
5953 fprintf (stderr,
5954 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
5955 GET_MODE_NAME (mode), strict);
5956 debug_rtx (addr);
5959 if (ix86_decompose_address (addr, &parts) <= 0)
5961 reason = "decomposition failed";
5962 goto report_error;
5965 base = parts.base;
5966 index = parts.index;
5967 disp = parts.disp;
5968 scale = parts.scale;
5970 /* Validate base register.
5972 Don't allow SUBREGs here; they can lead to spill failures when the base
5973 is one word out of a two-word structure, which is represented internally
5974 as a DImode int. */
5976 if (base)
5978 rtx reg;
5979 reason_rtx = base;
5981 if (GET_CODE (base) == SUBREG)
5982 reg = SUBREG_REG (base);
5983 else
5984 reg = base;
5986 if (GET_CODE (reg) != REG)
5988 reason = "base is not a register";
5989 goto report_error;
5992 if (GET_MODE (base) != Pmode)
5994 reason = "base is not in Pmode";
5995 goto report_error;
5998 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
5999 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6001 reason = "base is not valid";
6002 goto report_error;
6006 /* Validate index register.
6008 Don't allow SUBREGs here; they can lead to spill failures when the index
6009 is one word out of a two-word structure, which is represented internally
6010 as a DImode int. */
6012 if (index)
6014 rtx reg;
6015 reason_rtx = index;
6017 if (GET_CODE (index) == SUBREG)
6018 reg = SUBREG_REG (index);
6019 else
6020 reg = index;
6022 if (GET_CODE (reg) != REG)
6024 reason = "index is not a register";
6025 goto report_error;
6028 if (GET_MODE (index) != Pmode)
6030 reason = "index is not in Pmode";
6031 goto report_error;
6034 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6035 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6037 reason = "index is not valid";
6038 goto report_error;
6042 /* Validate scale factor. */
6043 if (scale != 1)
6045 reason_rtx = GEN_INT (scale);
6046 if (!index)
6048 reason = "scale without index";
6049 goto report_error;
6052 if (scale != 2 && scale != 4 && scale != 8)
6054 reason = "scale is not a valid multiplier";
6055 goto report_error;
6059 /* Validate displacement. */
6060 if (disp)
6062 reason_rtx = disp;
6064 if (GET_CODE (disp) == CONST
6065 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6066 switch (XINT (XEXP (disp, 0), 1))
6068 case UNSPEC_GOT:
6069 case UNSPEC_GOTOFF:
6070 case UNSPEC_GOTPCREL:
6071 if (!flag_pic)
6072 abort ();
6073 goto is_legitimate_pic;
6075 case UNSPEC_GOTTPOFF:
6076 case UNSPEC_GOTNTPOFF:
6077 case UNSPEC_INDNTPOFF:
6078 case UNSPEC_NTPOFF:
6079 case UNSPEC_DTPOFF:
6080 break;
6082 default:
6083 reason = "invalid address unspec";
6084 goto report_error;
6087 else if (flag_pic && (SYMBOLIC_CONST (disp)
6088 #if TARGET_MACHO
6089 && !machopic_operand_p (disp)
6090 #endif
6093 is_legitimate_pic:
6094 if (TARGET_64BIT && (index || base))
6096 /* foo@dtpoff(%rX) is ok. */
6097 if (GET_CODE (disp) != CONST
6098 || GET_CODE (XEXP (disp, 0)) != PLUS
6099 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6100 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6101 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6102 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6104 reason = "non-constant pic memory reference";
6105 goto report_error;
6108 else if (! legitimate_pic_address_disp_p (disp))
6110 reason = "displacement is an invalid pic construct";
6111 goto report_error;
6114 /* This code used to verify that a symbolic pic displacement
6115 includes the pic_offset_table_rtx register.
6117 While this is a good idea, unfortunately these constructs may
6118 be created by the "adds using lea" optimization for incorrect
6119 code like:
6121 int a;
6122 int foo(int i)
6124 return *(&a+i);
6127 This code is nonsensical, but it results in addressing the
6128 GOT table with pic_offset_table_rtx as the base. We can't
6129 just refuse it easily, since it gets matched by the
6130 "addsi3" pattern, which later gets split to an lea when the
6131 output register differs from the input. While this
6132 could be handled by a separate addsi pattern for this case
6133 that never results in an lea, disabling this test seems to be
6134 the easier and correct fix for the crash. */
6136 else if (GET_CODE (disp) != LABEL_REF
6137 && GET_CODE (disp) != CONST_INT
6138 && (GET_CODE (disp) != CONST
6139 || !legitimate_constant_p (disp))
6140 && (GET_CODE (disp) != SYMBOL_REF
6141 || !legitimate_constant_p (disp)))
6143 reason = "displacement is not constant";
6144 goto report_error;
6146 else if (TARGET_64BIT && !x86_64_sign_extended_value (disp))
6148 reason = "displacement is out of range";
6149 goto report_error;
6153 /* Everything looks valid. */
6154 if (TARGET_DEBUG_ADDR)
6155 fprintf (stderr, "Success.\n");
6156 return TRUE;
6158 report_error:
6159 if (TARGET_DEBUG_ADDR)
6161 fprintf (stderr, "Error: %s\n", reason);
6162 debug_rtx (reason_rtx);
6164 return FALSE;
6167 /* Return a unique alias set for the GOT. */
6169 static HOST_WIDE_INT
6170 ix86_GOT_alias_set (void)
6172 static HOST_WIDE_INT set = -1;
6173 if (set == -1)
6174 set = new_alias_set ();
6175 return set;
6178 /* Return a legitimate reference for ORIG (an address) using the
6179 register REG. If REG is 0, a new pseudo is generated.
6181 There are two types of references that must be handled:
6183 1. Global data references must load the address from the GOT, via
6184 the PIC reg. An insn is emitted to do this load, and the reg is
6185 returned.
6187 2. Static data references, constant pool addresses, and code labels
6188 compute the address as an offset from the GOT, whose base is in
6189 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6190 differentiate them from global data objects. The returned
6191 address is the PIC reg + an unspec constant.
6193 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6194 reg also appears in the address. */
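/* For example, with -fpic on 32-bit targets a global `x' is typically
   reached by loading its address from the GOT, e.g.
   movl x@GOT(%ebx), %reg, while a local `y' is addressed directly as
   y@GOTOFF(%ebx), with %ebx holding pic_offset_table_rtx. */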
6197 legitimize_pic_address (rtx orig, rtx reg)
6199 rtx addr = orig;
6200 rtx new = orig;
6201 rtx base;
6203 #if TARGET_MACHO
6204 if (reg == 0)
6205 reg = gen_reg_rtx (Pmode);
6206 /* Use the generic Mach-O PIC machinery. */
6207 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6208 #endif
6210 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6211 new = addr;
6212 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6214 /* This symbol may be referenced via a displacement from the PIC
6215 base address (@GOTOFF). */
6217 if (reload_in_progress)
6218 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6219 if (GET_CODE (addr) == CONST)
6220 addr = XEXP (addr, 0);
6221 if (GET_CODE (addr) == PLUS)
6223 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6224 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6226 else
6227 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6228 new = gen_rtx_CONST (Pmode, new);
6229 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6231 if (reg != 0)
6233 emit_move_insn (reg, new);
6234 new = reg;
6237 else if (GET_CODE (addr) == SYMBOL_REF)
6239 if (TARGET_64BIT)
6241 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6242 new = gen_rtx_CONST (Pmode, new);
6243 new = gen_rtx_MEM (Pmode, new);
6244 RTX_UNCHANGING_P (new) = 1;
6245 set_mem_alias_set (new, ix86_GOT_alias_set ());
6247 if (reg == 0)
6248 reg = gen_reg_rtx (Pmode);
6249 /* Use gen_movsi directly; otherwise the address is loaded
6250 into a register for CSE. We don't want to CSE this address;
6251 instead we CSE the addresses loaded from the GOT table, so skip this. */
6252 emit_insn (gen_movsi (reg, new));
6253 new = reg;
6255 else
6257 /* This symbol must be referenced via a load from the
6258 Global Offset Table (@GOT). */
6260 if (reload_in_progress)
6261 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6262 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6263 new = gen_rtx_CONST (Pmode, new);
6264 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6265 new = gen_rtx_MEM (Pmode, new);
6266 RTX_UNCHANGING_P (new) = 1;
6267 set_mem_alias_set (new, ix86_GOT_alias_set ());
6269 if (reg == 0)
6270 reg = gen_reg_rtx (Pmode);
6271 emit_move_insn (reg, new);
6272 new = reg;
6275 else
6277 if (GET_CODE (addr) == CONST)
6279 addr = XEXP (addr, 0);
6281 /* We must match stuff we generated before. Assume the only
6282 unspecs that can get here are ours. Not that we could do
6283 anything with them anyway.... */
6284 if (GET_CODE (addr) == UNSPEC
6285 || (GET_CODE (addr) == PLUS
6286 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6287 return orig;
6288 if (GET_CODE (addr) != PLUS)
6289 abort ();
6291 if (GET_CODE (addr) == PLUS)
6293 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6295 /* Check first to see if this is a constant offset from a @GOTOFF
6296 symbol reference. */
6297 if (local_symbolic_operand (op0, Pmode)
6298 && GET_CODE (op1) == CONST_INT)
6300 if (!TARGET_64BIT)
6302 if (reload_in_progress)
6303 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6304 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6305 UNSPEC_GOTOFF);
6306 new = gen_rtx_PLUS (Pmode, new, op1);
6307 new = gen_rtx_CONST (Pmode, new);
6308 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6310 if (reg != 0)
6312 emit_move_insn (reg, new);
6313 new = reg;
6316 else
6318 if (INTVAL (op1) < -16*1024*1024
6319 || INTVAL (op1) >= 16*1024*1024)
6320 new = gen_rtx_PLUS (Pmode, op0, force_reg (Pmode, op1));
6323 else
6325 base = legitimize_pic_address (XEXP (addr, 0), reg);
6326 new = legitimize_pic_address (XEXP (addr, 1),
6327 base == reg ? NULL_RTX : reg);
6329 if (GET_CODE (new) == CONST_INT)
6330 new = plus_constant (base, INTVAL (new));
6331 else
6333 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6335 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6336 new = XEXP (new, 1);
6338 new = gen_rtx_PLUS (Pmode, base, new);
6343 return new;
6346 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6348 static rtx
6349 get_thread_pointer (int to_reg)
6351 rtx tp, reg, insn;
6353 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6354 if (!to_reg)
6355 return tp;
6357 reg = gen_reg_rtx (Pmode);
6358 insn = gen_rtx_SET (VOIDmode, reg, tp);
6359 insn = emit_insn (insn);
6361 return reg;
6364 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6365 false if we expect this to be used for a memory address and true if
6366 we expect to load the address into a register. */
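/* Roughly, for 32-bit GNU TLS the local-exec model reduces to an offset
   from the thread pointer in %gs (x@NTPOFF), initial-exec loads that
   offset from the GOT (x@GOTNTPOFF or x@INDNTPOFF), and the dynamic
   models go through the tls_get_addr helper; the relocation names match
   the UNSPECs used below. */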
6368 static rtx
6369 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6371 rtx dest, base, off, pic;
6372 int type;
6374 switch (model)
6376 case TLS_MODEL_GLOBAL_DYNAMIC:
6377 dest = gen_reg_rtx (Pmode);
6378 if (TARGET_64BIT)
6380 rtx rax = gen_rtx_REG (Pmode, 0), insns;
6382 start_sequence ();
6383 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
6384 insns = get_insns ();
6385 end_sequence ();
6387 emit_libcall_block (insns, dest, rax, x);
6389 else
6390 emit_insn (gen_tls_global_dynamic_32 (dest, x));
6391 break;
6393 case TLS_MODEL_LOCAL_DYNAMIC:
6394 base = gen_reg_rtx (Pmode);
6395 if (TARGET_64BIT)
6397 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
6399 start_sequence ();
6400 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
6401 insns = get_insns ();
6402 end_sequence ();
6404 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
6405 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
6406 emit_libcall_block (insns, base, rax, note);
6408 else
6409 emit_insn (gen_tls_local_dynamic_base_32 (base));
6411 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
6412 off = gen_rtx_CONST (Pmode, off);
6414 return gen_rtx_PLUS (Pmode, base, off);
6416 case TLS_MODEL_INITIAL_EXEC:
6417 if (TARGET_64BIT)
6419 pic = NULL;
6420 type = UNSPEC_GOTNTPOFF;
6422 else if (flag_pic)
6424 if (reload_in_progress)
6425 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6426 pic = pic_offset_table_rtx;
6427 type = TARGET_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
6429 else if (!TARGET_GNU_TLS)
6431 pic = gen_reg_rtx (Pmode);
6432 emit_insn (gen_set_got (pic));
6433 type = UNSPEC_GOTTPOFF;
6435 else
6437 pic = NULL;
6438 type = UNSPEC_INDNTPOFF;
6441 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
6442 off = gen_rtx_CONST (Pmode, off);
6443 if (pic)
6444 off = gen_rtx_PLUS (Pmode, pic, off);
6445 off = gen_rtx_MEM (Pmode, off);
6446 RTX_UNCHANGING_P (off) = 1;
6447 set_mem_alias_set (off, ix86_GOT_alias_set ());
6449 if (TARGET_64BIT || TARGET_GNU_TLS)
6451 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6452 off = force_reg (Pmode, off);
6453 return gen_rtx_PLUS (Pmode, base, off);
6455 else
6457 base = get_thread_pointer (true);
6458 dest = gen_reg_rtx (Pmode);
6459 emit_insn (gen_subsi3 (dest, base, off));
6461 break;
6463 case TLS_MODEL_LOCAL_EXEC:
6464 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
6465 (TARGET_64BIT || TARGET_GNU_TLS)
6466 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
6467 off = gen_rtx_CONST (Pmode, off);
6469 if (TARGET_64BIT || TARGET_GNU_TLS)
6471 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
6472 return gen_rtx_PLUS (Pmode, base, off);
6474 else
6476 base = get_thread_pointer (true);
6477 dest = gen_reg_rtx (Pmode);
6478 emit_insn (gen_subsi3 (dest, base, off));
6480 break;
6482 default:
6483 abort ();
6486 return dest;
6489 /* Try machine-dependent ways of modifying an illegitimate address
6490 to be legitimate. If we find one, return the new, valid address.
6491 This macro is used in only one place: `memory_address' in explow.c.
6493 OLDX is the address as it was before break_out_memory_refs was called.
6494 In some cases it is useful to look at this to decide what needs to be done.
6496 MODE and WIN are passed so that this macro can use
6497 GO_IF_LEGITIMATE_ADDRESS.
6499 It is always safe for this macro to do nothing. It exists to recognize
6500 opportunities to optimize the output.
6502 For the 80386, we handle X+REG by loading X into a register R and
6503 using R+REG. R will go in a general reg and indexing will be used.
6504 However, if REG is a broken-out memory address or multiplication,
6505 nothing needs to be done because REG can certainly go in a general reg.
6507 When -fpic is used, special handling is needed for symbolic references.
6508 See comments by legitimize_pic_address in i386.c for details. */
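/* For example, the non-canonical address (plus (reg X) (ashift (reg Y)
   (const_int 2))) is rewritten below into
   (plus (mult (reg Y) (const_int 4)) (reg X)), which
   GO_IF_LEGITIMATE_ADDRESS then accepts as the operand (%X,%Y,4). */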
6511 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
6513 int changed = 0;
6514 unsigned log;
6516 if (TARGET_DEBUG_ADDR)
6518 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
6519 GET_MODE_NAME (mode));
6520 debug_rtx (x);
6523 log = tls_symbolic_operand (x, mode);
6524 if (log)
6525 return legitimize_tls_address (x, log, false);
6527 if (flag_pic && SYMBOLIC_CONST (x))
6528 return legitimize_pic_address (x, 0);
6530 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
6531 if (GET_CODE (x) == ASHIFT
6532 && GET_CODE (XEXP (x, 1)) == CONST_INT
6533 && (log = (unsigned) exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
6535 changed = 1;
6536 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
6537 GEN_INT (1 << log));
6540 if (GET_CODE (x) == PLUS)
6542 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
6544 if (GET_CODE (XEXP (x, 0)) == ASHIFT
6545 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
6546 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
6548 changed = 1;
6549 XEXP (x, 0) = gen_rtx_MULT (Pmode,
6550 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
6551 GEN_INT (1 << log));
6554 if (GET_CODE (XEXP (x, 1)) == ASHIFT
6555 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
6556 && (log = (unsigned) exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1)))) < 4)
6558 changed = 1;
6559 XEXP (x, 1) = gen_rtx_MULT (Pmode,
6560 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
6561 GEN_INT (1 << log));
6564 /* Put multiply first if it isn't already. */
6565 if (GET_CODE (XEXP (x, 1)) == MULT)
6567 rtx tmp = XEXP (x, 0);
6568 XEXP (x, 0) = XEXP (x, 1);
6569 XEXP (x, 1) = tmp;
6570 changed = 1;
6573 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
6574 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
6575 created by virtual register instantiation, register elimination, and
6576 similar optimizations. */
6577 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
6579 changed = 1;
6580 x = gen_rtx_PLUS (Pmode,
6581 gen_rtx_PLUS (Pmode, XEXP (x, 0),
6582 XEXP (XEXP (x, 1), 0)),
6583 XEXP (XEXP (x, 1), 1));
6586 /* Canonicalize
6587 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
6588 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
6589 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
6590 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6591 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
6592 && CONSTANT_P (XEXP (x, 1)))
6594 rtx constant;
6595 rtx other = NULL_RTX;
6597 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6599 constant = XEXP (x, 1);
6600 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
6602 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
6604 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
6605 other = XEXP (x, 1);
6607 else
6608 constant = 0;
6610 if (constant)
6612 changed = 1;
6613 x = gen_rtx_PLUS (Pmode,
6614 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
6615 XEXP (XEXP (XEXP (x, 0), 1), 0)),
6616 plus_constant (other, INTVAL (constant)));
6620 if (changed && legitimate_address_p (mode, x, FALSE))
6621 return x;
6623 if (GET_CODE (XEXP (x, 0)) == MULT)
6625 changed = 1;
6626 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
6629 if (GET_CODE (XEXP (x, 1)) == MULT)
6631 changed = 1;
6632 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
6635 if (changed
6636 && GET_CODE (XEXP (x, 1)) == REG
6637 && GET_CODE (XEXP (x, 0)) == REG)
6638 return x;
6640 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
6642 changed = 1;
6643 x = legitimize_pic_address (x, 0);
6646 if (changed && legitimate_address_p (mode, x, FALSE))
6647 return x;
6649 if (GET_CODE (XEXP (x, 0)) == REG)
6651 rtx temp = gen_reg_rtx (Pmode);
6652 rtx val = force_operand (XEXP (x, 1), temp);
6653 if (val != temp)
6654 emit_move_insn (temp, val);
6656 XEXP (x, 1) = temp;
6657 return x;
6660 else if (GET_CODE (XEXP (x, 1)) == REG)
6662 rtx temp = gen_reg_rtx (Pmode);
6663 rtx val = force_operand (XEXP (x, 0), temp);
6664 if (val != temp)
6665 emit_move_insn (temp, val);
6667 XEXP (x, 0) = temp;
6668 return x;
6672 return x;
6675 /* Print an integer constant expression in assembler syntax. Addition
6676 and subtraction are the only arithmetic that may appear in these
6677 expressions. FILE is the stdio stream to write to, X is the rtx, and
6678 CODE is the operand print code from the output string. */
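/* For example, the CONST expression
   (plus (unspec [sym] UNSPEC_GOTOFF) (const_int 4)) is printed as
   `4+sym@GOTOFF' (integer constant first, as noted below), and with
   CODE == 'P' a non-local SYMBOL_REF gets an `@PLT' suffix appended. */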
6680 static void
6681 output_pic_addr_const (FILE *file, rtx x, int code)
6683 char buf[256];
6685 switch (GET_CODE (x))
6687 case PC:
6688 if (flag_pic)
6689 putc ('.', file);
6690 else
6691 abort ();
6692 break;
6694 case SYMBOL_REF:
6695 assemble_name (file, XSTR (x, 0));
6696 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
6697 fputs ("@PLT", file);
6698 break;
6700 case LABEL_REF:
6701 x = XEXP (x, 0);
6702 /* FALLTHRU */
6703 case CODE_LABEL:
6704 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
6705 assemble_name (asm_out_file, buf);
6706 break;
6708 case CONST_INT:
6709 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
6710 break;
6712 case CONST:
6713 /* This used to output parentheses around the expression,
6714 but that does not work on the 386 (either ATT or BSD assembler). */
6715 output_pic_addr_const (file, XEXP (x, 0), code);
6716 break;
6718 case CONST_DOUBLE:
6719 if (GET_MODE (x) == VOIDmode)
6721 /* We can use %d if the number is <32 bits and positive. */
6722 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
6723 fprintf (file, "0x%lx%08lx",
6724 (unsigned long) CONST_DOUBLE_HIGH (x),
6725 (unsigned long) CONST_DOUBLE_LOW (x));
6726 else
6727 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
6729 else
6730 /* We can't handle floating point constants;
6731 PRINT_OPERAND must handle them. */
6732 output_operand_lossage ("floating constant misused");
6733 break;
6735 case PLUS:
6736 /* Some assemblers need integer constants to appear first. */
6737 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
6739 output_pic_addr_const (file, XEXP (x, 0), code);
6740 putc ('+', file);
6741 output_pic_addr_const (file, XEXP (x, 1), code);
6743 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6745 output_pic_addr_const (file, XEXP (x, 1), code);
6746 putc ('+', file);
6747 output_pic_addr_const (file, XEXP (x, 0), code);
6749 else
6750 abort ();
6751 break;
6753 case MINUS:
6754 if (!TARGET_MACHO)
6755 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
6756 output_pic_addr_const (file, XEXP (x, 0), code);
6757 putc ('-', file);
6758 output_pic_addr_const (file, XEXP (x, 1), code);
6759 if (!TARGET_MACHO)
6760 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
6761 break;
6763 case UNSPEC:
6764 if (XVECLEN (x, 0) != 1)
6765 abort ();
6766 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
6767 switch (XINT (x, 1))
6769 case UNSPEC_GOT:
6770 fputs ("@GOT", file);
6771 break;
6772 case UNSPEC_GOTOFF:
6773 fputs ("@GOTOFF", file);
6774 break;
6775 case UNSPEC_GOTPCREL:
6776 fputs ("@GOTPCREL(%rip)", file);
6777 break;
6778 case UNSPEC_GOTTPOFF:
6779 /* FIXME: This might be @TPOFF in Sun ld too. */
6780 fputs ("@GOTTPOFF", file);
6781 break;
6782 case UNSPEC_TPOFF:
6783 fputs ("@TPOFF", file);
6784 break;
6785 case UNSPEC_NTPOFF:
6786 if (TARGET_64BIT)
6787 fputs ("@TPOFF", file);
6788 else
6789 fputs ("@NTPOFF", file);
6790 break;
6791 case UNSPEC_DTPOFF:
6792 fputs ("@DTPOFF", file);
6793 break;
6794 case UNSPEC_GOTNTPOFF:
6795 if (TARGET_64BIT)
6796 fputs ("@GOTTPOFF(%rip)", file);
6797 else
6798 fputs ("@GOTNTPOFF", file);
6799 break;
6800 case UNSPEC_INDNTPOFF:
6801 fputs ("@INDNTPOFF", file);
6802 break;
6803 default:
6804 output_operand_lossage ("invalid UNSPEC as operand");
6805 break;
6807 break;
6809 default:
6810 output_operand_lossage ("invalid expression as operand");
6814 /* This is called from dwarfout.c via ASM_OUTPUT_DWARF_ADDR_CONST.
6815 We need to handle our special PIC relocations. */
6817 void
6818 i386_dwarf_output_addr_const (FILE *file, rtx x)
6820 #ifdef ASM_QUAD
6821 fprintf (file, "%s", TARGET_64BIT ? ASM_QUAD : ASM_LONG);
6822 #else
6823 if (TARGET_64BIT)
6824 abort ();
6825 fprintf (file, "%s", ASM_LONG);
6826 #endif
6827 if (flag_pic)
6828 output_pic_addr_const (file, x, '\0');
6829 else
6830 output_addr_const (file, x);
6831 fputc ('\n', file);
6834 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
6835 We need to emit DTP-relative relocations. */
6837 void
6838 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
6840 fputs (ASM_LONG, file);
6841 output_addr_const (file, x);
6842 fputs ("@DTPOFF", file);
6843 switch (size)
6845 case 4:
6846 break;
6847 case 8:
6848 fputs (", 0", file);
6849 break;
6850 default:
6851 abort ();
6855 /* In the name of slightly smaller debug output, and to cater to
6856 general assembler lossage, recognize PIC+GOTOFF and turn it back
6857 into a direct symbol reference. */
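/* For example, the address computation
   (plus (reg %ebx) (const (unspec [sym] UNSPEC_GOTOFF))) and the GOT load
   (mem (plus (reg %ebx) (const (unspec [sym] UNSPEC_GOT)))) are both
   mapped back to the bare `sym'. */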
6859 static rtx
6860 ix86_delegitimize_address (rtx orig_x)
6862 rtx x = orig_x, y;
6864 if (GET_CODE (x) == MEM)
6865 x = XEXP (x, 0);
6867 if (TARGET_64BIT)
6869 if (GET_CODE (x) != CONST
6870 || GET_CODE (XEXP (x, 0)) != UNSPEC
6871 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
6872 || GET_CODE (orig_x) != MEM)
6873 return orig_x;
6874 return XVECEXP (XEXP (x, 0), 0, 0);
6877 if (GET_CODE (x) != PLUS
6878 || GET_CODE (XEXP (x, 1)) != CONST)
6879 return orig_x;
6881 if (GET_CODE (XEXP (x, 0)) == REG
6882 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
6883 /* %ebx + GOT/GOTOFF */
6884 y = NULL;
6885 else if (GET_CODE (XEXP (x, 0)) == PLUS)
6887 /* %ebx + %reg * scale + GOT/GOTOFF */
6888 y = XEXP (x, 0);
6889 if (GET_CODE (XEXP (y, 0)) == REG
6890 && REGNO (XEXP (y, 0)) == PIC_OFFSET_TABLE_REGNUM)
6891 y = XEXP (y, 1);
6892 else if (GET_CODE (XEXP (y, 1)) == REG
6893 && REGNO (XEXP (y, 1)) == PIC_OFFSET_TABLE_REGNUM)
6894 y = XEXP (y, 0);
6895 else
6896 return orig_x;
6897 if (GET_CODE (y) != REG
6898 && GET_CODE (y) != MULT
6899 && GET_CODE (y) != ASHIFT)
6900 return orig_x;
6902 else
6903 return orig_x;
6905 x = XEXP (XEXP (x, 1), 0);
6906 if (GET_CODE (x) == UNSPEC
6907 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6908 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
6910 if (y)
6911 return gen_rtx_PLUS (Pmode, y, XVECEXP (x, 0, 0));
6912 return XVECEXP (x, 0, 0);
6915 if (GET_CODE (x) == PLUS
6916 && GET_CODE (XEXP (x, 0)) == UNSPEC
6917 && GET_CODE (XEXP (x, 1)) == CONST_INT
6918 && ((XINT (XEXP (x, 0), 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
6919 || (XINT (XEXP (x, 0), 1) == UNSPEC_GOTOFF
6920 && GET_CODE (orig_x) != MEM)))
6922 x = gen_rtx_PLUS (VOIDmode, XVECEXP (XEXP (x, 0), 0, 0), XEXP (x, 1));
6923 if (y)
6924 return gen_rtx_PLUS (Pmode, y, x);
6925 return x;
6928 return orig_x;
6931 static void
6932 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
6933 int fp, FILE *file)
6935 const char *suffix;
6937 if (mode == CCFPmode || mode == CCFPUmode)
6939 enum rtx_code second_code, bypass_code;
6940 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
6941 if (bypass_code != NIL || second_code != NIL)
6942 abort ();
6943 code = ix86_fp_compare_code_to_integer (code);
6944 mode = CCmode;
6946 if (reverse)
6947 code = reverse_condition (code);
6949 switch (code)
6951 case EQ:
6952 suffix = "e";
6953 break;
6954 case NE:
6955 suffix = "ne";
6956 break;
6957 case GT:
6958 if (mode != CCmode && mode != CCNOmode && mode != CCGCmode)
6959 abort ();
6960 suffix = "g";
6961 break;
6962 case GTU:
6963 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
6964 Those same assemblers have the same but opposite lossage on cmov. */
6965 if (mode != CCmode)
6966 abort ();
6967 suffix = fp ? "nbe" : "a";
6968 break;
6969 case LT:
6970 if (mode == CCNOmode || mode == CCGOCmode)
6971 suffix = "s";
6972 else if (mode == CCmode || mode == CCGCmode)
6973 suffix = "l";
6974 else
6975 abort ();
6976 break;
6977 case LTU:
6978 if (mode != CCmode)
6979 abort ();
6980 suffix = "b";
6981 break;
6982 case GE:
6983 if (mode == CCNOmode || mode == CCGOCmode)
6984 suffix = "ns";
6985 else if (mode == CCmode || mode == CCGCmode)
6986 suffix = "ge";
6987 else
6988 abort ();
6989 break;
6990 case GEU:
6991 /* ??? As above. */
6992 if (mode != CCmode)
6993 abort ();
6994 suffix = fp ? "nb" : "ae";
6995 break;
6996 case LE:
6997 if (mode != CCmode && mode != CCGCmode && mode != CCNOmode)
6998 abort ();
6999 suffix = "le";
7000 break;
7001 case LEU:
7002 if (mode != CCmode)
7003 abort ();
7004 suffix = "be";
7005 break;
7006 case UNORDERED:
7007 suffix = fp ? "u" : "p";
7008 break;
7009 case ORDERED:
7010 suffix = fp ? "nu" : "np";
7011 break;
7012 default:
7013 abort ();
7015 fputs (suffix, file);
7018 /* Print the name of register X to FILE based on its machine mode and number.
7019 If CODE is 'w', pretend the mode is HImode.
7020 If CODE is 'b', pretend the mode is QImode.
7021 If CODE is 'k', pretend the mode is SImode.
7022 If CODE is 'q', pretend the mode is DImode.
7023 If CODE is 'h', pretend the reg is the `high' byte register.
7024 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
7026 void
7027 print_reg (rtx x, int code, FILE *file)
7029 if (REGNO (x) == ARG_POINTER_REGNUM
7030 || REGNO (x) == FRAME_POINTER_REGNUM
7031 || REGNO (x) == FLAGS_REG
7032 || REGNO (x) == FPSR_REG)
7033 abort ();
7035 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7036 putc ('%', file);
7038 if (code == 'w' || MMX_REG_P (x))
7039 code = 2;
7040 else if (code == 'b')
7041 code = 1;
7042 else if (code == 'k')
7043 code = 4;
7044 else if (code == 'q')
7045 code = 8;
7046 else if (code == 'y')
7047 code = 3;
7048 else if (code == 'h')
7049 code = 0;
7050 else
7051 code = GET_MODE_SIZE (GET_MODE (x));
7053 /* Irritatingly, AMD extended registers use a different naming convention
7054 from the normal registers. */
7055 if (REX_INT_REG_P (x))
7057 if (!TARGET_64BIT)
7058 abort ();
7059 switch (code)
7061 case 0:
7062 error ("extended registers have no high halves");
7063 break;
7064 case 1:
7065 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7066 break;
7067 case 2:
7068 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7069 break;
7070 case 4:
7071 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7072 break;
7073 case 8:
7074 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7075 break;
7076 default:
7077 error ("unsupported operand size for extended register");
7078 break;
7080 return;
7082 switch (code)
7084 case 3:
7085 if (STACK_TOP_P (x))
7087 fputs ("st(0)", file);
7088 break;
7090 /* FALLTHRU */
7091 case 8:
7092 case 4:
7093 case 12:
7094 if (! ANY_FP_REG_P (x))
7095 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7096 /* FALLTHRU */
7097 case 16:
7098 case 2:
7099 normal:
7100 fputs (hi_reg_name[REGNO (x)], file);
7101 break;
7102 case 1:
7103 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7104 goto normal;
7105 fputs (qi_reg_name[REGNO (x)], file);
7106 break;
7107 case 0:
7108 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7109 goto normal;
7110 fputs (qi_high_reg_name[REGNO (x)], file);
7111 break;
7112 default:
7113 abort ();
7117 /* Locate some local-dynamic symbol still in use by this function
7118 so that we can print its name in some tls_local_dynamic_base
7119 pattern. */
7121 static const char *
7122 get_some_local_dynamic_name (void)
7124 rtx insn;
7126 if (cfun->machine->some_ld_name)
7127 return cfun->machine->some_ld_name;
7129 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7130 if (INSN_P (insn)
7131 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7132 return cfun->machine->some_ld_name;
7134 abort ();
7137 static int
7138 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7140 rtx x = *px;
7142 if (GET_CODE (x) == SYMBOL_REF
7143 && local_dynamic_symbolic_operand (x, Pmode))
7145 cfun->machine->some_ld_name = XSTR (x, 0);
7146 return 1;
7149 return 0;
7152 /* Meaning of CODE:
7153 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7154 C -- print opcode suffix for set/cmov insn.
7155 c -- like C, but print reversed condition
7156 F,f -- likewise, but for floating-point.
7157 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7158 otherwise nothing
7159 R -- print the prefix for register names.
7160 z -- print the opcode suffix for the size of the current operand.
7161 * -- print a star (in certain assembler syntax)
7162 A -- print an absolute memory reference.
7163 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7164 s -- print a shift double count, followed by the assembler's argument
7165 delimiter.
7166 b -- print the QImode name of the register for the indicated operand.
7167 %b0 would print %al if operands[0] is reg 0.
7168 w -- likewise, print the HImode name of the register.
7169 k -- likewise, print the SImode name of the register.
7170 q -- likewise, print the DImode name of the register.
7171 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7172 y -- print "st(0)" instead of "st" as a register.
7173 D -- print condition for SSE cmp instruction.
7174 P -- if PIC, print an @PLT suffix.
7175 X -- don't print any sort of PIC '@' suffix for a symbol.
7176 & -- print some in-use local-dynamic symbol name.
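   For example, if operands[0] is (reg:SI %eax), %k0 prints %eax, %w0
   prints %ax, %b0 prints %al and %h0 prints %ah, while %z1 appends the
   size suffix derived from the mode of operands[1].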
7179 void
7180 print_operand (FILE *file, rtx x, int code)
7182 if (code)
7184 switch (code)
7186 case '*':
7187 if (ASSEMBLER_DIALECT == ASM_ATT)
7188 putc ('*', file);
7189 return;
7191 case '&':
7192 assemble_name (file, get_some_local_dynamic_name ());
7193 return;
7195 case 'A':
7196 if (ASSEMBLER_DIALECT == ASM_ATT)
7197 putc ('*', file);
7198 else if (ASSEMBLER_DIALECT == ASM_INTEL)
7200 /* Intel syntax. For absolute addresses, registers should not
7201 be surrounded by braces. */
7202 if (GET_CODE (x) != REG)
7204 putc ('[', file);
7205 PRINT_OPERAND (file, x, 0);
7206 putc (']', file);
7207 return;
7210 else
7211 abort ();
7213 PRINT_OPERAND (file, x, 0);
7214 return;
7217 case 'L':
7218 if (ASSEMBLER_DIALECT == ASM_ATT)
7219 putc ('l', file);
7220 return;
7222 case 'W':
7223 if (ASSEMBLER_DIALECT == ASM_ATT)
7224 putc ('w', file);
7225 return;
7227 case 'B':
7228 if (ASSEMBLER_DIALECT == ASM_ATT)
7229 putc ('b', file);
7230 return;
7232 case 'Q':
7233 if (ASSEMBLER_DIALECT == ASM_ATT)
7234 putc ('l', file);
7235 return;
7237 case 'S':
7238 if (ASSEMBLER_DIALECT == ASM_ATT)
7239 putc ('s', file);
7240 return;
7242 case 'T':
7243 if (ASSEMBLER_DIALECT == ASM_ATT)
7244 putc ('t', file);
7245 return;
7247 case 'z':
7248 /* 387 opcodes don't get size suffixes if the operands are
7249 registers. */
7250 if (STACK_REG_P (x))
7251 return;
7253 /* Likewise if using Intel opcodes. */
7254 if (ASSEMBLER_DIALECT == ASM_INTEL)
7255 return;
7257 /* The operation's size suffix is derived from the size of the operand. */
7258 switch (GET_MODE_SIZE (GET_MODE (x)))
7260 case 2:
7261 #ifdef HAVE_GAS_FILDS_FISTS
7262 putc ('s', file);
7263 #endif
7264 return;
7266 case 4:
7267 if (GET_MODE (x) == SFmode)
7269 putc ('s', file);
7270 return;
7272 else
7273 putc ('l', file);
7274 return;
7276 case 12:
7277 case 16:
7278 putc ('t', file);
7279 return;
7281 case 8:
7282 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7284 #ifdef GAS_MNEMONICS
7285 putc ('q', file);
7286 #else
7287 putc ('l', file);
7288 putc ('l', file);
7289 #endif
7291 else
7292 putc ('l', file);
7293 return;
7295 default:
7296 abort ();
7299 case 'b':
7300 case 'w':
7301 case 'k':
7302 case 'q':
7303 case 'h':
7304 case 'y':
7305 case 'X':
7306 case 'P':
7307 break;
7309 case 's':
7310 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7312 PRINT_OPERAND (file, x, 0);
7313 putc (',', file);
7315 return;
7317 case 'D':
7318 /* A little bit of brain damage here. The SSE compare instructions
7319 use completely different names for the comparisons than the
7320 fp conditional moves do. */
7321 switch (GET_CODE (x))
7323 case EQ:
7324 case UNEQ:
7325 fputs ("eq", file);
7326 break;
7327 case LT:
7328 case UNLT:
7329 fputs ("lt", file);
7330 break;
7331 case LE:
7332 case UNLE:
7333 fputs ("le", file);
7334 break;
7335 case UNORDERED:
7336 fputs ("unord", file);
7337 break;
7338 case NE:
7339 case LTGT:
7340 fputs ("neq", file);
7341 break;
7342 case UNGE:
7343 case GE:
7344 fputs ("nlt", file);
7345 break;
7346 case UNGT:
7347 case GT:
7348 fputs ("nle", file);
7349 break;
7350 case ORDERED:
7351 fputs ("ord", file);
7352 break;
7353 default:
7354 abort ();
7355 break;
7357 return;
7358 case 'O':
7359 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7360 if (ASSEMBLER_DIALECT == ASM_ATT)
7362 switch (GET_MODE (x))
7364 case HImode: putc ('w', file); break;
7365 case SImode:
7366 case SFmode: putc ('l', file); break;
7367 case DImode:
7368 case DFmode: putc ('q', file); break;
7369 default: abort ();
7371 putc ('.', file);
7373 #endif
7374 return;
7375 case 'C':
7376 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
7377 return;
7378 case 'F':
7379 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7380 if (ASSEMBLER_DIALECT == ASM_ATT)
7381 putc ('.', file);
7382 #endif
7383 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
7384 return;
7386 /* Like above, but reverse condition */
7387 case 'c':
7388 /* Check to see if the argument to %c is really a constant
7389 and not a condition code which needs to be reversed. */
7390 if (GET_RTX_CLASS (GET_CODE (x)) != '<')
7392 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
7393 return;
7395 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
7396 return;
7397 case 'f':
7398 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
7399 if (ASSEMBLER_DIALECT == ASM_ATT)
7400 putc ('.', file);
7401 #endif
7402 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
7403 return;
7404 case '+':
7406 rtx x;
7408 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
7409 return;
7411 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
7412 if (x)
7414 int pred_val = INTVAL (XEXP (x, 0));
7416 if (pred_val < REG_BR_PROB_BASE * 45 / 100
7417 || pred_val > REG_BR_PROB_BASE * 55 / 100)
7419 int taken = pred_val > REG_BR_PROB_BASE / 2;
7420 int cputaken = final_forward_branch_p (current_output_insn) == 0;
7422 /* Emit hints only in the case the default branch prediction
7423 heuristics would fail. */
7424 if (taken != cputaken)
7426 /* We use 3e (DS) prefix for taken branches and
7427 2e (CS) prefix for not taken branches. */
7428 if (taken)
7429 fputs ("ds ; ", file);
7430 else
7431 fputs ("cs ; ", file);
7435 return;
7437 default:
7438 output_operand_lossage ("invalid operand code `%c'", code);
7442 if (GET_CODE (x) == REG)
7443 print_reg (x, code, file);
7445 else if (GET_CODE (x) == MEM)
7447 /* No `byte ptr' prefix for call instructions. */
7448 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
7450 const char * size;
7451 switch (GET_MODE_SIZE (GET_MODE (x)))
7453 case 1: size = "BYTE"; break;
7454 case 2: size = "WORD"; break;
7455 case 4: size = "DWORD"; break;
7456 case 8: size = "QWORD"; break;
7457 case 12: size = "XWORD"; break;
7458 case 16: size = "XMMWORD"; break;
7459 default:
7460 abort ();
7463 /* Check for explicit size override (codes 'b', 'w' and 'k') */
7464 if (code == 'b')
7465 size = "BYTE";
7466 else if (code == 'w')
7467 size = "WORD";
7468 else if (code == 'k')
7469 size = "DWORD";
7471 fputs (size, file);
7472 fputs (" PTR ", file);
7475 x = XEXP (x, 0);
7476 /* Avoid (%rip) for call operands. */
7477 if (CONSTANT_ADDRESS_P (x) && code == 'P'
7478 && GET_CODE (x) != CONST_INT)
7479 output_addr_const (file, x);
7480 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
7481 output_operand_lossage ("invalid constraints for operand");
7482 else
7483 output_address (x);
7486 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
7488 REAL_VALUE_TYPE r;
7489 long l;
7491 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7492 REAL_VALUE_TO_TARGET_SINGLE (r, l);
7494 if (ASSEMBLER_DIALECT == ASM_ATT)
7495 putc ('$', file);
7496 fprintf (file, "0x%08lx", l);
7499 /* These float cases don't actually occur as immediate operands. */
7500 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
7502 char dstr[30];
7504 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7505 fprintf (file, "%s", dstr);
7508 else if (GET_CODE (x) == CONST_DOUBLE
7509 && GET_MODE (x) == XFmode)
7511 char dstr[30];
7513 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
7514 fprintf (file, "%s", dstr);
7517 else
7519 if (code != 'P')
7521 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
7523 if (ASSEMBLER_DIALECT == ASM_ATT)
7524 putc ('$', file);
7526 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
7527 || GET_CODE (x) == LABEL_REF)
7529 if (ASSEMBLER_DIALECT == ASM_ATT)
7530 putc ('$', file);
7531 else
7532 fputs ("OFFSET FLAT:", file);
7535 if (GET_CODE (x) == CONST_INT)
7536 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7537 else if (flag_pic)
7538 output_pic_addr_const (file, x, code);
7539 else
7540 output_addr_const (file, x);
7544 /* Print a memory operand whose address is ADDR. */
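/* For example, base %ebx, index %ecx, scale 4 and displacement 8 comes
   out as `8(%ebx,%ecx,4)' in AT&T syntax and as `[ebx+8+ecx*4]' in Intel
   syntax. */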
7546 void
7547 print_operand_address (FILE *file, rtx addr)
7549 struct ix86_address parts;
7550 rtx base, index, disp;
7551 int scale;
7553 if (! ix86_decompose_address (addr, &parts))
7554 abort ();
7556 base = parts.base;
7557 index = parts.index;
7558 disp = parts.disp;
7559 scale = parts.scale;
7561 switch (parts.seg)
7563 case SEG_DEFAULT:
7564 break;
7565 case SEG_FS:
7566 case SEG_GS:
7567 if (USER_LABEL_PREFIX[0] == 0)
7568 putc ('%', file);
7569 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
7570 break;
7571 default:
7572 abort ();
7575 if (!base && !index)
7577 /* Displacement only requires special attention. */
7579 if (GET_CODE (disp) == CONST_INT)
7581 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
7583 if (USER_LABEL_PREFIX[0] == 0)
7584 putc ('%', file);
7585 fputs ("ds:", file);
7587 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
7589 else if (flag_pic)
7590 output_pic_addr_const (file, disp, 0);
7591 else
7592 output_addr_const (file, disp);
7594 /* Use the one-byte-shorter RIP-relative addressing for 64bit mode. */
7595 if (TARGET_64BIT
7596 && ((GET_CODE (disp) == SYMBOL_REF
7597 && ! tls_symbolic_operand (disp, GET_MODE (disp)))
7598 || GET_CODE (disp) == LABEL_REF
7599 || (GET_CODE (disp) == CONST
7600 && GET_CODE (XEXP (disp, 0)) == PLUS
7601 && (GET_CODE (XEXP (XEXP (disp, 0), 0)) == SYMBOL_REF
7602 || GET_CODE (XEXP (XEXP (disp, 0), 0)) == LABEL_REF)
7603 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)))
7604 fputs ("(%rip)", file);
7606 else
7608 if (ASSEMBLER_DIALECT == ASM_ATT)
7610 if (disp)
7612 if (flag_pic)
7613 output_pic_addr_const (file, disp, 0);
7614 else if (GET_CODE (disp) == LABEL_REF)
7615 output_asm_label (disp);
7616 else
7617 output_addr_const (file, disp);
7620 putc ('(', file);
7621 if (base)
7622 print_reg (base, 0, file);
7623 if (index)
7625 putc (',', file);
7626 print_reg (index, 0, file);
7627 if (scale != 1)
7628 fprintf (file, ",%d", scale);
7630 putc (')', file);
7632 else
7634 rtx offset = NULL_RTX;
7636 if (disp)
7638 /* Pull out the offset of a symbol; print any symbol itself. */
7639 if (GET_CODE (disp) == CONST
7640 && GET_CODE (XEXP (disp, 0)) == PLUS
7641 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
7643 offset = XEXP (XEXP (disp, 0), 1);
7644 disp = gen_rtx_CONST (VOIDmode,
7645 XEXP (XEXP (disp, 0), 0));
7648 if (flag_pic)
7649 output_pic_addr_const (file, disp, 0);
7650 else if (GET_CODE (disp) == LABEL_REF)
7651 output_asm_label (disp);
7652 else if (GET_CODE (disp) == CONST_INT)
7653 offset = disp;
7654 else
7655 output_addr_const (file, disp);
7658 putc ('[', file);
7659 if (base)
7661 print_reg (base, 0, file);
7662 if (offset)
7664 if (INTVAL (offset) >= 0)
7665 putc ('+', file);
7666 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7669 else if (offset)
7670 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
7671 else
7672 putc ('0', file);
7674 if (index)
7676 putc ('+', file);
7677 print_reg (index, 0, file);
7678 if (scale != 1)
7679 fprintf (file, "*%d", scale);
7681 putc (']', file);
7686 bool
7687 output_addr_const_extra (FILE *file, rtx x)
7689 rtx op;
7691 if (GET_CODE (x) != UNSPEC)
7692 return false;
7694 op = XVECEXP (x, 0, 0);
7695 switch (XINT (x, 1))
7697 case UNSPEC_GOTTPOFF:
7698 output_addr_const (file, op);
7699 /* FIXME: This might be @TPOFF in Sun ld. */
7700 fputs ("@GOTTPOFF", file);
7701 break;
7702 case UNSPEC_TPOFF:
7703 output_addr_const (file, op);
7704 fputs ("@TPOFF", file);
7705 break;
7706 case UNSPEC_NTPOFF:
7707 output_addr_const (file, op);
7708 if (TARGET_64BIT)
7709 fputs ("@TPOFF", file);
7710 else
7711 fputs ("@NTPOFF", file);
7712 break;
7713 case UNSPEC_DTPOFF:
7714 output_addr_const (file, op);
7715 fputs ("@DTPOFF", file);
7716 break;
7717 case UNSPEC_GOTNTPOFF:
7718 output_addr_const (file, op);
7719 if (TARGET_64BIT)
7720 fputs ("@GOTTPOFF(%rip)", file);
7721 else
7722 fputs ("@GOTNTPOFF", file);
7723 break;
7724 case UNSPEC_INDNTPOFF:
7725 output_addr_const (file, op);
7726 fputs ("@INDNTPOFF", file);
7727 break;
7729 default:
7730 return false;
7733 return true;
7736 /* Split one or more DImode RTL references into pairs of SImode
7737 references. The RTL can be REG, offsettable MEM, integer constant, or
7738 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
7739 split and "num" is its length. lo_half and hi_half are output arrays
7740 that parallel "operands". */
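/* For example, a DImode memory operand at 8(%esp) splits into the SImode
   halves 8(%esp) (low word) and 12(%esp) (high word), matching the
   little-endian layout. */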
7742 void
7743 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7745 while (num--)
7747 rtx op = operands[num];
7749 /* simplify_subreg refuses to split volatile memory addresses,
7750 but we still have to handle them. */
7751 if (GET_CODE (op) == MEM)
7753 lo_half[num] = adjust_address (op, SImode, 0);
7754 hi_half[num] = adjust_address (op, SImode, 4);
7756 else
7758 lo_half[num] = simplify_gen_subreg (SImode, op,
7759 GET_MODE (op) == VOIDmode
7760 ? DImode : GET_MODE (op), 0);
7761 hi_half[num] = simplify_gen_subreg (SImode, op,
7762 GET_MODE (op) == VOIDmode
7763 ? DImode : GET_MODE (op), 4);
7767 /* Split one or more TImode RTL references into pairs of DImode
7768 references. The RTL can be REG, offsettable MEM, integer constant, or
7769 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
7770 split and "num" is its length. lo_half and hi_half are output arrays
7771 that parallel "operands". */
7773 void
7774 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
7776 while (num--)
7778 rtx op = operands[num];
7780 /* simplify_subreg refuses to split volatile memory addresses, but we
7781 still have to handle them. */
7782 if (GET_CODE (op) == MEM)
7784 lo_half[num] = adjust_address (op, DImode, 0);
7785 hi_half[num] = adjust_address (op, DImode, 8);
7787 else
7789 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
7790 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
7795 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
7796 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
7797 is the expression of the binary operation. The output may either be
7798 emitted here, or returned to the caller, like all output_* functions.
7800 There is no guarantee that the operands are the same mode, as they
7801 might be within FLOAT or FLOAT_EXTEND expressions. */
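/* For example, for (set (reg st0) (plus (reg st0) (mem))) this returns
   "fadd%z2\t%2", and for an SFmode SSE addition it returns
   "addss\t{%2, %0|%0, %2}", where the {att|intel} construct selects the
   operand order for the current assembler dialect. */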
7803 #ifndef SYSV386_COMPAT
7804 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
7805 wants to fix the assemblers because that causes incompatibility
7806 with gcc. No-one wants to fix gcc because that causes
7807 incompatibility with assemblers... You can use the option of
7808 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
7809 #define SYSV386_COMPAT 1
7810 #endif
7812 const char *
7813 output_387_binary_op (rtx insn, rtx *operands)
7815 static char buf[30];
7816 const char *p;
7817 const char *ssep;
7818 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]) | SSE_REG_P (operands[2]);
7820 #ifdef ENABLE_CHECKING
7821 /* Even if we do not want to check the inputs, this documents the input
7822 constraints, which helps in understanding the following code. */
7823 if (STACK_REG_P (operands[0])
7824 && ((REG_P (operands[1])
7825 && REGNO (operands[0]) == REGNO (operands[1])
7826 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
7827 || (REG_P (operands[2])
7828 && REGNO (operands[0]) == REGNO (operands[2])
7829 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
7830 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
7831 ; /* ok */
7832 else if (!is_sse)
7833 abort ();
7834 #endif
7836 switch (GET_CODE (operands[3]))
7838 case PLUS:
7839 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7840 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7841 p = "fiadd";
7842 else
7843 p = "fadd";
7844 ssep = "add";
7845 break;
7847 case MINUS:
7848 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7849 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7850 p = "fisub";
7851 else
7852 p = "fsub";
7853 ssep = "sub";
7854 break;
7856 case MULT:
7857 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7858 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7859 p = "fimul";
7860 else
7861 p = "fmul";
7862 ssep = "mul";
7863 break;
7865 case DIV:
7866 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
7867 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
7868 p = "fidiv";
7869 else
7870 p = "fdiv";
7871 ssep = "div";
7872 break;
7874 default:
7875 abort ();
7878 if (is_sse)
7880 strcpy (buf, ssep);
7881 if (GET_MODE (operands[0]) == SFmode)
7882 strcat (buf, "ss\t{%2, %0|%0, %2}");
7883 else
7884 strcat (buf, "sd\t{%2, %0|%0, %2}");
7885 return buf;
7887 strcpy (buf, p);
7889 switch (GET_CODE (operands[3]))
7891 case MULT:
7892 case PLUS:
7893 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
7895 rtx temp = operands[2];
7896 operands[2] = operands[1];
7897 operands[1] = temp;
7900 /* We now know operands[0] == operands[1]. */
7902 if (GET_CODE (operands[2]) == MEM)
7904 p = "%z2\t%2";
7905 break;
7908 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7910 if (STACK_TOP_P (operands[0]))
7911 /* How is it that we are storing to a dead operand[2]?
7912 Well, presumably operands[1] is dead too. We can't
7913 store the result to st(0) as st(0) gets popped on this
7914 instruction. Instead store to operands[2] (which I
7915 think has to be st(1)). st(1) will be popped later.
7916 gcc <= 2.8.1 didn't have this check and generated
7917 assembly code that the Unixware assembler rejected. */
7918 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7919 else
7920 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7921 break;
7924 if (STACK_TOP_P (operands[0]))
7925 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7926 else
7927 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
7928 break;
7930 case MINUS:
7931 case DIV:
7932 if (GET_CODE (operands[1]) == MEM)
7934 p = "r%z1\t%1";
7935 break;
7938 if (GET_CODE (operands[2]) == MEM)
7940 p = "%z2\t%2";
7941 break;
7944 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
7946 #if SYSV386_COMPAT
7947 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
7948 derived assemblers, confusingly reverse the direction of
7949 the operation for fsub{r} and fdiv{r} when the
7950 destination register is not st(0). The Intel assembler
7951 doesn't have this brain damage. Read !SYSV386_COMPAT to
7952 figure out what the hardware really does. */
7953 if (STACK_TOP_P (operands[0]))
7954 p = "{p\t%0, %2|rp\t%2, %0}";
7955 else
7956 p = "{rp\t%2, %0|p\t%0, %2}";
7957 #else
7958 if (STACK_TOP_P (operands[0]))
7959 /* As above for fmul/fadd, we can't store to st(0). */
7960 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
7961 else
7962 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
7963 #endif
7964 break;
7967 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
7969 #if SYSV386_COMPAT
7970 if (STACK_TOP_P (operands[0]))
7971 p = "{rp\t%0, %1|p\t%1, %0}";
7972 else
7973 p = "{p\t%1, %0|rp\t%0, %1}";
7974 #else
7975 if (STACK_TOP_P (operands[0]))
7976 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
7977 else
7978 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
7979 #endif
7980 break;
7983 if (STACK_TOP_P (operands[0]))
7985 if (STACK_TOP_P (operands[1]))
7986 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
7987 else
7988 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
7989 break;
7991 else if (STACK_TOP_P (operands[1]))
7993 #if SYSV386_COMPAT
7994 p = "{\t%1, %0|r\t%0, %1}";
7995 #else
7996 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
7997 #endif
7999 else
8001 #if SYSV386_COMPAT
8002 p = "{r\t%2, %0|\t%0, %2}";
8003 #else
8004 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8005 #endif
8007 break;
8009 default:
8010 abort ();
8013 strcat (buf, p);
8014 return buf;
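/* Illustrative example (not from the original source): for an SSE SFmode
   PLUS, ssep is "add" and the SFmode branch appends "ss", so the returned
   template is "addss\t{%2, %0|%0, %2}"; the DFmode branch would yield
   "addsd\t{%2, %0|%0, %2}" instead.  */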
8017 /* Output code to initialize the control word copies used by the
8018 trunc?f?i patterns. NORMAL is set to the current control word, while
8019 ROUND_DOWN is set to a control word that truncates (rounds toward zero). */
8020 void
8021 emit_i387_cw_initialization (rtx normal, rtx round_down)
8023 rtx reg = gen_reg_rtx (HImode);
8025 emit_insn (gen_x86_fnstcw_1 (normal));
8026 emit_move_insn (reg, normal);
8027 if (!TARGET_PARTIAL_REG_STALL && !optimize_size
8028 && !TARGET_64BIT)
8029 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8030 else
8031 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0xc00)));
8032 emit_move_insn (round_down, reg);
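/* Illustrative note (not from the original source): the i387 rounding
   control field occupies bits 10-11 of the control word, so OR-ing in
   0xc00 selects round-toward-zero (truncation), which is what the
   trunc?f?i patterns require.  The movsi_insv_1 path presumably inserts
   0x0c into the second byte of the register, setting the same two bits
   without a 16-bit operation.  */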
8035 /* Output code for INSN to convert a float to a signed int. OPERANDS
8036 are the insn operands. The output may be [HSD]Imode and the input
8037 operand may be [SDX]Fmode. */
8039 const char *
8040 output_fix_trunc (rtx insn, rtx *operands)
8042 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8043 int dimode_p = GET_MODE (operands[0]) == DImode;
8045 /* Jump through a hoop or two for DImode, since the hardware has no
8046 non-popping instruction. We used to do this a different way, but
8047 that was somewhat fragile and broke with post-reload splitters. */
8048 if (dimode_p && !stack_top_dies)
8049 output_asm_insn ("fld\t%y1", operands);
8051 if (!STACK_TOP_P (operands[1]))
8052 abort ();
8054 if (GET_CODE (operands[0]) != MEM)
8055 abort ();
8057 output_asm_insn ("fldcw\t%3", operands);
8058 if (stack_top_dies || dimode_p)
8059 output_asm_insn ("fistp%z0\t%0", operands);
8060 else
8061 output_asm_insn ("fist%z0\t%0", operands);
8062 output_asm_insn ("fldcw\t%2", operands);
8064 return "";
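/* Illustrative example (not from the original source): for a DImode result
   whose input must stay live, the sequence emitted above is roughly
       fld    %st(0)       duplicate st(0), since fistp below pops it
       fldcw  <operand 3>  switch to the truncating control word
       fistp  <operand 0>  popping integer store
       fldcw  <operand 2>  restore the original control word
   where the two control word copies were set up by
   emit_i387_cw_initialization.  */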
8067 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8068 should be used and 2 when fnstsw should be used. UNORDERED_P is true
8069 when fucom should be used. */
8071 const char *
8072 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8074 int stack_top_dies;
8075 rtx cmp_op0 = operands[0];
8076 rtx cmp_op1 = operands[1];
8077 int is_sse = SSE_REG_P (operands[0]) | SSE_REG_P (operands[1]);
8079 if (eflags_p == 2)
8081 cmp_op0 = cmp_op1;
8082 cmp_op1 = operands[2];
8084 if (is_sse)
8086 if (GET_MODE (operands[0]) == SFmode)
8087 if (unordered_p)
8088 return "ucomiss\t{%1, %0|%0, %1}";
8089 else
8090 return "comiss\t{%1, %0|%0, %1}";
8091 else
8092 if (unordered_p)
8093 return "ucomisd\t{%1, %0|%0, %1}";
8094 else
8095 return "comisd\t{%1, %0|%0, %1}";
8098 if (! STACK_TOP_P (cmp_op0))
8099 abort ();
8101 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8103 if (STACK_REG_P (cmp_op1)
8104 && stack_top_dies
8105 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8106 && REGNO (cmp_op1) != FIRST_STACK_REG)
8108 /* If the top of the 387 stack dies, and the other operand is also a
8109 stack register that dies, then this must be a `fcompp'
8110 float compare. */
8112 if (eflags_p == 1)
8114 /* There is no double-popping fcomi variant. Fortunately,
8115 eflags is immune to the fstp's cc clobbering. */
8116 if (unordered_p)
8117 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8118 else
8119 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8120 return "fstp\t%y0";
8122 else
8124 if (eflags_p == 2)
8126 if (unordered_p)
8127 return "fucompp\n\tfnstsw\t%0";
8128 else
8129 return "fcompp\n\tfnstsw\t%0";
8131 else
8133 if (unordered_p)
8134 return "fucompp";
8135 else
8136 return "fcompp";
8140 else
8142 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8144 static const char * const alt[24] =
8146 "fcom%z1\t%y1",
8147 "fcomp%z1\t%y1",
8148 "fucom%z1\t%y1",
8149 "fucomp%z1\t%y1",
8151 "ficom%z1\t%y1",
8152 "ficomp%z1\t%y1",
8153 NULL,
8154 NULL,
8156 "fcomi\t{%y1, %0|%0, %y1}",
8157 "fcomip\t{%y1, %0|%0, %y1}",
8158 "fucomi\t{%y1, %0|%0, %y1}",
8159 "fucomip\t{%y1, %0|%0, %y1}",
8161 NULL,
8162 NULL,
8163 NULL,
8164 NULL,
8166 "fcom%z2\t%y2\n\tfnstsw\t%0",
8167 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8168 "fucom%z2\t%y2\n\tfnstsw\t%0",
8169 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8171 "ficom%z2\t%y2\n\tfnstsw\t%0",
8172 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8173 NULL,
8174 NULL
8177 int mask;
8178 const char *ret;
8180 mask = eflags_p << 3;
8181 mask |= (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT) << 2;
8182 mask |= unordered_p << 1;
8183 mask |= stack_top_dies;
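/* Worked example (not from the original source): for an fcomi-style compare
   (eflags_p == 1) of FP operands where the comparison is unordered and the
   top of the stack dies, mask = (1 << 3) | 0 | (1 << 1) | 1 = 11, selecting
   "fucomip\t{%y1, %0|%0, %y1}" from the table above.  */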
8185 if (mask >= 24)
8186 abort ();
8187 ret = alt[mask];
8188 if (ret == NULL)
8189 abort ();
8191 return ret;
8195 void
8196 ix86_output_addr_vec_elt (FILE *file, int value)
8198 const char *directive = ASM_LONG;
8200 if (TARGET_64BIT)
8202 #ifdef ASM_QUAD
8203 directive = ASM_QUAD;
8204 #else
8205 abort ();
8206 #endif
8209 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
8212 void
8213 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
8215 if (TARGET_64BIT)
8216 fprintf (file, "%s%s%d-%s%d\n",
8217 ASM_LONG, LPREFIX, value, LPREFIX, rel);
8218 else if (HAVE_AS_GOTOFF_IN_DATA)
8219 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
8220 #if TARGET_MACHO
8221 else if (TARGET_MACHO)
8223 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
8224 machopic_output_function_base_name (file);
8225 fprintf(file, "\n");
8227 #endif
8228 else
8229 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
8230 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
8233 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
8234 for the target. */
8236 void
8237 ix86_expand_clear (rtx dest)
8239 rtx tmp;
8241 /* We play register width games, which are only valid after reload. */
8242 if (!reload_completed)
8243 abort ();
8245 /* Avoid HImode and its attendant prefix byte. */
8246 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
8247 dest = gen_rtx_REG (SImode, REGNO (dest));
8249 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
8251 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
8252 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
8254 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
8255 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
8258 emit_insn (tmp);
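/* Illustrative note (not from the original source): for a 32-bit general
   register the PARALLEL above typically assembles to "xor{l} %reg, %reg"
   with an explicit flags clobber; when TARGET_USE_MOV0 is set and we are
   not optimizing for size, the plain SET is kept so that the move pattern
   can emit "mov{l} $0, %reg", which leaves the flags untouched.  */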
8261 /* X is an unchanging MEM. If it is a constant pool reference, return
8262 the constant pool rtx, else NULL. */
8264 static rtx
8265 maybe_get_pool_constant (rtx x)
8267 x = ix86_delegitimize_address (XEXP (x, 0));
8269 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
8270 return get_pool_constant (x);
8272 return NULL_RTX;
8275 void
8276 ix86_expand_move (enum machine_mode mode, rtx operands[])
8278 int strict = (reload_in_progress || reload_completed);
8279 rtx op0, op1;
8280 enum tls_model model;
8282 op0 = operands[0];
8283 op1 = operands[1];
8285 model = tls_symbolic_operand (op1, Pmode);
8286 if (model)
8288 op1 = legitimize_tls_address (op1, model, true);
8289 op1 = force_operand (op1, op0);
8290 if (op1 == op0)
8291 return;
8294 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
8296 #if TARGET_MACHO
8297 if (MACHOPIC_PURE)
8299 rtx temp = ((reload_in_progress
8300 || ((op0 && GET_CODE (op0) == REG)
8301 && mode == Pmode))
8302 ? op0 : gen_reg_rtx (Pmode));
8303 op1 = machopic_indirect_data_reference (op1, temp);
8304 op1 = machopic_legitimize_pic_address (op1, mode,
8305 temp == op1 ? 0 : temp);
8307 else if (MACHOPIC_INDIRECT)
8308 op1 = machopic_indirect_data_reference (op1, 0);
8309 if (op0 == op1)
8310 return;
8311 #else
8312 if (GET_CODE (op0) == MEM)
8313 op1 = force_reg (Pmode, op1);
8314 else
8316 rtx temp = op0;
8317 if (GET_CODE (temp) != REG)
8318 temp = gen_reg_rtx (Pmode);
8319 temp = legitimize_pic_address (op1, temp);
8320 if (temp == op0)
8321 return;
8322 op1 = temp;
8324 #endif /* TARGET_MACHO */
8326 else
8328 if (GET_CODE (op0) == MEM
8329 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
8330 || !push_operand (op0, mode))
8331 && GET_CODE (op1) == MEM)
8332 op1 = force_reg (mode, op1);
8334 if (push_operand (op0, mode)
8335 && ! general_no_elim_operand (op1, mode))
8336 op1 = copy_to_mode_reg (mode, op1);
8338 /* In 64-bit compilation, force large constants into a register
8339 so that they get CSEed. */
8340 if (TARGET_64BIT && mode == DImode
8341 && immediate_operand (op1, mode)
8342 && !x86_64_zero_extended_value (op1)
8343 && !register_operand (op0, mode)
8344 && optimize && !reload_completed && !reload_in_progress)
8345 op1 = copy_to_mode_reg (mode, op1);
8347 if (FLOAT_MODE_P (mode))
8349 /* If we are loading a floating point constant to a register,
8350 force the value to memory now, since we'll get better code
8351 out of the back end. */
8353 if (strict)
8355 else if (GET_CODE (op1) == CONST_DOUBLE)
8357 op1 = validize_mem (force_const_mem (mode, op1));
8358 if (!register_operand (op0, mode))
8360 rtx temp = gen_reg_rtx (mode);
8361 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
8362 emit_move_insn (op0, temp);
8363 return;
8369 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
8372 void
8373 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
8375 /* Force constants other than zero into memory. We do not know how
8376 the instructions used to build constants modify the upper 64 bits
8377 of the register; once we have that information, we may be able
8378 to handle some of them more efficiently. */
8379 if ((reload_in_progress | reload_completed) == 0
8380 && register_operand (operands[0], mode)
8381 && CONSTANT_P (operands[1]) && operands[1] != CONST0_RTX (mode))
8382 operands[1] = validize_mem (force_const_mem (mode, operands[1]));
8384 /* Make operand1 a register if it isn't already. */
8385 if (!no_new_pseudos
8386 && !register_operand (operands[0], mode)
8387 && !register_operand (operands[1], mode))
8389 rtx temp = force_reg (GET_MODE (operands[1]), operands[1]);
8390 emit_move_insn (operands[0], temp);
8391 return;
8394 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8397 /* Attempt to expand a binary operator. Make the expansion closer to the
8398 actual machine than just general_operand, which would allow 3 separate
8399 memory references (one output, two inputs) in a single insn. */
8401 void
8402 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
8403 rtx operands[])
8405 int matching_memory;
8406 rtx src1, src2, dst, op, clob;
8408 dst = operands[0];
8409 src1 = operands[1];
8410 src2 = operands[2];
8412 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
8413 if (GET_RTX_CLASS (code) == 'c'
8414 && (rtx_equal_p (dst, src2)
8415 || immediate_operand (src1, mode)))
8417 rtx temp = src1;
8418 src1 = src2;
8419 src2 = temp;
8422 /* If the destination is memory, and we do not have matching source
8423 operands, do things in registers. */
8424 matching_memory = 0;
8425 if (GET_CODE (dst) == MEM)
8427 if (rtx_equal_p (dst, src1))
8428 matching_memory = 1;
8429 else if (GET_RTX_CLASS (code) == 'c'
8430 && rtx_equal_p (dst, src2))
8431 matching_memory = 2;
8432 else
8433 dst = gen_reg_rtx (mode);
8436 /* Both source operands cannot be in memory. */
8437 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
8439 if (matching_memory != 2)
8440 src2 = force_reg (mode, src2);
8441 else
8442 src1 = force_reg (mode, src1);
8445 /* If the operation is not commutative, source 1 cannot be a constant
8446 or non-matching memory. */
8447 if ((CONSTANT_P (src1)
8448 || (!matching_memory && GET_CODE (src1) == MEM))
8449 && GET_RTX_CLASS (code) != 'c')
8450 src1 = force_reg (mode, src1);
8452 /* If optimizing, copy to regs to improve CSE */
8453 if (optimize && ! no_new_pseudos)
8455 if (GET_CODE (dst) == MEM)
8456 dst = gen_reg_rtx (mode);
8457 if (GET_CODE (src1) == MEM)
8458 src1 = force_reg (mode, src1);
8459 if (GET_CODE (src2) == MEM)
8460 src2 = force_reg (mode, src2);
8463 /* Emit the instruction. */
8465 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
8466 if (reload_in_progress)
8468 /* Reload doesn't know about the flags register, and doesn't know that
8469 it doesn't want to clobber it. We can only do this with PLUS. */
8470 if (code != PLUS)
8471 abort ();
8472 emit_insn (op);
8474 else
8476 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8477 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8480 /* Fix up the destination if needed. */
8481 if (dst != operands[0])
8482 emit_move_insn (operands[0], dst);
8485 /* Return TRUE or FALSE depending on whether the binary operator meets the
8486 appropriate constraints. */
8489 ix86_binary_operator_ok (enum rtx_code code,
8490 enum machine_mode mode ATTRIBUTE_UNUSED,
8491 rtx operands[3])
8493 /* Both source operands cannot be in memory. */
8494 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
8495 return 0;
8496 /* If the operation is not commutative, source 1 cannot be a constant. */
8497 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != 'c')
8498 return 0;
8499 /* If the destination is memory, we must have a matching source operand. */
8500 if (GET_CODE (operands[0]) == MEM
8501 && ! (rtx_equal_p (operands[0], operands[1])
8502 || (GET_RTX_CLASS (code) == 'c'
8503 && rtx_equal_p (operands[0], operands[2]))))
8504 return 0;
8505 /* If the operation is not commutative and source 1 is memory, we must
8506 have a matching destination. */
8507 if (GET_CODE (operands[1]) == MEM
8508 && GET_RTX_CLASS (code) != 'c'
8509 && ! rtx_equal_p (operands[0], operands[1]))
8510 return 0;
8511 return 1;
8514 /* Attempt to expand a unary operator. Make the expansion closer to the
8515 actual machine than just general_operand, which would allow 2 separate
8516 memory references (one output, one input) in a single insn. */
8518 void
8519 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
8520 rtx operands[])
8522 int matching_memory;
8523 rtx src, dst, op, clob;
8525 dst = operands[0];
8526 src = operands[1];
8528 /* If the destination is memory, and we do not have matching source
8529 operands, do things in registers. */
8530 matching_memory = 0;
8531 if (GET_CODE (dst) == MEM)
8533 if (rtx_equal_p (dst, src))
8534 matching_memory = 1;
8535 else
8536 dst = gen_reg_rtx (mode);
8539 /* When source operand is memory, destination must match. */
8540 if (!matching_memory && GET_CODE (src) == MEM)
8541 src = force_reg (mode, src);
8543 /* If optimizing, copy to regs to improve CSE */
8544 if (optimize && ! no_new_pseudos)
8546 if (GET_CODE (dst) == MEM)
8547 dst = gen_reg_rtx (mode);
8548 if (GET_CODE (src) == MEM)
8549 src = force_reg (mode, src);
8552 /* Emit the instruction. */
8554 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
8555 if (reload_in_progress || code == NOT)
8557 /* Reload doesn't know about the flags register, and doesn't know that
8558 it doesn't want to clobber it. */
8559 if (code != NOT)
8560 abort ();
8561 emit_insn (op);
8563 else
8565 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
8566 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
8569 /* Fix up the destination if needed. */
8570 if (dst != operands[0])
8571 emit_move_insn (operands[0], dst);
8574 /* Return TRUE or FALSE depending on whether the unary operator meets the
8575 appropriate constraints. */
8578 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
8579 enum machine_mode mode ATTRIBUTE_UNUSED,
8580 rtx operands[2] ATTRIBUTE_UNUSED)
8582 /* If one of operands is memory, source and destination must match. */
8583 if ((GET_CODE (operands[0]) == MEM
8584 || GET_CODE (operands[1]) == MEM)
8585 && ! rtx_equal_p (operands[0], operands[1]))
8586 return FALSE;
8587 return TRUE;
8590 /* Return TRUE or FALSE depending on whether the first SET in INSN
8591 has source and destination with matching CC modes, and whether the
8592 CC mode is at least as constrained as REQ_MODE. */
8595 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
8597 rtx set;
8598 enum machine_mode set_mode;
8600 set = PATTERN (insn);
8601 if (GET_CODE (set) == PARALLEL)
8602 set = XVECEXP (set, 0, 0);
8603 if (GET_CODE (set) != SET)
8604 abort ();
8605 if (GET_CODE (SET_SRC (set)) != COMPARE)
8606 abort ();
8608 set_mode = GET_MODE (SET_DEST (set));
8609 switch (set_mode)
8611 case CCNOmode:
8612 if (req_mode != CCNOmode
8613 && (req_mode != CCmode
8614 || XEXP (SET_SRC (set), 1) != const0_rtx))
8615 return 0;
8616 break;
8617 case CCmode:
8618 if (req_mode == CCGCmode)
8619 return 0;
8620 /* FALLTHRU */
8621 case CCGCmode:
8622 if (req_mode == CCGOCmode || req_mode == CCNOmode)
8623 return 0;
8624 /* FALLTHRU */
8625 case CCGOCmode:
8626 if (req_mode == CCZmode)
8627 return 0;
8628 /* FALLTHRU */
8629 case CCZmode:
8630 break;
8632 default:
8633 abort ();
8636 return (GET_MODE (SET_SRC (set)) == set_mode);
8639 /* Generate insn patterns to do an integer compare of OPERANDS. */
8641 static rtx
8642 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
8644 enum machine_mode cmpmode;
8645 rtx tmp, flags;
8647 cmpmode = SELECT_CC_MODE (code, op0, op1);
8648 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
8650 /* This is very simple, but making the interface the same as in the
8651 FP case makes the rest of the code easier. */
8652 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
8653 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
8655 /* Return the test that should be put into the flags user, i.e.
8656 the bcc, scc, or cmov instruction. */
8657 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
8660 /* Figure out whether to use ordered or unordered fp comparisons.
8661 Return the appropriate mode to use. */
8663 enum machine_mode
8664 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
8666 /* ??? In order to make all comparisons reversible, we do all comparisons
8667 non-trapping when compiling for IEEE. Once gcc is able to distinguish
8668 between the trapping and nontrapping forms of all comparisons, we can make
8669 inequality comparisons trapping again, since that results in better code when
8670 using FCOM based compares. */
8671 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
8674 enum machine_mode
8675 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
8677 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
8678 return ix86_fp_compare_mode (code);
8679 switch (code)
8681 /* Only zero flag is needed. */
8682 case EQ: /* ZF=0 */
8683 case NE: /* ZF!=0 */
8684 return CCZmode;
8685 /* Codes needing carry flag. */
8686 case GEU: /* CF=0 */
8687 case GTU: /* CF=0 & ZF=0 */
8688 case LTU: /* CF=1 */
8689 case LEU: /* CF=1 | ZF=1 */
8690 return CCmode;
8691 /* Codes possibly doable only with sign flag when
8692 comparing against zero. */
8693 case GE: /* SF=OF or SF=0 */
8694 case LT: /* SF<>OF or SF=1 */
8695 if (op1 == const0_rtx)
8696 return CCGOCmode;
8697 else
8698 /* For other cases Carry flag is not required. */
8699 return CCGCmode;
8700 /* Codes doable only with the sign flag when comparing
8701 against zero, but there is no jump instruction for them,
8702 so we need to use relational tests against overflow,
8703 which thus needs to be zero. */
8704 case GT: /* ZF=0 & SF=OF */
8705 case LE: /* ZF=1 | SF<>OF */
8706 if (op1 == const0_rtx)
8707 return CCNOmode;
8708 else
8709 return CCGCmode;
8710 /* The strcmp pattern does (use flags), and combine may ask us for the
8711 proper mode. */
8712 case USE:
8713 return CCmode;
8714 default:
8715 abort ();
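/* Illustrative examples (not from the original source): an integer EQ or NE
   test only needs the zero flag and gets CCZmode; a signed "a < b" with
   b != 0 needs the full signed flags and gets CCGCmode; "a < 0" can be
   decided from the sign flag alone and gets CCGOCmode; unsigned compares
   such as LTU need the carry flag and get plain CCmode.  */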
8719 /* Return true if we should use an FCOMI instruction for this fp comparison. */
8722 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
8724 enum rtx_code swapped_code = swap_condition (code);
8725 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
8726 || (ix86_fp_comparison_cost (swapped_code)
8727 == ix86_fp_comparison_fcomi_cost (swapped_code)));
8730 /* Swap, force into registers, or otherwise massage the two operands
8731 to a fp comparison. The operands are updated in place; the new
8732 comparison code is returned. */
8734 static enum rtx_code
8735 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
8737 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
8738 rtx op0 = *pop0, op1 = *pop1;
8739 enum machine_mode op_mode = GET_MODE (op0);
8740 int is_sse = SSE_REG_P (op0) | SSE_REG_P (op1);
8742 /* All of the unordered compare instructions only work on registers.
8743 The same is true of the XFmode compare instructions. The same is
8744 true of the fcomi compare instructions. */
8746 if (!is_sse
8747 && (fpcmp_mode == CCFPUmode
8748 || op_mode == XFmode
8749 || ix86_use_fcomi_compare (code)))
8751 op0 = force_reg (op_mode, op0);
8752 op1 = force_reg (op_mode, op1);
8754 else
8756 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
8757 things around if they appear profitable, otherwise force op0
8758 into a register. */
8760 if (standard_80387_constant_p (op0) == 0
8761 || (GET_CODE (op0) == MEM
8762 && ! (standard_80387_constant_p (op1) == 0
8763 || GET_CODE (op1) == MEM)))
8765 rtx tmp;
8766 tmp = op0, op0 = op1, op1 = tmp;
8767 code = swap_condition (code);
8770 if (GET_CODE (op0) != REG)
8771 op0 = force_reg (op_mode, op0);
8773 if (CONSTANT_P (op1))
8775 if (standard_80387_constant_p (op1))
8776 op1 = force_reg (op_mode, op1);
8777 else
8778 op1 = validize_mem (force_const_mem (op_mode, op1));
8782 /* Try to rearrange the comparison to make it cheaper. */
8783 if (ix86_fp_comparison_cost (code)
8784 > ix86_fp_comparison_cost (swap_condition (code))
8785 && (GET_CODE (op1) == REG || !no_new_pseudos))
8787 rtx tmp;
8788 tmp = op0, op0 = op1, op1 = tmp;
8789 code = swap_condition (code);
8790 if (GET_CODE (op0) != REG)
8791 op0 = force_reg (op_mode, op0);
8794 *pop0 = op0;
8795 *pop1 = op1;
8796 return code;
8799 /* Convert comparison codes we use to represent FP comparison to integer
8800 code that will result in proper branch. Return UNKNOWN if no such code
8801 is available. */
8802 static enum rtx_code
8803 ix86_fp_compare_code_to_integer (enum rtx_code code)
8805 switch (code)
8807 case GT:
8808 return GTU;
8809 case GE:
8810 return GEU;
8811 case ORDERED:
8812 case UNORDERED:
8813 return code;
8814 break;
8815 case UNEQ:
8816 return EQ;
8817 break;
8818 case UNLT:
8819 return LTU;
8820 break;
8821 case UNLE:
8822 return LEU;
8823 break;
8824 case LTGT:
8825 return NE;
8826 break;
8827 default:
8828 return UNKNOWN;
8832 /* Split comparison code CODE into comparisons we can do using branch
8833 instructions. BYPASS_CODE is the comparison code for the branch that will
8834 branch around FIRST_CODE and SECOND_CODE. If one of the branches
8835 is not required, its value is set to NIL.
8836 We never require more than two branches. */
8837 static void
8838 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
8839 enum rtx_code *first_code,
8840 enum rtx_code *second_code)
8842 *first_code = code;
8843 *bypass_code = NIL;
8844 *second_code = NIL;
8846 /* The fcomi comparison sets flags as follows:
8848 cmp ZF PF CF
8849 > 0 0 0
8850 < 0 0 1
8851 = 1 0 0
8852 un 1 1 1 */
8854 switch (code)
8856 case GT: /* GTU - CF=0 & ZF=0 */
8857 case GE: /* GEU - CF=0 */
8858 case ORDERED: /* PF=0 */
8859 case UNORDERED: /* PF=1 */
8860 case UNEQ: /* EQ - ZF=1 */
8861 case UNLT: /* LTU - CF=1 */
8862 case UNLE: /* LEU - CF=1 | ZF=1 */
8863 case LTGT: /* EQ - ZF=0 */
8864 break;
8865 case LT: /* LTU - CF=1 - fails on unordered */
8866 *first_code = UNLT;
8867 *bypass_code = UNORDERED;
8868 break;
8869 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
8870 *first_code = UNLE;
8871 *bypass_code = UNORDERED;
8872 break;
8873 case EQ: /* EQ - ZF=1 - fails on unordered */
8874 *first_code = UNEQ;
8875 *bypass_code = UNORDERED;
8876 break;
8877 case NE: /* NE - ZF=0 - fails on unordered */
8878 *first_code = LTGT;
8879 *second_code = UNORDERED;
8880 break;
8881 case UNGE: /* GEU - CF=0 - fails on unordered */
8882 *first_code = GE;
8883 *second_code = UNORDERED;
8884 break;
8885 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
8886 *first_code = GT;
8887 *second_code = UNORDERED;
8888 break;
8889 default:
8890 abort ();
8892 if (!TARGET_IEEE_FP)
8894 *second_code = NIL;
8895 *bypass_code = NIL;
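/* Illustrative example (not from the original source): with TARGET_IEEE_FP,
   code == LT becomes first_code = UNLT with bypass_code = UNORDERED, i.e.
   branch around the test when the operands are unordered (PF set) and
   otherwise branch on CF as for an unsigned less-than; without IEEE the
   code just above drops the bypass branch again.  */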
8899 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
8900 All the following functions use the number of instructions as a cost metric.
8901 In the future this should be tweaked to compute bytes for optimize_size and
8902 take into account the performance of various instructions on various CPUs. */
8903 static int
8904 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
8906 if (!TARGET_IEEE_FP)
8907 return 4;
8908 /* The cost of code output by ix86_expand_fp_compare. */
8909 switch (code)
8911 case UNLE:
8912 case UNLT:
8913 case LTGT:
8914 case GT:
8915 case GE:
8916 case UNORDERED:
8917 case ORDERED:
8918 case UNEQ:
8919 return 4;
8920 break;
8921 case LT:
8922 case NE:
8923 case EQ:
8924 case UNGE:
8925 return 5;
8926 break;
8927 case LE:
8928 case UNGT:
8929 return 6;
8930 break;
8931 default:
8932 abort ();
8936 /* Return cost of comparison done using fcomi operation.
8937 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8938 static int
8939 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
8941 enum rtx_code bypass_code, first_code, second_code;
8942 /* Return arbitrarily high cost when instruction is not supported - this
8943 prevents gcc from using it. */
8944 if (!TARGET_CMOVE)
8945 return 1024;
8946 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8947 return (bypass_code != NIL || second_code != NIL) + 2;
8950 /* Return cost of comparison done using sahf operation.
8951 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8952 static int
8953 ix86_fp_comparison_sahf_cost (enum rtx_code code)
8955 enum rtx_code bypass_code, first_code, second_code;
8956 /* Return arbitrarily high cost when the instruction is not preferred - this
8957 prevents gcc from using it. */
8958 if (!TARGET_USE_SAHF && !optimize_size)
8959 return 1024;
8960 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
8961 return (bypass_code != NIL || second_code != NIL) + 3;
8964 /* Compute cost of the comparison done using any method.
8965 See ix86_fp_comparison_arithmetics_cost for the metrics. */
8966 static int
8967 ix86_fp_comparison_cost (enum rtx_code code)
8969 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
8970 int min;
8972 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
8973 sahf_cost = ix86_fp_comparison_sahf_cost (code);
8975 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
8976 if (min > sahf_cost)
8977 min = sahf_cost;
8978 if (min > fcomi_cost)
8979 min = fcomi_cost;
8980 return min;
8983 /* Generate insn patterns to do a floating point compare of OPERANDS. */
8985 static rtx
8986 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
8987 rtx *second_test, rtx *bypass_test)
8989 enum machine_mode fpcmp_mode, intcmp_mode;
8990 rtx tmp, tmp2;
8991 int cost = ix86_fp_comparison_cost (code);
8992 enum rtx_code bypass_code, first_code, second_code;
8994 fpcmp_mode = ix86_fp_compare_mode (code);
8995 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
8997 if (second_test)
8998 *second_test = NULL_RTX;
8999 if (bypass_test)
9000 *bypass_test = NULL_RTX;
9002 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9004 /* Do fcomi/sahf based test when profitable. */
9005 if ((bypass_code == NIL || bypass_test)
9006 && (second_code == NIL || second_test)
9007 && ix86_fp_comparison_arithmetics_cost (code) > cost)
9009 if (TARGET_CMOVE)
9011 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9012 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
9013 tmp);
9014 emit_insn (tmp);
9016 else
9018 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9019 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9020 if (!scratch)
9021 scratch = gen_reg_rtx (HImode);
9022 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9023 emit_insn (gen_x86_sahf_1 (scratch));
9026 /* The FP codes work out to act like unsigned. */
9027 intcmp_mode = fpcmp_mode;
9028 code = first_code;
9029 if (bypass_code != NIL)
9030 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
9031 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9032 const0_rtx);
9033 if (second_code != NIL)
9034 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
9035 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9036 const0_rtx);
9038 else
9040 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
9041 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
9042 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
9043 if (!scratch)
9044 scratch = gen_reg_rtx (HImode);
9045 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
9047 /* In the unordered case, we have to check C2 for NaN's, which
9048 doesn't happen to work out to anything nice combination-wise.
9049 So do some bit twiddling on the value we've got in AH to come
9050 up with an appropriate set of condition codes. */
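/* Illustrative note (not from the original source): after the fnstsw result
   is moved into AH, bit 0 of AH is C0, bit 2 is C2 and bit 6 is C3 of the
   FPU status word, so the masks below read as 0x01 = C0, 0x04 = C2,
   0x40 = C3 and 0x45 = C0|C2|C3.  */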
9052 intcmp_mode = CCNOmode;
9053 switch (code)
9055 case GT:
9056 case UNGT:
9057 if (code == GT || !TARGET_IEEE_FP)
9059 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9060 code = EQ;
9062 else
9064 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9065 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9066 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
9067 intcmp_mode = CCmode;
9068 code = GEU;
9070 break;
9071 case LT:
9072 case UNLT:
9073 if (code == LT && TARGET_IEEE_FP)
9075 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9076 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
9077 intcmp_mode = CCmode;
9078 code = EQ;
9080 else
9082 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
9083 code = NE;
9085 break;
9086 case GE:
9087 case UNGE:
9088 if (code == GE || !TARGET_IEEE_FP)
9090 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
9091 code = EQ;
9093 else
9095 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9096 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9097 GEN_INT (0x01)));
9098 code = NE;
9100 break;
9101 case LE:
9102 case UNLE:
9103 if (code == LE && TARGET_IEEE_FP)
9105 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9106 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
9107 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9108 intcmp_mode = CCmode;
9109 code = LTU;
9111 else
9113 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
9114 code = NE;
9116 break;
9117 case EQ:
9118 case UNEQ:
9119 if (code == EQ && TARGET_IEEE_FP)
9121 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9122 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
9123 intcmp_mode = CCmode;
9124 code = EQ;
9126 else
9128 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9129 code = NE;
9130 break;
9132 break;
9133 case NE:
9134 case LTGT:
9135 if (code == NE && TARGET_IEEE_FP)
9137 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
9138 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
9139 GEN_INT (0x40)));
9140 code = NE;
9142 else
9144 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
9145 code = EQ;
9147 break;
9149 case UNORDERED:
9150 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9151 code = NE;
9152 break;
9153 case ORDERED:
9154 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
9155 code = EQ;
9156 break;
9158 default:
9159 abort ();
9163 /* Return the test that should be put into the flags user, i.e.
9164 the bcc, scc, or cmov instruction. */
9165 return gen_rtx_fmt_ee (code, VOIDmode,
9166 gen_rtx_REG (intcmp_mode, FLAGS_REG),
9167 const0_rtx);
9171 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
9173 rtx op0, op1, ret;
9174 op0 = ix86_compare_op0;
9175 op1 = ix86_compare_op1;
9177 if (second_test)
9178 *second_test = NULL_RTX;
9179 if (bypass_test)
9180 *bypass_test = NULL_RTX;
9182 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_FLOAT)
9183 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9184 second_test, bypass_test);
9185 else
9186 ret = ix86_expand_int_compare (code, op0, op1);
9188 return ret;
9191 /* Return true if the CODE will result in nontrivial jump sequence. */
9192 bool
9193 ix86_fp_jump_nontrivial_p (enum rtx_code code)
9195 enum rtx_code bypass_code, first_code, second_code;
9196 if (!TARGET_CMOVE)
9197 return true;
9198 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9199 return bypass_code != NIL || second_code != NIL;
9202 void
9203 ix86_expand_branch (enum rtx_code code, rtx label)
9205 rtx tmp;
9207 switch (GET_MODE (ix86_compare_op0))
9209 case QImode:
9210 case HImode:
9211 case SImode:
9212 simple:
9213 tmp = ix86_expand_compare (code, NULL, NULL);
9214 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9215 gen_rtx_LABEL_REF (VOIDmode, label),
9216 pc_rtx);
9217 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
9218 return;
9220 case SFmode:
9221 case DFmode:
9222 case XFmode:
9224 rtvec vec;
9225 int use_fcomi;
9226 enum rtx_code bypass_code, first_code, second_code;
9228 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
9229 &ix86_compare_op1);
9231 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
9233 /* Check whether we will use the natural sequence with one jump. If
9234 so, we can expand the jump early. Otherwise delay expansion by
9235 creating a compound insn so as not to confuse the optimizers. */
9236 if (bypass_code == NIL && second_code == NIL
9237 && TARGET_CMOVE)
9239 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
9240 gen_rtx_LABEL_REF (VOIDmode, label),
9241 pc_rtx, NULL_RTX);
9243 else
9245 tmp = gen_rtx_fmt_ee (code, VOIDmode,
9246 ix86_compare_op0, ix86_compare_op1);
9247 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
9248 gen_rtx_LABEL_REF (VOIDmode, label),
9249 pc_rtx);
9250 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
9252 use_fcomi = ix86_use_fcomi_compare (code);
9253 vec = rtvec_alloc (3 + !use_fcomi);
9254 RTVEC_ELT (vec, 0) = tmp;
9255 RTVEC_ELT (vec, 1)
9256 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
9257 RTVEC_ELT (vec, 2)
9258 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
9259 if (! use_fcomi)
9260 RTVEC_ELT (vec, 3)
9261 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
9263 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
9265 return;
9268 case DImode:
9269 if (TARGET_64BIT)
9270 goto simple;
9271 /* Expand DImode branch into multiple compare+branch. */
9273 rtx lo[2], hi[2], label2;
9274 enum rtx_code code1, code2, code3;
9276 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
9278 tmp = ix86_compare_op0;
9279 ix86_compare_op0 = ix86_compare_op1;
9280 ix86_compare_op1 = tmp;
9281 code = swap_condition (code);
9283 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
9284 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
9286 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
9287 avoid two branches. This costs one extra insn, so disable when
9288 optimizing for size. */
9290 if ((code == EQ || code == NE)
9291 && (!optimize_size
9292 || hi[1] == const0_rtx || lo[1] == const0_rtx))
9294 rtx xor0, xor1;
9296 xor1 = hi[0];
9297 if (hi[1] != const0_rtx)
9298 xor1 = expand_binop (SImode, xor_optab, xor1, hi[1],
9299 NULL_RTX, 0, OPTAB_WIDEN);
9301 xor0 = lo[0];
9302 if (lo[1] != const0_rtx)
9303 xor0 = expand_binop (SImode, xor_optab, xor0, lo[1],
9304 NULL_RTX, 0, OPTAB_WIDEN);
9306 tmp = expand_binop (SImode, ior_optab, xor1, xor0,
9307 NULL_RTX, 0, OPTAB_WIDEN);
9309 ix86_compare_op0 = tmp;
9310 ix86_compare_op1 = const0_rtx;
9311 ix86_expand_branch (code, label);
9312 return;
9315 /* Otherwise, if we are doing a less-than or greater-or-equal-than
9316 comparison, op1 is a constant, and its low word is zero, then we can
9317 just examine the high word. */
9319 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
9320 switch (code)
9322 case LT: case LTU: case GE: case GEU:
9323 ix86_compare_op0 = hi[0];
9324 ix86_compare_op1 = hi[1];
9325 ix86_expand_branch (code, label);
9326 return;
9327 default:
9328 break;
9331 /* Otherwise, we need two or three jumps. */
9333 label2 = gen_label_rtx ();
9335 code1 = code;
9336 code2 = swap_condition (code);
9337 code3 = unsigned_condition (code);
9339 switch (code)
9341 case LT: case GT: case LTU: case GTU:
9342 break;
9344 case LE: code1 = LT; code2 = GT; break;
9345 case GE: code1 = GT; code2 = LT; break;
9346 case LEU: code1 = LTU; code2 = GTU; break;
9347 case GEU: code1 = GTU; code2 = LTU; break;
9349 case EQ: code1 = NIL; code2 = NE; break;
9350 case NE: code2 = NIL; break;
9352 default:
9353 abort ();
9357 * a < b =>
9358 * if (hi(a) < hi(b)) goto true;
9359 * if (hi(a) > hi(b)) goto false;
9360 * if (lo(a) < lo(b)) goto true;
9361 * false:
9364 ix86_compare_op0 = hi[0];
9365 ix86_compare_op1 = hi[1];
9367 if (code1 != NIL)
9368 ix86_expand_branch (code1, label);
9369 if (code2 != NIL)
9370 ix86_expand_branch (code2, label2);
9372 ix86_compare_op0 = lo[0];
9373 ix86_compare_op1 = lo[1];
9374 ix86_expand_branch (code3, label);
9376 if (code2 != NIL)
9377 emit_label (label2);
9378 return;
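/* Worked example (not from the original source): for a signed DImode
   "a <= b" on a 32-bit target, code1/code2/code3 become LT/GT/LEU, so
   the code above emits
       if (hi(a) <  hi(b)) goto label;     (signed)
       if (hi(a) >  hi(b)) goto label2;    (signed)
       if (lo(a) <= lo(b)) goto label;     (unsigned)
     label2:  */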
9381 default:
9382 abort ();
9386 /* Split branch based on floating point condition. */
9387 void
9388 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
9389 rtx target1, rtx target2, rtx tmp)
9391 rtx second, bypass;
9392 rtx label = NULL_RTX;
9393 rtx condition;
9394 int bypass_probability = -1, second_probability = -1, probability = -1;
9395 rtx i;
9397 if (target2 != pc_rtx)
9399 rtx tmp = target2;
9400 code = reverse_condition_maybe_unordered (code);
9401 target2 = target1;
9402 target1 = tmp;
9405 condition = ix86_expand_fp_compare (code, op1, op2,
9406 tmp, &second, &bypass);
9408 if (split_branch_probability >= 0)
9410 /* Distribute the probabilities across the jumps.
9411 Assume that BYPASS and SECOND always test
9412 for UNORDERED. */
9413 probability = split_branch_probability;
9415 /* A value of 1 is low enough that there is no need for the probability
9416 to be updated. Later we may run some experiments and see
9417 if unordered values are more frequent in practice. */
9418 if (bypass)
9419 bypass_probability = 1;
9420 if (second)
9421 second_probability = 1;
9423 if (bypass != NULL_RTX)
9425 label = gen_label_rtx ();
9426 i = emit_jump_insn (gen_rtx_SET
9427 (VOIDmode, pc_rtx,
9428 gen_rtx_IF_THEN_ELSE (VOIDmode,
9429 bypass,
9430 gen_rtx_LABEL_REF (VOIDmode,
9431 label),
9432 pc_rtx)));
9433 if (bypass_probability >= 0)
9434 REG_NOTES (i)
9435 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9436 GEN_INT (bypass_probability),
9437 REG_NOTES (i));
9439 i = emit_jump_insn (gen_rtx_SET
9440 (VOIDmode, pc_rtx,
9441 gen_rtx_IF_THEN_ELSE (VOIDmode,
9442 condition, target1, target2)));
9443 if (probability >= 0)
9444 REG_NOTES (i)
9445 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9446 GEN_INT (probability),
9447 REG_NOTES (i));
9448 if (second != NULL_RTX)
9450 i = emit_jump_insn (gen_rtx_SET
9451 (VOIDmode, pc_rtx,
9452 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
9453 target2)));
9454 if (second_probability >= 0)
9455 REG_NOTES (i)
9456 = gen_rtx_EXPR_LIST (REG_BR_PROB,
9457 GEN_INT (second_probability),
9458 REG_NOTES (i));
9460 if (label != NULL_RTX)
9461 emit_label (label);
9465 ix86_expand_setcc (enum rtx_code code, rtx dest)
9467 rtx ret, tmp, tmpreg, equiv;
9468 rtx second_test, bypass_test;
9470 if (GET_MODE (ix86_compare_op0) == DImode
9471 && !TARGET_64BIT)
9472 return 0; /* FAIL */
9474 if (GET_MODE (dest) != QImode)
9475 abort ();
9477 ret = ix86_expand_compare (code, &second_test, &bypass_test);
9478 PUT_MODE (ret, QImode);
9480 tmp = dest;
9481 tmpreg = dest;
9483 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
9484 if (bypass_test || second_test)
9486 rtx test = second_test;
9487 int bypass = 0;
9488 rtx tmp2 = gen_reg_rtx (QImode);
9489 if (bypass_test)
9491 if (second_test)
9492 abort ();
9493 test = bypass_test;
9494 bypass = 1;
9495 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
9497 PUT_MODE (test, QImode);
9498 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
9500 if (bypass)
9501 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
9502 else
9503 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
9506 /* Attach a REG_EQUAL note describing the comparison result. */
9507 equiv = simplify_gen_relational (code, QImode,
9508 GET_MODE (ix86_compare_op0),
9509 ix86_compare_op0, ix86_compare_op1);
9510 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
9512 return 1; /* DONE */
9515 /* Expand a comparison setting or clearing the carry flag. Return true when
9516 successful and set *pop to the comparison operation. */
9517 static bool
9518 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
9520 enum machine_mode mode =
9521 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
9523 /* Do not handle DImode compares that go through a special path. Also, we
9524 can't deal with FP compares yet; it would be possible to add this. */
9525 if ((mode == DImode && !TARGET_64BIT))
9526 return false;
9527 if (FLOAT_MODE_P (mode))
9529 rtx second_test = NULL, bypass_test = NULL;
9530 rtx compare_op, compare_seq;
9532 /* Shortcut: the following common codes never translate into carry flag compares. */
9533 if (code == EQ || code == NE || code == UNEQ || code == LTGT
9534 || code == ORDERED || code == UNORDERED)
9535 return false;
9537 /* These comparisons require the zero flag; swap the operands so they won't. */
9538 if ((code == GT || code == UNLE || code == LE || code == UNGT)
9539 && !TARGET_IEEE_FP)
9541 rtx tmp = op0;
9542 op0 = op1;
9543 op1 = tmp;
9544 code = swap_condition (code);
9547 /* Try to expand the comparison and verify that we end up with a carry flag
9548 based comparison. This fails to be true only when we decide to expand the
9549 comparison using arithmetic, which is not a common scenario. */
9550 start_sequence ();
9551 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
9552 &second_test, &bypass_test);
9553 compare_seq = get_insns ();
9554 end_sequence ();
9556 if (second_test || bypass_test)
9557 return false;
9558 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9559 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9560 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
9561 else
9562 code = GET_CODE (compare_op);
9563 if (code != LTU && code != GEU)
9564 return false;
9565 emit_insn (compare_seq);
9566 *pop = compare_op;
9567 return true;
9569 if (!INTEGRAL_MODE_P (mode))
9570 return false;
9571 switch (code)
9573 case LTU:
9574 case GEU:
9575 break;
9577 /* Convert a==0 into (unsigned)a<1. */
9578 case EQ:
9579 case NE:
9580 if (op1 != const0_rtx)
9581 return false;
9582 op1 = const1_rtx;
9583 code = (code == EQ ? LTU : GEU);
9584 break;
9586 /* Convert a>b into b<a or a>=b+1. */
9587 case GTU:
9588 case LEU:
9589 if (GET_CODE (op1) == CONST_INT)
9591 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
9592 /* Bail out on overflow. We could still swap the operands, but that
9593 would force loading the constant into a register. */
9594 if (op1 == const0_rtx
9595 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
9596 return false;
9597 code = (code == GTU ? GEU : LTU);
9599 else
9601 rtx tmp = op1;
9602 op1 = op0;
9603 op0 = tmp;
9604 code = (code == GTU ? LTU : GEU);
9606 break;
9608 /* Convert a>=0 into (unsigned)a<0x80000000. */
9609 case LT:
9610 case GE:
9611 if (mode == DImode || op1 != const0_rtx)
9612 return false;
9613 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9614 code = (code == LT ? GEU : LTU);
9615 break;
9616 case LE:
9617 case GT:
9618 if (mode == DImode || op1 != constm1_rtx)
9619 return false;
9620 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
9621 code = (code == LE ? GEU : LTU);
9622 break;
9624 default:
9625 return false;
9627 /* Swapping operands may cause a constant to appear as the first operand. */
9628 if (!nonimmediate_operand (op0, VOIDmode))
9630 if (no_new_pseudos)
9631 return false;
9632 op0 = force_reg (mode, op0);
9634 ix86_compare_op0 = op0;
9635 ix86_compare_op1 = op1;
9636 *pop = ix86_expand_compare (code, NULL, NULL);
9637 if (GET_CODE (*pop) != LTU && GET_CODE (*pop) != GEU)
9638 abort ();
9639 return true;
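/* Illustrative example (not from the original source): "a == 0" is rewritten
   above into the unsigned test "a < 1", which a single cmp materializes in
   the carry flag, so the movcc expander below can consume it directly in an
   sbb-based sequence.  */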
9643 ix86_expand_int_movcc (rtx operands[])
9645 enum rtx_code code = GET_CODE (operands[1]), compare_code;
9646 rtx compare_seq, compare_op;
9647 rtx second_test, bypass_test;
9648 enum machine_mode mode = GET_MODE (operands[0]);
9649 bool sign_bit_compare_p = false;
9651 start_sequence ();
9652 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
9653 compare_seq = get_insns ();
9654 end_sequence ();
9656 compare_code = GET_CODE (compare_op);
9658 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
9659 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
9660 sign_bit_compare_p = true;
9662 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
9663 HImode insns, we'd be swallowed in word prefix ops. */
9665 if ((mode != HImode || TARGET_FAST_PREFIX)
9666 && (mode != DImode || TARGET_64BIT)
9667 && GET_CODE (operands[2]) == CONST_INT
9668 && GET_CODE (operands[3]) == CONST_INT)
9670 rtx out = operands[0];
9671 HOST_WIDE_INT ct = INTVAL (operands[2]);
9672 HOST_WIDE_INT cf = INTVAL (operands[3]);
9673 HOST_WIDE_INT diff;
9675 diff = ct - cf;
9676 /* Sign bit compares are better done using shifts than by using
9677 sbb. */
9678 if (sign_bit_compare_p
9679 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
9680 ix86_compare_op1, &compare_op))
9682 /* Detect overlap between destination and compare sources. */
9683 rtx tmp = out;
9685 if (!sign_bit_compare_p)
9687 bool fpcmp = false;
9689 compare_code = GET_CODE (compare_op);
9691 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
9692 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
9694 fpcmp = true;
9695 compare_code = ix86_fp_compare_code_to_integer (compare_code);
9698 /* To simplify the rest of the code, restrict to the GEU case. */
9699 if (compare_code == LTU)
9701 HOST_WIDE_INT tmp = ct;
9702 ct = cf;
9703 cf = tmp;
9704 compare_code = reverse_condition (compare_code);
9705 code = reverse_condition (code);
9707 else
9709 if (fpcmp)
9710 PUT_CODE (compare_op,
9711 reverse_condition_maybe_unordered
9712 (GET_CODE (compare_op)));
9713 else
9714 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
9716 diff = ct - cf;
9718 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
9719 || reg_overlap_mentioned_p (out, ix86_compare_op1))
9720 tmp = gen_reg_rtx (mode);
9722 if (mode == DImode)
9723 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
9724 else
9725 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
9727 else
9729 if (code == GT || code == GE)
9730 code = reverse_condition (code);
9731 else
9733 HOST_WIDE_INT tmp = ct;
9734 ct = cf;
9735 cf = tmp;
9736 diff = ct - cf;
9738 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
9739 ix86_compare_op1, VOIDmode, 0, -1);
9742 if (diff == 1)
9745 * cmpl op0,op1
9746 * sbbl dest,dest
9747 * [addl dest, ct]
9749 * Size 5 - 8.
9751 if (ct)
9752 tmp = expand_simple_binop (mode, PLUS,
9753 tmp, GEN_INT (ct),
9754 copy_rtx (tmp), 1, OPTAB_DIRECT);
9756 else if (cf == -1)
9759 * cmpl op0,op1
9760 * sbbl dest,dest
9761 * orl $ct, dest
9763 * Size 8.
9765 tmp = expand_simple_binop (mode, IOR,
9766 tmp, GEN_INT (ct),
9767 copy_rtx (tmp), 1, OPTAB_DIRECT);
9769 else if (diff == -1 && ct)
9772 * cmpl op0,op1
9773 * sbbl dest,dest
9774 * notl dest
9775 * [addl dest, cf]
9777 * Size 8 - 11.
9779 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9780 if (cf)
9781 tmp = expand_simple_binop (mode, PLUS,
9782 copy_rtx (tmp), GEN_INT (cf),
9783 copy_rtx (tmp), 1, OPTAB_DIRECT);
9785 else
9788 * cmpl op0,op1
9789 * sbbl dest,dest
9790 * [notl dest]
9791 * andl cf - ct, dest
9792 * [addl dest, ct]
9794 * Size 8 - 11.
9797 if (cf == 0)
9799 cf = ct;
9800 ct = 0;
9801 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
9804 tmp = expand_simple_binop (mode, AND,
9805 copy_rtx (tmp),
9806 gen_int_mode (cf - ct, mode),
9807 copy_rtx (tmp), 1, OPTAB_DIRECT);
9808 if (ct)
9809 tmp = expand_simple_binop (mode, PLUS,
9810 copy_rtx (tmp), GEN_INT (ct),
9811 copy_rtx (tmp), 1, OPTAB_DIRECT);
9814 if (!rtx_equal_p (tmp, out))
9815 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
9817 return 1; /* DONE */
9820 if (diff < 0)
9822 HOST_WIDE_INT tmp;
9823 tmp = ct, ct = cf, cf = tmp;
9824 diff = -diff;
9825 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9827 /* We may be reversing an unordered compare to a normal compare, which
9828 is not valid in general (we may convert a non-trapping condition
9829 to a trapping one); however, on i386 we currently emit all
9830 comparisons unordered. */
9831 compare_code = reverse_condition_maybe_unordered (compare_code);
9832 code = reverse_condition_maybe_unordered (code);
9834 else
9836 compare_code = reverse_condition (compare_code);
9837 code = reverse_condition (code);
9841 compare_code = NIL;
9842 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
9843 && GET_CODE (ix86_compare_op1) == CONST_INT)
9845 if (ix86_compare_op1 == const0_rtx
9846 && (code == LT || code == GE))
9847 compare_code = code;
9848 else if (ix86_compare_op1 == constm1_rtx)
9850 if (code == LE)
9851 compare_code = LT;
9852 else if (code == GT)
9853 compare_code = GE;
9857 /* Optimize dest = (op0 < 0) ? -1 : cf. */
9858 if (compare_code != NIL
9859 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
9860 && (cf == -1 || ct == -1))
9862 /* If lea code below could be used, only optimize
9863 if it results in a 2 insn sequence. */
9865 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
9866 || diff == 3 || diff == 5 || diff == 9)
9867 || (compare_code == LT && ct == -1)
9868 || (compare_code == GE && cf == -1))
9871 * notl op1 (if necessary)
9872 * sarl $31, op1
9873 * orl cf, op1
9875 if (ct != -1)
9877 cf = ct;
9878 ct = -1;
9879 code = reverse_condition (code);
9882 out = emit_store_flag (out, code, ix86_compare_op0,
9883 ix86_compare_op1, VOIDmode, 0, -1);
9885 out = expand_simple_binop (mode, IOR,
9886 out, GEN_INT (cf),
9887 out, 1, OPTAB_DIRECT);
9888 if (out != operands[0])
9889 emit_move_insn (operands[0], out);
9891 return 1; /* DONE */
9896 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
9897 || diff == 3 || diff == 5 || diff == 9)
9898 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
9899 && (mode != DImode || x86_64_sign_extended_value (GEN_INT (cf))))
9902 * xorl dest,dest
9903 * cmpl op1,op2
9904 * setcc dest
9905 * lea cf(dest*(ct-cf)),dest
9907 * Size 14.
9909 * This also catches the degenerate setcc-only case.
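 *
 * Worked example (not from the original source): for ct = 5, cf = 2 the
 * difference is 3, so setcc leaves 0 or 1 in dest and
 * "lea 2(dest,dest,2), dest" produces 2 or 5 respectively.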
9912 rtx tmp;
9913 int nops;
9915 out = emit_store_flag (out, code, ix86_compare_op0,
9916 ix86_compare_op1, VOIDmode, 0, 1);
9918 nops = 0;
9919 /* On x86_64 the lea instruction operates on Pmode, so we need
9920 to get the arithmetic done in the proper mode to match. */
9921 if (diff == 1)
9922 tmp = copy_rtx (out);
9923 else
9925 rtx out1;
9926 out1 = copy_rtx (out);
9927 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
9928 nops++;
9929 if (diff & 1)
9931 tmp = gen_rtx_PLUS (mode, tmp, out1);
9932 nops++;
9935 if (cf != 0)
9937 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
9938 nops++;
9940 if (!rtx_equal_p (tmp, out))
9942 if (nops == 1)
9943 out = force_operand (tmp, copy_rtx (out));
9944 else
9945 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
9947 if (!rtx_equal_p (out, operands[0]))
9948 emit_move_insn (operands[0], copy_rtx (out));
9950 return 1; /* DONE */
9954 * General case: Jumpful:
9955 * xorl dest,dest cmpl op1, op2
9956 * cmpl op1, op2 movl ct, dest
9957 * setcc dest jcc 1f
9958 * decl dest movl cf, dest
9959 * andl (cf-ct),dest 1:
9960 * addl ct,dest
9962 * Size 20. Size 14.
9964 * This is reasonably steep, but branch mispredict costs are
9965 * high on modern cpus, so consider failing only if optimizing
9966 * for space.
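 *
 * Worked example (not from the original source): for ct = 7, cf = 3 the
 * jumpless sequence computes setcc -> 1/0, decl -> 0/-1, andl $-4 -> 0/-4,
 * addl $7 -> 7/3 for the true/false cases respectively.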
9969 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
9970 && BRANCH_COST >= 2)
9972 if (cf == 0)
9974 cf = ct;
9975 ct = 0;
9976 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
9977 /* We may be reversing an unordered compare to a normal compare,
9978 which is not valid in general (we may convert a non-trapping
9979 condition to a trapping one); however, on i386 we currently
9980 emit all comparisons unordered. */
9981 code = reverse_condition_maybe_unordered (code);
9982 else
9984 code = reverse_condition (code);
9985 if (compare_code != NIL)
9986 compare_code = reverse_condition (compare_code);
9990 if (compare_code != NIL)
9992 /* notl op1 (if needed)
9993 sarl $31, op1
9994 andl (cf-ct), op1
9995 addl ct, op1
9997 For x < 0 (resp. x <= -1) there will be no notl,
9998 so if possible swap the constants to get rid of the
9999 complement.
10000 True/false will be -1/0 while code below (store flag
10001 followed by decrement) is 0/-1, so the constants need
10002 to be exchanged once more. */
10004 if (compare_code == GE || !cf)
10006 code = reverse_condition (code);
10007 compare_code = LT;
10009 else
10011 HOST_WIDE_INT tmp = cf;
10012 cf = ct;
10013 ct = tmp;
10016 out = emit_store_flag (out, code, ix86_compare_op0,
10017 ix86_compare_op1, VOIDmode, 0, -1);
10019 else
10021 out = emit_store_flag (out, code, ix86_compare_op0,
10022 ix86_compare_op1, VOIDmode, 0, 1);
10024 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
10025 copy_rtx (out), 1, OPTAB_DIRECT);
10028 out = expand_simple_binop (mode, AND, copy_rtx (out),
10029 gen_int_mode (cf - ct, mode),
10030 copy_rtx (out), 1, OPTAB_DIRECT);
10031 if (ct)
10032 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
10033 copy_rtx (out), 1, OPTAB_DIRECT);
10034 if (!rtx_equal_p (out, operands[0]))
10035 emit_move_insn (operands[0], copy_rtx (out));
10037 return 1; /* DONE */
10041 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
10043 /* Try a few things more with specific constants and a variable. */
10045 optab op;
10046 rtx var, orig_out, out, tmp;
10048 if (BRANCH_COST <= 2)
10049 return 0; /* FAIL */
10051 /* If one of the two operands is an interesting constant, load a
10052 constant with the above and mask it in with a logical operation. */
10054 if (GET_CODE (operands[2]) == CONST_INT)
10056 var = operands[3];
10057 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
10058 operands[3] = constm1_rtx, op = and_optab;
10059 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
10060 operands[3] = const0_rtx, op = ior_optab;
10061 else
10062 return 0; /* FAIL */
10064 else if (GET_CODE (operands[3]) == CONST_INT)
10066 var = operands[2];
10067 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
10068 operands[2] = constm1_rtx, op = and_optab;
10069 	  else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
10070 operands[2] = const0_rtx, op = ior_optab;
10071 else
10072 return 0; /* FAIL */
10074 else
10075 return 0; /* FAIL */
10077 orig_out = operands[0];
10078 tmp = gen_reg_rtx (mode);
10079 operands[0] = tmp;
10081 /* Recurse to get the constant loaded. */
10082 if (ix86_expand_int_movcc (operands) == 0)
10083 return 0; /* FAIL */
10085 /* Mask in the interesting variable. */
10086 out = expand_binop (mode, op, var, tmp, orig_out, 0,
10087 OPTAB_WIDEN);
10088 if (!rtx_equal_p (out, orig_out))
10089 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
10091 return 1; /* DONE */
10095 * For comparison with above,
10097 * movl cf,dest
10098 * movl ct,tmp
10099 * cmpl op1,op2
10100 * cmovcc tmp,dest
10102 * Size 15.
10105 if (! nonimmediate_operand (operands[2], mode))
10106 operands[2] = force_reg (mode, operands[2]);
10107 if (! nonimmediate_operand (operands[3], mode))
10108 operands[3] = force_reg (mode, operands[3]);
10110 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10112 rtx tmp = gen_reg_rtx (mode);
10113 emit_move_insn (tmp, operands[3]);
10114 operands[3] = tmp;
10116 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10118 rtx tmp = gen_reg_rtx (mode);
10119 emit_move_insn (tmp, operands[2]);
10120 operands[2] = tmp;
10123 if (! register_operand (operands[2], VOIDmode)
10124 && (mode == QImode
10125 || ! register_operand (operands[3], VOIDmode)))
10126 operands[2] = force_reg (mode, operands[2]);
10128 if (mode == QImode
10129 && ! register_operand (operands[3], VOIDmode))
10130 operands[3] = force_reg (mode, operands[3]);
10132 emit_insn (compare_seq);
10133 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10134 gen_rtx_IF_THEN_ELSE (mode,
10135 compare_op, operands[2],
10136 operands[3])));
10137 if (bypass_test)
10138 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10139 gen_rtx_IF_THEN_ELSE (mode,
10140 bypass_test,
10141 copy_rtx (operands[3]),
10142 copy_rtx (operands[0]))));
10143 if (second_test)
10144 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
10145 gen_rtx_IF_THEN_ELSE (mode,
10146 second_test,
10147 copy_rtx (operands[2]),
10148 copy_rtx (operands[0]))));
10150 return 1; /* DONE */
10154 ix86_expand_fp_movcc (rtx operands[])
10156 enum rtx_code code;
10157 rtx tmp;
10158 rtx compare_op, second_test, bypass_test;
10160 /* For SF/DFmode conditional moves based on comparisons
10161 in same mode, we may want to use SSE min/max instructions. */
10162 if (((TARGET_SSE_MATH && GET_MODE (operands[0]) == SFmode)
10163 || (TARGET_SSE2 && TARGET_SSE_MATH && GET_MODE (operands[0]) == DFmode))
10164 && GET_MODE (ix86_compare_op0) == GET_MODE (operands[0])
10165       /* The SSE comparisons do not support the LTGT/UNEQ pair.  */
10166 && (!TARGET_IEEE_FP
10167 || (GET_CODE (operands[1]) != LTGT && GET_CODE (operands[1]) != UNEQ))
10168 /* We may be called from the post-reload splitter. */
10169 && (!REG_P (operands[0])
10170 || SSE_REG_P (operands[0])
10171 || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
10173 rtx op0 = ix86_compare_op0, op1 = ix86_compare_op1;
10174 code = GET_CODE (operands[1]);
10176 /* See if we have (cross) match between comparison operands and
10177 conditional move operands. */
10178 if (rtx_equal_p (operands[2], op1))
10180 rtx tmp = op0;
10181 op0 = op1;
10182 op1 = tmp;
10183 code = reverse_condition_maybe_unordered (code);
10185 if (rtx_equal_p (operands[2], op0) && rtx_equal_p (operands[3], op1))
10187 /* Check for min operation. */
10188 if (code == LT || code == UNLE)
10190 if (code == UNLE)
10192 rtx tmp = op0;
10193 op0 = op1;
10194 op1 = tmp;
10196 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
10197 if (memory_operand (op0, VOIDmode))
10198 op0 = force_reg (GET_MODE (operands[0]), op0);
10199 if (GET_MODE (operands[0]) == SFmode)
10200 emit_insn (gen_minsf3 (operands[0], op0, op1));
10201 else
10202 emit_insn (gen_mindf3 (operands[0], op0, op1));
10203 return 1;
10205 /* Check for max operation. */
10206 if (code == GT || code == UNGE)
10208 if (code == UNGE)
10210 rtx tmp = op0;
10211 op0 = op1;
10212 op1 = tmp;
10214 operands[0] = force_reg (GET_MODE (operands[0]), operands[0]);
10215 if (memory_operand (op0, VOIDmode))
10216 op0 = force_reg (GET_MODE (operands[0]), op0);
10217 if (GET_MODE (operands[0]) == SFmode)
10218 emit_insn (gen_maxsf3 (operands[0], op0, op1));
10219 else
10220 emit_insn (gen_maxdf3 (operands[0], op0, op1));
10221 return 1;
10224       /* Massage the condition into an sse_comparison_operator.  When not
10225 	 in IEEE mode, try to canonicalize the destination operand
10226 	 to be first in the comparison - this helps reload avoid extra
10227 	 moves.  */
10228 if (!sse_comparison_operator (operands[1], VOIDmode)
10229 || (rtx_equal_p (operands[0], ix86_compare_op1) && !TARGET_IEEE_FP))
10231 rtx tmp = ix86_compare_op0;
10232 ix86_compare_op0 = ix86_compare_op1;
10233 ix86_compare_op1 = tmp;
10234 operands[1] = gen_rtx_fmt_ee (swap_condition (GET_CODE (operands[1])),
10235 VOIDmode, ix86_compare_op0,
10236 ix86_compare_op1);
10238       /* Similarly, try to arrange for the result to be the first operand of
10239 	 the conditional move.  We also don't support the NE comparison on SSE,
10240 	 so try to avoid it.  */
10241 if ((rtx_equal_p (operands[0], operands[3])
10242 && (!TARGET_IEEE_FP || GET_CODE (operands[1]) != EQ))
10243 || (GET_CODE (operands[1]) == NE && TARGET_IEEE_FP))
10245 rtx tmp = operands[2];
10246 operands[2] = operands[3];
10247 operands[3] = tmp;
10248 operands[1] = gen_rtx_fmt_ee (reverse_condition_maybe_unordered
10249 (GET_CODE (operands[1])),
10250 VOIDmode, ix86_compare_op0,
10251 ix86_compare_op1);
10253 if (GET_MODE (operands[0]) == SFmode)
10254 emit_insn (gen_sse_movsfcc (operands[0], operands[1],
10255 operands[2], operands[3],
10256 ix86_compare_op0, ix86_compare_op1));
10257 else
10258 emit_insn (gen_sse_movdfcc (operands[0], operands[1],
10259 operands[2], operands[3],
10260 ix86_compare_op0, ix86_compare_op1));
10261 return 1;
10264 /* The floating point conditional move instructions don't directly
10265 support conditions resulting from a signed integer comparison. */
10267 code = GET_CODE (operands[1]);
10268 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10270 /* The floating point conditional move instructions don't directly
10271 support signed integer comparisons. */
10273 if (!fcmov_comparison_operator (compare_op, VOIDmode))
10275 if (second_test != NULL || bypass_test != NULL)
10276 abort ();
10277 tmp = gen_reg_rtx (QImode);
10278 ix86_expand_setcc (code, tmp);
10279 code = NE;
10280 ix86_compare_op0 = tmp;
10281 ix86_compare_op1 = const0_rtx;
10282 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
10284 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
10286 tmp = gen_reg_rtx (GET_MODE (operands[0]));
10287 emit_move_insn (tmp, operands[3]);
10288 operands[3] = tmp;
10290 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
10292 tmp = gen_reg_rtx (GET_MODE (operands[0]));
10293 emit_move_insn (tmp, operands[2]);
10294 operands[2] = tmp;
10297 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10298 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
10299 compare_op,
10300 operands[2],
10301 operands[3])));
10302 if (bypass_test)
10303 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10304 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
10305 bypass_test,
10306 operands[3],
10307 operands[0])));
10308 if (second_test)
10309 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
10310 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
10311 second_test,
10312 operands[2],
10313 operands[0])));
10315 return 1;
10318 /* Expand a conditional increment or decrement using adc/sbb instructions.
10319    The default case, using setcc followed by a conditional move, can be
10320    done by generic code.  */
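/* For example, for an unsigned comparison the statement r = y + (a < b)
   can typically be expanded here to a cmp followed by adc $0, r, with no
   setcc or branch needed.  */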
10322 ix86_expand_int_addcc (rtx operands[])
10324 enum rtx_code code = GET_CODE (operands[1]);
10325 rtx compare_op;
10326 rtx val = const0_rtx;
10327 bool fpcmp = false;
10328 enum machine_mode mode = GET_MODE (operands[0]);
10330 if (operands[3] != const1_rtx
10331 && operands[3] != constm1_rtx)
10332 return 0;
10333 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
10334 ix86_compare_op1, &compare_op))
10335 return 0;
10336 code = GET_CODE (compare_op);
10338 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10339 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10341 fpcmp = true;
10342 code = ix86_fp_compare_code_to_integer (code);
10345 if (code != LTU)
10347 val = constm1_rtx;
10348 if (fpcmp)
10349 PUT_CODE (compare_op,
10350 reverse_condition_maybe_unordered
10351 (GET_CODE (compare_op)));
10352 else
10353 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
10355 PUT_MODE (compare_op, mode);
10357 /* Construct either adc or sbb insn. */
10358 if ((code == LTU) == (operands[3] == constm1_rtx))
10360 switch (GET_MODE (operands[0]))
10362 case QImode:
10363 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
10364 break;
10365 case HImode:
10366 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
10367 break;
10368 case SImode:
10369 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
10370 break;
10371 case DImode:
10372 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10373 break;
10374 default:
10375 abort ();
10378 else
10380 switch (GET_MODE (operands[0]))
10382 case QImode:
10383 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
10384 break;
10385 case HImode:
10386 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
10387 break;
10388 case SImode:
10389 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
10390 break;
10391 case DImode:
10392 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
10393 break;
10394 default:
10395 abort ();
10398 return 1; /* DONE */
10402 /* Split operands 0 and 1 into SImode parts.  Similar to split_di, but
10403    works for floating point parameters and non-offsettable memories.
10404    For pushes, it returns just stack offsets; the values will be saved
10405    in the right order.  At most three parts are generated.  */
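/* For instance, on a 32-bit target a DFmode operand yields two SImode
   parts and an XFmode operand three, while on a 64-bit target XFmode and
   TFmode operands are split into a DImode part plus one upper part.  */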
10407 static int
10408 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
10410 int size;
10412 if (!TARGET_64BIT)
10413 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
10414 else
10415 size = (GET_MODE_SIZE (mode) + 4) / 8;
10417 if (GET_CODE (operand) == REG && MMX_REGNO_P (REGNO (operand)))
10418 abort ();
10419 if (size < 2 || size > 3)
10420 abort ();
10422 /* Optimize constant pool reference to immediates. This is used by fp
10423 moves, that force all constants to memory to allow combining. */
10424 if (GET_CODE (operand) == MEM && RTX_UNCHANGING_P (operand))
10426 rtx tmp = maybe_get_pool_constant (operand);
10427 if (tmp)
10428 operand = tmp;
10431 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
10433       /* The only non-offsettable memories we handle are pushes.  */
10434 if (! push_operand (operand, VOIDmode))
10435 abort ();
10437 operand = copy_rtx (operand);
10438 PUT_MODE (operand, Pmode);
10439 parts[0] = parts[1] = parts[2] = operand;
10441 else if (!TARGET_64BIT)
10443 if (mode == DImode)
10444 split_di (&operand, 1, &parts[0], &parts[1]);
10445 else
10447 if (REG_P (operand))
10449 if (!reload_completed)
10450 abort ();
10451 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
10452 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
10453 if (size == 3)
10454 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
10456 else if (offsettable_memref_p (operand))
10458 operand = adjust_address (operand, SImode, 0);
10459 parts[0] = operand;
10460 parts[1] = adjust_address (operand, SImode, 4);
10461 if (size == 3)
10462 parts[2] = adjust_address (operand, SImode, 8);
10464 else if (GET_CODE (operand) == CONST_DOUBLE)
10466 REAL_VALUE_TYPE r;
10467 long l[4];
10469 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10470 switch (mode)
10472 case XFmode:
10473 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
10474 parts[2] = gen_int_mode (l[2], SImode);
10475 break;
10476 case DFmode:
10477 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
10478 break;
10479 default:
10480 abort ();
10482 parts[1] = gen_int_mode (l[1], SImode);
10483 parts[0] = gen_int_mode (l[0], SImode);
10485 else
10486 abort ();
10489 else
10491 if (mode == TImode)
10492 split_ti (&operand, 1, &parts[0], &parts[1]);
10493 if (mode == XFmode || mode == TFmode)
10495 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
10496 if (REG_P (operand))
10498 if (!reload_completed)
10499 abort ();
10500 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
10501 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
10503 else if (offsettable_memref_p (operand))
10505 operand = adjust_address (operand, DImode, 0);
10506 parts[0] = operand;
10507 parts[1] = adjust_address (operand, upper_mode, 8);
10509 else if (GET_CODE (operand) == CONST_DOUBLE)
10511 REAL_VALUE_TYPE r;
10512 long l[3];
10514 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
10515 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
10516 /* Do not use shift by 32 to avoid warning on 32bit systems. */
10517 if (HOST_BITS_PER_WIDE_INT >= 64)
10518 parts[0]
10519 = gen_int_mode
10520 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
10521 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
10522 DImode);
10523 else
10524 parts[0] = immed_double_const (l[0], l[1], DImode);
10525 if (upper_mode == SImode)
10526 parts[1] = gen_int_mode (l[2], SImode);
10527 else if (HOST_BITS_PER_WIDE_INT >= 64)
10528 parts[1]
10529 = gen_int_mode
10530 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
10531 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
10532 DImode);
10533 else
10534 parts[1] = immed_double_const (l[2], l[3], DImode);
10536 else
10537 abort ();
10541 return size;
10544 /* Emit insns to perform a move or push of DI, DF, and XF values.
10545    Operands 2-4 receive the destination parts in the correct order;
10546    operands 5-7 receive the corresponding source parts.  */
10549 void
10550 ix86_split_long_move (rtx operands[])
10552 rtx part[2][3];
10553 int nparts;
10554 int push = 0;
10555 int collisions = 0;
10556 enum machine_mode mode = GET_MODE (operands[0]);
10558   /* The DFmode expanders may ask us to move a double.
10559      For a 64-bit target this is a single move.  By hiding that fact
10560      here we simplify the i386.md splitters.  */
10561 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
10563 /* Optimize constant pool reference to immediates. This is used by
10564 fp moves, that force all constants to memory to allow combining. */
10566 if (GET_CODE (operands[1]) == MEM
10567 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10568 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
10569 operands[1] = get_pool_constant (XEXP (operands[1], 0));
10570 if (push_operand (operands[0], VOIDmode))
10572 operands[0] = copy_rtx (operands[0]);
10573 PUT_MODE (operands[0], Pmode);
10575 else
10576 operands[0] = gen_lowpart (DImode, operands[0]);
10577 operands[1] = gen_lowpart (DImode, operands[1]);
10578 emit_move_insn (operands[0], operands[1]);
10579 return;
10582 /* The only non-offsettable memory we handle is push. */
10583 if (push_operand (operands[0], VOIDmode))
10584 push = 1;
10585 else if (GET_CODE (operands[0]) == MEM
10586 && ! offsettable_memref_p (operands[0]))
10587 abort ();
10589 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
10590 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
10592 /* When emitting push, take care for source operands on the stack. */
10593 if (push && GET_CODE (operands[1]) == MEM
10594 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
10596 if (nparts == 3)
10597 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
10598 XEXP (part[1][2], 0));
10599 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
10600 XEXP (part[1][1], 0));
10603 /* We need to do copy in the right order in case an address register
10604 of the source overlaps the destination. */
10605 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
10607 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
10608 collisions++;
10609 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10610 collisions++;
10611 if (nparts == 3
10612 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
10613 collisions++;
10615 /* Collision in the middle part can be handled by reordering. */
10616 if (collisions == 1 && nparts == 3
10617 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
10619 rtx tmp;
10620 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
10621 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
10624   /* If there are more collisions, we can't handle them by reordering.
10625      Do an lea to the last part and use only one colliding move.  */
10626 else if (collisions > 1)
10628 rtx base;
10630 collisions = 1;
10632 base = part[0][nparts - 1];
10634 /* Handle the case when the last part isn't valid for lea.
10635 Happens in 64-bit mode storing the 12-byte XFmode. */
10636 if (GET_MODE (base) != Pmode)
10637 base = gen_rtx_REG (Pmode, REGNO (base));
10639 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
10640 part[1][0] = replace_equiv_address (part[1][0], base);
10641 part[1][1] = replace_equiv_address (part[1][1],
10642 plus_constant (base, UNITS_PER_WORD));
10643 if (nparts == 3)
10644 part[1][2] = replace_equiv_address (part[1][2],
10645 plus_constant (base, 8));
10649 if (push)
10651 if (!TARGET_64BIT)
10653 if (nparts == 3)
10655 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
10656 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
10657 emit_move_insn (part[0][2], part[1][2]);
10660 else
10662 	  /* In 64-bit mode we don't have a 32-bit push available.  If this is a
10663 	     register, that is OK - we will just use the larger counterpart.  We
10664 	     also retype the memory - this comes from an attempt to avoid the
10665 	     REX prefix on moving the second half of a TFmode value.  */
10666 if (GET_MODE (part[1][1]) == SImode)
10668 if (GET_CODE (part[1][1]) == MEM)
10669 part[1][1] = adjust_address (part[1][1], DImode, 0);
10670 else if (REG_P (part[1][1]))
10671 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
10672 else
10673 abort ();
10674 if (GET_MODE (part[1][0]) == SImode)
10675 part[1][0] = part[1][1];
10678 emit_move_insn (part[0][1], part[1][1]);
10679 emit_move_insn (part[0][0], part[1][0]);
10680 return;
10683 /* Choose correct order to not overwrite the source before it is copied. */
10684 if ((REG_P (part[0][0])
10685 && REG_P (part[1][1])
10686 && (REGNO (part[0][0]) == REGNO (part[1][1])
10687 || (nparts == 3
10688 && REGNO (part[0][0]) == REGNO (part[1][2]))))
10689 || (collisions > 0
10690 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
10692 if (nparts == 3)
10694 operands[2] = part[0][2];
10695 operands[3] = part[0][1];
10696 operands[4] = part[0][0];
10697 operands[5] = part[1][2];
10698 operands[6] = part[1][1];
10699 operands[7] = part[1][0];
10701 else
10703 operands[2] = part[0][1];
10704 operands[3] = part[0][0];
10705 operands[5] = part[1][1];
10706 operands[6] = part[1][0];
10709 else
10711 if (nparts == 3)
10713 operands[2] = part[0][0];
10714 operands[3] = part[0][1];
10715 operands[4] = part[0][2];
10716 operands[5] = part[1][0];
10717 operands[6] = part[1][1];
10718 operands[7] = part[1][2];
10720 else
10722 operands[2] = part[0][0];
10723 operands[3] = part[0][1];
10724 operands[5] = part[1][0];
10725 operands[6] = part[1][1];
10728 emit_move_insn (operands[2], operands[5]);
10729 emit_move_insn (operands[3], operands[6]);
10730 if (nparts == 3)
10731 emit_move_insn (operands[4], operands[7]);
10733 return;
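/* Split a DImode left shift (operand 0 = operand 1 << operand 2) into
   SImode operations, using shld/shl for the in-range case and, for a
   variable count, a fixup (x86_shift_adj_1/2) for counts of 32 or more.  */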
10736 void
10737 ix86_split_ashldi (rtx *operands, rtx scratch)
10739 rtx low[2], high[2];
10740 int count;
10742 if (GET_CODE (operands[2]) == CONST_INT)
10744 split_di (operands, 2, low, high);
10745 count = INTVAL (operands[2]) & 63;
10747 if (count >= 32)
10749 emit_move_insn (high[0], low[1]);
10750 emit_move_insn (low[0], const0_rtx);
10752 if (count > 32)
10753 emit_insn (gen_ashlsi3 (high[0], high[0], GEN_INT (count - 32)));
10755 else
10757 if (!rtx_equal_p (operands[0], operands[1]))
10758 emit_move_insn (operands[0], operands[1]);
10759 emit_insn (gen_x86_shld_1 (high[0], low[0], GEN_INT (count)));
10760 emit_insn (gen_ashlsi3 (low[0], low[0], GEN_INT (count)));
10763 else
10765 if (!rtx_equal_p (operands[0], operands[1]))
10766 emit_move_insn (operands[0], operands[1]);
10768 split_di (operands, 1, low, high);
10770 emit_insn (gen_x86_shld_1 (high[0], low[0], operands[2]));
10771 emit_insn (gen_ashlsi3 (low[0], low[0], operands[2]));
10773 if (TARGET_CMOVE && (! no_new_pseudos || scratch))
10775 if (! no_new_pseudos)
10776 scratch = force_reg (SImode, const0_rtx);
10777 else
10778 emit_move_insn (scratch, const0_rtx);
10780 emit_insn (gen_x86_shift_adj_1 (high[0], low[0], operands[2],
10781 scratch));
10783 else
10784 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
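/* Split a DImode arithmetic right shift into SImode operations, using
   shrd/sar and, for a variable count, a sign-filling fixup for counts of
   32 or more.  */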
10788 void
10789 ix86_split_ashrdi (rtx *operands, rtx scratch)
10791 rtx low[2], high[2];
10792 int count;
10794 if (GET_CODE (operands[2]) == CONST_INT)
10796 split_di (operands, 2, low, high);
10797 count = INTVAL (operands[2]) & 63;
10799 if (count >= 32)
10801 emit_move_insn (low[0], high[1]);
10803 if (! reload_completed)
10804 emit_insn (gen_ashrsi3 (high[0], low[0], GEN_INT (31)));
10805 else
10807 emit_move_insn (high[0], low[0]);
10808 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (31)));
10811 if (count > 32)
10812 emit_insn (gen_ashrsi3 (low[0], low[0], GEN_INT (count - 32)));
10814 else
10816 if (!rtx_equal_p (operands[0], operands[1]))
10817 emit_move_insn (operands[0], operands[1]);
10818 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10819 emit_insn (gen_ashrsi3 (high[0], high[0], GEN_INT (count)));
10822 else
10824 if (!rtx_equal_p (operands[0], operands[1]))
10825 emit_move_insn (operands[0], operands[1]);
10827 split_di (operands, 1, low, high);
10829 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10830 emit_insn (gen_ashrsi3 (high[0], high[0], operands[2]));
10832 if (TARGET_CMOVE && (! no_new_pseudos || scratch))
10834 if (! no_new_pseudos)
10835 scratch = gen_reg_rtx (SImode);
10836 emit_move_insn (scratch, high[0]);
10837 emit_insn (gen_ashrsi3 (scratch, scratch, GEN_INT (31)));
10838 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10839 scratch));
10841 else
10842 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
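/* Split a DImode logical right shift into SImode operations, using
   shrd/shr and, for a variable count, a zero-filling fixup for counts of
   32 or more.  */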
10846 void
10847 ix86_split_lshrdi (rtx *operands, rtx scratch)
10849 rtx low[2], high[2];
10850 int count;
10852 if (GET_CODE (operands[2]) == CONST_INT)
10854 split_di (operands, 2, low, high);
10855 count = INTVAL (operands[2]) & 63;
10857 if (count >= 32)
10859 emit_move_insn (low[0], high[1]);
10860 emit_move_insn (high[0], const0_rtx);
10862 if (count > 32)
10863 emit_insn (gen_lshrsi3 (low[0], low[0], GEN_INT (count - 32)));
10865 else
10867 if (!rtx_equal_p (operands[0], operands[1]))
10868 emit_move_insn (operands[0], operands[1]);
10869 emit_insn (gen_x86_shrd_1 (low[0], high[0], GEN_INT (count)));
10870 emit_insn (gen_lshrsi3 (high[0], high[0], GEN_INT (count)));
10873 else
10875 if (!rtx_equal_p (operands[0], operands[1]))
10876 emit_move_insn (operands[0], operands[1]);
10878 split_di (operands, 1, low, high);
10880 emit_insn (gen_x86_shrd_1 (low[0], high[0], operands[2]));
10881 emit_insn (gen_lshrsi3 (high[0], high[0], operands[2]));
10883 /* Heh. By reversing the arguments, we can reuse this pattern. */
10884 if (TARGET_CMOVE && (! no_new_pseudos || scratch))
10886 if (! no_new_pseudos)
10887 scratch = force_reg (SImode, const0_rtx);
10888 else
10889 emit_move_insn (scratch, const0_rtx);
10891 emit_insn (gen_x86_shift_adj_1 (low[0], high[0], operands[2],
10892 scratch));
10894 else
10895 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
10899 /* Helper function for the string operations below.  Test whether
10900    (VARIABLE & VALUE) is zero; if so, jump to the label that is returned.  */
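/* For example, ix86_expand_aligntest (destreg, 1) returns a label that is
   branched to when destreg is already even, so the single-byte fixup the
   caller emits before that label is skipped for aligned pointers.  */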
10901 static rtx
10902 ix86_expand_aligntest (rtx variable, int value)
10904 rtx label = gen_label_rtx ();
10905 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
10906 if (GET_MODE (variable) == DImode)
10907 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
10908 else
10909 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
10910 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
10911 1, label);
10912 return label;
10915 /* Decrease COUNTREG by VALUE.  */
10916 static void
10917 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
10919 if (GET_MODE (countreg) == DImode)
10920 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
10921 else
10922 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
10925 /* Zero-extend EXP, which may be in SImode, into a Pmode register.  */
10927 ix86_zero_extend_to_Pmode (rtx exp)
10929 rtx r;
10930 if (GET_MODE (exp) == VOIDmode)
10931 return force_reg (Pmode, exp);
10932 if (GET_MODE (exp) == Pmode)
10933 return copy_to_mode_reg (Pmode, exp);
10934 r = gen_reg_rtx (Pmode);
10935 emit_insn (gen_zero_extendsidi2 (r, exp));
10936 return r;
10939 /* Expand string move (memcpy) operation. Use i386 string operations when
10940 profitable. expand_clrstr contains similar code. */
10942 ix86_expand_movstr (rtx dst, rtx src, rtx count_exp, rtx align_exp)
10944 rtx srcreg, destreg, countreg;
10945 enum machine_mode counter_mode;
10946 HOST_WIDE_INT align = 0;
10947 unsigned HOST_WIDE_INT count = 0;
10948 rtx insns;
10950 if (GET_CODE (align_exp) == CONST_INT)
10951 align = INTVAL (align_exp);
10953 /* Can't use any of this if the user has appropriated esi or edi. */
10954 if (global_regs[4] || global_regs[5])
10955 return 0;
10957 /* This simple hack avoids all inlining code and simplifies code below. */
10958 if (!TARGET_ALIGN_STRINGOPS)
10959 align = 64;
10961 if (GET_CODE (count_exp) == CONST_INT)
10963 count = INTVAL (count_exp);
10964 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
10965 return 0;
10968   /* Figure out the proper mode for the counter.  For 32-bit code it is always
10969      SImode; for 64-bit code use SImode when possible, otherwise DImode.
10970      Set count to the number of bytes copied when known at compile time.  */
10971 if (!TARGET_64BIT || GET_MODE (count_exp) == SImode
10972 || x86_64_zero_extended_value (count_exp))
10973 counter_mode = SImode;
10974 else
10975 counter_mode = DImode;
10977 start_sequence ();
10979 if (counter_mode != SImode && counter_mode != DImode)
10980 abort ();
10982 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
10983 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
10985 emit_insn (gen_cld ());
10987 /* When optimizing for size emit simple rep ; movsb instruction for
10988 counts not divisible by 4. */
10990 if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
10992 countreg = ix86_zero_extend_to_Pmode (count_exp);
10993 if (TARGET_64BIT)
10994 emit_insn (gen_rep_movqi_rex64 (destreg, srcreg, countreg,
10995 destreg, srcreg, countreg));
10996 else
10997 emit_insn (gen_rep_movqi (destreg, srcreg, countreg,
10998 destreg, srcreg, countreg));
11001 /* For constant aligned (or small unaligned) copies use rep movsl
11002 followed by code copying the rest. For PentiumPro ensure 8 byte
11003 alignment to allow rep movsl acceleration. */
11005 else if (count != 0
11006 && (align >= 8
11007 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11008 || optimize_size || count < (unsigned int) 64))
11010 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11011 if (count & ~(size - 1))
11013 countreg = copy_to_mode_reg (counter_mode,
11014 GEN_INT ((count >> (size == 4 ? 2 : 3))
11015 & (TARGET_64BIT ? -1 : 0x3fffffff)));
11016 countreg = ix86_zero_extend_to_Pmode (countreg);
11017 if (size == 4)
11019 if (TARGET_64BIT)
11020 emit_insn (gen_rep_movsi_rex64 (destreg, srcreg, countreg,
11021 destreg, srcreg, countreg));
11022 else
11023 emit_insn (gen_rep_movsi (destreg, srcreg, countreg,
11024 destreg, srcreg, countreg));
11026 else
11027 emit_insn (gen_rep_movdi_rex64 (destreg, srcreg, countreg,
11028 destreg, srcreg, countreg));
11030 if (size == 8 && (count & 0x04))
11031 emit_insn (gen_strmovsi (destreg, srcreg));
11032 if (count & 0x02)
11033 emit_insn (gen_strmovhi (destreg, srcreg));
11034 if (count & 0x01)
11035 emit_insn (gen_strmovqi (destreg, srcreg));
11037 /* The generic code based on the glibc implementation:
11038 - align destination to 4 bytes (8 byte alignment is used for PentiumPro
11039 allowing accelerated copying there)
11040 - copy the data using rep movsl
11041 - copy the rest. */
11042 else
11044 rtx countreg2;
11045 rtx label = NULL;
11046 int desired_alignment = (TARGET_PENTIUMPRO
11047 && (count == 0 || count >= (unsigned int) 260)
11048 ? 8 : UNITS_PER_WORD);
11050       /* In case we don't know anything about the alignment, default to the
11051 	 library version, since it is usually equally fast and results in
11052 	 shorter code.
11054 	 Also emit the call when we know that the count is large and the call
11055 	 overhead will not be important.  */
11056 if (!TARGET_INLINE_ALL_STRINGOPS
11057 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11059 end_sequence ();
11060 return 0;
11063 if (TARGET_SINGLE_STRINGOP)
11064 emit_insn (gen_cld ());
11066 countreg2 = gen_reg_rtx (Pmode);
11067 countreg = copy_to_mode_reg (counter_mode, count_exp);
11069       /* We don't use loops to align the destination or to copy parts smaller
11070 	 than 4 bytes, because gcc is able to optimize such code better (in
11071 	 the case the destination or the count really is aligned, gcc is often
11072 	 able to predict the branches) and also it is friendlier to the
11073 	 hardware branch predictor.
11075 	 Using loops would be beneficial for the generic case, because we could
11076 	 handle small counts using the loops.  Many CPUs (such as Athlon)
11077 	 have large REP prefix setup costs, though,
11079 	 which makes this quite costly.  Maybe we can revisit this decision later
11080 	 or add some customizability to this code.  */
11082 if (count == 0 && align < desired_alignment)
11084 label = gen_label_rtx ();
11085 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11086 LEU, 0, counter_mode, 1, label);
11088 if (align <= 1)
11090 rtx label = ix86_expand_aligntest (destreg, 1);
11091 emit_insn (gen_strmovqi (destreg, srcreg));
11092 ix86_adjust_counter (countreg, 1);
11093 emit_label (label);
11094 LABEL_NUSES (label) = 1;
11096 if (align <= 2)
11098 rtx label = ix86_expand_aligntest (destreg, 2);
11099 emit_insn (gen_strmovhi (destreg, srcreg));
11100 ix86_adjust_counter (countreg, 2);
11101 emit_label (label);
11102 LABEL_NUSES (label) = 1;
11104 if (align <= 4 && desired_alignment > 4)
11106 rtx label = ix86_expand_aligntest (destreg, 4);
11107 emit_insn (gen_strmovsi (destreg, srcreg));
11108 ix86_adjust_counter (countreg, 4);
11109 emit_label (label);
11110 LABEL_NUSES (label) = 1;
11113 if (label && desired_alignment > 4 && !TARGET_64BIT)
11115 emit_label (label);
11116 LABEL_NUSES (label) = 1;
11117 label = NULL_RTX;
11119 if (!TARGET_SINGLE_STRINGOP)
11120 emit_insn (gen_cld ());
11121 if (TARGET_64BIT)
11123 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11124 GEN_INT (3)));
11125 emit_insn (gen_rep_movdi_rex64 (destreg, srcreg, countreg2,
11126 destreg, srcreg, countreg2));
11128 else
11130 emit_insn (gen_lshrsi3 (countreg2, countreg, GEN_INT (2)));
11131 emit_insn (gen_rep_movsi (destreg, srcreg, countreg2,
11132 destreg, srcreg, countreg2));
11135 if (label)
11137 emit_label (label);
11138 LABEL_NUSES (label) = 1;
11140 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11141 emit_insn (gen_strmovsi (destreg, srcreg));
11142 if ((align <= 4 || count == 0) && TARGET_64BIT)
11144 rtx label = ix86_expand_aligntest (countreg, 4);
11145 emit_insn (gen_strmovsi (destreg, srcreg));
11146 emit_label (label);
11147 LABEL_NUSES (label) = 1;
11149 if (align > 2 && count != 0 && (count & 2))
11150 emit_insn (gen_strmovhi (destreg, srcreg));
11151 if (align <= 2 || count == 0)
11153 rtx label = ix86_expand_aligntest (countreg, 2);
11154 emit_insn (gen_strmovhi (destreg, srcreg));
11155 emit_label (label);
11156 LABEL_NUSES (label) = 1;
11158 if (align > 1 && count != 0 && (count & 1))
11159 emit_insn (gen_strmovqi (destreg, srcreg));
11160 if (align <= 1 || count == 0)
11162 rtx label = ix86_expand_aligntest (countreg, 1);
11163 emit_insn (gen_strmovqi (destreg, srcreg));
11164 emit_label (label);
11165 LABEL_NUSES (label) = 1;
11169 insns = get_insns ();
11170 end_sequence ();
11172 ix86_set_move_mem_attrs (insns, dst, src, destreg, srcreg);
11173 emit_insn (insns);
11174 return 1;
11177 /* Expand string clear operation (bzero). Use i386 string operations when
11178 profitable. expand_movstr contains similar code. */
11180 ix86_expand_clrstr (rtx src, rtx count_exp, rtx align_exp)
11182 rtx destreg, zeroreg, countreg;
11183 enum machine_mode counter_mode;
11184 HOST_WIDE_INT align = 0;
11185 unsigned HOST_WIDE_INT count = 0;
11187 if (GET_CODE (align_exp) == CONST_INT)
11188 align = INTVAL (align_exp);
11190 /* Can't use any of this if the user has appropriated esi. */
11191 if (global_regs[4])
11192 return 0;
11194 /* This simple hack avoids all inlining code and simplifies code below. */
11195 if (!TARGET_ALIGN_STRINGOPS)
11196 align = 32;
11198 if (GET_CODE (count_exp) == CONST_INT)
11200 count = INTVAL (count_exp);
11201 if (!TARGET_INLINE_ALL_STRINGOPS && count > 64)
11202 return 0;
11204   /* Figure out the proper mode for the counter.  For 32-bit code it is always
11205      SImode; for 64-bit code use SImode when possible, otherwise DImode.
11206      Set count to the number of bytes to clear when known at compile time.  */
11207 if (!TARGET_64BIT || GET_MODE (count_exp) == SImode
11208 || x86_64_zero_extended_value (count_exp))
11209 counter_mode = SImode;
11210 else
11211 counter_mode = DImode;
11213 destreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
11215 emit_insn (gen_cld ());
11217   /* When optimizing for size, emit a simple rep ; stosb instruction for
11218      counts not divisible by 4.  */
11220 if ((!optimize || optimize_size) && (count == 0 || (count & 0x03)))
11222 countreg = ix86_zero_extend_to_Pmode (count_exp);
11223 zeroreg = copy_to_mode_reg (QImode, const0_rtx);
11224 if (TARGET_64BIT)
11225 emit_insn (gen_rep_stosqi_rex64 (destreg, countreg, zeroreg,
11226 destreg, countreg));
11227 else
11228 emit_insn (gen_rep_stosqi (destreg, countreg, zeroreg,
11229 destreg, countreg));
11231 else if (count != 0
11232 && (align >= 8
11233 || (!TARGET_PENTIUMPRO && !TARGET_64BIT && align >= 4)
11234 || optimize_size || count < (unsigned int) 64))
11236 int size = TARGET_64BIT && !optimize_size ? 8 : 4;
11237 zeroreg = copy_to_mode_reg (size == 4 ? SImode : DImode, const0_rtx);
11238 if (count & ~(size - 1))
11240 countreg = copy_to_mode_reg (counter_mode,
11241 GEN_INT ((count >> (size == 4 ? 2 : 3))
11242 & (TARGET_64BIT ? -1 : 0x3fffffff)));
11243 countreg = ix86_zero_extend_to_Pmode (countreg);
11244 if (size == 4)
11246 if (TARGET_64BIT)
11247 emit_insn (gen_rep_stossi_rex64 (destreg, countreg, zeroreg,
11248 destreg, countreg));
11249 else
11250 emit_insn (gen_rep_stossi (destreg, countreg, zeroreg,
11251 destreg, countreg));
11253 else
11254 emit_insn (gen_rep_stosdi_rex64 (destreg, countreg, zeroreg,
11255 destreg, countreg));
11257 if (size == 8 && (count & 0x04))
11258 emit_insn (gen_strsetsi (destreg,
11259 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11260 if (count & 0x02)
11261 emit_insn (gen_strsethi (destreg,
11262 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11263 if (count & 0x01)
11264 emit_insn (gen_strsetqi (destreg,
11265 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11267 else
11269 rtx countreg2;
11270 rtx label = NULL;
11271 /* Compute desired alignment of the string operation. */
11272 int desired_alignment = (TARGET_PENTIUMPRO
11273 && (count == 0 || count >= (unsigned int) 260)
11274 ? 8 : UNITS_PER_WORD);
11276       /* In case we don't know anything about the alignment, default to the
11277 	 library version, since it is usually equally fast and results in
11278 	 shorter code.
11280 	 Also emit the call when we know that the count is large and the call
11281 	 overhead will not be important.  */
11282 if (!TARGET_INLINE_ALL_STRINGOPS
11283 && (align < UNITS_PER_WORD || !TARGET_REP_MOVL_OPTIMAL))
11284 return 0;
11286 if (TARGET_SINGLE_STRINGOP)
11287 emit_insn (gen_cld ());
11289 countreg2 = gen_reg_rtx (Pmode);
11290 countreg = copy_to_mode_reg (counter_mode, count_exp);
11291 zeroreg = copy_to_mode_reg (Pmode, const0_rtx);
11293 if (count == 0 && align < desired_alignment)
11295 label = gen_label_rtx ();
11296 emit_cmp_and_jump_insns (countreg, GEN_INT (desired_alignment - 1),
11297 LEU, 0, counter_mode, 1, label);
11299 if (align <= 1)
11301 rtx label = ix86_expand_aligntest (destreg, 1);
11302 emit_insn (gen_strsetqi (destreg,
11303 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11304 ix86_adjust_counter (countreg, 1);
11305 emit_label (label);
11306 LABEL_NUSES (label) = 1;
11308 if (align <= 2)
11310 rtx label = ix86_expand_aligntest (destreg, 2);
11311 emit_insn (gen_strsethi (destreg,
11312 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11313 ix86_adjust_counter (countreg, 2);
11314 emit_label (label);
11315 LABEL_NUSES (label) = 1;
11317 if (align <= 4 && desired_alignment > 4)
11319 rtx label = ix86_expand_aligntest (destreg, 4);
11320 emit_insn (gen_strsetsi (destreg, (TARGET_64BIT
11321 ? gen_rtx_SUBREG (SImode, zeroreg, 0)
11322 : zeroreg)));
11323 ix86_adjust_counter (countreg, 4);
11324 emit_label (label);
11325 LABEL_NUSES (label) = 1;
11328 if (label && desired_alignment > 4 && !TARGET_64BIT)
11330 emit_label (label);
11331 LABEL_NUSES (label) = 1;
11332 label = NULL_RTX;
11335 if (!TARGET_SINGLE_STRINGOP)
11336 emit_insn (gen_cld ());
11337 if (TARGET_64BIT)
11339 emit_insn (gen_lshrdi3 (countreg2, ix86_zero_extend_to_Pmode (countreg),
11340 GEN_INT (3)));
11341 emit_insn (gen_rep_stosdi_rex64 (destreg, countreg2, zeroreg,
11342 destreg, countreg2));
11344 else
11346 emit_insn (gen_lshrsi3 (countreg2, countreg, GEN_INT (2)));
11347 emit_insn (gen_rep_stossi (destreg, countreg2, zeroreg,
11348 destreg, countreg2));
11350 if (label)
11352 emit_label (label);
11353 LABEL_NUSES (label) = 1;
11356 if (TARGET_64BIT && align > 4 && count != 0 && (count & 4))
11357 emit_insn (gen_strsetsi (destreg,
11358 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11359 if (TARGET_64BIT && (align <= 4 || count == 0))
11361 rtx label = ix86_expand_aligntest (countreg, 4);
11362 emit_insn (gen_strsetsi (destreg,
11363 gen_rtx_SUBREG (SImode, zeroreg, 0)));
11364 emit_label (label);
11365 LABEL_NUSES (label) = 1;
11367 if (align > 2 && count != 0 && (count & 2))
11368 emit_insn (gen_strsethi (destreg,
11369 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11370 if (align <= 2 || count == 0)
11372 rtx label = ix86_expand_aligntest (countreg, 2);
11373 emit_insn (gen_strsethi (destreg,
11374 gen_rtx_SUBREG (HImode, zeroreg, 0)));
11375 emit_label (label);
11376 LABEL_NUSES (label) = 1;
11378 if (align > 1 && count != 0 && (count & 1))
11379 emit_insn (gen_strsetqi (destreg,
11380 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11381 if (align <= 1 || count == 0)
11383 rtx label = ix86_expand_aligntest (countreg, 1);
11384 emit_insn (gen_strsetqi (destreg,
11385 gen_rtx_SUBREG (QImode, zeroreg, 0)));
11386 emit_label (label);
11387 LABEL_NUSES (label) = 1;
11390 return 1;
11392 /* Expand strlen. */
11394 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
11396 rtx addr, scratch1, scratch2, scratch3, scratch4;
11398   /* The generic case of the strlen expander is long.  Avoid expanding it
11399      unless TARGET_INLINE_ALL_STRINGOPS.  */
11401 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11402 && !TARGET_INLINE_ALL_STRINGOPS
11403 && !optimize_size
11404 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
11405 return 0;
11407 addr = force_reg (Pmode, XEXP (src, 0));
11408 scratch1 = gen_reg_rtx (Pmode);
11410 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
11411 && !optimize_size)
11413       /* Well, it seems that some optimizer does not combine a call like
11414 	 foo(strlen(bar), strlen(bar));
11415 	 when the move and the subtraction are done here.  It does calculate
11416 	 the length just once when these instructions are done inside of
11417 	 output_strlen_unroll().  But I think that since &bar[strlen(bar)] is
11418 	 often used, and I use one fewer register for the lifetime of
11419 	 output_strlen_unroll(), this is better.  */
11421 emit_move_insn (out, addr);
11423 ix86_expand_strlensi_unroll_1 (out, align);
11425 /* strlensi_unroll_1 returns the address of the zero at the end of
11426 the string, like memchr(), so compute the length by subtracting
11427 the start address. */
11428 if (TARGET_64BIT)
11429 emit_insn (gen_subdi3 (out, out, addr));
11430 else
11431 emit_insn (gen_subsi3 (out, out, addr));
11433 else
11435 scratch2 = gen_reg_rtx (Pmode);
11436 scratch3 = gen_reg_rtx (Pmode);
11437 scratch4 = force_reg (Pmode, constm1_rtx);
11439 emit_move_insn (scratch3, addr);
11440 eoschar = force_reg (QImode, eoschar);
11442 emit_insn (gen_cld ());
11443 if (TARGET_64BIT)
11445 emit_insn (gen_strlenqi_rex_1 (scratch1, scratch3, eoschar,
11446 align, scratch4, scratch3));
11447 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
11448 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
11450 else
11452 emit_insn (gen_strlenqi_1 (scratch1, scratch3, eoschar,
11453 align, scratch4, scratch3));
11454 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
11455 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
11458 return 1;
11461 /* Expand the appropriate insns for doing strlen if not just doing
11462    repnz; scasb
11464    out = result, initialized with the start address
11465    align_rtx = alignment of the address.
11466    scratch = scratch register, initialized with the start address when
11467 	not aligned, otherwise undefined
11469    This is just the body.  It needs the initializations mentioned above and
11470    some address computation at the end.  These things are done in i386.md.  */
11472 static void
11473 ix86_expand_strlensi_unroll_1 (rtx out, rtx align_rtx)
11475 int align;
11476 rtx tmp;
11477 rtx align_2_label = NULL_RTX;
11478 rtx align_3_label = NULL_RTX;
11479 rtx align_4_label = gen_label_rtx ();
11480 rtx end_0_label = gen_label_rtx ();
11481 rtx mem;
11482 rtx tmpreg = gen_reg_rtx (SImode);
11483 rtx scratch = gen_reg_rtx (SImode);
11484 rtx cmp;
11486 align = 0;
11487 if (GET_CODE (align_rtx) == CONST_INT)
11488 align = INTVAL (align_rtx);
11490 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
11492 /* Is there a known alignment and is it less than 4? */
11493 if (align < 4)
11495 rtx scratch1 = gen_reg_rtx (Pmode);
11496 emit_move_insn (scratch1, out);
11497 /* Is there a known alignment and is it not 2? */
11498 if (align != 2)
11500 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
11501 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
11503 /* Leave just the 3 lower bits. */
11504 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
11505 NULL_RTX, 0, OPTAB_WIDEN);
11507 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
11508 Pmode, 1, align_4_label);
11509 emit_cmp_and_jump_insns (align_rtx, GEN_INT (2), EQ, NULL,
11510 Pmode, 1, align_2_label);
11511 emit_cmp_and_jump_insns (align_rtx, GEN_INT (2), GTU, NULL,
11512 Pmode, 1, align_3_label);
11514 else
11516 	  /* Since the alignment is 2, we have to check 2 or 0 bytes;
11517 	     check whether the pointer is aligned to a 4-byte boundary.  */
11519 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (2),
11520 NULL_RTX, 0, OPTAB_WIDEN);
11522 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
11523 Pmode, 1, align_4_label);
11526 mem = gen_rtx_MEM (QImode, out);
11528 /* Now compare the bytes. */
11530       /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
11531 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
11532 QImode, 1, end_0_label);
11534 /* Increment the address. */
11535 if (TARGET_64BIT)
11536 emit_insn (gen_adddi3 (out, out, const1_rtx));
11537 else
11538 emit_insn (gen_addsi3 (out, out, const1_rtx));
11540 /* Not needed with an alignment of 2 */
11541 if (align != 2)
11543 emit_label (align_2_label);
11545 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11546 end_0_label);
11548 if (TARGET_64BIT)
11549 emit_insn (gen_adddi3 (out, out, const1_rtx));
11550 else
11551 emit_insn (gen_addsi3 (out, out, const1_rtx));
11553 emit_label (align_3_label);
11556 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
11557 end_0_label);
11559 if (TARGET_64BIT)
11560 emit_insn (gen_adddi3 (out, out, const1_rtx));
11561 else
11562 emit_insn (gen_addsi3 (out, out, const1_rtx));
11565   /* Generate a loop to check 4 bytes at a time.  It is not a good idea to
11566      align this loop; that only makes the program larger and does not
11567      help to speed it up.  */
11568 emit_label (align_4_label);
11570 mem = gen_rtx_MEM (SImode, out);
11571 emit_move_insn (scratch, mem);
11572 if (TARGET_64BIT)
11573 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
11574 else
11575 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
11577   /* This formula yields a nonzero result iff one of the bytes is zero.
11578      This saves three branches inside the loop and many cycles.  */
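  /* The value computed below is (x - 0x01010101) & ~x & 0x80808080, which
     is nonzero exactly when some byte of x is zero.  For example, with
     x = 0x12003456: x - 0x01010101 = 0x10FF3355, ~x = 0xEDFFCBA9, and the
     final AND with 0x80808080 leaves 0x00800000, flagging the zero byte.  */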
11580 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
11581 emit_insn (gen_one_cmplsi2 (scratch, scratch));
11582 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
11583 emit_insn (gen_andsi3 (tmpreg, tmpreg,
11584 gen_int_mode (0x80808080, SImode)));
11585 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
11586 align_4_label);
11588 if (TARGET_CMOVE)
11590 rtx reg = gen_reg_rtx (SImode);
11591 rtx reg2 = gen_reg_rtx (Pmode);
11592 emit_move_insn (reg, tmpreg);
11593 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
11595 /* If zero is not in the first two bytes, move two bytes forward. */
11596 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11597 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11598 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11599 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
11600 gen_rtx_IF_THEN_ELSE (SImode, tmp,
11601 reg,
11602 tmpreg)));
11603 /* Emit lea manually to avoid clobbering of flags. */
11604 emit_insn (gen_rtx_SET (SImode, reg2,
11605 gen_rtx_PLUS (Pmode, out, GEN_INT (2))));
11607 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11608 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
11609 emit_insn (gen_rtx_SET (VOIDmode, out,
11610 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
11611 reg2,
11612 out)));
11615 else
11617 rtx end_2_label = gen_label_rtx ();
11618 /* Is zero in the first two bytes? */
11620 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
11621 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
11622 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
11623 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
11624 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
11625 pc_rtx);
11626 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
11627 JUMP_LABEL (tmp) = end_2_label;
11629 /* Not in the first two. Move two bytes forward. */
11630 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
11631 if (TARGET_64BIT)
11632 emit_insn (gen_adddi3 (out, out, GEN_INT (2)));
11633 else
11634 emit_insn (gen_addsi3 (out, out, GEN_INT (2)));
11636 emit_label (end_2_label);
11640   /* Avoid a branch when fixing up the byte position.  */
11641 tmpreg = gen_lowpart (QImode, tmpreg);
11642 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
11643 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
11644 if (TARGET_64BIT)
11645 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
11646 else
11647 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
11649 emit_label (end_0_label);
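/* Expand a call.  RETVAL is the value register (or NULL when the call has
   no return value), FNADDR the address being called, CALLARG1 and CALLARG2
   the argument information from the call patterns, POP the number of bytes
   the call pops from the stack (or NULL/const0_rtx when nothing is popped),
   and SIBCALL nonzero for a sibling call.  */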
11652 void
11653 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
11654 rtx callarg2 ATTRIBUTE_UNUSED,
11655 rtx pop, int sibcall)
11657 rtx use = NULL, call;
11659 if (pop == const0_rtx)
11660 pop = NULL;
11661 if (TARGET_64BIT && pop)
11662 abort ();
11664 #if TARGET_MACHO
11665 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
11666 fnaddr = machopic_indirect_call_target (fnaddr);
11667 #else
11668 /* Static functions and indirect calls don't need the pic register. */
11669 if (! TARGET_64BIT && flag_pic
11670 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
11671 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
11672 use_reg (&use, pic_offset_table_rtx);
11674 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
11676 rtx al = gen_rtx_REG (QImode, 0);
11677 emit_move_insn (al, callarg2);
11678 use_reg (&use, al);
11680 #endif /* TARGET_MACHO */
11682 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
11684 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11685 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11687 if (sibcall && TARGET_64BIT
11688 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
11690 rtx addr;
11691 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
11692 fnaddr = gen_rtx_REG (Pmode, FIRST_REX_INT_REG + 3 /* R11 */);
11693 emit_move_insn (fnaddr, addr);
11694 fnaddr = gen_rtx_MEM (QImode, fnaddr);
11697 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
11698 if (retval)
11699 call = gen_rtx_SET (VOIDmode, retval, call);
11700 if (pop)
11702 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
11703 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
11704 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
11707 call = emit_call_insn (call);
11708 if (use)
11709 CALL_INSN_FUNCTION_USAGE (call) = use;
11713 /* Clear stack slot assignments remembered from previous functions.
11714 This is called from INIT_EXPANDERS once before RTL is emitted for each
11715 function. */
11717 static struct machine_function *
11718 ix86_init_machine_status (void)
11720 struct machine_function *f;
11722 f = ggc_alloc_cleared (sizeof (struct machine_function));
11723 f->use_fast_prologue_epilogue_nregs = -1;
11725 return f;
11728 /* Return a MEM corresponding to a stack slot with mode MODE.
11729 Allocate a new slot if necessary.
11731 The RTL for a function can have several slots available: N is
11732 which slot to use. */
11735 assign_386_stack_local (enum machine_mode mode, int n)
11737 struct stack_local_entry *s;
11739 if (n < 0 || n >= MAX_386_STACK_LOCALS)
11740 abort ();
11742 for (s = ix86_stack_locals; s; s = s->next)
11743 if (s->mode == mode && s->n == n)
11744 return s->rtl;
11746 s = (struct stack_local_entry *)
11747 ggc_alloc (sizeof (struct stack_local_entry));
11748 s->n = n;
11749 s->mode = mode;
11750 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
11752 s->next = ix86_stack_locals;
11753 ix86_stack_locals = s;
11754 return s->rtl;
11757 /* Construct the SYMBOL_REF for the tls_get_addr function. */
11759 static GTY(()) rtx ix86_tls_symbol;
11761 ix86_tls_get_addr (void)
11764 if (!ix86_tls_symbol)
11766 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
11767 (TARGET_GNU_TLS && !TARGET_64BIT)
11768 ? "___tls_get_addr"
11769 : "__tls_get_addr");
11772 return ix86_tls_symbol;
11775 /* Calculate the length of the memory address in the instruction
11776 encoding. Does not include the one-byte modrm, opcode, or prefix. */
11778 static int
11779 memory_address_length (rtx addr)
11781 struct ix86_address parts;
11782 rtx base, index, disp;
11783 int len;
11785 if (GET_CODE (addr) == PRE_DEC
11786 || GET_CODE (addr) == POST_INC
11787 || GET_CODE (addr) == PRE_MODIFY
11788 || GET_CODE (addr) == POST_MODIFY)
11789 return 0;
11791 if (! ix86_decompose_address (addr, &parts))
11792 abort ();
11794 base = parts.base;
11795 index = parts.index;
11796 disp = parts.disp;
11797 len = 0;
11799 /* Rule of thumb:
11800 - esp as the base always wants an index,
11801 - ebp as the base always wants a displacement. */
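  /* In ModRM terms: a plain (%esp) base must be encoded with a SIB byte,
     and a plain (%ebp) base must be encoded as 0(%ebp) with an 8-bit
     displacement, which is why those bases cost an extra byte below.  */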
11803 /* Register Indirect. */
11804 if (base && !index && !disp)
11806 /* esp (for its index) and ebp (for its displacement) need
11807 the two-byte modrm form. */
11808 if (addr == stack_pointer_rtx
11809 || addr == arg_pointer_rtx
11810 || addr == frame_pointer_rtx
11811 || addr == hard_frame_pointer_rtx)
11812 len = 1;
11815 /* Direct Addressing. */
11816 else if (disp && !base && !index)
11817 len = 4;
11819 else
11821 /* Find the length of the displacement constant. */
11822 if (disp)
11824 if (GET_CODE (disp) == CONST_INT
11825 && CONST_OK_FOR_LETTER_P (INTVAL (disp), 'K')
11826 && base)
11827 len = 1;
11828 else
11829 len = 4;
11831 /* ebp always wants a displacement. */
11832 else if (base == hard_frame_pointer_rtx)
11833 len = 1;
11835 /* An index requires the two-byte modrm form.... */
11836 if (index
11837 /* ...like esp, which always wants an index. */
11838 || base == stack_pointer_rtx
11839 || base == arg_pointer_rtx
11840 || base == frame_pointer_rtx)
11841 len += 1;
11844 return len;
11847 /* Compute the default value for the "length_immediate" attribute.  When
11848    SHORTFORM is set, expect that the insn has an 8-bit immediate alternative.  */
11850 ix86_attr_length_immediate_default (rtx insn, int shortform)
11852 int len = 0;
11853 int i;
11854 extract_insn_cached (insn);
11855 for (i = recog_data.n_operands - 1; i >= 0; --i)
11856 if (CONSTANT_P (recog_data.operand[i]))
11858 if (len)
11859 abort ();
11860 if (shortform
11861 && GET_CODE (recog_data.operand[i]) == CONST_INT
11862 && CONST_OK_FOR_LETTER_P (INTVAL (recog_data.operand[i]), 'K'))
11863 len = 1;
11864 else
11866 switch (get_attr_mode (insn))
11868 case MODE_QI:
11869 len+=1;
11870 break;
11871 case MODE_HI:
11872 len+=2;
11873 break;
11874 case MODE_SI:
11875 len+=4;
11876 break;
11877 /* Immediates for DImode instructions are encoded as 32bit sign extended values. */
11878 case MODE_DI:
11879 len+=4;
11880 break;
11881 default:
11882 fatal_insn ("unknown insn mode", insn);
11886 return len;
11888 /* Compute default value for "length_address" attribute. */
11890 ix86_attr_length_address_default (rtx insn)
11892 int i;
11894 if (get_attr_type (insn) == TYPE_LEA)
11896 rtx set = PATTERN (insn);
11897 if (GET_CODE (set) == SET)
11899 else if (GET_CODE (set) == PARALLEL
11900 && GET_CODE (XVECEXP (set, 0, 0)) == SET)
11901 set = XVECEXP (set, 0, 0);
11902 else
11904 #ifdef ENABLE_CHECKING
11905 abort ();
11906 #endif
11907 return 0;
11910 return memory_address_length (SET_SRC (set));
11913 extract_insn_cached (insn);
11914 for (i = recog_data.n_operands - 1; i >= 0; --i)
11915 if (GET_CODE (recog_data.operand[i]) == MEM)
11917 return memory_address_length (XEXP (recog_data.operand[i], 0));
11918 break;
11920 return 0;
11923 /* Return the maximum number of instructions a cpu can issue. */
11925 static int
11926 ix86_issue_rate (void)
11928 switch (ix86_tune)
11930 case PROCESSOR_PENTIUM:
11931 case PROCESSOR_K6:
11932 return 2;
11934 case PROCESSOR_PENTIUMPRO:
11935 case PROCESSOR_PENTIUM4:
11936 case PROCESSOR_ATHLON:
11937 case PROCESSOR_K8:
11938 return 3;
11940 default:
11941 return 1;
11945 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
11946 by DEP_INSN and nothing else set by DEP_INSN. */
11948 static int
11949 ix86_flags_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11951 rtx set, set2;
11953 /* Simplify the test for uninteresting insns. */
11954 if (insn_type != TYPE_SETCC
11955 && insn_type != TYPE_ICMOV
11956 && insn_type != TYPE_FCMOV
11957 && insn_type != TYPE_IBR)
11958 return 0;
11960 if ((set = single_set (dep_insn)) != 0)
11962 set = SET_DEST (set);
11963 set2 = NULL_RTX;
11965 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
11966 && XVECLEN (PATTERN (dep_insn), 0) == 2
11967 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
11968 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
11970 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
11971 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
11973 else
11974 return 0;
11976 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
11977 return 0;
11979 /* This test is true if the dependent insn reads the flags but
11980 not any other potentially set register. */
11981 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
11982 return 0;
11984 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
11985 return 0;
11987 return 1;
11990 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
11991 address with operands set by DEP_INSN. */
11993 static int
11994 ix86_agi_dependant (rtx insn, rtx dep_insn, enum attr_type insn_type)
11996 rtx addr;
11998 if (insn_type == TYPE_LEA
11999 && TARGET_PENTIUM)
12001 addr = PATTERN (insn);
12002 if (GET_CODE (addr) == SET)
12004 else if (GET_CODE (addr) == PARALLEL
12005 && GET_CODE (XVECEXP (addr, 0, 0)) == SET)
12006 addr = XVECEXP (addr, 0, 0);
12007 else
12008 abort ();
12009 addr = SET_SRC (addr);
12011 else
12013 int i;
12014 extract_insn_cached (insn);
12015 for (i = recog_data.n_operands - 1; i >= 0; --i)
12016 if (GET_CODE (recog_data.operand[i]) == MEM)
12018 addr = XEXP (recog_data.operand[i], 0);
12019 goto found;
12021 return 0;
12022 found:;
12025 return modified_in_p (addr, dep_insn);
12028 static int
12029 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
12031 enum attr_type insn_type, dep_insn_type;
12032 enum attr_memory memory, dep_memory;
12033 rtx set, set2;
12034 int dep_insn_code_number;
12036 /* Anti and output dependencies have zero cost on all CPUs. */
12037 if (REG_NOTE_KIND (link) != 0)
12038 return 0;
12040 dep_insn_code_number = recog_memoized (dep_insn);
12042 /* If we can't recognize the insns, we can't really do anything. */
12043 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
12044 return cost;
12046 insn_type = get_attr_type (insn);
12047 dep_insn_type = get_attr_type (dep_insn);
12049 switch (ix86_tune)
12051 case PROCESSOR_PENTIUM:
12052 /* Address Generation Interlock adds a cycle of latency. */
12053 if (ix86_agi_dependant (insn, dep_insn, insn_type))
12054 cost += 1;
12056 /* ??? Compares pair with jump/setcc. */
12057 if (ix86_flags_dependant (insn, dep_insn, insn_type))
12058 cost = 0;
12060 /* Floating point stores require value to be ready one cycle earlier. */
12061 if (insn_type == TYPE_FMOV
12062 && get_attr_memory (insn) == MEMORY_STORE
12063 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12064 cost += 1;
12065 break;
12067 case PROCESSOR_PENTIUMPRO:
12068 memory = get_attr_memory (insn);
12069 dep_memory = get_attr_memory (dep_insn);
12071 /* Since we can't represent delayed latencies of load+operation,
12072 increase the cost here for non-imov insns. */
12073 if (dep_insn_type != TYPE_IMOV
12074 && dep_insn_type != TYPE_FMOV
12075 && (dep_memory == MEMORY_LOAD || dep_memory == MEMORY_BOTH))
12076 cost += 1;
12078 /* INT->FP conversion is expensive. */
12079 if (get_attr_fp_int_src (dep_insn))
12080 cost += 5;
12082 /* There is one cycle extra latency between an FP op and a store. */
12083 if (insn_type == TYPE_FMOV
12084 && (set = single_set (dep_insn)) != NULL_RTX
12085 && (set2 = single_set (insn)) != NULL_RTX
12086 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
12087 && GET_CODE (SET_DEST (set2)) == MEM)
12088 cost += 1;
12090 /* The reorder buffer can hide the latency of a load by executing it
12091 in parallel with the previous instruction, provided the previous
12092 instruction is not needed to compute the address. */
12093 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12094 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12096 /* Claim moves take one cycle, as the core can issue one load
12097 at a time and the next load can start a cycle later. */
12098 if (dep_insn_type == TYPE_IMOV
12099 || dep_insn_type == TYPE_FMOV)
12100 cost = 1;
12101 else if (cost > 1)
12102 cost--;
12104 break;
12106 case PROCESSOR_K6:
12107 memory = get_attr_memory (insn);
12108 dep_memory = get_attr_memory (dep_insn);
12109 /* The esp dependency is resolved before the instruction is really
12110 finished. */
12111 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
12112 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
12113 return 1;
12115 /* Since we can't represent delayed latencies of load+operation,
12116 increase the cost here for non-imov insns. */
12117 if (dep_memory == MEMORY_LOAD || dep_memory == MEMORY_BOTH)
12118 cost += (dep_insn_type != TYPE_IMOV) ? 2 : 1;
12120 /* INT->FP conversion is expensive. */
12121 if (get_attr_fp_int_src (dep_insn))
12122 cost += 5;
12124 /* The reorder buffer can hide the latency of a load by executing it
12125 in parallel with the previous instruction, provided the previous
12126 instruction is not needed to compute the address. */
12127 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12128 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12130 /* Claim moves take one cycle, as the core can issue one load
12131 at a time and the next load can start a cycle later. */
12132 if (dep_insn_type == TYPE_IMOV
12133 || dep_insn_type == TYPE_FMOV)
12134 cost = 1;
12135 else if (cost > 2)
12136 cost -= 2;
12137 else
12138 cost = 1;
12140 break;
12142 case PROCESSOR_ATHLON:
12143 case PROCESSOR_K8:
12144 memory = get_attr_memory (insn);
12145 dep_memory = get_attr_memory (dep_insn);
12147 /* The reorder buffer can hide the latency of a load by executing it
12148 in parallel with the previous instruction, provided the previous
12149 instruction is not needed to compute the address. */
12150 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
12151 && !ix86_agi_dependant (insn, dep_insn, insn_type))
12153 enum attr_unit unit = get_attr_unit (insn);
12154 int loadcost = 3;
12156 /* Because of the difference between the length of integer and
12157 floating unit pipeline preparation stages, the memory operands
12158 for floating point are cheaper.
12160 ??? For Athlon the difference is most probably 2. */
12161 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
12162 loadcost = 3;
12163 else
12164 loadcost = TARGET_ATHLON ? 2 : 0;
12166 if (cost >= loadcost)
12167 cost -= loadcost;
12168 else
12169 cost = 0;
12172 default:
12173 break;
12176 return cost;
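/* A minimal example of the Pentium AGI adjustment above (illustrative
   only):

	addl $4, %ebx
	movl (%ebx), %eax

   The load's address is produced by the immediately preceding add, so
   ix86_agi_dependant returns nonzero and the dependence cost grows by one
   cycle, encouraging the scheduler to separate the two insns.  */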
12179 static union
12181 struct ppro_sched_data
12183 rtx decode[3];
12184 int issued_this_cycle;
12185 } ppro;
12186 } ix86_sched_data;
12188 static enum attr_ppro_uops
12189 ix86_safe_ppro_uops (rtx insn)
12191 if (recog_memoized (insn) >= 0)
12192 return get_attr_ppro_uops (insn);
12193 else
12194 return PPRO_UOPS_MANY;
12197 static void
12198 ix86_dump_ppro_packet (FILE *dump)
12200 if (ix86_sched_data.ppro.decode[0])
12202 fprintf (dump, "PPRO packet: %d",
12203 INSN_UID (ix86_sched_data.ppro.decode[0]));
12204 if (ix86_sched_data.ppro.decode[1])
12205 fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[1]));
12206 if (ix86_sched_data.ppro.decode[2])
12207 fprintf (dump, " %d", INSN_UID (ix86_sched_data.ppro.decode[2]));
12208 fputc ('\n', dump);
12212 /* We're beginning a new block. Initialize data structures as necessary. */
12214 static void
12215 ix86_sched_init (FILE *dump ATTRIBUTE_UNUSED,
12216 int sched_verbose ATTRIBUTE_UNUSED,
12217 int veclen ATTRIBUTE_UNUSED)
12219 memset (&ix86_sched_data, 0, sizeof (ix86_sched_data));
12222 /* Shift INSN to SLOT, and shift everything else down. */
12224 static void
12225 ix86_reorder_insn (rtx *insnp, rtx *slot)
12227 if (insnp != slot)
12229 rtx insn = *insnp;
12231 insnp[0] = insnp[1];
12232 while (++insnp != slot);
12233 *insnp = insn;
12237 static void
12238 ix86_sched_reorder_ppro (rtx *ready, rtx *e_ready)
12240 rtx decode[3];
12241 enum attr_ppro_uops cur_uops;
12242 int issued_this_cycle;
12243 rtx *insnp;
12244 int i;
12246 /* At this point .ppro.decode contains the state of the three
12247 decoders from the last "cycle". That is, those insns that were
12248 actually independent. But here we're scheduling for the
12249 decoder, and we may find things that are decodable in the
12250 same cycle. */
12252 memcpy (decode, ix86_sched_data.ppro.decode, sizeof (decode));
12253 issued_this_cycle = 0;
12255 insnp = e_ready;
12256 cur_uops = ix86_safe_ppro_uops (*insnp);
12258 /* If the decoders are empty, and we've a complex insn at the
12259 head of the priority queue, let it issue without complaint. */
12260 if (decode[0] == NULL)
12262 if (cur_uops == PPRO_UOPS_MANY)
12264 decode[0] = *insnp;
12265 goto ppro_done;
12269 /* Otherwise, search for a 2-4 uop insn to issue. */
12269 while (cur_uops != PPRO_UOPS_FEW)
12271 if (insnp == ready)
12272 break;
12273 cur_uops = ix86_safe_ppro_uops (*--insnp);
12276 /* If so, move it to the head of the line. */
12277 if (cur_uops == PPRO_UOPS_FEW)
12278 ix86_reorder_insn (insnp, e_ready);
12280 /* Issue the head of the queue. */
12281 issued_this_cycle = 1;
12282 decode[0] = *e_ready--;
12285 /* Look for simple insns to fill in the other two slots. */
12286 for (i = 1; i < 3; ++i)
12287 if (decode[i] == NULL)
12289 if (ready > e_ready)
12290 goto ppro_done;
12292 insnp = e_ready;
12293 cur_uops = ix86_safe_ppro_uops (*insnp);
12294 while (cur_uops != PPRO_UOPS_ONE)
12296 if (insnp == ready)
12297 break;
12298 cur_uops = ix86_safe_ppro_uops (*--insnp);
12301 /* Found one. Move it to the head of the queue and issue it. */
12302 if (cur_uops == PPRO_UOPS_ONE)
12304 ix86_reorder_insn (insnp, e_ready);
12305 decode[i] = *e_ready--;
12306 issued_this_cycle++;
12307 continue;
12310 /* ??? Didn't find one. Ideally, here we would do a lazy split
12311 of 2-uop insns, issue one and queue the other. */
12314 ppro_done:
12315 if (issued_this_cycle == 0)
12316 issued_this_cycle = 1;
12317 ix86_sched_data.ppro.issued_this_cycle = issued_this_cycle;
12320 /* We are about to begin issuing insns for this clock cycle.
12321 Override the default sort algorithm to better slot instructions. */
12322 static int
12323 ix86_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
12324 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
12325 int *n_readyp, int clock_var ATTRIBUTE_UNUSED)
12327 int n_ready = *n_readyp;
12328 rtx *e_ready = ready + n_ready - 1;
12330 /* Make sure to go ahead and initialize key items in
12331 ix86_sched_data if we are not going to bother trying to
12332 reorder the ready queue. */
12333 if (n_ready < 2)
12335 ix86_sched_data.ppro.issued_this_cycle = 1;
12336 goto out;
12339 switch (ix86_tune)
12341 default:
12342 break;
12344 case PROCESSOR_PENTIUMPRO:
12345 ix86_sched_reorder_ppro (ready, e_ready);
12346 break;
12349 out:
12350 return ix86_issue_rate ();
12353 /* We are about to issue INSN. Return the number of insns left on the
12354 ready queue that can be issued this cycle. */
12356 static int
12357 ix86_variable_issue (FILE *dump, int sched_verbose, rtx insn,
12358 int can_issue_more)
12360 int i;
12361 switch (ix86_tune)
12363 default:
12364 return can_issue_more - 1;
12366 case PROCESSOR_PENTIUMPRO:
12368 enum attr_ppro_uops uops = ix86_safe_ppro_uops (insn);
12370 if (uops == PPRO_UOPS_MANY)
12372 if (sched_verbose)
12373 ix86_dump_ppro_packet (dump);
12374 ix86_sched_data.ppro.decode[0] = insn;
12375 ix86_sched_data.ppro.decode[1] = NULL;
12376 ix86_sched_data.ppro.decode[2] = NULL;
12377 if (sched_verbose)
12378 ix86_dump_ppro_packet (dump);
12379 ix86_sched_data.ppro.decode[0] = NULL;
12381 else if (uops == PPRO_UOPS_FEW)
12383 if (sched_verbose)
12384 ix86_dump_ppro_packet (dump);
12385 ix86_sched_data.ppro.decode[0] = insn;
12386 ix86_sched_data.ppro.decode[1] = NULL;
12387 ix86_sched_data.ppro.decode[2] = NULL;
12389 else
12391 for (i = 0; i < 3; ++i)
12392 if (ix86_sched_data.ppro.decode[i] == NULL)
12394 ix86_sched_data.ppro.decode[i] = insn;
12395 break;
12397 if (i == 3)
12398 abort ();
12399 if (i == 2)
12401 if (sched_verbose)
12402 ix86_dump_ppro_packet (dump);
12403 ix86_sched_data.ppro.decode[0] = NULL;
12404 ix86_sched_data.ppro.decode[1] = NULL;
12405 ix86_sched_data.ppro.decode[2] = NULL;
12409 return --ix86_sched_data.ppro.issued_this_cycle;
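/* Background for the decode[3] slots used above (a simplification of the
   hardware, not a statement from this file): the PentiumPro/PII front end
   decodes up to one complex (multi-uop) insn plus two single-uop insns per
   cycle, the familiar 4-1-1 template.  The reorder and variable-issue
   hooks try to present the ready list in that order, so a packet is one
   PPRO_UOPS_FEW/MANY insn followed by up to two PPRO_UOPS_ONE insns.  */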
12413 static int
12414 ia32_use_dfa_pipeline_interface (void)
12416 if (TARGET_PENTIUM || TARGET_ATHLON_K8)
12417 return 1;
12418 return 0;
12421 /* How many alternative schedules to try. This should be as wide as the
12422 scheduling freedom in the DFA, but no wider. Making this value too
12423 large results in extra work for the scheduler. */
12425 static int
12426 ia32_multipass_dfa_lookahead (void)
12428 if (ix86_tune == PROCESSOR_PENTIUM)
12429 return 2;
12430 else
12431 return 0;
12435 /* Walk through INSNS and look for MEM references whose address is DSTREG or
12436 SRCREG and set the memory attribute to those of DSTREF and SRCREF, as
12437 appropriate. */
12439 void
12440 ix86_set_move_mem_attrs (rtx insns, rtx dstref, rtx srcref, rtx dstreg,
12441 rtx srcreg)
12443 rtx insn;
12445 for (insn = insns; insn != 0 ; insn = NEXT_INSN (insn))
12446 if (INSN_P (insn))
12447 ix86_set_move_mem_attrs_1 (PATTERN (insn), dstref, srcref,
12448 dstreg, srcreg);
12451 /* Subroutine of above to actually do the updating by recursively walking
12452 the rtx. */
12454 static void
12455 ix86_set_move_mem_attrs_1 (rtx x, rtx dstref, rtx srcref, rtx dstreg,
12456 rtx srcreg)
12458 enum rtx_code code = GET_CODE (x);
12459 const char *format_ptr = GET_RTX_FORMAT (code);
12460 int i, j;
12462 if (code == MEM && XEXP (x, 0) == dstreg)
12463 MEM_COPY_ATTRIBUTES (x, dstref);
12464 else if (code == MEM && XEXP (x, 0) == srcreg)
12465 MEM_COPY_ATTRIBUTES (x, srcref);
12467 for (i = 0; i < GET_RTX_LENGTH (code); i++, format_ptr++)
12469 if (*format_ptr == 'e')
12470 ix86_set_move_mem_attrs_1 (XEXP (x, i), dstref, srcref,
12471 dstreg, srcreg);
12472 else if (*format_ptr == 'E')
12473 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12474 ix86_set_move_mem_attrs_1 (XVECEXP (x, i, j), dstref, srcref,
12475 dstreg, srcreg);
12479 /* Compute the alignment given to a constant that is being placed in memory.
12480 EXP is the constant and ALIGN is the alignment that the object would
12481 ordinarily have.
12482 The value of this function is used instead of that alignment to align
12483 the object. */
12486 ix86_constant_alignment (tree exp, int align)
12488 if (TREE_CODE (exp) == REAL_CST)
12490 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
12491 return 64;
12492 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
12493 return 128;
12495 else if (TREE_CODE (exp) == STRING_CST && TREE_STRING_LENGTH (exp) >= 31
12496 && align < 256)
12497 return 256;
12499 return align;
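/* Illustrative effect of the rules above (sketch only): a DFmode constant
   such as 3.1415 that would otherwise get 32-bit alignment is promoted to
   64 bits, and a string literal of 31 or more characters is placed on a
   256-bit (32-byte) boundary, which can help block-move and string
   expansions that read it 16 bytes at a time.  */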
12502 /* Compute the alignment for a static variable.
12503 TYPE is the data type, and ALIGN is the alignment that
12504 the object would ordinarily have. The value of this function is used
12505 instead of that alignment to align the object. */
12508 ix86_data_alignment (tree type, int align)
12510 if (AGGREGATE_TYPE_P (type)
12511 && TYPE_SIZE (type)
12512 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12513 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 256
12514 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 256)
12515 return 256;
12517 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
12518 to a 16-byte boundary. */
12519 if (TARGET_64BIT)
12521 if (AGGREGATE_TYPE_P (type)
12522 && TYPE_SIZE (type)
12523 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12524 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
12525 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12526 return 128;
12529 if (TREE_CODE (type) == ARRAY_TYPE)
12531 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12532 return 64;
12533 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12534 return 128;
12536 else if (TREE_CODE (type) == COMPLEX_TYPE)
12539 if (TYPE_MODE (type) == DCmode && align < 64)
12540 return 64;
12541 if (TYPE_MODE (type) == XCmode && align < 128)
12542 return 128;
12544 else if ((TREE_CODE (type) == RECORD_TYPE
12545 || TREE_CODE (type) == UNION_TYPE
12546 || TREE_CODE (type) == QUAL_UNION_TYPE)
12547 && TYPE_FIELDS (type))
12549 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12550 return 64;
12551 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12552 return 128;
12554 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12555 || TREE_CODE (type) == INTEGER_TYPE)
12557 if (TYPE_MODE (type) == DFmode && align < 64)
12558 return 64;
12559 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12560 return 128;
12563 return align;
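/* For example (illustration only): a file-scope "double d[8]" is 512 bits,
   so the first test above raises its alignment to 256 bits; a 16-byte
   struct compiled for x86-64 falls through to the second test and is
   guaranteed at least 128-bit alignment, matching the ABI comment.  */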
12566 /* Compute the alignment for a local variable.
12567 TYPE is the data type, and ALIGN is the alignment that
12568 the object would ordinarily have. The value of this macro is used
12569 instead of that alignment to align the object. */
12572 ix86_local_alignment (tree type, int align)
12574 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
12575 to a 16-byte boundary. */
12576 if (TARGET_64BIT)
12578 if (AGGREGATE_TYPE_P (type)
12579 && TYPE_SIZE (type)
12580 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
12581 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
12582 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
12583 return 128;
12585 if (TREE_CODE (type) == ARRAY_TYPE)
12587 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
12588 return 64;
12589 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
12590 return 128;
12592 else if (TREE_CODE (type) == COMPLEX_TYPE)
12594 if (TYPE_MODE (type) == DCmode && align < 64)
12595 return 64;
12596 if (TYPE_MODE (type) == XCmode && align < 128)
12597 return 128;
12599 else if ((TREE_CODE (type) == RECORD_TYPE
12600 || TREE_CODE (type) == UNION_TYPE
12601 || TREE_CODE (type) == QUAL_UNION_TYPE)
12602 && TYPE_FIELDS (type))
12604 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
12605 return 64;
12606 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
12607 return 128;
12609 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
12610 || TREE_CODE (type) == INTEGER_TYPE)
12613 if (TYPE_MODE (type) == DFmode && align < 64)
12614 return 64;
12615 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
12616 return 128;
12618 return align;
12621 /* Emit RTL insns to initialize the variable parts of a trampoline.
12622 FNADDR is an RTX for the address of the function's pure code.
12623 CXT is an RTX for the static chain value for the function. */
12624 void
12625 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
12627 if (!TARGET_64BIT)
12629 /* Compute offset from the end of the jmp to the target function. */
12630 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
12631 plus_constant (tramp, 10),
12632 NULL_RTX, 1, OPTAB_DIRECT);
12633 emit_move_insn (gen_rtx_MEM (QImode, tramp),
12634 gen_int_mode (0xb9, QImode));
12635 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
12636 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
12637 gen_int_mode (0xe9, QImode));
12638 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
12640 else
12642 int offset = 0;
12643 /* Try to load the address using the shorter movl instead of movabs.
12644 We may want to support movq for kernel mode, but the kernel does not
12645 use trampolines at the moment. */
12646 if (x86_64_zero_extended_value (fnaddr))
12648 fnaddr = copy_to_mode_reg (DImode, fnaddr);
12649 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12650 gen_int_mode (0xbb41, HImode));
12651 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
12652 gen_lowpart (SImode, fnaddr));
12653 offset += 6;
12655 else
12657 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12658 gen_int_mode (0xbb49, HImode));
12659 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12660 fnaddr);
12661 offset += 10;
12663 /* Load static chain using movabs to r10. */
12664 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12665 gen_int_mode (0xba49, HImode));
12666 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
12667 cxt);
12668 offset += 10;
12669 /* Jump to r11. */
12670 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
12671 gen_int_mode (0xff49, HImode));
12672 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
12673 gen_int_mode (0xe3, QImode));
12674 offset += 3;
12675 if (offset > TRAMPOLINE_SIZE)
12676 abort ();
12679 #ifdef TRANSFER_FROM_TRAMPOLINE
12680 emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
12681 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
12682 #endif
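/* Byte layout of the 32-bit trampoline built above (shown for
   illustration; offsets correspond to the plus_constant calls):

	offset 0:  b9 <cxt:4>    movl $CXT, %ecx
	offset 5:  e9 <disp:4>   jmp  FNADDR   (rel32, measured from tramp+10)

   hence DISP is computed as FNADDR - (tramp + 10).  */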
12685 #define def_builtin(MASK, NAME, TYPE, CODE) \
12686 do { \
12687 if ((MASK) & target_flags \
12688 && (!((MASK) & MASK_64BIT) || TARGET_64BIT)) \
12689 builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
12690 NULL, NULL_TREE); \
12691 } while (0)
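/* A sketch of how def_builtin is used later in this file (the type nodes
   are the ones constructed in ix86_init_mmx_sse_builtins below; the call
   shown here is only representative):

     def_builtin (MASK_SSE, "__builtin_ia32_loadups",
		  v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);

   The MASK_64BIT test in the macro keeps 64-bit-only builtins, e.g. the
   cvtsi642ss conversion, from being registered when !TARGET_64BIT.  */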
12693 struct builtin_description
12695 const unsigned int mask;
12696 const enum insn_code icode;
12697 const char *const name;
12698 const enum ix86_builtins code;
12699 const enum rtx_code comparison;
12700 const unsigned int flag;
12703 static const struct builtin_description bdesc_comi[] =
12705 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
12706 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
12707 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
12708 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
12709 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
12710 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
12711 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
12712 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
12713 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
12714 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
12715 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
12716 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
12717 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
12718 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
12719 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
12720 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
12721 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
12722 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
12723 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
12724 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
12725 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
12726 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
12727 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
12728 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
12731 static const struct builtin_description bdesc_2arg[] =
12733 /* SSE */
12734 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
12735 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
12736 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
12737 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
12738 { MASK_SSE, CODE_FOR_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
12739 { MASK_SSE, CODE_FOR_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
12740 { MASK_SSE, CODE_FOR_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
12741 { MASK_SSE, CODE_FOR_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
12743 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
12744 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
12745 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
12746 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, 1 },
12747 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, 1 },
12748 { MASK_SSE, CODE_FOR_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
12749 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, EQ, 0 },
12750 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, LT, 0 },
12751 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, LE, 0 },
12752 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, LT, 1 },
12753 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, LE, 1 },
12754 { MASK_SSE, CODE_FOR_maskncmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, UNORDERED, 0 },
12755 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
12756 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
12757 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
12758 { MASK_SSE, CODE_FOR_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
12759 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, EQ, 0 },
12760 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, LT, 0 },
12761 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, LE, 0 },
12762 { MASK_SSE, CODE_FOR_vmmaskncmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
12764 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
12765 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
12766 { MASK_SSE, CODE_FOR_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
12767 { MASK_SSE, CODE_FOR_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
12769 { MASK_SSE, CODE_FOR_sse_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
12770 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
12771 { MASK_SSE, CODE_FOR_sse_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
12772 { MASK_SSE, CODE_FOR_sse_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
12774 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
12775 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
12776 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
12777 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
12778 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
12780 /* MMX */
12781 { MASK_MMX, CODE_FOR_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
12782 { MASK_MMX, CODE_FOR_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
12783 { MASK_MMX, CODE_FOR_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
12784 { MASK_MMX, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
12785 { MASK_MMX, CODE_FOR_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
12786 { MASK_MMX, CODE_FOR_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
12787 { MASK_MMX, CODE_FOR_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
12788 { MASK_MMX, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
12790 { MASK_MMX, CODE_FOR_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
12791 { MASK_MMX, CODE_FOR_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
12792 { MASK_MMX, CODE_FOR_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
12793 { MASK_MMX, CODE_FOR_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
12794 { MASK_MMX, CODE_FOR_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
12795 { MASK_MMX, CODE_FOR_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
12796 { MASK_MMX, CODE_FOR_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
12797 { MASK_MMX, CODE_FOR_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
12799 { MASK_MMX, CODE_FOR_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
12800 { MASK_MMX, CODE_FOR_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
12801 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
12803 { MASK_MMX, CODE_FOR_mmx_anddi3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
12804 { MASK_MMX, CODE_FOR_mmx_nanddi3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
12805 { MASK_MMX, CODE_FOR_mmx_iordi3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
12806 { MASK_MMX, CODE_FOR_mmx_xordi3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
12808 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
12809 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
12811 { MASK_MMX, CODE_FOR_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
12812 { MASK_MMX, CODE_FOR_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
12813 { MASK_MMX, CODE_FOR_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
12814 { MASK_MMX, CODE_FOR_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
12815 { MASK_MMX, CODE_FOR_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
12816 { MASK_MMX, CODE_FOR_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
12818 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
12819 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
12820 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
12821 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
12823 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
12824 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
12825 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
12826 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
12827 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
12828 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
12830 /* Special. */
12831 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
12832 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
12833 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
12835 { MASK_SSE, CODE_FOR_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
12836 { MASK_SSE, CODE_FOR_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
12837 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
12839 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
12840 { MASK_MMX, CODE_FOR_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
12841 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
12842 { MASK_MMX, CODE_FOR_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
12843 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
12844 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
12846 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
12847 { MASK_MMX, CODE_FOR_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
12848 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
12849 { MASK_MMX, CODE_FOR_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
12850 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
12851 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
12853 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
12854 { MASK_MMX, CODE_FOR_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
12855 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
12856 { MASK_MMX, CODE_FOR_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
12858 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
12859 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
12861 /* SSE2 */
12862 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
12863 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
12864 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
12865 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
12866 { MASK_SSE2, CODE_FOR_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
12867 { MASK_SSE2, CODE_FOR_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
12868 { MASK_SSE2, CODE_FOR_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
12869 { MASK_SSE2, CODE_FOR_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
12871 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
12872 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
12873 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
12874 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, 1 },
12875 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, 1 },
12876 { MASK_SSE2, CODE_FOR_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
12877 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, EQ, 0 },
12878 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, LT, 0 },
12879 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, LE, 0 },
12880 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, LT, 1 },
12881 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, LE, 1 },
12882 { MASK_SSE2, CODE_FOR_maskncmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, UNORDERED, 0 },
12883 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
12884 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
12885 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
12886 { MASK_SSE2, CODE_FOR_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
12887 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, EQ, 0 },
12888 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, LT, 0 },
12889 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, LE, 0 },
12890 { MASK_SSE2, CODE_FOR_vmmaskncmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, UNORDERED, 0 },
12892 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
12893 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
12894 { MASK_SSE2, CODE_FOR_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
12895 { MASK_SSE2, CODE_FOR_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
12897 { MASK_SSE2, CODE_FOR_sse2_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
12898 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
12899 { MASK_SSE2, CODE_FOR_sse2_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
12900 { MASK_SSE2, CODE_FOR_sse2_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
12902 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
12903 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
12904 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
12906 /* SSE2 MMX */
12907 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
12908 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
12909 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
12910 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
12911 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
12912 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
12913 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
12914 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
12916 { MASK_MMX, CODE_FOR_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
12917 { MASK_MMX, CODE_FOR_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
12918 { MASK_MMX, CODE_FOR_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
12919 { MASK_MMX, CODE_FOR_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
12920 { MASK_MMX, CODE_FOR_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
12921 { MASK_MMX, CODE_FOR_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
12922 { MASK_MMX, CODE_FOR_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
12923 { MASK_MMX, CODE_FOR_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
12925 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
12926 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
12927 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, 0, 0 },
12928 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, 0, 0 },
12930 { MASK_SSE2, CODE_FOR_sse2_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
12931 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
12932 { MASK_SSE2, CODE_FOR_sse2_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
12933 { MASK_SSE2, CODE_FOR_sse2_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
12935 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
12936 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
12938 { MASK_SSE2, CODE_FOR_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
12939 { MASK_SSE2, CODE_FOR_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
12940 { MASK_SSE2, CODE_FOR_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
12941 { MASK_SSE2, CODE_FOR_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
12942 { MASK_SSE2, CODE_FOR_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
12943 { MASK_SSE2, CODE_FOR_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
12945 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
12946 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
12947 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
12948 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
12950 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
12951 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
12952 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
12953 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
12954 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
12955 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
12956 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
12957 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
12959 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
12960 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
12961 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
12963 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
12964 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
12966 { MASK_SSE2, CODE_FOR_ashlv8hi3_ti, 0, IX86_BUILTIN_PSLLW128, 0, 0 },
12967 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
12968 { MASK_SSE2, CODE_FOR_ashlv4si3_ti, 0, IX86_BUILTIN_PSLLD128, 0, 0 },
12969 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
12970 { MASK_SSE2, CODE_FOR_ashlv2di3_ti, 0, IX86_BUILTIN_PSLLQ128, 0, 0 },
12971 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
12973 { MASK_SSE2, CODE_FOR_lshrv8hi3_ti, 0, IX86_BUILTIN_PSRLW128, 0, 0 },
12974 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
12975 { MASK_SSE2, CODE_FOR_lshrv4si3_ti, 0, IX86_BUILTIN_PSRLD128, 0, 0 },
12976 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
12977 { MASK_SSE2, CODE_FOR_lshrv2di3_ti, 0, IX86_BUILTIN_PSRLQ128, 0, 0 },
12978 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
12980 { MASK_SSE2, CODE_FOR_ashrv8hi3_ti, 0, IX86_BUILTIN_PSRAW128, 0, 0 },
12981 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
12982 { MASK_SSE2, CODE_FOR_ashrv4si3_ti, 0, IX86_BUILTIN_PSRAD128, 0, 0 },
12983 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
12985 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
12987 { MASK_SSE2, CODE_FOR_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
12988 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
12989 { MASK_SSE2, CODE_FOR_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
12990 { MASK_SSE2, CODE_FOR_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
12992 /* PNI */
12993 { MASK_PNI, CODE_FOR_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
12994 { MASK_PNI, CODE_FOR_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
12995 { MASK_PNI, CODE_FOR_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
12996 { MASK_PNI, CODE_FOR_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
12997 { MASK_PNI, CODE_FOR_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
12998 { MASK_PNI, CODE_FOR_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 }
13001 static const struct builtin_description bdesc_1arg[] =
13003 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
13004 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
13006 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
13007 { MASK_SSE, CODE_FOR_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
13008 { MASK_SSE, CODE_FOR_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
13010 { MASK_SSE, CODE_FOR_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
13011 { MASK_SSE, CODE_FOR_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
13012 { MASK_SSE | MASK_64BIT, CODE_FOR_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
13013 { MASK_SSE, CODE_FOR_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
13014 { MASK_SSE, CODE_FOR_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
13015 { MASK_SSE | MASK_64BIT, CODE_FOR_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
13017 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
13018 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
13019 { MASK_SSE2, CODE_FOR_sse2_movq2dq, 0, IX86_BUILTIN_MOVQ2DQ, 0, 0 },
13020 { MASK_SSE2, CODE_FOR_sse2_movdq2q, 0, IX86_BUILTIN_MOVDQ2Q, 0, 0 },
13022 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
13024 { MASK_SSE2, CODE_FOR_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
13025 { MASK_SSE2, CODE_FOR_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
13027 { MASK_SSE2, CODE_FOR_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
13028 { MASK_SSE2, CODE_FOR_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
13029 { MASK_SSE2, CODE_FOR_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
13030 { MASK_SSE2, CODE_FOR_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
13031 { MASK_SSE2, CODE_FOR_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
13033 { MASK_SSE2, CODE_FOR_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
13035 { MASK_SSE2, CODE_FOR_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
13036 { MASK_SSE2, CODE_FOR_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
13037 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
13038 { MASK_SSE2 | MASK_64BIT, CODE_FOR_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
13040 { MASK_SSE2, CODE_FOR_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
13041 { MASK_SSE2, CODE_FOR_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
13042 { MASK_SSE2, CODE_FOR_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
13044 { MASK_SSE2, CODE_FOR_sse2_movq, 0, IX86_BUILTIN_MOVQ, 0, 0 },
13046 /* PNI */
13047 { MASK_PNI, CODE_FOR_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
13048 { MASK_PNI, CODE_FOR_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
13049 { MASK_PNI, CODE_FOR_movddup, 0, IX86_BUILTIN_MOVDDUP, 0, 0 }
13052 void
13053 ix86_init_builtins (void)
13055 if (TARGET_MMX)
13056 ix86_init_mmx_sse_builtins ();
13059 /* Set up all the MMX/SSE builtins. This is not called if TARGET_MMX
13060 is zero. Otherwise, if TARGET_SSE is not set, only expand the MMX
13061 builtins. */
13062 static void
13063 ix86_init_mmx_sse_builtins (void)
13065 const struct builtin_description * d;
13066 size_t i;
13068 tree pchar_type_node = build_pointer_type (char_type_node);
13069 tree pcchar_type_node = build_pointer_type (
13070 build_type_variant (char_type_node, 1, 0));
13071 tree pfloat_type_node = build_pointer_type (float_type_node);
13072 tree pcfloat_type_node = build_pointer_type (
13073 build_type_variant (float_type_node, 1, 0));
13074 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
13075 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
13076 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
13078 /* Comparisons. */
13079 tree int_ftype_v4sf_v4sf
13080 = build_function_type_list (integer_type_node,
13081 V4SF_type_node, V4SF_type_node, NULL_TREE);
13082 tree v4si_ftype_v4sf_v4sf
13083 = build_function_type_list (V4SI_type_node,
13084 V4SF_type_node, V4SF_type_node, NULL_TREE);
13085 /* MMX/SSE/integer conversions. */
13086 tree int_ftype_v4sf
13087 = build_function_type_list (integer_type_node,
13088 V4SF_type_node, NULL_TREE);
13089 tree int64_ftype_v4sf
13090 = build_function_type_list (long_long_integer_type_node,
13091 V4SF_type_node, NULL_TREE);
13092 tree int_ftype_v8qi
13093 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
13094 tree v4sf_ftype_v4sf_int
13095 = build_function_type_list (V4SF_type_node,
13096 V4SF_type_node, integer_type_node, NULL_TREE);
13097 tree v4sf_ftype_v4sf_int64
13098 = build_function_type_list (V4SF_type_node,
13099 V4SF_type_node, long_long_integer_type_node,
13100 NULL_TREE);
13101 tree v4sf_ftype_v4sf_v2si
13102 = build_function_type_list (V4SF_type_node,
13103 V4SF_type_node, V2SI_type_node, NULL_TREE);
13104 tree int_ftype_v4hi_int
13105 = build_function_type_list (integer_type_node,
13106 V4HI_type_node, integer_type_node, NULL_TREE);
13107 tree v4hi_ftype_v4hi_int_int
13108 = build_function_type_list (V4HI_type_node, V4HI_type_node,
13109 integer_type_node, integer_type_node,
13110 NULL_TREE);
13111 /* Miscellaneous. */
13112 tree v8qi_ftype_v4hi_v4hi
13113 = build_function_type_list (V8QI_type_node,
13114 V4HI_type_node, V4HI_type_node, NULL_TREE);
13115 tree v4hi_ftype_v2si_v2si
13116 = build_function_type_list (V4HI_type_node,
13117 V2SI_type_node, V2SI_type_node, NULL_TREE);
13118 tree v4sf_ftype_v4sf_v4sf_int
13119 = build_function_type_list (V4SF_type_node,
13120 V4SF_type_node, V4SF_type_node,
13121 integer_type_node, NULL_TREE);
13122 tree v2si_ftype_v4hi_v4hi
13123 = build_function_type_list (V2SI_type_node,
13124 V4HI_type_node, V4HI_type_node, NULL_TREE);
13125 tree v4hi_ftype_v4hi_int
13126 = build_function_type_list (V4HI_type_node,
13127 V4HI_type_node, integer_type_node, NULL_TREE);
13128 tree v4hi_ftype_v4hi_di
13129 = build_function_type_list (V4HI_type_node,
13130 V4HI_type_node, long_long_unsigned_type_node,
13131 NULL_TREE);
13132 tree v2si_ftype_v2si_di
13133 = build_function_type_list (V2SI_type_node,
13134 V2SI_type_node, long_long_unsigned_type_node,
13135 NULL_TREE);
13136 tree void_ftype_void
13137 = build_function_type (void_type_node, void_list_node);
13138 tree void_ftype_unsigned
13139 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
13140 tree void_ftype_unsigned_unsigned
13141 = build_function_type_list (void_type_node, unsigned_type_node,
13142 unsigned_type_node, NULL_TREE);
13143 tree void_ftype_pcvoid_unsigned_unsigned
13144 = build_function_type_list (void_type_node, const_ptr_type_node,
13145 unsigned_type_node, unsigned_type_node,
13146 NULL_TREE);
13147 tree unsigned_ftype_void
13148 = build_function_type (unsigned_type_node, void_list_node);
13149 tree di_ftype_void
13150 = build_function_type (long_long_unsigned_type_node, void_list_node);
13151 tree v4sf_ftype_void
13152 = build_function_type (V4SF_type_node, void_list_node);
13153 tree v2si_ftype_v4sf
13154 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
13155 /* Loads/stores. */
13156 tree void_ftype_v8qi_v8qi_pchar
13157 = build_function_type_list (void_type_node,
13158 V8QI_type_node, V8QI_type_node,
13159 pchar_type_node, NULL_TREE);
13160 tree v4sf_ftype_pcfloat
13161 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
13162 /* @@@ the type is bogus */
13163 tree v4sf_ftype_v4sf_pv2si
13164 = build_function_type_list (V4SF_type_node,
13165 V4SF_type_node, pv2si_type_node, NULL_TREE);
13166 tree void_ftype_pv2si_v4sf
13167 = build_function_type_list (void_type_node,
13168 pv2si_type_node, V4SF_type_node, NULL_TREE);
13169 tree void_ftype_pfloat_v4sf
13170 = build_function_type_list (void_type_node,
13171 pfloat_type_node, V4SF_type_node, NULL_TREE);
13172 tree void_ftype_pdi_di
13173 = build_function_type_list (void_type_node,
13174 pdi_type_node, long_long_unsigned_type_node,
13175 NULL_TREE);
13176 tree void_ftype_pv2di_v2di
13177 = build_function_type_list (void_type_node,
13178 pv2di_type_node, V2DI_type_node, NULL_TREE);
13179 /* Normal vector unops. */
13180 tree v4sf_ftype_v4sf
13181 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
13183 /* Normal vector binops. */
13184 tree v4sf_ftype_v4sf_v4sf
13185 = build_function_type_list (V4SF_type_node,
13186 V4SF_type_node, V4SF_type_node, NULL_TREE);
13187 tree v8qi_ftype_v8qi_v8qi
13188 = build_function_type_list (V8QI_type_node,
13189 V8QI_type_node, V8QI_type_node, NULL_TREE);
13190 tree v4hi_ftype_v4hi_v4hi
13191 = build_function_type_list (V4HI_type_node,
13192 V4HI_type_node, V4HI_type_node, NULL_TREE);
13193 tree v2si_ftype_v2si_v2si
13194 = build_function_type_list (V2SI_type_node,
13195 V2SI_type_node, V2SI_type_node, NULL_TREE);
13196 tree di_ftype_di_di
13197 = build_function_type_list (long_long_unsigned_type_node,
13198 long_long_unsigned_type_node,
13199 long_long_unsigned_type_node, NULL_TREE);
13201 tree v2si_ftype_v2sf
13202 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
13203 tree v2sf_ftype_v2si
13204 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
13205 tree v2si_ftype_v2si
13206 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
13207 tree v2sf_ftype_v2sf
13208 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
13209 tree v2sf_ftype_v2sf_v2sf
13210 = build_function_type_list (V2SF_type_node,
13211 V2SF_type_node, V2SF_type_node, NULL_TREE);
13212 tree v2si_ftype_v2sf_v2sf
13213 = build_function_type_list (V2SI_type_node,
13214 V2SF_type_node, V2SF_type_node, NULL_TREE);
13215 tree pint_type_node = build_pointer_type (integer_type_node);
13216 tree pcint_type_node = build_pointer_type (
13217 build_type_variant (integer_type_node, 1, 0));
13218 tree pdouble_type_node = build_pointer_type (double_type_node);
13219 tree pcdouble_type_node = build_pointer_type (
13220 build_type_variant (double_type_node, 1, 0));
13221 tree int_ftype_v2df_v2df
13222 = build_function_type_list (integer_type_node,
13223 V2DF_type_node, V2DF_type_node, NULL_TREE);
13225 tree ti_ftype_void
13226 = build_function_type (intTI_type_node, void_list_node);
13227 tree v2di_ftype_void
13228 = build_function_type (V2DI_type_node, void_list_node);
13229 tree ti_ftype_ti_ti
13230 = build_function_type_list (intTI_type_node,
13231 intTI_type_node, intTI_type_node, NULL_TREE);
13232 tree void_ftype_pcvoid
13233 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
13234 tree v2di_ftype_di
13235 = build_function_type_list (V2DI_type_node,
13236 long_long_unsigned_type_node, NULL_TREE);
13237 tree di_ftype_v2di
13238 = build_function_type_list (long_long_unsigned_type_node,
13239 V2DI_type_node, NULL_TREE);
13240 tree v4sf_ftype_v4si
13241 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
13242 tree v4si_ftype_v4sf
13243 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
13244 tree v2df_ftype_v4si
13245 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
13246 tree v4si_ftype_v2df
13247 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
13248 tree v2si_ftype_v2df
13249 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
13250 tree v4sf_ftype_v2df
13251 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
13252 tree v2df_ftype_v2si
13253 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
13254 tree v2df_ftype_v4sf
13255 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
13256 tree int_ftype_v2df
13257 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
13258 tree int64_ftype_v2df
13259 = build_function_type_list (long_long_integer_type_node,
13260 V2DF_type_node, NULL_TREE);
13261 tree v2df_ftype_v2df_int
13262 = build_function_type_list (V2DF_type_node,
13263 V2DF_type_node, integer_type_node, NULL_TREE);
13264 tree v2df_ftype_v2df_int64
13265 = build_function_type_list (V2DF_type_node,
13266 V2DF_type_node, long_long_integer_type_node,
13267 NULL_TREE);
13268 tree v4sf_ftype_v4sf_v2df
13269 = build_function_type_list (V4SF_type_node,
13270 V4SF_type_node, V2DF_type_node, NULL_TREE);
13271 tree v2df_ftype_v2df_v4sf
13272 = build_function_type_list (V2DF_type_node,
13273 V2DF_type_node, V4SF_type_node, NULL_TREE);
13274 tree v2df_ftype_v2df_v2df_int
13275 = build_function_type_list (V2DF_type_node,
13276 V2DF_type_node, V2DF_type_node,
13277 integer_type_node,
13278 NULL_TREE);
13279 tree v2df_ftype_v2df_pv2si
13280 = build_function_type_list (V2DF_type_node,
13281 V2DF_type_node, pv2si_type_node, NULL_TREE);
13282 tree void_ftype_pv2si_v2df
13283 = build_function_type_list (void_type_node,
13284 pv2si_type_node, V2DF_type_node, NULL_TREE);
13285 tree void_ftype_pdouble_v2df
13286 = build_function_type_list (void_type_node,
13287 pdouble_type_node, V2DF_type_node, NULL_TREE);
13288 tree void_ftype_pint_int
13289 = build_function_type_list (void_type_node,
13290 pint_type_node, integer_type_node, NULL_TREE);
13291 tree void_ftype_v16qi_v16qi_pchar
13292 = build_function_type_list (void_type_node,
13293 V16QI_type_node, V16QI_type_node,
13294 pchar_type_node, NULL_TREE);
13295 tree v2df_ftype_pcdouble
13296 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
13297 tree v2df_ftype_v2df_v2df
13298 = build_function_type_list (V2DF_type_node,
13299 V2DF_type_node, V2DF_type_node, NULL_TREE);
13300 tree v16qi_ftype_v16qi_v16qi
13301 = build_function_type_list (V16QI_type_node,
13302 V16QI_type_node, V16QI_type_node, NULL_TREE);
13303 tree v8hi_ftype_v8hi_v8hi
13304 = build_function_type_list (V8HI_type_node,
13305 V8HI_type_node, V8HI_type_node, NULL_TREE);
13306 tree v4si_ftype_v4si_v4si
13307 = build_function_type_list (V4SI_type_node,
13308 V4SI_type_node, V4SI_type_node, NULL_TREE);
13309 tree v2di_ftype_v2di_v2di
13310 = build_function_type_list (V2DI_type_node,
13311 V2DI_type_node, V2DI_type_node, NULL_TREE);
13312 tree v2di_ftype_v2df_v2df
13313 = build_function_type_list (V2DI_type_node,
13314 V2DF_type_node, V2DF_type_node, NULL_TREE);
13315 tree v2df_ftype_v2df
13316 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
13317 tree v2df_ftype_double
13318 = build_function_type_list (V2DF_type_node, double_type_node, NULL_TREE);
13319 tree v2df_ftype_double_double
13320 = build_function_type_list (V2DF_type_node,
13321 double_type_node, double_type_node, NULL_TREE);
13322 tree int_ftype_v8hi_int
13323 = build_function_type_list (integer_type_node,
13324 V8HI_type_node, integer_type_node, NULL_TREE);
13325 tree v8hi_ftype_v8hi_int_int
13326 = build_function_type_list (V8HI_type_node,
13327 V8HI_type_node, integer_type_node,
13328 integer_type_node, NULL_TREE);
13329 tree v2di_ftype_v2di_int
13330 = build_function_type_list (V2DI_type_node,
13331 V2DI_type_node, integer_type_node, NULL_TREE);
13332 tree v4si_ftype_v4si_int
13333 = build_function_type_list (V4SI_type_node,
13334 V4SI_type_node, integer_type_node, NULL_TREE);
13335 tree v8hi_ftype_v8hi_int
13336 = build_function_type_list (V8HI_type_node,
13337 V8HI_type_node, integer_type_node, NULL_TREE);
13338 tree v8hi_ftype_v8hi_v2di
13339 = build_function_type_list (V8HI_type_node,
13340 V8HI_type_node, V2DI_type_node, NULL_TREE);
13341 tree v4si_ftype_v4si_v2di
13342 = build_function_type_list (V4SI_type_node,
13343 V4SI_type_node, V2DI_type_node, NULL_TREE);
13344 tree v4si_ftype_v8hi_v8hi
13345 = build_function_type_list (V4SI_type_node,
13346 V8HI_type_node, V8HI_type_node, NULL_TREE);
13347 tree di_ftype_v8qi_v8qi
13348 = build_function_type_list (long_long_unsigned_type_node,
13349 V8QI_type_node, V8QI_type_node, NULL_TREE);
13350 tree v2di_ftype_v16qi_v16qi
13351 = build_function_type_list (V2DI_type_node,
13352 V16QI_type_node, V16QI_type_node, NULL_TREE);
13353 tree int_ftype_v16qi
13354 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
13355 tree v16qi_ftype_pcchar
13356 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
13357 tree void_ftype_pchar_v16qi
13358 = build_function_type_list (void_type_node,
13359 pchar_type_node, V16QI_type_node, NULL_TREE);
13360 tree v4si_ftype_pcint
13361 = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
13362 tree void_ftype_pcint_v4si
13363 = build_function_type_list (void_type_node,
13364 pcint_type_node, V4SI_type_node, NULL_TREE);
13365 tree v2di_ftype_v2di
13366 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
13368 tree float80_type;
13369 tree float128_type;
13371 /* The __float80 type. */
13372 if (TYPE_MODE (long_double_type_node) == XFmode)
13373 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
13374 "__float80");
13375 else
13377 /* The __float80 type. */
13378 float80_type = make_node (REAL_TYPE);
13379 TYPE_PRECISION (float80_type) = 96;
13380 layout_type (float80_type);
13381 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
13384 float128_type = make_node (REAL_TYPE);
13385 TYPE_PRECISION (float128_type) = 128;
13386 layout_type (float128_type);
13387 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
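/* Illustrative sketch, not part of the original source: once the names above
   are registered through lang_hooks.types.register_builtin_type, user code
   can declare objects of these types directly, e.g.

       __float80  ext;
       __float128 quad;

   The variable names are hypothetical; only the __float80/__float128
   keywords come from the registration calls above.  */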
13389 /* Add all builtins that are more or less simple operations on two
13390 operands. */
13391 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13393 /* Use one of the operands; the target can have a different mode for
13394 mask-generating compares. */
13395 enum machine_mode mode;
13396 tree type;
13398 if (d->name == 0)
13399 continue;
13400 mode = insn_data[d->icode].operand[1].mode;
13402 switch (mode)
13404 case V16QImode:
13405 type = v16qi_ftype_v16qi_v16qi;
13406 break;
13407 case V8HImode:
13408 type = v8hi_ftype_v8hi_v8hi;
13409 break;
13410 case V4SImode:
13411 type = v4si_ftype_v4si_v4si;
13412 break;
13413 case V2DImode:
13414 type = v2di_ftype_v2di_v2di;
13415 break;
13416 case V2DFmode:
13417 type = v2df_ftype_v2df_v2df;
13418 break;
13419 case TImode:
13420 type = ti_ftype_ti_ti;
13421 break;
13422 case V4SFmode:
13423 type = v4sf_ftype_v4sf_v4sf;
13424 break;
13425 case V8QImode:
13426 type = v8qi_ftype_v8qi_v8qi;
13427 break;
13428 case V4HImode:
13429 type = v4hi_ftype_v4hi_v4hi;
13430 break;
13431 case V2SImode:
13432 type = v2si_ftype_v2si_v2si;
13433 break;
13434 case DImode:
13435 type = di_ftype_di_di;
13436 break;
13438 default:
13439 abort ();
13442 /* Override for comparisons. */
13443 if (d->icode == CODE_FOR_maskcmpv4sf3
13444 || d->icode == CODE_FOR_maskncmpv4sf3
13445 || d->icode == CODE_FOR_vmmaskcmpv4sf3
13446 || d->icode == CODE_FOR_vmmaskncmpv4sf3)
13447 type = v4si_ftype_v4sf_v4sf;
13449 if (d->icode == CODE_FOR_maskcmpv2df3
13450 || d->icode == CODE_FOR_maskncmpv2df3
13451 || d->icode == CODE_FOR_vmmaskcmpv2df3
13452 || d->icode == CODE_FOR_vmmaskncmpv2df3)
13453 type = v2di_ftype_v2df_v2df;
13455 def_builtin (d->mask, d->name, type, d->code);
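/* Illustrative note, not part of the original source: each bdesc_2arg entry
   is registered with the signature picked by the switch above.  For example,
   an entry whose insn pattern operates on V4SFmode (such as the SSE addps
   builtin) ends up declared roughly as

       v4sf __builtin_ia32_addps (v4sf, v4sf);

   via v4sf_ftype_v4sf_v4sf, while the mask-generating compares listed in the
   overrides above instead return an integer vector.  */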
13458 /* Add the remaining MMX insns with somewhat more complicated types. */
13459 def_builtin (MASK_MMX, "__builtin_ia32_mmx_zero", di_ftype_void, IX86_BUILTIN_MMX_ZERO);
13460 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
13461 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
13462 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
13463 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
13465 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
13466 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
13467 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
13469 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
13470 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
13472 def_builtin (MASK_MMX, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
13473 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
13475 /* comi/ucomi insns. */
13476 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
13477 if (d->mask == MASK_SSE2)
13478 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
13479 else
13480 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
13482 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
13483 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
13484 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
13486 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
13487 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
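/* Illustrative sketch, not part of the original source: the two builtins
   just registered read and write the SSE control/status register, so a
   typical use from user code would be

       unsigned int csr = __builtin_ia32_stmxcsr ();
       __builtin_ia32_ldmxcsr (csr | 0x8000);

   The 0x8000 constant (MXCSR bit 15, flush-to-zero) is only an example
   value; the variable name is hypothetical.  */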
13488 def_builtin (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
13489 def_builtin (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
13490 def_builtin (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
13491 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
13492 def_builtin (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
13493 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
13494 def_builtin (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
13495 def_builtin (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
13496 def_builtin (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
13498 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pextrw", int_ftype_v4hi_int, IX86_BUILTIN_PEXTRW);
13499 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pinsrw", v4hi_ftype_v4hi_int_int, IX86_BUILTIN_PINSRW);
13501 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
13503 def_builtin (MASK_SSE, "__builtin_ia32_loadaps", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADAPS);
13504 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
13505 def_builtin (MASK_SSE, "__builtin_ia32_loadss", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADSS);
13506 def_builtin (MASK_SSE, "__builtin_ia32_storeaps", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREAPS);
13507 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
13508 def_builtin (MASK_SSE, "__builtin_ia32_storess", void_ftype_pfloat_v4sf, IX86_BUILTIN_STORESS);
13510 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
13511 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
13512 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
13513 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
13515 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
13516 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
13517 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
13518 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
13520 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
13522 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
13524 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
13525 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
13526 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
13527 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
13528 def_builtin (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
13529 def_builtin (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
13531 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
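/* Illustrative sketch, not part of the original source: the shufps builtin
   registered above takes the shuffle selector as an 8-bit immediate, so a
   call such as

       r = __builtin_ia32_shufps (a, b, 0x1B);

   picks two elements from A and two from B according to the four 2-bit
   fields of the mask.  The variable names are hypothetical; the expander
   below (IX86_BUILTIN_SHUFPS) rejects a non-immediate third argument.  */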
13533 /* Original 3DNow! */
13534 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
13535 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
13536 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
13537 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
13538 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
13539 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
13540 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
13541 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
13542 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
13543 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
13544 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
13545 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
13546 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
13547 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
13548 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
13549 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
13550 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
13551 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
13552 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
13553 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
13555 /* 3DNow! extension as used in the Athlon CPU. */
13556 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
13557 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
13558 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
13559 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
13560 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
13561 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
13563 def_builtin (MASK_SSE, "__builtin_ia32_setzerops", v4sf_ftype_void, IX86_BUILTIN_SSE_ZERO);
13565 /* SSE2 */
13566 def_builtin (MASK_SSE2, "__builtin_ia32_pextrw128", int_ftype_v8hi_int, IX86_BUILTIN_PEXTRW128);
13567 def_builtin (MASK_SSE2, "__builtin_ia32_pinsrw128", v8hi_ftype_v8hi_int_int, IX86_BUILTIN_PINSRW128);
13569 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
13570 def_builtin (MASK_SSE2, "__builtin_ia32_movq2dq", v2di_ftype_di, IX86_BUILTIN_MOVQ2DQ);
13571 def_builtin (MASK_SSE2, "__builtin_ia32_movdq2q", di_ftype_v2di, IX86_BUILTIN_MOVDQ2Q);
13573 def_builtin (MASK_SSE2, "__builtin_ia32_loadapd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADAPD);
13574 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
13575 def_builtin (MASK_SSE2, "__builtin_ia32_loadsd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADSD);
13576 def_builtin (MASK_SSE2, "__builtin_ia32_storeapd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREAPD);
13577 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
13578 def_builtin (MASK_SSE2, "__builtin_ia32_storesd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORESD);
13580 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADHPD);
13581 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pv2si, IX86_BUILTIN_LOADLPD);
13582 def_builtin (MASK_SSE2, "__builtin_ia32_storehpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STOREHPD);
13583 def_builtin (MASK_SSE2, "__builtin_ia32_storelpd", void_ftype_pv2si_v2df, IX86_BUILTIN_STORELPD);
13585 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
13586 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
13587 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
13588 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
13589 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
13591 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
13592 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
13593 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
13594 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
13596 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
13597 def_builtin (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
13599 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
13601 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
13602 def_builtin (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
13604 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
13605 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
13606 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
13607 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
13608 def_builtin (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
13610 def_builtin (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
13612 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
13613 def_builtin (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
13614 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
13615 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
13617 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
13618 def_builtin (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
13619 def_builtin (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
13621 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
13622 def_builtin (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
13623 def_builtin (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
13624 def_builtin (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
13626 def_builtin (MASK_SSE2, "__builtin_ia32_setpd1", v2df_ftype_double, IX86_BUILTIN_SETPD1);
13627 def_builtin (MASK_SSE2, "__builtin_ia32_setpd", v2df_ftype_double_double, IX86_BUILTIN_SETPD);
13628 def_builtin (MASK_SSE2, "__builtin_ia32_setzeropd", ti_ftype_void, IX86_BUILTIN_CLRPD);
13629 def_builtin (MASK_SSE2, "__builtin_ia32_loadpd1", v2df_ftype_pcdouble, IX86_BUILTIN_LOADPD1);
13630 def_builtin (MASK_SSE2, "__builtin_ia32_loadrpd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADRPD);
13631 def_builtin (MASK_SSE2, "__builtin_ia32_storepd1", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREPD1);
13632 def_builtin (MASK_SSE2, "__builtin_ia32_storerpd", void_ftype_pdouble_v2df, IX86_BUILTIN_STORERPD);
13634 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
13635 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
13636 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
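/* Illustrative sketch, not part of the original source: the cache-control
   builtins above combine naturally in user code, e.g.

       __builtin_ia32_clflush (p);
       __builtin_ia32_mfence ();

   where p is any pointer (void_ftype_pcvoid above) and the fence builtins
   take no arguments; p is a hypothetical variable.  */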
13638 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqa", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQA);
13639 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
13640 def_builtin (MASK_SSE2, "__builtin_ia32_loadd", v4si_ftype_pcint, IX86_BUILTIN_LOADD);
13641 def_builtin (MASK_SSE2, "__builtin_ia32_storedqa", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQA);
13642 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
13643 def_builtin (MASK_SSE2, "__builtin_ia32_stored", void_ftype_pcint_v4si, IX86_BUILTIN_STORED);
13644 def_builtin (MASK_SSE2, "__builtin_ia32_movq", v2di_ftype_v2di, IX86_BUILTIN_MOVQ);
13646 def_builtin (MASK_SSE, "__builtin_ia32_setzero128", v2di_ftype_void, IX86_BUILTIN_CLRTI);
13648 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
13649 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
13650 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
13652 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
13653 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
13654 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
13656 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
13657 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
13659 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
13660 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
13661 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
13662 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
13664 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
13665 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
13666 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
13667 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
13669 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
13670 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
13672 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
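/* Illustrative note, not part of the original source: most of the 128-bit
   shifts are registered twice above -- once taking the count in an XMM
   register (the ..._v2di signatures, e.g. __builtin_ia32_psllw128) and once
   taking an integer immediate (the ..._int signatures ending in "i128",
   e.g. __builtin_ia32_psllwi128).  The intrinsic headers pick whichever
   form matches the user-visible shift-by-register/shift-by-immediate
   split.  */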
13674 /* Prescott New Instructions. */
13675 def_builtin (MASK_PNI, "__builtin_ia32_monitor",
13676 void_ftype_pcvoid_unsigned_unsigned,
13677 IX86_BUILTIN_MONITOR);
13678 def_builtin (MASK_PNI, "__builtin_ia32_mwait",
13679 void_ftype_unsigned_unsigned,
13680 IX86_BUILTIN_MWAIT);
13681 def_builtin (MASK_PNI, "__builtin_ia32_movshdup",
13682 v4sf_ftype_v4sf,
13683 IX86_BUILTIN_MOVSHDUP);
13684 def_builtin (MASK_PNI, "__builtin_ia32_movsldup",
13685 v4sf_ftype_v4sf,
13686 IX86_BUILTIN_MOVSLDUP);
13687 def_builtin (MASK_PNI, "__builtin_ia32_lddqu",
13688 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
13689 def_builtin (MASK_PNI, "__builtin_ia32_loadddup",
13690 v2df_ftype_pcdouble, IX86_BUILTIN_LOADDDUP);
13691 def_builtin (MASK_PNI, "__builtin_ia32_movddup",
13692 v2df_ftype_v2df, IX86_BUILTIN_MOVDDUP);
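/* Illustrative sketch, not part of the original source: the MONITOR/MWAIT
   pair registered above follows the operand order of the def_builtin types
   (address, extensions, hints for monitor; extensions, hints for mwait), so
   user code would look roughly like

       __builtin_ia32_monitor (addr, 0, 0);
       __builtin_ia32_mwait (0, 0);

   with addr a pointer to the monitored region; the zero arguments are just
   placeholder extension/hint values.  */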
13695 /* Errors in the source file can cause expand_expr to return const0_rtx
13696 where we expect a vector. To avoid crashing, use one of the vector
13697 clear instructions. */
13698 static rtx
13699 safe_vector_operand (rtx x, enum machine_mode mode)
13701 if (x != const0_rtx)
13702 return x;
13703 x = gen_reg_rtx (mode);
13705 if (VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode))
13706 emit_insn (gen_mmx_clrdi (mode == DImode ? x
13707 : gen_rtx_SUBREG (DImode, x, 0)));
13708 else
13709 emit_insn (gen_sse_clrv4sf (mode == V4SFmode ? x
13710 : gen_rtx_SUBREG (V4SFmode, x, 0),
13711 CONST0_RTX (V4SFmode)));
13712 return x;
13715 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
13717 static rtx
13718 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
13720 rtx pat;
13721 tree arg0 = TREE_VALUE (arglist);
13722 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13723 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13724 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13725 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13726 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13727 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13729 if (VECTOR_MODE_P (mode0))
13730 op0 = safe_vector_operand (op0, mode0);
13731 if (VECTOR_MODE_P (mode1))
13732 op1 = safe_vector_operand (op1, mode1);
13734 if (! target
13735 || GET_MODE (target) != tmode
13736 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13737 target = gen_reg_rtx (tmode);
13739 if (GET_MODE (op1) == SImode && mode1 == TImode)
13741 rtx x = gen_reg_rtx (V4SImode);
13742 emit_insn (gen_sse2_loadd (x, op1));
13743 op1 = gen_lowpart (TImode, x);
13746 /* In case the insn wants input operands in modes different from
13747 the result, abort. */
13748 if ((GET_MODE (op0) != mode0 && GET_MODE (op0) != VOIDmode)
13749 || (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode))
13750 abort ();
13752 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13753 op0 = copy_to_mode_reg (mode0, op0);
13754 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13755 op1 = copy_to_mode_reg (mode1, op1);
13757 /* In the commutative cases, both op0 and op1 are nonimmediate_operand,
13758 yet one of the two must not be a memory operand. This is normally
13759 enforced by expanders, but we didn't bother to create one here. */
13760 if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
13761 op0 = copy_to_mode_reg (mode0, op0);
13763 pat = GEN_FCN (icode) (target, op0, op1);
13764 if (! pat)
13765 return 0;
13766 emit_insn (pat);
13767 return target;
13770 /* Subroutine of ix86_expand_builtin to take care of stores. */
13772 static rtx
13773 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
13775 rtx pat;
13776 tree arg0 = TREE_VALUE (arglist);
13777 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13778 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13779 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13780 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
13781 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
13783 if (VECTOR_MODE_P (mode1))
13784 op1 = safe_vector_operand (op1, mode1);
13786 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13787 op1 = copy_to_mode_reg (mode1, op1);
13789 pat = GEN_FCN (icode) (op0, op1);
13790 if (pat)
13791 emit_insn (pat);
13792 return 0;
13795 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
13797 static rtx
13798 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
13799 rtx target, int do_load)
13801 rtx pat;
13802 tree arg0 = TREE_VALUE (arglist);
13803 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13804 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13805 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13807 if (! target
13808 || GET_MODE (target) != tmode
13809 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13810 target = gen_reg_rtx (tmode);
13811 if (do_load)
13812 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13813 else
13815 if (VECTOR_MODE_P (mode0))
13816 op0 = safe_vector_operand (op0, mode0);
13818 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13819 op0 = copy_to_mode_reg (mode0, op0);
13822 pat = GEN_FCN (icode) (target, op0);
13823 if (! pat)
13824 return 0;
13825 emit_insn (pat);
13826 return target;
13829 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
13830 sqrtss, rsqrtss, rcpss. */
13832 static rtx
13833 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
13835 rtx pat;
13836 tree arg0 = TREE_VALUE (arglist);
13837 rtx op1, op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13838 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13839 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13841 if (! target
13842 || GET_MODE (target) != tmode
13843 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13844 target = gen_reg_rtx (tmode);
13846 if (VECTOR_MODE_P (mode0))
13847 op0 = safe_vector_operand (op0, mode0);
13849 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13850 op0 = copy_to_mode_reg (mode0, op0);
13852 op1 = op0;
13853 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
13854 op1 = copy_to_mode_reg (mode0, op1);
13856 pat = GEN_FCN (icode) (target, op0, op1);
13857 if (! pat)
13858 return 0;
13859 emit_insn (pat);
13860 return target;
13863 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
13865 static rtx
13866 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
13867 rtx target)
13869 rtx pat;
13870 tree arg0 = TREE_VALUE (arglist);
13871 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13872 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13873 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13874 rtx op2;
13875 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
13876 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
13877 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
13878 enum rtx_code comparison = d->comparison;
13880 if (VECTOR_MODE_P (mode0))
13881 op0 = safe_vector_operand (op0, mode0);
13882 if (VECTOR_MODE_P (mode1))
13883 op1 = safe_vector_operand (op1, mode1);
13885 /* Swap operands if we have a comparison that isn't available in
13886 hardware. */
13887 if (d->flag)
13889 rtx tmp = gen_reg_rtx (mode1);
13890 emit_move_insn (tmp, op1);
13891 op1 = op0;
13892 op0 = tmp;
13895 if (! target
13896 || GET_MODE (target) != tmode
13897 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
13898 target = gen_reg_rtx (tmode);
13900 if (! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
13901 op0 = copy_to_mode_reg (mode0, op0);
13902 if (! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
13903 op1 = copy_to_mode_reg (mode1, op1);
13905 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13906 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
13907 if (! pat)
13908 return 0;
13909 emit_insn (pat);
13910 return target;
13913 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
13915 static rtx
13916 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
13917 rtx target)
13919 rtx pat;
13920 tree arg0 = TREE_VALUE (arglist);
13921 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
13922 rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
13923 rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
13924 rtx op2;
13925 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
13926 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
13927 enum rtx_code comparison = d->comparison;
13929 if (VECTOR_MODE_P (mode0))
13930 op0 = safe_vector_operand (op0, mode0);
13931 if (VECTOR_MODE_P (mode1))
13932 op1 = safe_vector_operand (op1, mode1);
13934 /* Swap operands if we have a comparison that isn't available in
13935 hardware. */
13936 if (d->flag)
13938 rtx tmp = op1;
13939 op1 = op0;
13940 op0 = tmp;
13943 target = gen_reg_rtx (SImode);
13944 emit_move_insn (target, const0_rtx);
13945 target = gen_rtx_SUBREG (QImode, target, 0);
13947 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
13948 op0 = copy_to_mode_reg (mode0, op0);
13949 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
13950 op1 = copy_to_mode_reg (mode1, op1);
13952 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
13953 pat = GEN_FCN (d->icode) (op0, op1);
13954 if (! pat)
13955 return 0;
13956 emit_insn (pat);
13957 emit_insn (gen_rtx_SET (VOIDmode,
13958 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
13959 gen_rtx_fmt_ee (comparison, QImode,
13960 SET_DEST (pat),
13961 const0_rtx)));
13963 return SUBREG_REG (target);
13966 /* Expand an expression EXP that calls a built-in function,
13967 with result going to TARGET if that's convenient
13968 (and in mode MODE if that's convenient).
13969 SUBTARGET may be used as the target for computing one of EXP's operands.
13970 IGNORE is nonzero if the value is to be ignored. */
13972 rtx
13973 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13974 enum machine_mode mode ATTRIBUTE_UNUSED,
13975 int ignore ATTRIBUTE_UNUSED)
13977 const struct builtin_description *d;
13978 size_t i;
13979 enum insn_code icode;
13980 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
13981 tree arglist = TREE_OPERAND (exp, 1);
13982 tree arg0, arg1, arg2;
13983 rtx op0, op1, op2, pat;
13984 enum machine_mode tmode, mode0, mode1, mode2;
13985 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
13987 switch (fcode)
13989 case IX86_BUILTIN_EMMS:
13990 emit_insn (gen_emms ());
13991 return 0;
13993 case IX86_BUILTIN_SFENCE:
13994 emit_insn (gen_sfence ());
13995 return 0;
13997 case IX86_BUILTIN_PEXTRW:
13998 case IX86_BUILTIN_PEXTRW128:
13999 icode = (fcode == IX86_BUILTIN_PEXTRW
14000 ? CODE_FOR_mmx_pextrw
14001 : CODE_FOR_sse2_pextrw);
14002 arg0 = TREE_VALUE (arglist);
14003 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14004 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14005 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14006 tmode = insn_data[icode].operand[0].mode;
14007 mode0 = insn_data[icode].operand[1].mode;
14008 mode1 = insn_data[icode].operand[2].mode;
14010 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14011 op0 = copy_to_mode_reg (mode0, op0);
14012 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14014 error ("selector must be an integer constant in the range 0..%i",
14015 fcode == IX86_BUILTIN_PEXTRW ? 3:7);
14016 return gen_reg_rtx (tmode);
14018 if (target == 0
14019 || GET_MODE (target) != tmode
14020 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14021 target = gen_reg_rtx (tmode);
14022 pat = GEN_FCN (icode) (target, op0, op1);
14023 if (! pat)
14024 return 0;
14025 emit_insn (pat);
14026 return target;
14028 case IX86_BUILTIN_PINSRW:
14029 case IX86_BUILTIN_PINSRW128:
14030 icode = (fcode == IX86_BUILTIN_PINSRW
14031 ? CODE_FOR_mmx_pinsrw
14032 : CODE_FOR_sse2_pinsrw);
14033 arg0 = TREE_VALUE (arglist);
14034 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14035 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14036 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14037 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14038 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14039 tmode = insn_data[icode].operand[0].mode;
14040 mode0 = insn_data[icode].operand[1].mode;
14041 mode1 = insn_data[icode].operand[2].mode;
14042 mode2 = insn_data[icode].operand[3].mode;
14044 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14045 op0 = copy_to_mode_reg (mode0, op0);
14046 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14047 op1 = copy_to_mode_reg (mode1, op1);
14048 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14050 error ("selector must be an integer constant in the range 0..%i",
14051 fcode == IX86_BUILTIN_PINSRW ? 15:255);
14052 return const0_rtx;
14054 if (target == 0
14055 || GET_MODE (target) != tmode
14056 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14057 target = gen_reg_rtx (tmode);
14058 pat = GEN_FCN (icode) (target, op0, op1, op2);
14059 if (! pat)
14060 return 0;
14061 emit_insn (pat);
14062 return target;
14064 case IX86_BUILTIN_MASKMOVQ:
14065 case IX86_BUILTIN_MASKMOVDQU:
14066 icode = (fcode == IX86_BUILTIN_MASKMOVQ
14067 ? (TARGET_64BIT ? CODE_FOR_mmx_maskmovq_rex : CODE_FOR_mmx_maskmovq)
14068 : (TARGET_64BIT ? CODE_FOR_sse2_maskmovdqu_rex64
14069 : CODE_FOR_sse2_maskmovdqu));
14070 /* Note the arg order is different from the operand order. */
14071 arg1 = TREE_VALUE (arglist);
14072 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
14073 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14074 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14075 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14076 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14077 mode0 = insn_data[icode].operand[0].mode;
14078 mode1 = insn_data[icode].operand[1].mode;
14079 mode2 = insn_data[icode].operand[2].mode;
14081 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14082 op0 = copy_to_mode_reg (mode0, op0);
14083 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14084 op1 = copy_to_mode_reg (mode1, op1);
14085 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
14086 op2 = copy_to_mode_reg (mode2, op2);
14087 pat = GEN_FCN (icode) (op0, op1, op2);
14088 if (! pat)
14089 return 0;
14090 emit_insn (pat);
14091 return 0;
14093 case IX86_BUILTIN_SQRTSS:
14094 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv4sf2, arglist, target);
14095 case IX86_BUILTIN_RSQRTSS:
14096 return ix86_expand_unop1_builtin (CODE_FOR_vmrsqrtv4sf2, arglist, target);
14097 case IX86_BUILTIN_RCPSS:
14098 return ix86_expand_unop1_builtin (CODE_FOR_vmrcpv4sf2, arglist, target);
14100 case IX86_BUILTIN_LOADAPS:
14101 return ix86_expand_unop_builtin (CODE_FOR_sse_movaps, arglist, target, 1);
14103 case IX86_BUILTIN_LOADUPS:
14104 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
14106 case IX86_BUILTIN_STOREAPS:
14107 return ix86_expand_store_builtin (CODE_FOR_sse_movaps, arglist);
14109 case IX86_BUILTIN_STOREUPS:
14110 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
14112 case IX86_BUILTIN_LOADSS:
14113 return ix86_expand_unop_builtin (CODE_FOR_sse_loadss, arglist, target, 1);
14115 case IX86_BUILTIN_STORESS:
14116 return ix86_expand_store_builtin (CODE_FOR_sse_storess, arglist);
14118 case IX86_BUILTIN_LOADHPS:
14119 case IX86_BUILTIN_LOADLPS:
14120 case IX86_BUILTIN_LOADHPD:
14121 case IX86_BUILTIN_LOADLPD:
14122 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_movhps
14123 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_movlps
14124 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_movhpd
14125 : CODE_FOR_sse2_movsd);
14126 arg0 = TREE_VALUE (arglist);
14127 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14128 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14129 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14130 tmode = insn_data[icode].operand[0].mode;
14131 mode0 = insn_data[icode].operand[1].mode;
14132 mode1 = insn_data[icode].operand[2].mode;
14134 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14135 op0 = copy_to_mode_reg (mode0, op0);
14136 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
14137 if (target == 0
14138 || GET_MODE (target) != tmode
14139 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14140 target = gen_reg_rtx (tmode);
14141 pat = GEN_FCN (icode) (target, op0, op1);
14142 if (! pat)
14143 return 0;
14144 emit_insn (pat);
14145 return target;
14147 case IX86_BUILTIN_STOREHPS:
14148 case IX86_BUILTIN_STORELPS:
14149 case IX86_BUILTIN_STOREHPD:
14150 case IX86_BUILTIN_STORELPD:
14151 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_movhps
14152 : fcode == IX86_BUILTIN_STORELPS ? CODE_FOR_sse_movlps
14153 : fcode == IX86_BUILTIN_STOREHPD ? CODE_FOR_sse2_movhpd
14154 : CODE_FOR_sse2_movsd);
14155 arg0 = TREE_VALUE (arglist);
14156 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14157 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14158 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14159 mode0 = insn_data[icode].operand[1].mode;
14160 mode1 = insn_data[icode].operand[2].mode;
14162 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
14163 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14164 op1 = copy_to_mode_reg (mode1, op1);
14166 pat = GEN_FCN (icode) (op0, op0, op1);
14167 if (! pat)
14168 return 0;
14169 emit_insn (pat);
14170 return 0;
14172 case IX86_BUILTIN_MOVNTPS:
14173 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
14174 case IX86_BUILTIN_MOVNTQ:
14175 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
14177 case IX86_BUILTIN_LDMXCSR:
14178 op0 = expand_expr (TREE_VALUE (arglist), NULL_RTX, VOIDmode, 0);
14179 target = assign_386_stack_local (SImode, 0);
14180 emit_move_insn (target, op0);
14181 emit_insn (gen_ldmxcsr (target));
14182 return 0;
14184 case IX86_BUILTIN_STMXCSR:
14185 target = assign_386_stack_local (SImode, 0);
14186 emit_insn (gen_stmxcsr (target));
14187 return copy_to_mode_reg (SImode, target);
14189 case IX86_BUILTIN_SHUFPS:
14190 case IX86_BUILTIN_SHUFPD:
14191 icode = (fcode == IX86_BUILTIN_SHUFPS
14192 ? CODE_FOR_sse_shufps
14193 : CODE_FOR_sse2_shufpd);
14194 arg0 = TREE_VALUE (arglist);
14195 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14196 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14197 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14198 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14199 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14200 tmode = insn_data[icode].operand[0].mode;
14201 mode0 = insn_data[icode].operand[1].mode;
14202 mode1 = insn_data[icode].operand[2].mode;
14203 mode2 = insn_data[icode].operand[3].mode;
14205 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14206 op0 = copy_to_mode_reg (mode0, op0);
14207 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14208 op1 = copy_to_mode_reg (mode1, op1);
14209 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14211 /* @@@ better error message */
14212 error ("mask must be an immediate");
14213 return gen_reg_rtx (tmode);
14215 if (target == 0
14216 || GET_MODE (target) != tmode
14217 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14218 target = gen_reg_rtx (tmode);
14219 pat = GEN_FCN (icode) (target, op0, op1, op2);
14220 if (! pat)
14221 return 0;
14222 emit_insn (pat);
14223 return target;
14225 case IX86_BUILTIN_PSHUFW:
14226 case IX86_BUILTIN_PSHUFD:
14227 case IX86_BUILTIN_PSHUFHW:
14228 case IX86_BUILTIN_PSHUFLW:
14229 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
14230 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
14231 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
14232 : CODE_FOR_mmx_pshufw);
14233 arg0 = TREE_VALUE (arglist);
14234 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14235 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14236 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14237 tmode = insn_data[icode].operand[0].mode;
14238 mode1 = insn_data[icode].operand[1].mode;
14239 mode2 = insn_data[icode].operand[2].mode;
14241 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14242 op0 = copy_to_mode_reg (mode1, op0);
14243 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14245 /* @@@ better error message */
14246 error ("mask must be an immediate");
14247 return const0_rtx;
14249 if (target == 0
14250 || GET_MODE (target) != tmode
14251 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14252 target = gen_reg_rtx (tmode);
14253 pat = GEN_FCN (icode) (target, op0, op1);
14254 if (! pat)
14255 return 0;
14256 emit_insn (pat);
14257 return target;
14259 case IX86_BUILTIN_PSLLDQI128:
14260 case IX86_BUILTIN_PSRLDQI128:
14261 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
14262 : CODE_FOR_sse2_lshrti3);
14263 arg0 = TREE_VALUE (arglist);
14264 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14265 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14266 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14267 tmode = insn_data[icode].operand[0].mode;
14268 mode1 = insn_data[icode].operand[1].mode;
14269 mode2 = insn_data[icode].operand[2].mode;
14271 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
14273 op0 = copy_to_reg (op0);
14274 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
14276 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
14278 error ("shift must be an immediate");
14279 return const0_rtx;
14281 target = gen_reg_rtx (V2DImode);
14282 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
14283 if (! pat)
14284 return 0;
14285 emit_insn (pat);
14286 return target;
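/* Illustrative note, not part of the original source: the ashlti3/lshrti3
   patterns used just above shift the full TImode value by a bit count, so
   the immediate given to __builtin_ia32_pslldqi128/__builtin_ia32_psrldqi128
   is expected to be expressed in bits; the byte-oriented intrinsic wrappers
   are assumed to scale their byte count accordingly.  */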
14288 case IX86_BUILTIN_FEMMS:
14289 emit_insn (gen_femms ());
14290 return NULL_RTX;
14292 case IX86_BUILTIN_PAVGUSB:
14293 return ix86_expand_binop_builtin (CODE_FOR_pavgusb, arglist, target);
14295 case IX86_BUILTIN_PF2ID:
14296 return ix86_expand_unop_builtin (CODE_FOR_pf2id, arglist, target, 0);
14298 case IX86_BUILTIN_PFACC:
14299 return ix86_expand_binop_builtin (CODE_FOR_pfacc, arglist, target);
14301 case IX86_BUILTIN_PFADD:
14302 return ix86_expand_binop_builtin (CODE_FOR_addv2sf3, arglist, target);
14304 case IX86_BUILTIN_PFCMPEQ:
14305 return ix86_expand_binop_builtin (CODE_FOR_eqv2sf3, arglist, target);
14307 case IX86_BUILTIN_PFCMPGE:
14308 return ix86_expand_binop_builtin (CODE_FOR_gev2sf3, arglist, target);
14310 case IX86_BUILTIN_PFCMPGT:
14311 return ix86_expand_binop_builtin (CODE_FOR_gtv2sf3, arglist, target);
14313 case IX86_BUILTIN_PFMAX:
14314 return ix86_expand_binop_builtin (CODE_FOR_pfmaxv2sf3, arglist, target);
14316 case IX86_BUILTIN_PFMIN:
14317 return ix86_expand_binop_builtin (CODE_FOR_pfminv2sf3, arglist, target);
14319 case IX86_BUILTIN_PFMUL:
14320 return ix86_expand_binop_builtin (CODE_FOR_mulv2sf3, arglist, target);
14322 case IX86_BUILTIN_PFRCP:
14323 return ix86_expand_unop_builtin (CODE_FOR_pfrcpv2sf2, arglist, target, 0);
14325 case IX86_BUILTIN_PFRCPIT1:
14326 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit1v2sf3, arglist, target);
14328 case IX86_BUILTIN_PFRCPIT2:
14329 return ix86_expand_binop_builtin (CODE_FOR_pfrcpit2v2sf3, arglist, target);
14331 case IX86_BUILTIN_PFRSQIT1:
14332 return ix86_expand_binop_builtin (CODE_FOR_pfrsqit1v2sf3, arglist, target);
14334 case IX86_BUILTIN_PFRSQRT:
14335 return ix86_expand_unop_builtin (CODE_FOR_pfrsqrtv2sf2, arglist, target, 0);
14337 case IX86_BUILTIN_PFSUB:
14338 return ix86_expand_binop_builtin (CODE_FOR_subv2sf3, arglist, target);
14340 case IX86_BUILTIN_PFSUBR:
14341 return ix86_expand_binop_builtin (CODE_FOR_subrv2sf3, arglist, target);
14343 case IX86_BUILTIN_PI2FD:
14344 return ix86_expand_unop_builtin (CODE_FOR_floatv2si2, arglist, target, 0);
14346 case IX86_BUILTIN_PMULHRW:
14347 return ix86_expand_binop_builtin (CODE_FOR_pmulhrwv4hi3, arglist, target);
14349 case IX86_BUILTIN_PF2IW:
14350 return ix86_expand_unop_builtin (CODE_FOR_pf2iw, arglist, target, 0);
14352 case IX86_BUILTIN_PFNACC:
14353 return ix86_expand_binop_builtin (CODE_FOR_pfnacc, arglist, target);
14355 case IX86_BUILTIN_PFPNACC:
14356 return ix86_expand_binop_builtin (CODE_FOR_pfpnacc, arglist, target);
14358 case IX86_BUILTIN_PI2FW:
14359 return ix86_expand_unop_builtin (CODE_FOR_pi2fw, arglist, target, 0);
14361 case IX86_BUILTIN_PSWAPDSI:
14362 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2si2, arglist, target, 0);
14364 case IX86_BUILTIN_PSWAPDSF:
14365 return ix86_expand_unop_builtin (CODE_FOR_pswapdv2sf2, arglist, target, 0);
14367 case IX86_BUILTIN_SSE_ZERO:
14368 target = gen_reg_rtx (V4SFmode);
14369 emit_insn (gen_sse_clrv4sf (target, CONST0_RTX (V4SFmode)));
14370 return target;
14372 case IX86_BUILTIN_MMX_ZERO:
14373 target = gen_reg_rtx (DImode);
14374 emit_insn (gen_mmx_clrdi (target));
14375 return target;
14377 case IX86_BUILTIN_CLRTI:
14378 target = gen_reg_rtx (V2DImode);
14379 emit_insn (gen_sse2_clrti (simplify_gen_subreg (TImode, target, V2DImode, 0)));
14380 return target;
14383 case IX86_BUILTIN_SQRTSD:
14384 return ix86_expand_unop1_builtin (CODE_FOR_vmsqrtv2df2, arglist, target);
14385 case IX86_BUILTIN_LOADAPD:
14386 return ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist, target, 1);
14387 case IX86_BUILTIN_LOADUPD:
14388 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
14390 case IX86_BUILTIN_STOREAPD:
14391 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
14392 case IX86_BUILTIN_STOREUPD:
14393 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
14395 case IX86_BUILTIN_LOADSD:
14396 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist, target, 1);
14398 case IX86_BUILTIN_STORESD:
14399 return ix86_expand_store_builtin (CODE_FOR_sse2_storesd, arglist);
14401 case IX86_BUILTIN_SETPD1:
14402 target = assign_386_stack_local (DFmode, 0);
14403 arg0 = TREE_VALUE (arglist);
14404 emit_move_insn (adjust_address (target, DFmode, 0),
14405 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
14406 op0 = gen_reg_rtx (V2DFmode);
14407 emit_insn (gen_sse2_loadsd (op0, adjust_address (target, V2DFmode, 0)));
14408 emit_insn (gen_sse2_shufpd (op0, op0, op0, GEN_INT (0)));
14409 return op0;
14411 case IX86_BUILTIN_SETPD:
14412 target = assign_386_stack_local (V2DFmode, 0);
14413 arg0 = TREE_VALUE (arglist);
14414 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14415 emit_move_insn (adjust_address (target, DFmode, 0),
14416 expand_expr (arg0, NULL_RTX, VOIDmode, 0));
14417 emit_move_insn (adjust_address (target, DFmode, 8),
14418 expand_expr (arg1, NULL_RTX, VOIDmode, 0));
14419 op0 = gen_reg_rtx (V2DFmode);
14420 emit_insn (gen_sse2_movapd (op0, target));
14421 return op0;
14423 case IX86_BUILTIN_LOADRPD:
14424 target = ix86_expand_unop_builtin (CODE_FOR_sse2_movapd, arglist,
14425 gen_reg_rtx (V2DFmode), 1);
14426 emit_insn (gen_sse2_shufpd (target, target, target, GEN_INT (1)));
14427 return target;
14429 case IX86_BUILTIN_LOADPD1:
14430 target = ix86_expand_unop_builtin (CODE_FOR_sse2_loadsd, arglist,
14431 gen_reg_rtx (V2DFmode), 1);
14432 emit_insn (gen_sse2_shufpd (target, target, target, const0_rtx));
14433 return target;
14435 case IX86_BUILTIN_STOREPD1:
14436 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
14437 case IX86_BUILTIN_STORERPD:
14438 return ix86_expand_store_builtin (CODE_FOR_sse2_movapd, arglist);
14440 case IX86_BUILTIN_CLRPD:
14441 target = gen_reg_rtx (V2DFmode);
14442 emit_insn (gen_sse_clrv2df (target));
14443 return target;
14445 case IX86_BUILTIN_MFENCE:
14446 emit_insn (gen_sse2_mfence ());
14447 return 0;
14448 case IX86_BUILTIN_LFENCE:
14449 emit_insn (gen_sse2_lfence ());
14450 return 0;
14452 case IX86_BUILTIN_CLFLUSH:
14453 arg0 = TREE_VALUE (arglist);
14454 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14455 icode = CODE_FOR_sse2_clflush;
14456 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
14457 op0 = copy_to_mode_reg (Pmode, op0);
14459 emit_insn (gen_sse2_clflush (op0));
14460 return 0;
14462 case IX86_BUILTIN_MOVNTPD:
14463 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
14464 case IX86_BUILTIN_MOVNTDQ:
14465 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
14466 case IX86_BUILTIN_MOVNTI:
14467 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
14469 case IX86_BUILTIN_LOADDQA:
14470 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqa, arglist, target, 1);
14471 case IX86_BUILTIN_LOADDQU:
14472 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
14473 case IX86_BUILTIN_LOADD:
14474 return ix86_expand_unop_builtin (CODE_FOR_sse2_loadd, arglist, target, 1);
14476 case IX86_BUILTIN_STOREDQA:
14477 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqa, arglist);
14478 case IX86_BUILTIN_STOREDQU:
14479 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
14480 case IX86_BUILTIN_STORED:
14481 return ix86_expand_store_builtin (CODE_FOR_sse2_stored, arglist);
14483 case IX86_BUILTIN_MONITOR:
14484 arg0 = TREE_VALUE (arglist);
14485 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14486 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
14487 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14488 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14489 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
14490 if (!REG_P (op0))
14491 op0 = copy_to_mode_reg (SImode, op0);
14492 if (!REG_P (op1))
14493 op1 = copy_to_mode_reg (SImode, op1);
14494 if (!REG_P (op2))
14495 op2 = copy_to_mode_reg (SImode, op2);
14496 emit_insn (gen_monitor (op0, op1, op2));
14497 return 0;
14499 case IX86_BUILTIN_MWAIT:
14500 arg0 = TREE_VALUE (arglist);
14501 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
14502 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
14503 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
14504 if (!REG_P (op0))
14505 op0 = copy_to_mode_reg (SImode, op0);
14506 if (!REG_P (op1))
14507 op1 = copy_to_mode_reg (SImode, op1);
14508 emit_insn (gen_mwait (op0, op1));
14509 return 0;
14511 case IX86_BUILTIN_LOADDDUP:
14512 return ix86_expand_unop_builtin (CODE_FOR_loadddup, arglist, target, 1);
14514 case IX86_BUILTIN_LDDQU:
14515 return ix86_expand_unop_builtin (CODE_FOR_lddqu, arglist, target,
14516 1);
14518 default:
14519 break;
14522 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14523 if (d->code == fcode)
14525 /* Compares are treated specially. */
14526 if (d->icode == CODE_FOR_maskcmpv4sf3
14527 || d->icode == CODE_FOR_vmmaskcmpv4sf3
14528 || d->icode == CODE_FOR_maskncmpv4sf3
14529 || d->icode == CODE_FOR_vmmaskncmpv4sf3
14530 || d->icode == CODE_FOR_maskcmpv2df3
14531 || d->icode == CODE_FOR_vmmaskcmpv2df3
14532 || d->icode == CODE_FOR_maskncmpv2df3
14533 || d->icode == CODE_FOR_vmmaskncmpv2df3)
14534 return ix86_expand_sse_compare (d, arglist, target);
14536 return ix86_expand_binop_builtin (d->icode, arglist, target);
14539 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
14540 if (d->code == fcode)
14541 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
14543 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
14544 if (d->code == fcode)
14545 return ix86_expand_sse_comi (d, arglist, target);
14547 /* @@@ Should really do something sensible here. */
14548 return 0;
14551 /* Store OPERAND to memory after reload is completed. This means
14552 that we can't easily use assign_stack_local. */
14553 rtx
14554 ix86_force_to_memory (enum machine_mode mode, rtx operand)
14556 rtx result;
14557 if (!reload_completed)
14558 abort ();
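/* Descriptive note, not part of the original source: with a red zone
   (x86-64 ABI), the area just below the stack pointer may be used without
   adjusting it, so a plain store at sp - RED_ZONE_SIZE suffices; the
   non-red-zone paths below instead push the operand via PRE_DEC stores
   through the stack pointer.  */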
14559 if (TARGET_RED_ZONE)
14561 result = gen_rtx_MEM (mode,
14562 gen_rtx_PLUS (Pmode,
14563 stack_pointer_rtx,
14564 GEN_INT (-RED_ZONE_SIZE)));
14565 emit_move_insn (result, operand);
14567 else if (!TARGET_RED_ZONE && TARGET_64BIT)
14569 switch (mode)
14571 case HImode:
14572 case SImode:
14573 operand = gen_lowpart (DImode, operand);
14574 /* FALLTHRU */
14575 case DImode:
14576 emit_insn (
14577 gen_rtx_SET (VOIDmode,
14578 gen_rtx_MEM (DImode,
14579 gen_rtx_PRE_DEC (DImode,
14580 stack_pointer_rtx)),
14581 operand));
14582 break;
14583 default:
14584 abort ();
14586 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14588 else
14590 switch (mode)
14592 case DImode:
14594 rtx operands[2];
14595 split_di (&operand, 1, operands, operands + 1);
14596 emit_insn (
14597 gen_rtx_SET (VOIDmode,
14598 gen_rtx_MEM (SImode,
14599 gen_rtx_PRE_DEC (Pmode,
14600 stack_pointer_rtx)),
14601 operands[1]));
14602 emit_insn (
14603 gen_rtx_SET (VOIDmode,
14604 gen_rtx_MEM (SImode,
14605 gen_rtx_PRE_DEC (Pmode,
14606 stack_pointer_rtx)),
14607 operands[0]));
14609 break;
14610 case HImode:
14611 /* It is better to store HImodes as SImodes. */
14612 if (!TARGET_PARTIAL_REG_STALL)
14613 operand = gen_lowpart (SImode, operand);
14614 /* FALLTHRU */
14615 case SImode:
14616 emit_insn (
14617 gen_rtx_SET (VOIDmode,
14618 gen_rtx_MEM (GET_MODE (operand),
14619 gen_rtx_PRE_DEC (SImode,
14620 stack_pointer_rtx)),
14621 operand));
14622 break;
14623 default:
14624 abort ();
14626 result = gen_rtx_MEM (mode, stack_pointer_rtx);
14628 return result;
14631 /* Free the operand from memory. */
14632 void
14633 ix86_free_from_memory (enum machine_mode mode)
14635 if (!TARGET_RED_ZONE)
14637 int size;
14639 if (mode == DImode || TARGET_64BIT)
14640 size = 8;
14641 else if (mode == HImode && TARGET_PARTIAL_REG_STALL)
14642 size = 2;
14643 else
14644 size = 4;
14645 /* Use LEA to deallocate stack space.  In peephole2 it will be converted
14646 to a pop or add instruction if registers are available. */
14647 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14648 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
14649 GEN_INT (size))));
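/* Illustrative sketch (not part of GCC, hypothetical caller): the two helpers
   above are intended to be used as a pair after reload, e.g.

     rtx slot = ix86_force_to_memory (DImode, operand);
     ... emit insns that use SLOT as a memory operand ...
     ix86_free_from_memory (DImode);

   With a red zone the value is simply stored below the stack pointer and no
   deallocation insn is needed; otherwise the value is pushed and the LEA
   above releases the slot again.  */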
14653 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
14654 QImode must go into class Q_REGS.
14655 Narrow ALL_REGS to GENERAL_REGS.  This allows movsf and
14656 movdf to do mem-to-mem moves through integer regs. */
14657 enum reg_class
14658 ix86_preferred_reload_class (rtx x, enum reg_class class)
14660 if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
14661 return NO_REGS;
14662 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
14664 /* SSE can't load any constant directly yet. */
14665 if (SSE_CLASS_P (class))
14666 return NO_REGS;
14667 /* Floats can load 0 and 1. */
14668 if (MAYBE_FLOAT_CLASS_P (class) && standard_80387_constant_p (x))
14670 /* Limit class to non-SSE. Use GENERAL_REGS if possible. */
14671 if (MAYBE_SSE_CLASS_P (class))
14672 return (reg_class_subset_p (class, GENERAL_REGS)
14673 ? GENERAL_REGS : FLOAT_REGS);
14674 else
14675 return class;
14677 /* General regs can load everything. */
14678 if (reg_class_subset_p (class, GENERAL_REGS))
14679 return GENERAL_REGS;
14680 /* In case we haven't resolved FLOAT or SSE yet, give up. */
14681 if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
14682 return NO_REGS;
14684 if (MAYBE_MMX_CLASS_P (class) && CONSTANT_P (x))
14685 return NO_REGS;
14686 if (GET_MODE (x) == QImode && ! reg_class_subset_p (class, Q_REGS))
14687 return Q_REGS;
14688 return class;
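/* Example (illustrative only): for the CONST_DOUBLE 2.0, neither the SSE nor
   the x87 branch above applies (only constants recognized by
   standard_80387_constant_p, such as 0.0 and 1.0, are allowed), so NO_REGS
   is returned and reload places the constant in memory.  The same constant
   reloaded into GENERAL_REGS is accepted, since general regs can load
   everything.  */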
14691 /* If we are copying between general and FP registers, we need a memory
14692 location. The same is true for SSE and MMX registers.
14694 The macro can't work reliably when one of the CLASSES is a class containing
14695 registers from multiple units (SSE, MMX, integer).  We avoid this by never
14696 combining those units in a single alternative in the machine description.
14697 Ensure that this constraint holds to avoid unexpected surprises.
14699 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
14700 enforce these sanity checks. */
14702 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
14703 enum machine_mode mode, int strict)
14705 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
14706 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
14707 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
14708 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
14709 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
14710 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
14712 if (strict)
14713 abort ();
14714 else
14715 return 1;
14717 return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
14718 || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
14719 || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
14720 && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
14721 || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
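/* Example (illustrative only): copying DFmode between FLOAT_REGS and
   GENERAL_REGS (or SSE_REGS) always needs a memory intermediate, because
   FLOAT_CLASS_P differs for the two classes.  Copying SImode between
   MMX_REGS and GENERAL_REGS needs one only when direct inter-unit moves are
   disabled (!TARGET_INTER_UNIT_MOVES) and we are not optimizing for size.  */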
14723 /* Return the cost of moving data from a register in class CLASS1 to
14724 one in class CLASS2.
14726 It is not required that the cost always equal 2 when FROM is the same as TO;
14727 on some machines it is expensive to move between registers if they are not
14728 general registers. */
14730 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
14731 enum reg_class class2)
14733 /* In case we require secondary memory, compute the cost of the store followed
14734 by a load.  In order to avoid bad register allocation choices, we need
14735 this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
14737 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
14739 int cost = 1;
14741 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
14742 MEMORY_MOVE_COST (mode, class1, 1));
14743 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
14744 MEMORY_MOVE_COST (mode, class2, 1));
14746 /* In the case of copying from a general purpose register we may emit multiple
14747 stores followed by a single load, causing a memory size mismatch stall.
14748 Count this as an arbitrarily high cost of 20. */
14749 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
14750 cost += 20;
14752 /* In the case of FP/MMX moves, the registers actually overlap, and we
14753 have to switch modes in order to treat them differently. */
14754 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
14755 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
14756 cost += 20;
14758 return cost;
14761 /* Moves between the SSE/MMX and integer units are expensive. */
14762 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
14763 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
14764 return ix86_cost->mmxsse_to_integer;
14765 if (MAYBE_FLOAT_CLASS_P (class1))
14766 return ix86_cost->fp_move;
14767 if (MAYBE_SSE_CLASS_P (class1))
14768 return ix86_cost->sse_move;
14769 if (MAYBE_MMX_CLASS_P (class1))
14770 return ix86_cost->mmx_move;
14771 return 2;
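/* Worked example (illustrative only): a DFmode copy between FLOAT_REGS and
   GENERAL_REGS needs secondary memory, so its cost is
     1 + MAX (load, store) for the first class + MAX (load, store) for the
   second, plus 20 when the source class needs more hard registers than the
   destination (several narrow stores feeding one wide load would cause a
   size-mismatch stall), and plus 20 again for FP/MMX pairs, whose registers
   overlap and require a mode switch.  */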
14774 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
14776 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
14778 /* Flags, and only flags, can hold CCmode values. */
14779 if (CC_REGNO_P (regno))
14780 return GET_MODE_CLASS (mode) == MODE_CC;
14781 if (GET_MODE_CLASS (mode) == MODE_CC
14782 || GET_MODE_CLASS (mode) == MODE_RANDOM
14783 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
14784 return 0;
14785 if (FP_REGNO_P (regno))
14786 return VALID_FP_MODE_P (mode);
14787 if (SSE_REGNO_P (regno))
14788 return (TARGET_SSE ? VALID_SSE_REG_MODE (mode) : 0);
14789 if (MMX_REGNO_P (regno))
14790 return (TARGET_MMX
14791 ? VALID_MMX_REG_MODE (mode) || VALID_MMX_REG_MODE_3DNOW (mode) : 0);
14792 /* We handle both integers and floats in the general purpose registers.
14793 In the future we should be able to handle vector modes as well. */
14794 if (!VALID_INT_MODE_P (mode) && !VALID_FP_MODE_P (mode))
14795 return 0;
14796 /* Take care of QImode values: they can live in non-QI regs, but then
14797 they cause partial register stalls. */
14798 if (regno < 4 || mode != QImode || TARGET_64BIT)
14799 return 1;
14800 return reload_in_progress || reload_completed || !TARGET_PARTIAL_REG_STALL;
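/* Example (illustrative only): QImode always fits in %eax..%ebx (regno < 4)
   and, in 64-bit mode, in any general register.  In 32-bit mode a QImode
   value in e.g. %esi is accepted only during or after reload, or when the
   target does not suffer from partial register stalls.  */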
14803 /* Return the cost of moving data of mode M between a
14804 register and memory. A value of 2 is the default; this cost is
14805 relative to those in `REGISTER_MOVE_COST'.
14807 If moving between registers and memory is more expensive than
14808 between two registers, you should define this macro to express the
14809 relative cost.
14811 Also model the increased cost of moving QImode registers in
14812 non-Q_REGS classes.
14815 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
14817 if (FLOAT_CLASS_P (class))
14819 int index;
14820 switch (mode)
14822 case SFmode:
14823 index = 0;
14824 break;
14825 case DFmode:
14826 index = 1;
14827 break;
14828 case XFmode:
14829 index = 2;
14830 break;
14831 default:
14832 return 100;
14834 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
14836 if (SSE_CLASS_P (class))
14838 int index;
14839 switch (GET_MODE_SIZE (mode))
14841 case 4:
14842 index = 0;
14843 break;
14844 case 8:
14845 index = 1;
14846 break;
14847 case 16:
14848 index = 2;
14849 break;
14850 default:
14851 return 100;
14853 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
14855 if (MMX_CLASS_P (class))
14857 int index;
14858 switch (GET_MODE_SIZE (mode))
14860 case 4:
14861 index = 0;
14862 break;
14863 case 8:
14864 index = 1;
14865 break;
14866 default:
14867 return 100;
14869 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
14871 switch (GET_MODE_SIZE (mode))
14873 case 1:
14874 if (in)
14875 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
14876 : ix86_cost->movzbl_load);
14877 else
14878 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
14879 : ix86_cost->int_store[0] + 4);
14880 break;
14881 case 2:
14882 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
14883 default:
14884 /* Compute the number of 32-bit moves needed.  TFmode is moved as XFmode. */
14885 if (mode == TFmode)
14886 mode = XFmode;
14887 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
14888 * (((int) GET_MODE_SIZE (mode)
14889 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
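/* Worked example (illustrative only): loading a single byte into a class
   outside Q_REGS is charged movzbl_load (the value has to be loaded with a
   zero extension), and storing it costs int_store[0] + 4; a plain SImode
   load or store in GENERAL_REGS costs int_load[2] / int_store[2], scaled by
   the number of words for wider modes.  */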
14893 /* Compute a (partial) cost for rtx X. Return true if the complete
14894 cost has been computed, and false if subexpressions should be
14895 scanned. In either case, *TOTAL contains the cost result. */
14897 static bool
14898 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
14900 enum machine_mode mode = GET_MODE (x);
14902 switch (code)
14904 case CONST_INT:
14905 case CONST:
14906 case LABEL_REF:
14907 case SYMBOL_REF:
14908 if (TARGET_64BIT && !x86_64_sign_extended_value (x))
14909 *total = 3;
14910 else if (TARGET_64BIT && !x86_64_zero_extended_value (x))
14911 *total = 2;
14912 else if (flag_pic && SYMBOLIC_CONST (x)
14913 && (!TARGET_64BIT
14914 || (GET_CODE (x) != LABEL_REF
14915 && (GET_CODE (x) != SYMBOL_REF
14916 || !SYMBOL_REF_LOCAL_P (x)))))
14917 *total = 1;
14918 else
14919 *total = 0;
14920 return true;
14922 case CONST_DOUBLE:
14923 if (mode == VOIDmode)
14924 *total = 0;
14925 else
14926 switch (standard_80387_constant_p (x))
14928 case 1: /* 0.0 */
14929 *total = 1;
14930 break;
14931 default: /* Other constants */
14932 *total = 2;
14933 break;
14934 case 0:
14935 case -1:
14936 /* Start with (MEM (SYMBOL_REF)), since that's where
14937 it'll probably end up. Add a penalty for size. */
14938 *total = (COSTS_N_INSNS (1)
14939 + (flag_pic != 0 && !TARGET_64BIT)
14940 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
14941 break;
14943 return true;
14945 case ZERO_EXTEND:
14946 /* Zero extension is often completely free on x86_64, so make
14947 it as cheap as possible. */
14948 if (TARGET_64BIT && mode == DImode
14949 && GET_MODE (XEXP (x, 0)) == SImode)
14950 *total = 1;
14951 else if (TARGET_ZERO_EXTEND_WITH_AND)
14952 *total = COSTS_N_INSNS (ix86_cost->add);
14953 else
14954 *total = COSTS_N_INSNS (ix86_cost->movzx);
14955 return false;
14957 case SIGN_EXTEND:
14958 *total = COSTS_N_INSNS (ix86_cost->movsx);
14959 return false;
14961 case ASHIFT:
14962 if (GET_CODE (XEXP (x, 1)) == CONST_INT
14963 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
14965 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
14966 if (value == 1)
14968 *total = COSTS_N_INSNS (ix86_cost->add);
14969 return false;
14971 if ((value == 2 || value == 3)
14972 && !TARGET_DECOMPOSE_LEA
14973 && ix86_cost->lea <= ix86_cost->shift_const)
14975 *total = COSTS_N_INSNS (ix86_cost->lea);
14976 return false;
14979 /* FALLTHRU */
14981 case ROTATE:
14982 case ASHIFTRT:
14983 case LSHIFTRT:
14984 case ROTATERT:
14985 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
14987 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
14989 if (INTVAL (XEXP (x, 1)) > 32)
14990 *total = COSTS_N_INSNS(ix86_cost->shift_const + 2);
14991 else
14992 *total = COSTS_N_INSNS(ix86_cost->shift_const * 2);
14994 else
14996 if (GET_CODE (XEXP (x, 1)) == AND)
14997 *total = COSTS_N_INSNS(ix86_cost->shift_var * 2);
14998 else
14999 *total = COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2);
15002 else
15004 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15005 *total = COSTS_N_INSNS (ix86_cost->shift_const);
15006 else
15007 *total = COSTS_N_INSNS (ix86_cost->shift_var);
15009 return false;
15011 case MULT:
15012 if (FLOAT_MODE_P (mode))
15013 *total = COSTS_N_INSNS (ix86_cost->fmul);
15014 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
15016 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
15017 int nbits;
15019 for (nbits = 0; value != 0; value >>= 1)
15020 nbits++;
15022 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
15023 + nbits * ix86_cost->mult_bit);
15025 else
15027 /* This is arbitrary */
15028 *total = COSTS_N_INSNS (ix86_cost->mult_init[MODE_INDEX (mode)]
15029 + 7 * ix86_cost->mult_bit);
15031 return false;
15033 case DIV:
15034 case UDIV:
15035 case MOD:
15036 case UMOD:
15037 if (FLOAT_MODE_P (mode))
15038 *total = COSTS_N_INSNS (ix86_cost->fdiv);
15039 else
15040 *total = COSTS_N_INSNS (ix86_cost->divide[MODE_INDEX (mode)]);
15041 return false;
15043 case PLUS:
15044 if (FLOAT_MODE_P (mode))
15045 *total = COSTS_N_INSNS (ix86_cost->fadd);
15046 else if (!TARGET_DECOMPOSE_LEA
15047 && GET_MODE_CLASS (mode) == MODE_INT
15048 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
15050 if (GET_CODE (XEXP (x, 0)) == PLUS
15051 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
15052 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
15053 && CONSTANT_P (XEXP (x, 1)))
15055 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
15056 if (val == 2 || val == 4 || val == 8)
15058 *total = COSTS_N_INSNS (ix86_cost->lea);
15059 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15060 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
15061 outer_code);
15062 *total += rtx_cost (XEXP (x, 1), outer_code);
15063 return true;
15066 else if (GET_CODE (XEXP (x, 0)) == MULT
15067 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
15069 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
15070 if (val == 2 || val == 4 || val == 8)
15072 *total = COSTS_N_INSNS (ix86_cost->lea);
15073 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15074 *total += rtx_cost (XEXP (x, 1), outer_code);
15075 return true;
15078 else if (GET_CODE (XEXP (x, 0)) == PLUS)
15080 *total = COSTS_N_INSNS (ix86_cost->lea);
15081 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
15082 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
15083 *total += rtx_cost (XEXP (x, 1), outer_code);
15084 return true;
15087 /* FALLTHRU */
15089 case MINUS:
15090 if (FLOAT_MODE_P (mode))
15092 *total = COSTS_N_INSNS (ix86_cost->fadd);
15093 return false;
15095 /* FALLTHRU */
15097 case AND:
15098 case IOR:
15099 case XOR:
15100 if (!TARGET_64BIT && mode == DImode)
15102 *total = (COSTS_N_INSNS (ix86_cost->add) * 2
15103 + (rtx_cost (XEXP (x, 0), outer_code)
15104 << (GET_MODE (XEXP (x, 0)) != DImode))
15105 + (rtx_cost (XEXP (x, 1), outer_code)
15106 << (GET_MODE (XEXP (x, 1)) != DImode)));
15107 return true;
15109 /* FALLTHRU */
15111 case NEG:
15112 if (FLOAT_MODE_P (mode))
15114 *total = COSTS_N_INSNS (ix86_cost->fchs);
15115 return false;
15117 /* FALLTHRU */
15119 case NOT:
15120 if (!TARGET_64BIT && mode == DImode)
15121 *total = COSTS_N_INSNS (ix86_cost->add * 2);
15122 else
15123 *total = COSTS_N_INSNS (ix86_cost->add);
15124 return false;
15126 case FLOAT_EXTEND:
15127 if (!TARGET_SSE_MATH || !VALID_SSE_REG_MODE (mode))
15128 *total = 0;
15129 return false;
15131 case ABS:
15132 if (FLOAT_MODE_P (mode))
15133 *total = COSTS_N_INSNS (ix86_cost->fabs);
15134 return false;
15136 case SQRT:
15137 if (FLOAT_MODE_P (mode))
15138 *total = COSTS_N_INSNS (ix86_cost->fsqrt);
15139 return false;
15141 case UNSPEC:
15142 if (XINT (x, 1) == UNSPEC_TP)
15143 *total = 0;
15144 return false;
15146 default:
15147 return false;
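/* Worked example (illustrative only): for an SImode address-like expression
   (plus (mult (reg) (const_int 4)) (reg)), and when the target does not
   decompose LEA, the PLUS case above recognizes the scaled-index form and
   charges the cost of a single lea plus the costs of the two register
   operands, instead of pricing the multiply and the add separately.  */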
15151 #if defined (DO_GLOBAL_CTORS_BODY) && defined (HAS_INIT_SECTION)
15152 static void
15153 ix86_svr3_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
15155 init_section ();
15156 fputs ("\tpushl $", asm_out_file);
15157 assemble_name (asm_out_file, XSTR (symbol, 0));
15158 fputc ('\n', asm_out_file);
15160 #endif
15162 #if TARGET_MACHO
15164 static int current_machopic_label_num;
15166 /* Given a symbol name and its associated stub, write out the
15167 definition of the stub. */
15169 void
15170 machopic_output_stub (FILE *file, const char *symb, const char *stub)
15172 unsigned int length;
15173 char *binder_name, *symbol_name, lazy_ptr_name[32];
15174 int label = ++current_machopic_label_num;
15176 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
15177 symb = (*targetm.strip_name_encoding) (symb);
15179 length = strlen (stub);
15180 binder_name = alloca (length + 32);
15181 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
15183 length = strlen (symb);
15184 symbol_name = alloca (length + 32);
15185 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
15187 sprintf (lazy_ptr_name, "L%d$lz", label);
15189 if (MACHOPIC_PURE)
15190 machopic_picsymbol_stub_section ();
15191 else
15192 machopic_symbol_stub_section ();
15194 fprintf (file, "%s:\n", stub);
15195 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15197 if (MACHOPIC_PURE)
15199 fprintf (file, "\tcall LPC$%d\nLPC$%d:\tpopl %%eax\n", label, label);
15200 fprintf (file, "\tmovl %s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
15201 fprintf (file, "\tjmp %%edx\n");
15203 else
15204 fprintf (file, "\tjmp *%s\n", lazy_ptr_name);
15206 fprintf (file, "%s:\n", binder_name);
15208 if (MACHOPIC_PURE)
15210 fprintf (file, "\tlea %s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
15211 fprintf (file, "\tpushl %%eax\n");
15213 else
15214 fprintf (file, "\t pushl $%s\n", lazy_ptr_name);
15216 fprintf (file, "\tjmp dyld_stub_binding_helper\n");
15218 machopic_lazy_symbol_ptr_section ();
15219 fprintf (file, "%s:\n", lazy_ptr_name);
15220 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
15221 fprintf (file, "\t.long %s\n", binder_name);
15223 #endif /* TARGET_MACHO */
15225 /* Order the registers for register allocator. */
15227 void
15228 x86_order_regs_for_local_alloc (void)
15230 int pos = 0;
15231 int i;
15233 /* First allocate the local general purpose registers. */
15234 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15235 if (GENERAL_REGNO_P (i) && call_used_regs[i])
15236 reg_alloc_order [pos++] = i;
15238 /* Global general purpose registers. */
15239 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
15240 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
15241 reg_alloc_order [pos++] = i;
15243 /* x87 registers come first in case we are doing FP math
15244 using them. */
15245 if (!TARGET_SSE_MATH)
15246 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15247 reg_alloc_order [pos++] = i;
15249 /* SSE registers. */
15250 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
15251 reg_alloc_order [pos++] = i;
15252 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
15253 reg_alloc_order [pos++] = i;
15255 /* x87 registers. */
15256 if (TARGET_SSE_MATH)
15257 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
15258 reg_alloc_order [pos++] = i;
15260 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
15261 reg_alloc_order [pos++] = i;
15263 /* Initialize the rest of the array, as some registers are never
15264 allocated at all. */
15265 while (pos < FIRST_PSEUDO_REGISTER)
15266 reg_alloc_order [pos++] = 0;
15269 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
15270 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
15271 #endif
15273 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
15274 struct attribute_spec.handler. */
15275 static tree
15276 ix86_handle_struct_attribute (tree *node, tree name,
15277 tree args ATTRIBUTE_UNUSED,
15278 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
15280 tree *type = NULL;
15281 if (DECL_P (*node))
15283 if (TREE_CODE (*node) == TYPE_DECL)
15284 type = &TREE_TYPE (*node);
15286 else
15287 type = node;
15289 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
15290 || TREE_CODE (*type) == UNION_TYPE)))
15292 warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
15293 *no_add_attrs = true;
15296 else if ((is_attribute_p ("ms_struct", name)
15297 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
15298 || ((is_attribute_p ("gcc_struct", name)
15299 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
15301 warning ("`%s' incompatible attribute ignored",
15302 IDENTIFIER_POINTER (name));
15303 *no_add_attrs = true;
15306 return NULL_TREE;
15309 static bool
15310 ix86_ms_bitfield_layout_p (tree record_type)
15312 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
15313 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
15314 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
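/* Example (illustrative only): the attributes handled above are written as

     struct __attribute__ ((ms_struct)) S { char c; int i : 4; };

   Applying them to something that is not a struct or union type, or giving
   both ms_struct and gcc_struct on the same type, draws the warnings above
   and the attribute is dropped.  */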
15317 /* Returns an expression indicating where the this parameter is
15318 located on entry to the FUNCTION. */
15320 static rtx
15321 x86_this_parameter (tree function)
15323 tree type = TREE_TYPE (function);
15325 if (TARGET_64BIT)
15327 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
15328 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
15331 if (ix86_function_regparm (type, function) > 0)
15333 tree parm;
15335 parm = TYPE_ARG_TYPES (type);
15336 /* Figure out whether or not the function has a variable number of
15337 arguments. */
15338 for (; parm; parm = TREE_CHAIN (parm))
15339 if (TREE_VALUE (parm) == void_type_node)
15340 break;
15341 /* If not, the this parameter is in the first argument. */
15342 if (parm)
15344 int regno = 0;
15345 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
15346 regno = 2;
15347 return gen_rtx_REG (SImode, regno);
15351 if (aggregate_value_p (TREE_TYPE (type), type))
15352 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
15353 else
15354 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
15357 /* Determine whether x86_output_mi_thunk can succeed. */
15359 static bool
15360 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
15361 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
15362 HOST_WIDE_INT vcall_offset, tree function)
15364 /* 64-bit can handle anything. */
15365 if (TARGET_64BIT)
15366 return true;
15368 /* For 32-bit, everything's fine if we have one free register. */
15369 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
15370 return true;
15372 /* Need a free register for vcall_offset. */
15373 if (vcall_offset)
15374 return false;
15376 /* Need a free register for GOT references. */
15377 if (flag_pic && !(*targetm.binds_local_p) (function))
15378 return false;
15380 /* Otherwise ok. */
15381 return true;
15384 /* Output the assembler code for a thunk function. THUNK_DECL is the
15385 declaration for the thunk function itself, FUNCTION is the decl for
15386 the target function. DELTA is an immediate constant offset to be
15387 added to THIS. If VCALL_OFFSET is nonzero, the word at
15388 *(*this + vcall_offset) should be added to THIS. */
15390 static void
15391 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
15392 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
15393 HOST_WIDE_INT vcall_offset, tree function)
15395 rtx xops[3];
15396 rtx this = x86_this_parameter (function);
15397 rtx this_reg, tmp;
15399 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
15400 pull it in now and let DELTA benefit. */
15401 if (REG_P (this))
15402 this_reg = this;
15403 else if (vcall_offset)
15405 /* Put the this parameter into %eax. */
15406 xops[0] = this;
15407 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
15408 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15410 else
15411 this_reg = NULL_RTX;
15413 /* Adjust the this parameter by a fixed constant. */
15414 if (delta)
15416 xops[0] = GEN_INT (delta);
15417 xops[1] = this_reg ? this_reg : this;
15418 if (TARGET_64BIT)
15420 if (!x86_64_general_operand (xops[0], DImode))
15422 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15423 xops[1] = tmp;
15424 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
15425 xops[0] = tmp;
15426 xops[1] = this;
15428 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15430 else
15431 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15434 /* Adjust the this parameter by a value stored in the vtable. */
15435 if (vcall_offset)
15437 if (TARGET_64BIT)
15438 tmp = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 2 /* R10 */);
15439 else
15441 int tmp_regno = 2 /* ECX */;
15442 if (lookup_attribute ("fastcall",
15443 TYPE_ATTRIBUTES (TREE_TYPE (function))))
15444 tmp_regno = 0 /* EAX */;
15445 tmp = gen_rtx_REG (SImode, tmp_regno);
15448 xops[0] = gen_rtx_MEM (Pmode, this_reg);
15449 xops[1] = tmp;
15450 if (TARGET_64BIT)
15451 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15452 else
15453 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15455 /* Adjust the this parameter. */
15456 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
15457 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
15459 rtx tmp2 = gen_rtx_REG (DImode, FIRST_REX_INT_REG + 3 /* R11 */);
15460 xops[0] = GEN_INT (vcall_offset);
15461 xops[1] = tmp2;
15462 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
15463 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
15465 xops[1] = this_reg;
15466 if (TARGET_64BIT)
15467 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
15468 else
15469 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
15472 /* If necessary, drop THIS back to its stack slot. */
15473 if (this_reg && this_reg != this)
15475 xops[0] = this_reg;
15476 xops[1] = this;
15477 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
15480 xops[0] = XEXP (DECL_RTL (function), 0);
15481 if (TARGET_64BIT)
15483 if (!flag_pic || (*targetm.binds_local_p) (function))
15484 output_asm_insn ("jmp\t%P0", xops);
15485 else
15487 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
15488 tmp = gen_rtx_CONST (Pmode, tmp);
15489 tmp = gen_rtx_MEM (QImode, tmp);
15490 xops[0] = tmp;
15491 output_asm_insn ("jmp\t%A0", xops);
15494 else
15496 if (!flag_pic || (*targetm.binds_local_p) (function))
15497 output_asm_insn ("jmp\t%P0", xops);
15498 else
15499 #if TARGET_MACHO
15500 if (TARGET_MACHO)
15502 const char *ip = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (function));
15503 tmp = gen_rtx_SYMBOL_REF (Pmode, machopic_stub_name (ip));
15504 tmp = gen_rtx_MEM (QImode, tmp);
15505 xops[0] = tmp;
15506 output_asm_insn ("jmp\t%0", xops);
15508 else
15509 #endif /* TARGET_MACHO */
15511 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
15512 output_set_got (tmp);
15514 xops[1] = tmp;
15515 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
15516 output_asm_insn ("jmp\t{*}%1", xops);
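/* Illustrative sketch (not part of GCC): for a 32-bit, non-regparm thunk
   with DELTA = -8 and no VCALL_OFFSET, THIS lives at 4(%esp) and the code
   above reduces to just

     addl $-8, 4(%esp)
     jmp  real_function

   (real_function being a hypothetical name for the thunk's target).  A
   nonzero VCALL_OFFSET additionally loads the vtable pointer through THIS
   and adds the word found at that offset.  */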
15521 static void
15522 x86_file_start (void)
15524 default_file_start ();
15525 if (X86_FILE_START_VERSION_DIRECTIVE)
15526 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
15527 if (X86_FILE_START_FLTUSED)
15528 fputs ("\t.global\t__fltused\n", asm_out_file);
15529 if (ix86_asm_dialect == ASM_INTEL)
15530 fputs ("\t.intel_syntax\n", asm_out_file);
15534 x86_field_alignment (tree field, int computed)
15536 enum machine_mode mode;
15537 tree type = TREE_TYPE (field);
15539 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
15540 return computed;
15541 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
15542 ? get_inner_array_type (type) : type);
15543 if (mode == DFmode || mode == DCmode
15544 || GET_MODE_CLASS (mode) == MODE_INT
15545 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
15546 return MIN (32, computed);
15547 return computed;
15550 /* Output assembler code to FILE to increment profiler label # LABELNO
15551 for profiling a function entry. */
15552 void
15553 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
15555 if (TARGET_64BIT)
15556 if (flag_pic)
15558 #ifndef NO_PROFILE_COUNTERS
15559 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
15560 #endif
15561 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
15563 else
15565 #ifndef NO_PROFILE_COUNTERS
15566 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
15567 #endif
15568 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
15570 else if (flag_pic)
15572 #ifndef NO_PROFILE_COUNTERS
15573 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
15574 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
15575 #endif
15576 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
15578 else
15580 #ifndef NO_PROFILE_COUNTERS
15581 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
15582 PROFILE_COUNT_REGISTER);
15583 #endif
15584 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
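/* Example (illustrative only): assuming LPREFIX ".L", PROFILE_COUNT_REGISTER
   "edx" and MCOUNT_NAME "mcount" (all of these are target-configurable), the
   non-PIC 32-bit branch above emits

     movl $.LP0,%edx
     call mcount

   while the PIC branch addresses both the counter label and mcount through
   the GOT instead.  */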
15588 /* We don't have exact information about the insn sizes, but we may assume
15589 quite safely that we are informed about all 1-byte insns and memory
15590 address sizes. This is enough to eliminate unnecessary padding in
15591 99% of cases. */
15593 static int
15594 min_insn_size (rtx insn)
15596 int l = 0;
15598 if (!INSN_P (insn) || !active_insn_p (insn))
15599 return 0;
15601 /* Discard the alignments we've emitted, as well as jump tables. */
15602 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
15603 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
15604 return 0;
15605 if (GET_CODE (insn) == JUMP_INSN
15606 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
15607 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
15608 return 0;
15610 /* Important case: calls are always 5 bytes.
15611 It is common to have many calls in a row. */
15612 if (GET_CODE (insn) == CALL_INSN
15613 && symbolic_reference_mentioned_p (PATTERN (insn))
15614 && !SIBLING_CALL_P (insn))
15615 return 5;
15616 if (get_attr_length (insn) <= 1)
15617 return 1;
15619 /* For normal instructions we may rely on the sizes of addresses
15620 and the presence of a symbol to require 4 bytes of encoding.
15621 This is not the case for jumps, where references are PC-relative. */
15622 if (GET_CODE (insn) != JUMP_INSN)
15624 l = get_attr_length_address (insn);
15625 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
15626 l = 4;
15628 if (l)
15629 return 1+l;
15630 else
15631 return 2;
15634 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
15635 16-byte window. */
15637 static void
15638 k8_avoid_jump_misspredicts (void)
15640 rtx insn, start = get_insns ();
15641 int nbytes = 0, njumps = 0;
15642 int isjump = 0;
15644 /* Look for all minimal intervals of instructions containing 4 jumps.
15645 The intervals are bounded by START and INSN.  NBYTES is the total
15646 size of the instructions in the interval, including INSN and not including
15647 START.  When NBYTES is smaller than 16 bytes, it is possible
15648 that the ends of START and INSN fall into the same 16-byte window.
15650 The smallest offset in the window at which INSN can start is the case where
15651 START ends at offset 0.  The offset of INSN is then NBYTES - sizeof (INSN).
15652 We add a p2align to the 16-byte window with maxskip 17 - NBYTES + sizeof (INSN).
15654 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
15657 nbytes += min_insn_size (insn);
15658 if (rtl_dump_file)
15659 fprintf(rtl_dump_file, "Insn %i estimated to %i bytes\n",
15660 INSN_UID (insn), min_insn_size (insn));
15661 if ((GET_CODE (insn) == JUMP_INSN
15662 && GET_CODE (PATTERN (insn)) != ADDR_VEC
15663 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
15664 || GET_CODE (insn) == CALL_INSN)
15665 njumps++;
15666 else
15667 continue;
15669 while (njumps > 3)
15671 start = NEXT_INSN (start);
15672 if ((GET_CODE (start) == JUMP_INSN
15673 && GET_CODE (PATTERN (start)) != ADDR_VEC
15674 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
15675 || GET_CODE (start) == CALL_INSN)
15676 njumps--, isjump = 1;
15677 else
15678 isjump = 0;
15679 nbytes -= min_insn_size (start);
15681 if (njumps < 0)
15682 abort ();
15683 if (rtl_dump_file)
15684 fprintf(rtl_dump_file, "Interval %i to %i has %i bytes\n",
15685 INSN_UID (start), INSN_UID (insn), nbytes);
15687 if (njumps == 3 && isjump && nbytes < 16)
15689 int padsize = 15 - nbytes + min_insn_size (insn);
15691 if (rtl_dump_file)
15692 fprintf (rtl_dump_file, "Padding insn %i by %i bytes!\n", INSN_UID (insn), padsize);
15693 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
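/* Worked example (illustrative only): when the sliding window START..INSN
   would contain a fourth branch, the leading branch is dropped from it; if
   the remaining NBYTES still fit in under 16 bytes, a p2align of
   15 - NBYTES + sizeof (INSN) bytes is emitted in front of INSN so that all
   four branches cannot end up in the same aligned 16-byte block.  */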
15698 /* Implement machine-specific optimizations.
15699 At the moment we implement a single transformation: AMD Athlon works faster
15700 when RET is not the destination of a conditional jump or directly preceded
15701 by another jump instruction.  We avoid the penalty by inserting a NOP just
15702 before the RET instruction in such cases. */
15703 static void
15704 ix86_reorg (void)
15706 edge e;
15708 if (!TARGET_ATHLON_K8 || !optimize || optimize_size)
15709 return;
15710 for (e = EXIT_BLOCK_PTR->pred; e; e = e->pred_next)
15712 basic_block bb = e->src;
15713 rtx ret = BB_END (bb);
15714 rtx prev;
15715 bool replace = false;
15717 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
15718 || !maybe_hot_bb_p (bb))
15719 continue;
15720 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
15721 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
15722 break;
15723 if (prev && GET_CODE (prev) == CODE_LABEL)
15725 edge e;
15726 for (e = bb->pred; e; e = e->pred_next)
15727 if (EDGE_FREQUENCY (e) && e->src->index >= 0
15728 && !(e->flags & EDGE_FALLTHRU))
15729 replace = true;
15731 if (!replace)
15733 prev = prev_active_insn (ret);
15734 if (prev
15735 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
15736 || GET_CODE (prev) == CALL_INSN))
15737 replace = true;
15738 /* Empty functions get a branch mispredict even when the jump destination
15739 is not visible to us. */
15740 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
15741 replace = true;
15743 if (replace)
15745 emit_insn_before (gen_return_internal_long (), ret);
15746 delete_insn (ret);
15749 k8_avoid_jump_misspredicts ();
15752 /* Return nonzero when a QImode register that must be represented via a REX
15753 prefix is used. */
15754 bool
15755 x86_extended_QIreg_mentioned_p (rtx insn)
15757 int i;
15758 extract_insn_cached (insn);
15759 for (i = 0; i < recog_data.n_operands; i++)
15760 if (REG_P (recog_data.operand[i])
15761 && REGNO (recog_data.operand[i]) >= 4)
15762 return true;
15763 return false;
15766 /* Return nonzero when P points to a register encoded via a REX prefix.
15767 Called via for_each_rtx. */
15768 static int
15769 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
15771 unsigned int regno;
15772 if (!REG_P (*p))
15773 return 0;
15774 regno = REGNO (*p);
15775 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
15778 /* Return true when INSN mentions a register that must be encoded using a
15779 REX prefix. */
15780 bool
15781 x86_extended_reg_mentioned_p (rtx insn)
15783 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
15786 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
15787 optabs would emit if we didn't have TFmode patterns. */
15789 void
15790 x86_emit_floatuns (rtx operands[2])
15792 rtx neglab, donelab, i0, i1, f0, in, out;
15793 enum machine_mode mode, inmode;
15795 inmode = GET_MODE (operands[1]);
15796 if (inmode != SImode
15797 && inmode != DImode)
15798 abort ();
15800 out = operands[0];
15801 in = force_reg (inmode, operands[1]);
15802 mode = GET_MODE (out);
15803 neglab = gen_label_rtx ();
15804 donelab = gen_label_rtx ();
15805 i1 = gen_reg_rtx (Pmode);
15806 f0 = gen_reg_rtx (mode);
15808 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
15810 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
15811 emit_jump_insn (gen_jump (donelab));
15812 emit_barrier ();
15814 emit_label (neglab);
15816 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15817 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
15818 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
15819 expand_float (f0, i0, 0);
15820 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
15822 emit_label (donelab);
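/* Illustrative C equivalent (not part of GCC) of the sequence emitted above
   for a DImode input converted to double: nonnegative inputs use the plain
   signed conversion; inputs with the high bit set are halved first, keeping
   the dropped bit so the final rounding comes out right, then doubled after
   the conversion.

     double u64_to_double (unsigned long long u)
     {
       if ((long long) u >= 0)
         return (double) (long long) u;
       unsigned long long half = (u >> 1) | (u & 1);
       return 2.0 * (double) (long long) half;
     }
*/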
15825 /* Return true if we do not know how to pass TYPE solely in registers. */
15826 bool
15827 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
15829 if (default_must_pass_in_stack (mode, type))
15830 return true;
15831 return (!TARGET_64BIT && type && mode == TImode);
15834 /* Initialize vector TARGET via VALS. */
15835 void
15836 ix86_expand_vector_init (rtx target, rtx vals)
15838 enum machine_mode mode = GET_MODE (target);
15839 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
15840 int n_elts = (GET_MODE_SIZE (mode) / elt_size);
15841 int i;
15843 for (i = n_elts - 1; i >= 0; i--)
15844 if (GET_CODE (XVECEXP (vals, 0, i)) != CONST_INT
15845 && GET_CODE (XVECEXP (vals, 0, i)) != CONST_DOUBLE)
15846 break;
15848 /* A few special cases first...
15849 ... constants are best loaded from the constant pool. */
15850 if (i < 0)
15852 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15853 return;
15856 /* ... values where only the first field is non-constant are best loaded
15857 from the pool and overwritten via a move later. */
15858 if (!i)
15860 rtx op = simplify_gen_subreg (mode, XVECEXP (vals, 0, 0),
15861 GET_MODE_INNER (mode), 0);
15863 op = force_reg (mode, op);
15864 XVECEXP (vals, 0, 0) = CONST0_RTX (GET_MODE_INNER (mode));
15865 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
15866 switch (GET_MODE (target))
15868 case V2DFmode:
15869 emit_insn (gen_sse2_movsd (target, target, op));
15870 break;
15871 case V4SFmode:
15872 emit_insn (gen_sse_movss (target, target, op));
15873 break;
15874 default:
15875 break;
15877 return;
15880 /* Otherwise, use the general sequence built from unpack interleaves. */
15881 switch (GET_MODE (target))
15883 case V2DFmode:
15885 rtx vecop0 =
15886 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 0), DFmode, 0);
15887 rtx vecop1 =
15888 simplify_gen_subreg (V2DFmode, XVECEXP (vals, 0, 1), DFmode, 0);
15890 vecop0 = force_reg (V2DFmode, vecop0);
15891 vecop1 = force_reg (V2DFmode, vecop1);
15892 emit_insn (gen_sse2_unpcklpd (target, vecop0, vecop1));
15894 break;
15895 case V4SFmode:
15897 rtx vecop0 =
15898 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 0), SFmode, 0);
15899 rtx vecop1 =
15900 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 1), SFmode, 0);
15901 rtx vecop2 =
15902 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 2), SFmode, 0);
15903 rtx vecop3 =
15904 simplify_gen_subreg (V4SFmode, XVECEXP (vals, 0, 3), SFmode, 0);
15905 rtx tmp1 = gen_reg_rtx (V4SFmode);
15906 rtx tmp2 = gen_reg_rtx (V4SFmode);
15908 vecop0 = force_reg (V4SFmode, vecop0);
15909 vecop1 = force_reg (V4SFmode, vecop1);
15910 vecop2 = force_reg (V4SFmode, vecop2);
15911 vecop3 = force_reg (V4SFmode, vecop3);
15912 emit_insn (gen_sse_unpcklps (tmp1, vecop1, vecop3));
15913 emit_insn (gen_sse_unpcklps (tmp2, vecop0, vecop2));
15914 emit_insn (gen_sse_unpcklps (target, tmp2, tmp1));
15916 break;
15917 default:
15918 abort ();
15922 #include "gt-i386.h"
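/* Example (illustrative only): building a V4SFmode vector { a, b, c, d } with
   all four elements non-constant uses the interleave sequence in
   ix86_expand_vector_init above:

     tmp1   = unpcklps (b, d)        -- { b, d, x, x }
     tmp2   = unpcklps (a, c)        -- { a, c, x, x }
     target = unpcklps (tmp2, tmp1)  -- { a, b, c, d }

   An all-constant initializer is instead loaded straight from the constant
   pool, and one with only the first element variable is loaded from the pool
   and patched with movss/movsd.  */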