/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "tree-gimple.h"
#include "dwarf2.h"
#include "tm-constrs.h"
#include "params.h"
#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)     \
  ((mode) == QImode ? 0      \
   : (mode) == HImode ? 1    \
   : (mode) == SImode ? 2    \
   : (mode) == DImode ? 3    \
   : 4)
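
/* For illustration: MODE_INDEX picks the per-mode entry out of the
   five-element cost arrays in the processor_costs tables below, so a
   multiply cost query ends up looking roughly like

     total = cost->mult_init[MODE_INDEX (mode)] + nbits * cost->mult_bit;

   The field names mult_init and mult_bit are assumed from the
   processor_costs definition in i386.h; see that header for the
   authoritative layout.  */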
/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
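
/* For illustration: each stringop_algs entry in the tables below pairs a
   default algorithm with a list of {max_size, algorithm} steps, where a
   max of -1 means "no upper bound".  Under that reading,

     {libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}}

   means: use rep movsl/stosl for blocks up to 256 bytes and fall back to a
   library call for anything larger.  DUMMY_STRINGOP_ALGS is the placeholder
   used where a CPU has no separate 64-bit tuning entry (that reading of the
   second slot is an assumption; see i386.h for the structure itself).  */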
static const
struct processor_costs size_cost = {  /* costs for tuning for size */
  COSTS_N_BYTES (2),   /* cost of an add instruction */
  COSTS_N_BYTES (3),   /* cost of a lea instruction */
  COSTS_N_BYTES (2),   /* variable shift costs */
  COSTS_N_BYTES (3),   /* constant shift costs */
  {COSTS_N_BYTES (3),  /* cost of starting multiply for QI */
   COSTS_N_BYTES (3),  /* HI */
   COSTS_N_BYTES (3),  /* SI */
   COSTS_N_BYTES (3),  /* DI */
   COSTS_N_BYTES (5)}, /* other */
  0,                   /* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),  /* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),  /* HI */
   COSTS_N_BYTES (3),  /* SI */
   COSTS_N_BYTES (3),  /* DI */
   COSTS_N_BYTES (5)}, /* other */
  COSTS_N_BYTES (3),   /* cost of movsx */
  COSTS_N_BYTES (3),   /* cost of movzx */
  0,                   /* "large" insn */
  2,                   /* MOVE_RATIO */
  2,                   /* cost for loading QImode using movzbl */
  {2, 2, 2},           /* cost of loading integer registers
                          in QImode, HImode and SImode.
                          Relative to reg-reg move (2).  */
  {2, 2, 2},           /* cost of storing integer registers */
  2,                   /* cost of reg,reg fld/fst */
  {2, 2, 2},           /* cost of loading fp registers
                          in SFmode, DFmode and XFmode */
  {2, 2, 2},           /* cost of storing fp registers
                          in SFmode, DFmode and XFmode */
  3,                   /* cost of moving MMX register */
  {3, 3},              /* cost of loading MMX registers
                          in SImode and DImode */
  {3, 3},              /* cost of storing MMX registers
                          in SImode and DImode */
  3,                   /* cost of moving SSE register */
  {3, 3, 3},           /* cost of loading SSE registers
                          in SImode, DImode and TImode */
  {3, 3, 3},           /* cost of storing SSE registers
                          in SImode, DImode and TImode */
  3,                   /* MMX or SSE register to integer */
  0,                   /* size of prefetch block */
  0,                   /* number of parallel prefetches */
  2,                   /* Branch cost */
  COSTS_N_BYTES (2),   /* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),   /* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),   /* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),   /* cost of FABS instruction.  */
  COSTS_N_BYTES (2),   /* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),   /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}}
};
/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {  /* 386 specific costs */
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (1),    /* cost of a lea instruction */
  COSTS_N_INSNS (3),    /* variable shift costs */
  COSTS_N_INSNS (2),    /* constant shift costs */
  {COSTS_N_INSNS (6),   /* cost of starting multiply for QI */
   COSTS_N_INSNS (6),   /* HI */
   COSTS_N_INSNS (6),   /* SI */
   COSTS_N_INSNS (6),   /* DI */
   COSTS_N_INSNS (6)},  /* other */
  COSTS_N_INSNS (1),    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),  /* HI */
   COSTS_N_INSNS (23),  /* SI */
   COSTS_N_INSNS (23),  /* DI */
   COSTS_N_INSNS (23)}, /* other */
  COSTS_N_INSNS (3),    /* cost of movsx */
  COSTS_N_INSNS (2),    /* cost of movzx */
  15,                   /* "large" insn */
  3,                    /* MOVE_RATIO */
  4,                    /* cost for loading QImode using movzbl */
  {2, 4, 2},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {2, 4, 2},            /* cost of storing integer registers */
  2,                    /* cost of reg,reg fld/fst */
  {8, 8, 8},            /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {8, 8, 8},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  2,                    /* cost of moving MMX register */
  {4, 8},               /* cost of loading MMX registers
                           in SImode and DImode */
  {4, 8},               /* cost of storing MMX registers
                           in SImode and DImode */
  2,                    /* cost of moving SSE register */
  {4, 8, 16},           /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {4, 8, 16},           /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  3,                    /* MMX or SSE register to integer */
  0,                    /* size of prefetch block */
  0,                    /* number of parallel prefetches */
  1,                    /* Branch cost */
  COSTS_N_INSNS (23),   /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),   /* cost of FABS instruction.  */
  COSTS_N_INSNS (24),   /* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),  /* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
};
static const
struct processor_costs i486_cost = {  /* 486 specific costs */
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (1),    /* cost of a lea instruction */
  COSTS_N_INSNS (3),    /* variable shift costs */
  COSTS_N_INSNS (2),    /* constant shift costs */
  {COSTS_N_INSNS (12),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (12),  /* HI */
   COSTS_N_INSNS (12),  /* SI */
   COSTS_N_INSNS (12),  /* DI */
   COSTS_N_INSNS (12)}, /* other */
  1,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),  /* HI */
   COSTS_N_INSNS (40),  /* SI */
   COSTS_N_INSNS (40),  /* DI */
   COSTS_N_INSNS (40)}, /* other */
  COSTS_N_INSNS (3),    /* cost of movsx */
  COSTS_N_INSNS (2),    /* cost of movzx */
  15,                   /* "large" insn */
  3,                    /* MOVE_RATIO */
  4,                    /* cost for loading QImode using movzbl */
  {2, 4, 2},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {2, 4, 2},            /* cost of storing integer registers */
  2,                    /* cost of reg,reg fld/fst */
  {8, 8, 8},            /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {8, 8, 8},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  2,                    /* cost of moving MMX register */
  {4, 8},               /* cost of loading MMX registers
                           in SImode and DImode */
  {4, 8},               /* cost of storing MMX registers
                           in SImode and DImode */
  2,                    /* cost of moving SSE register */
  {4, 8, 16},           /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {4, 8, 16},           /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  3,                    /* MMX or SSE register to integer */
  0,                    /* size of prefetch block */
  0,                    /* number of parallel prefetches */
  1,                    /* Branch cost */
  COSTS_N_INSNS (8),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),   /* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS}
};
static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (1),    /* cost of a lea instruction */
  COSTS_N_INSNS (4),    /* variable shift costs */
  COSTS_N_INSNS (1),    /* constant shift costs */
  {COSTS_N_INSNS (11),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (11),  /* HI */
   COSTS_N_INSNS (11),  /* SI */
   COSTS_N_INSNS (11),  /* DI */
   COSTS_N_INSNS (11)}, /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),  /* HI */
   COSTS_N_INSNS (25),  /* SI */
   COSTS_N_INSNS (25),  /* DI */
   COSTS_N_INSNS (25)}, /* other */
  COSTS_N_INSNS (3),    /* cost of movsx */
  COSTS_N_INSNS (2),    /* cost of movzx */
  8,                    /* "large" insn */
  6,                    /* MOVE_RATIO */
  6,                    /* cost for loading QImode using movzbl */
  {2, 4, 2},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {2, 4, 2},            /* cost of storing integer registers */
  2,                    /* cost of reg,reg fld/fst */
  {2, 2, 6},            /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {4, 4, 6},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  8,                    /* cost of moving MMX register */
  {8, 8},               /* cost of loading MMX registers
                           in SImode and DImode */
  {8, 8},               /* cost of storing MMX registers
                           in SImode and DImode */
  2,                    /* cost of moving SSE register */
  {4, 8, 16},           /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {4, 8, 16},           /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  3,                    /* MMX or SSE register to integer */
  0,                    /* size of prefetch block */
  0,                    /* number of parallel prefetches */
  2,                    /* Branch cost */
  COSTS_N_INSNS (3),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS}
};
static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (1),    /* cost of a lea instruction */
  COSTS_N_INSNS (1),    /* variable shift costs */
  COSTS_N_INSNS (1),    /* constant shift costs */
  {COSTS_N_INSNS (4),   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),   /* HI */
   COSTS_N_INSNS (4),   /* SI */
   COSTS_N_INSNS (4),   /* DI */
   COSTS_N_INSNS (4)},  /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),  /* HI */
   COSTS_N_INSNS (17),  /* SI */
   COSTS_N_INSNS (17),  /* DI */
   COSTS_N_INSNS (17)}, /* other */
  COSTS_N_INSNS (1),    /* cost of movsx */
  COSTS_N_INSNS (1),    /* cost of movzx */
  8,                    /* "large" insn */
  6,                    /* MOVE_RATIO */
  2,                    /* cost for loading QImode using movzbl */
  {4, 4, 4},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {2, 2, 2},            /* cost of storing integer registers */
  2,                    /* cost of reg,reg fld/fst */
  {2, 2, 6},            /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {4, 4, 6},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  2,                    /* cost of moving MMX register */
  {2, 2},               /* cost of loading MMX registers
                           in SImode and DImode */
  {2, 2},               /* cost of storing MMX registers
                           in SImode and DImode */
  2,                    /* cost of moving SSE register */
  {2, 2, 8},            /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {2, 2, 8},            /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  3,                    /* MMX or SSE register to integer */
  32,                   /* size of prefetch block */
  6,                    /* number of parallel prefetches */
  2,                    /* Branch cost */
  COSTS_N_INSNS (3),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),   /* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win; for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb apparently has a more expensive startup time in
     the CPU, but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
                        {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};
static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (1),    /* cost of a lea instruction */
  COSTS_N_INSNS (2),    /* variable shift costs */
  COSTS_N_INSNS (1),    /* constant shift costs */
  {COSTS_N_INSNS (3),   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),   /* HI */
   COSTS_N_INSNS (7),   /* SI */
   COSTS_N_INSNS (7),   /* DI */
   COSTS_N_INSNS (7)},  /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),  /* HI */
   COSTS_N_INSNS (39),  /* SI */
   COSTS_N_INSNS (39),  /* DI */
   COSTS_N_INSNS (39)}, /* other */
  COSTS_N_INSNS (1),    /* cost of movsx */
  COSTS_N_INSNS (1),    /* cost of movzx */
  8,                    /* "large" insn */
  4,                    /* MOVE_RATIO */
  1,                    /* cost for loading QImode using movzbl */
  {1, 1, 1},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {1, 1, 1},            /* cost of storing integer registers */
  1,                    /* cost of reg,reg fld/fst */
  {1, 1, 1},            /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {4, 6, 6},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */

  1,                    /* cost of moving MMX register */
  {1, 1},               /* cost of loading MMX registers
                           in SImode and DImode */
  {1, 1},               /* cost of storing MMX registers
                           in SImode and DImode */
  1,                    /* cost of moving SSE register */
  {1, 1, 1},            /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {1, 1, 1},            /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  1,                    /* MMX or SSE register to integer */
  32,                   /* size of prefetch block */
  1,                    /* number of parallel prefetches */
  1,                    /* Branch cost */
  COSTS_N_INSNS (6),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),   /* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};
static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (2),    /* cost of a lea instruction */
  COSTS_N_INSNS (1),    /* variable shift costs */
  COSTS_N_INSNS (1),    /* constant shift costs */
  {COSTS_N_INSNS (3),   /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),   /* HI */
   COSTS_N_INSNS (3),   /* SI */
   COSTS_N_INSNS (3),   /* DI */
   COSTS_N_INSNS (3)},  /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),  /* HI */
   COSTS_N_INSNS (18),  /* SI */
   COSTS_N_INSNS (18),  /* DI */
   COSTS_N_INSNS (18)}, /* other */
  COSTS_N_INSNS (2),    /* cost of movsx */
  COSTS_N_INSNS (2),    /* cost of movzx */
  8,                    /* "large" insn */
  4,                    /* MOVE_RATIO */
  3,                    /* cost for loading QImode using movzbl */
  {4, 5, 4},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {2, 3, 2},            /* cost of storing integer registers */
  4,                    /* cost of reg,reg fld/fst */
  {6, 6, 6},            /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {4, 4, 4},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  2,                    /* cost of moving MMX register */
  {2, 2},               /* cost of loading MMX registers
                           in SImode and DImode */
  {2, 2},               /* cost of storing MMX registers
                           in SImode and DImode */
  2,                    /* cost of moving SSE register */
  {2, 2, 8},            /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {2, 2, 8},            /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  6,                    /* MMX or SSE register to integer */
  32,                   /* size of prefetch block */
  1,                    /* number of parallel prefetches */
  1,                    /* Branch cost */
  COSTS_N_INSNS (2),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),   /* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};
static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (2),    /* cost of a lea instruction */
  COSTS_N_INSNS (1),    /* variable shift costs */
  COSTS_N_INSNS (1),    /* constant shift costs */
  {COSTS_N_INSNS (5),   /* cost of starting multiply for QI */
   COSTS_N_INSNS (5),   /* HI */
   COSTS_N_INSNS (5),   /* SI */
   COSTS_N_INSNS (5),   /* DI */
   COSTS_N_INSNS (5)},  /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),  /* HI */
   COSTS_N_INSNS (42),  /* SI */
   COSTS_N_INSNS (74),  /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1),    /* cost of movsx */
  COSTS_N_INSNS (1),    /* cost of movzx */
  8,                    /* "large" insn */
  9,                    /* MOVE_RATIO */
  4,                    /* cost for loading QImode using movzbl */
  {3, 4, 3},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {3, 4, 3},            /* cost of storing integer registers */
  4,                    /* cost of reg,reg fld/fst */
  {4, 4, 12},           /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {6, 6, 8},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  2,                    /* cost of moving MMX register */
  {4, 4},               /* cost of loading MMX registers
                           in SImode and DImode */
  {4, 4},               /* cost of storing MMX registers
                           in SImode and DImode */
  2,                    /* cost of moving SSE register */
  {4, 4, 6},            /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {4, 4, 5},            /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  5,                    /* MMX or SSE register to integer */
  64,                   /* size of prefetch block */
  6,                    /* number of parallel prefetches */
  5,                    /* Branch cost */
  COSTS_N_INSNS (4),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),   /* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS}
};
static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (2),    /* cost of a lea instruction */
  COSTS_N_INSNS (1),    /* variable shift costs */
  COSTS_N_INSNS (1),    /* constant shift costs */
  {COSTS_N_INSNS (3),   /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),   /* HI */
   COSTS_N_INSNS (3),   /* SI */
   COSTS_N_INSNS (4),   /* DI */
   COSTS_N_INSNS (5)},  /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),  /* HI */
   COSTS_N_INSNS (42),  /* SI */
   COSTS_N_INSNS (74),  /* DI */
   COSTS_N_INSNS (74)}, /* other */
  COSTS_N_INSNS (1),    /* cost of movsx */
  COSTS_N_INSNS (1),    /* cost of movzx */
  8,                    /* "large" insn */
  9,                    /* MOVE_RATIO */
  4,                    /* cost for loading QImode using movzbl */
  {3, 4, 3},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {3, 4, 3},            /* cost of storing integer registers */
  4,                    /* cost of reg,reg fld/fst */
  {4, 4, 12},           /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {6, 6, 8},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  2,                    /* cost of moving MMX register */
  {3, 3},               /* cost of loading MMX registers
                           in SImode and DImode */
  {4, 4},               /* cost of storing MMX registers
                           in SImode and DImode */
  2,                    /* cost of moving SSE register */
  {4, 3, 6},            /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {4, 4, 5},            /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  5,                    /* MMX or SSE register to integer */
  64,                   /* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea to leave the number of prefetches completely unlimited, as
     their execution also takes some time).  */
  100,                  /* number of parallel prefetches */
  5,                    /* Branch cost */
  COSTS_N_INSNS (4),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),   /* cost of FSQRT instruction.  */
  /* K8 has optimized REP instruction for medium sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, a libcall
     can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (3),    /* cost of a lea instruction */
  COSTS_N_INSNS (4),    /* variable shift costs */
  COSTS_N_INSNS (4),    /* constant shift costs */
  {COSTS_N_INSNS (15),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (15),  /* HI */
   COSTS_N_INSNS (15),  /* SI */
   COSTS_N_INSNS (15),  /* DI */
   COSTS_N_INSNS (15)}, /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),  /* HI */
   COSTS_N_INSNS (56),  /* SI */
   COSTS_N_INSNS (56),  /* DI */
   COSTS_N_INSNS (56)}, /* other */
  COSTS_N_INSNS (1),    /* cost of movsx */
  COSTS_N_INSNS (1),    /* cost of movzx */
  16,                   /* "large" insn */
  6,                    /* MOVE_RATIO */
  2,                    /* cost for loading QImode using movzbl */
  {4, 5, 4},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {2, 3, 2},            /* cost of storing integer registers */
  2,                    /* cost of reg,reg fld/fst */
  {2, 2, 6},            /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {4, 4, 6},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  2,                    /* cost of moving MMX register */
  {2, 2},               /* cost of loading MMX registers
                           in SImode and DImode */
  {2, 2},               /* cost of storing MMX registers
                           in SImode and DImode */
  12,                   /* cost of moving SSE register */
  {12, 12, 12},         /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {2, 2, 8},            /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  10,                   /* MMX or SSE register to integer */
  64,                   /* size of prefetch block */
  6,                    /* number of parallel prefetches */
  2,                    /* Branch cost */
  COSTS_N_INSNS (5),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (2),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),   /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};
static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),    /* cost of an add instruction */
  COSTS_N_INSNS (1),    /* cost of a lea instruction */
  COSTS_N_INSNS (1),    /* variable shift costs */
  COSTS_N_INSNS (1),    /* constant shift costs */
  {COSTS_N_INSNS (10),  /* cost of starting multiply for QI */
   COSTS_N_INSNS (10),  /* HI */
   COSTS_N_INSNS (10),  /* SI */
   COSTS_N_INSNS (10),  /* DI */
   COSTS_N_INSNS (10)}, /* other */
  0,                    /* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),  /* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),  /* HI */
   COSTS_N_INSNS (66),  /* SI */
   COSTS_N_INSNS (66),  /* DI */
   COSTS_N_INSNS (66)}, /* other */
  COSTS_N_INSNS (1),    /* cost of movsx */
  COSTS_N_INSNS (1),    /* cost of movzx */
  16,                   /* "large" insn */
  17,                   /* MOVE_RATIO */
  4,                    /* cost for loading QImode using movzbl */
  {4, 4, 4},            /* cost of loading integer registers
                           in QImode, HImode and SImode.
                           Relative to reg-reg move (2).  */
  {4, 4, 4},            /* cost of storing integer registers */
  3,                    /* cost of reg,reg fld/fst */
  {12, 12, 12},         /* cost of loading fp registers
                           in SFmode, DFmode and XFmode */
  {4, 4, 4},            /* cost of storing fp registers
                           in SFmode, DFmode and XFmode */
  6,                    /* cost of moving MMX register */
  {12, 12},             /* cost of loading MMX registers
                           in SImode and DImode */
  {12, 12},             /* cost of storing MMX registers
                           in SImode and DImode */
  6,                    /* cost of moving SSE register */
  {12, 12, 12},         /* cost of loading SSE registers
                           in SImode, DImode and TImode */
  {12, 12, 12},         /* cost of storing SSE registers
                           in SImode, DImode and TImode */
  8,                    /* MMX or SSE register to integer */
  128,                  /* size of prefetch block */
  8,                    /* number of parallel prefetches */
  1,                    /* Branch cost */
  COSTS_N_INSNS (6),    /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),    /* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),   /* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),    /* cost of FABS instruction.  */
  COSTS_N_INSNS (3),    /* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),   /* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
              {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
              {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,   /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (3),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (3),      /* DI */
   COSTS_N_INSNS (3)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),     /* HI */
   COSTS_N_INSNS (22),     /* SI */
   COSTS_N_INSNS (22),     /* DI */
   COSTS_N_INSNS (22)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  16,                      /* MOVE_RATIO */
  2,                       /* cost for loading QImode using movzbl */
  {6, 6, 6},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  2,                       /* cost of reg,reg fld/fst */
  {6, 6, 6},               /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {4, 4, 4},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {6, 6},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {4, 4},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {6, 6, 6},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {4, 4, 4},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  2,                       /* MMX or SSE register to integer */
  128,                     /* size of prefetch block */
  8,                       /* number of parallel prefetches */
  3,                       /* Branch cost */
  COSTS_N_INSNS (3),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (1),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),      /* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
              {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
              {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles or more.  With
     this cost, however, our current implementation of synth_mult results in
     the use of unnecessary temporary registers causing regression on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,   /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (2)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),     /* HI */
   COSTS_N_INSNS (42),     /* SI */
   COSTS_N_INSNS (74),     /* DI */
   COSTS_N_INSNS (74)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  17,                      /* MOVE_RATIO */
  4,                       /* cost for loading QImode using movzbl */
  {4, 4, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {12, 12, 12},            /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {8, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {8, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {8, 8, 8},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {8, 8, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  5,                       /* MMX or SSE register to integer */
  64,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,                       /* Branch cost */
  COSTS_N_INSNS (8),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),      /* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}}
};
/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),       /* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,   /* cost of a lea instruction */
  COSTS_N_INSNS (1),       /* variable shift costs */
  COSTS_N_INSNS (1),       /* constant shift costs */
  {COSTS_N_INSNS (3),      /* cost of starting multiply for QI */
   COSTS_N_INSNS (4),      /* HI */
   COSTS_N_INSNS (3),      /* SI */
   COSTS_N_INSNS (4),      /* DI */
   COSTS_N_INSNS (2)},     /* other */
  0,                       /* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),     /* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),     /* HI */
   COSTS_N_INSNS (42),     /* SI */
   COSTS_N_INSNS (74),     /* DI */
   COSTS_N_INSNS (74)},    /* other */
  COSTS_N_INSNS (1),       /* cost of movsx */
  COSTS_N_INSNS (1),       /* cost of movzx */
  8,                       /* "large" insn */
  17,                      /* MOVE_RATIO */
  4,                       /* cost for loading QImode using movzbl */
  {4, 4, 4},               /* cost of loading integer registers
                              in QImode, HImode and SImode.
                              Relative to reg-reg move (2).  */
  {4, 4, 4},               /* cost of storing integer registers */
  4,                       /* cost of reg,reg fld/fst */
  {12, 12, 12},            /* cost of loading fp registers
                              in SFmode, DFmode and XFmode */
  {6, 6, 8},               /* cost of storing fp registers
                              in SFmode, DFmode and XFmode */
  2,                       /* cost of moving MMX register */
  {8, 8},                  /* cost of loading MMX registers
                              in SImode and DImode */
  {8, 8},                  /* cost of storing MMX registers
                              in SImode and DImode */
  2,                       /* cost of moving SSE register */
  {8, 8, 8},               /* cost of loading SSE registers
                              in SImode, DImode and TImode */
  {8, 8, 8},               /* cost of storing SSE registers
                              in SImode, DImode and TImode */
  5,                       /* MMX or SSE register to integer */
  64,                      /* size of prefetch block */
  6,                       /* number of parallel prefetches */
  3,                       /* Branch cost */
  COSTS_N_INSNS (8),       /* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),       /* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),      /* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),       /* cost of FABS instruction.  */
  COSTS_N_INSNS (8),       /* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),      /* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
};

const struct processor_costs *ix86_cost = &pentium_cost;
/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)
#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
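
/* For illustration: each x86_* tuning word below is tested against the mask
   of the processor selected by -mtune.  i386.h wraps that test in target
   macros along these (assumed) lines:

     #define TUNEMASK (1 << ix86_tune)
     #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)

   The exact macro names are an assumption here; the point is simply that a
   set bit in a given position enables the heuristic for that processor.  */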
/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */

/* Leave does not affect Nocona SPEC2000 results negatively, so enabling it
   for Generic64 seems like a good code-size tradeoff.  We can't enable it
   for 32bit generic because it does not work well with PPro base chips.  */
const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_zero_extend_with_and = m_486 | m_PENT;
const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
const int x86_double_with_add = ~m_386;
const int x86_use_bit_test = m_386;
const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
const int x86_3dnow_a = m_ATHLON_K8;
const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
/* Branch hints were put in P4 based on simulation results.  But
   after P4 was made, no performance benefit was observed with
   branch hints.  It also increases the code size.  As a result,
   icc never generates branch hints.  */
const int x86_branch_hints = 0;
const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /* m_GENERIC | m_ATHLON_K8 ? */
/* We probably ought to watch for partial register stalls on Generic32
   compilation setting as well.  However in the current implementation the
   partial register stalls are not eliminated very well - they can
   be introduced via subregs synthesized by combine and can happen
   in caller/callee saving sequences.
   Because this option pays back little on PPro based chips and is in conflict
   with partial reg. dependencies used by Athlon/P4 based chips, it is better
   to leave it off for generic32 for now.  */
const int x86_partial_reg_stall = m_PPRO;
const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
const int x86_use_mov0 = m_K6;
const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
const int x86_read_modify_write = ~m_PENT;
const int x86_read_modify = ~(m_PENT | m_PPRO);
const int x86_split_long_moves = m_PPRO;
const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
const int x86_qimode_math = ~(0);
const int x86_promote_qi_regs = 0;
/* On PPro this flag is meant to avoid partial register stalls.  Just like
   the x86_partial_reg_stall this option might be considered for Generic32
   if our scheme for avoiding partial stalls was more effective.  */
const int x86_himode_math = ~(m_PPRO);
const int x86_promote_hi_regs = m_PPRO;
const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
const int x86_shift1 = ~m_486;
const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
/* In the Generic model we have a conflict here between PPro/Pentium4 based
   chips that treat 128bit SSE registers as single units and K8 based chips
   that divide SSE registers into two 64bit halves.
   x86_sse_partial_reg_dependency promotes all store destinations to be 128bit
   to allow register renaming on 128bit SSE units, but usually results in one
   extra microop on 64bit SSE units.  Experimental results show that disabling
   this option on P4 brings over 20% SPECfp regression, while enabling it on
   K8 brings roughly 2.4% regression that can be partly masked by careful
   scheduling of moves.  */
const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
/* Set for machines where the type and dependencies are resolved on SSE
   register parts instead of whole registers, so we may maintain just the
   lower part of scalar values in proper format, leaving the upper part
   undefined.  */
const int x86_sse_split_regs = m_ATHLON_K8;
const int x86_sse_typeless_stores = m_ATHLON_K8;
const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
const int x86_use_ffreep = m_ATHLON_K8;
const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);

/* ??? Allowing interunit moves makes it all too easy for the compiler to put
   integer data in xmm registers, which results in pretty abysmal code.  */
const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;

const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
/* Some CPU cores are not able to predict more than 4 branch instructions in
   the 16 byte window.  */
const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
const int x86_use_bt = m_ATHLON_K8;
/* Compare and exchange was added for 80486.  */
const int x86_cmpxchg = ~m_386;
/* Compare and exchange 8 bytes was added for pentium.  */
const int x86_cmpxchg8b = ~(m_386 | m_486);
/* Compare and exchange 16 bytes was added for nocona.  */
const int x86_cmpxchg16b = m_NOCONA;
/* Exchange and add was added for 80486.  */
const int x86_xadd = ~m_386;
/* Byteswap was added for 80486.  */
const int x86_bswap = ~m_386;
const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;

static enum stringop_alg stringop_alg = no_stringop;
/* In case the average insn count for single function invocation is
   lower than this constant, emit fast (but longer) prologue and
   epilogue code.  */
#define FAST_PROLOGUE_INSN_COUNT 20

/* Names for 8 (low), 8 (high), and 16-bit registers, respectively.  */
static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
/* Array of the smallest class containing reg number REGNO, indexed by
   REGNO.  Used by REGNO_REG_CLASS in i386.h.  */

enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
{
  /* ax, dx, cx, bx */
  AREG, DREG, CREG, BREG,
  /* si, di, bp, sp */
  SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
  /* FP registers */
  FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
  FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
  /* arg pointer */
  NON_Q_REGS,
  /* flags, fpsr, fpcr, frame */
  NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
  MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
  MMX_REGS, MMX_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
  SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
  SSE_REGS, SSE_REGS,
};
/* The "default" register map used in 32bit mode.  */

int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 4, 5,           /* general regs */
  12, 13, 14, 15, 16, 17, 18, 19,   /* fp regs */
  -1, -1, -1, -1, -1,               /* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,   /* SSE */
  29, 30, 31, 32, 33, 34, 35, 36,   /* MMX */
  -1, -1, -1, -1, -1, -1, -1, -1,   /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,   /* extended SSE registers */
};
static int const x86_64_int_parameter_registers[6] =
{
  5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
  FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
};

static int const x86_64_int_return_registers[4] =
{
  0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
};
/* The "default" register map used in 64bit mode.  */
int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 1, 2, 3, 4, 5, 6, 7,           /* general regs */
  33, 34, 35, 36, 37, 38, 39, 40,   /* fp regs */
  -1, -1, -1, -1, -1,               /* arg, flags, fpsr, fpcr, frame */
  17, 18, 19, 20, 21, 22, 23, 24,   /* SSE */
  41, 42, 43, 44, 45, 46, 47, 48,   /* MMX */
  8, 9, 10, 11, 12, 13, 14, 15,     /* extended integer registers */
  25, 26, 27, 28, 29, 30, 31, 32,   /* extended SSE registers */
};
/* Define the register numbers to be used in Dwarf debugging information.
   The SVR4 reference port C compiler uses the following register numbers
   in its Dwarf output code:
	0 for %eax (gcc regno = 0)
	1 for %ecx (gcc regno = 2)
	2 for %edx (gcc regno = 1)
	3 for %ebx (gcc regno = 3)
	4 for %esp (gcc regno = 7)
	5 for %ebp (gcc regno = 6)
	6 for %esi (gcc regno = 4)
	7 for %edi (gcc regno = 5)
   The following three DWARF register numbers are never generated by
   the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
   believes these numbers have these meanings.
	8 for %eip (no gcc equivalent)
	9 for %eflags (gcc regno = 17)
	10 for %trapno (no gcc equivalent)
   It is not at all clear how we should number the FP stack registers
   for the x86 architecture.  If the version of SDB on x86/svr4 were
   a bit less brain dead with respect to floating-point then we would
   have a precedent to follow with respect to DWARF register numbers
   for x86 FP registers, but the SDB on x86/svr4 is so completely
   broken with respect to FP registers that it is hardly worth thinking
   of it as something to strive for compatibility with.
   The version of x86/svr4 SDB I have at the moment does (partially)
   seem to believe that DWARF register number 11 is associated with
   the x86 register %st(0), but that's about all.  Higher DWARF
   register numbers don't seem to be associated with anything in
   particular, and even for DWARF regno 11, SDB only seems to understand
   that it should say that a variable lives in %st(0) (when
   asked via an `=' command) if we said it was in DWARF regno 11,
   but SDB still prints garbage when asked for the value of the
   variable in question (via a `/' command).
   (Also note that the labels SDB prints for various FP stack regs
   when doing an `x' command are all wrong.)
   Note that these problems generally don't affect the native SVR4
   C compiler because it doesn't allow the use of -O with -g and
   because when it is *not* optimizing, it allocates a memory
   location for each floating-point variable, and the memory
   location is what gets described in the DWARF AT_location
   attribute for the variable in question.
   Regardless of the severe mental illness of the x86/svr4 SDB, we
   do something sensible here and we use the following DWARF
   register numbers.  Note that these are all stack-top-relative
   numbers.
	11 for %st(0) (gcc regno = 8)
	12 for %st(1) (gcc regno = 9)
	13 for %st(2) (gcc regno = 10)
	14 for %st(3) (gcc regno = 11)
	15 for %st(4) (gcc regno = 12)
	16 for %st(5) (gcc regno = 13)
	17 for %st(6) (gcc regno = 14)
	18 for %st(7) (gcc regno = 15)
*/
int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
{
  0, 2, 1, 3, 6, 7, 5, 4,           /* general regs */
  11, 12, 13, 14, 15, 16, 17, 18,   /* fp regs */
  -1, 9, -1, -1, -1,                /* arg, flags, fpsr, fpcr, frame */
  21, 22, 23, 24, 25, 26, 27, 28,   /* SSE registers */
  29, 30, 31, 32, 33, 34, 35, 36,   /* MMX registers */
  -1, -1, -1, -1, -1, -1, -1, -1,   /* extended integer registers */
  -1, -1, -1, -1, -1, -1, -1, -1,   /* extended SSE registers */
};
/* Test and compare insns in i386.md store the information needed to
   generate branch and scc insns here.  */

rtx ix86_compare_op0 = NULL_RTX;
rtx ix86_compare_op1 = NULL_RTX;
rtx ix86_compare_emitted = NULL_RTX;
/* Size of the register save area.  */
#define X86_64_VARARGS_SIZE (REGPARM_MAX * UNITS_PER_WORD + SSE_REGPARM_MAX * 16)
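/* For illustration: with the usual x86-64 values of these macros from
   i386.h (assumed here: REGPARM_MAX = 6, UNITS_PER_WORD = 8 and
   SSE_REGPARM_MAX = 8), this works out to

     6 * 8 + 8 * 16 = 176 bytes,

   i.e. six 8-byte slots for the integer argument registers followed by
   eight 16-byte slots for the SSE argument registers, which is the varargs
   register save area layout the x86-64 psABI describes.  */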
/* Define the structure for the machine field in struct function.  */

struct stack_local_entry GTY(())
{
  unsigned short mode;
  unsigned short n;
  rtx rtl;
  struct stack_local_entry *next;
};
/* Structure describing stack frame layout.
   Stack grows downward:

   [arguments]
					      <- ARG_POINTER
   saved pc

   saved frame pointer if frame_pointer_needed
					      <- HARD_FRAME_POINTER
   [saved regs]

   [padding1]          \
			)
   [va_arg registers]  (
			> to_allocate	      <- FRAME_POINTER
   [frame]	       (
			)
   [padding2]	       /
  */
struct ix86_frame
{
  int nregs;
  int padding1;
  int va_arg_size;
  HOST_WIDE_INT frame;
  int padding2;
  int outgoing_arguments_size;
  int red_zone_size;

  HOST_WIDE_INT to_allocate;
  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;
};
/* Code model option.  */
enum cmodel ix86_cmodel;
/* Asm dialect.  */
enum asm_dialect ix86_asm_dialect = ASM_ATT;
/* TLS dialects.  */
enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;

/* Which unit we are generating floating point math for.  */
enum fpmath_unit ix86_fpmath;

/* Which cpu are we scheduling for.  */
enum processor_type ix86_tune;
/* Which instruction set architecture to use.  */
enum processor_type ix86_arch;

/* true if sse prefetch instruction is not NOOP.  */
int x86_prefetch_sse;

/* ix86_regparm_string as a number */
static int ix86_regparm;

/* -mstackrealign option */
extern int ix86_force_align_arg_pointer;
static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";

/* Preferred alignment for stack boundary in bits.  */
unsigned int ix86_preferred_stack_boundary;

/* Values 1-5: see jump.c */
int ix86_branch_cost;

/* Variables which are this size or smaller are put in the data/bss
   or ldata/lbss sections.  */

int ix86_section_threshold = 65536;

/* Prefix built by ASM_GENERATE_INTERNAL_LABEL.  */
char internal_label_prefix[16];
int internal_label_prefix_len;
static bool ix86_handle_option (size_t, const char *, int);
static void output_pic_addr_const (FILE *, rtx, int);
static void put_condition_code (enum rtx_code, enum machine_mode,
				int, int, FILE *);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static rtx ix86_expand_int_compare (enum rtx_code, rtx, rtx);
static enum rtx_code ix86_prepare_fp_compare_args (enum rtx_code, rtx *,
						   rtx *);
static bool ix86_fixed_condition_code_regs (unsigned int *, unsigned int *);
static enum machine_mode ix86_cc_modes_compatible (enum machine_mode,
						   enum machine_mode);
static rtx get_thread_pointer (int);
static rtx legitimize_tls_address (rtx, enum tls_model, int);
static void get_pc_thunk_name (char [32], unsigned int);
static rtx gen_push (rtx);
static int ix86_flags_dependent (rtx, rtx, enum attr_type);
static int ix86_agi_dependent (rtx, rtx, enum attr_type);
static struct machine_function * ix86_init_machine_status (void);
static int ix86_split_to_parts (rtx, rtx *, enum machine_mode);
static int ix86_nsaved_regs (void);
static void ix86_emit_save_regs (void);
static void ix86_emit_save_regs_using_mov (rtx, HOST_WIDE_INT);
static void ix86_emit_restore_regs_using_mov (rtx, HOST_WIDE_INT, int);
static void ix86_output_function_epilogue (FILE *, HOST_WIDE_INT);
static HOST_WIDE_INT ix86_GOT_alias_set (void);
static void ix86_adjust_counter (rtx, HOST_WIDE_INT);
static void ix86_expand_strlensi_unroll_1 (rtx, rtx, rtx);
static int ix86_issue_rate (void);
static int ix86_adjust_cost (rtx, rtx, rtx, int);
static int ia32_multipass_dfa_lookahead (void);
static void ix86_init_mmx_sse_builtins (void);
static rtx x86_this_parameter (tree);
static void x86_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				 HOST_WIDE_INT, tree);
static bool x86_can_output_mi_thunk (tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
static void x86_file_start (void);
static void ix86_reorg (void);
static bool ix86_expand_carry_flag_compare (enum rtx_code, rtx, rtx, rtx*);
static tree ix86_build_builtin_va_list (void);
static void ix86_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static tree ix86_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ix86_scalar_mode_supported_p (enum machine_mode);
static bool ix86_vector_mode_supported_p (enum machine_mode);

static int ix86_address_cost (rtx);
static bool ix86_cannot_force_const_mem (rtx);
static rtx ix86_delegitimize_address (rtx);

static void i386_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;

struct builtin_description;
static rtx ix86_expand_sse_comi (const struct builtin_description *,
				 tree, rtx);
static rtx ix86_expand_sse_compare (const struct builtin_description *,
				    tree, rtx);
static rtx ix86_expand_unop1_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_unop_builtin (enum insn_code, tree, rtx, int);
static rtx ix86_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx ix86_expand_store_builtin (enum insn_code, tree);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx ix86_expand_fp_compare (enum rtx_code, rtx, rtx, rtx, rtx *, rtx *);
static int ix86_fp_comparison_arithmetics_cost (enum rtx_code code);
static int ix86_fp_comparison_fcomi_cost (enum rtx_code code);
static int ix86_fp_comparison_sahf_cost (enum rtx_code code);
static int ix86_fp_comparison_cost (enum rtx_code code);
static unsigned int ix86_select_alt_pic_regnum (void);
static int ix86_save_reg (unsigned int, int);
static void ix86_compute_frame_layout (struct ix86_frame *);
static int ix86_comp_type_attributes (tree, tree);
static int ix86_function_regparm (tree, tree);
const struct attribute_spec ix86_attribute_table[];
static bool ix86_function_ok_for_sibcall (tree, tree);
static tree ix86_handle_cconv_attribute (tree *, tree, tree, int, bool *);
static int ix86_value_regno (enum machine_mode, tree, tree);
static bool contains_128bit_aligned_vector_p (tree);
static rtx ix86_struct_value_rtx (tree, int);
static bool ix86_ms_bitfield_layout_p (tree);
static tree ix86_handle_struct_attribute (tree *, tree, tree, int, bool *);
static int extended_reg_mentioned_1 (rtx *, void *);
static bool ix86_rtx_costs (rtx, int, int, int *);
static int min_insn_size (rtx);
static tree ix86_md_asm_clobbers (tree outputs, tree inputs, tree clobbers);
static bool ix86_must_pass_in_stack (enum machine_mode mode, tree type);
static bool ix86_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				    tree, bool);
static void ix86_init_builtins (void);
static rtx ix86_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree ix86_builtin_vectorized_function (enum built_in_function, tree);
static const char *ix86_mangle_fundamental_type (tree);
static tree ix86_stack_protect_fail (void);
static rtx ix86_internal_arg_pointer (void);
static void ix86_dwarf_handle_frame_unspec (const char *, rtx, int);

/* This function is only used on Solaris.  */
static void i386_solaris_elf_named_section (const char *, unsigned int, tree)
     ATTRIBUTE_UNUSED;
1367 /* Register class used for passing given 64bit part of the argument.
1368 These represent classes as documented by the PS ABI, with the exception
1369 of the SSESF and SSEDF classes, which are basically the SSE class; gcc just
1370 uses an SFmode or DFmode move instead of DImode to avoid reformatting penalties.
1372 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1373 whenever possible (the upper half does contain padding).  */
1375 enum x86_64_reg_class
1376 {
1377 X86_64_NO_CLASS,
1378 X86_64_INTEGER_CLASS,
1379 X86_64_INTEGERSI_CLASS,
1380 X86_64_SSE_CLASS,
1381 X86_64_SSESF_CLASS,
1382 X86_64_SSEDF_CLASS,
1383 X86_64_SSEUP_CLASS,
1384 X86_64_X87_CLASS,
1385 X86_64_X87UP_CLASS,
1386 X86_64_COMPLEX_X87_CLASS,
1387 X86_64_MEMORY_CLASS
1388 };
1389 static const char * const x86_64_reg_class_name[] = {
1390 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1391 "sseup", "x87", "x87up", "cplx87", "no"
1392 };
1394 #define MAX_CLASSES 4
1396 /* Table of constants used by fldpi, fldln2, etc.... */
1397 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1398 static bool ext_80387_constants_init = 0;
1399 static void init_ext_80387_constants (void);
1400 static bool ix86_in_large_data_p (tree) ATTRIBUTE_UNUSED;
1401 static void ix86_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
1402 static void x86_64_elf_unique_section (tree decl, int reloc) ATTRIBUTE_UNUSED;
1403 static section *x86_64_elf_select_section (tree decl, int reloc,
1404 unsigned HOST_WIDE_INT align)
1405 ATTRIBUTE_UNUSED;
1407 /* Initialize the GCC target structure. */
1408 #undef TARGET_ATTRIBUTE_TABLE
1409 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
1410 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
1411 # undef TARGET_MERGE_DECL_ATTRIBUTES
1412 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
1413 #endif
1415 #undef TARGET_COMP_TYPE_ATTRIBUTES
1416 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
1418 #undef TARGET_INIT_BUILTINS
1419 #define TARGET_INIT_BUILTINS ix86_init_builtins
1420 #undef TARGET_EXPAND_BUILTIN
1421 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
1422 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1423 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION ix86_builtin_vectorized_function
1425 #undef TARGET_ASM_FUNCTION_EPILOGUE
1426 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
1428 #undef TARGET_ENCODE_SECTION_INFO
1429 #ifndef SUBTARGET_ENCODE_SECTION_INFO
1430 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
1431 #else
1432 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
1433 #endif
1435 #undef TARGET_ASM_OPEN_PAREN
1436 #define TARGET_ASM_OPEN_PAREN ""
1437 #undef TARGET_ASM_CLOSE_PAREN
1438 #define TARGET_ASM_CLOSE_PAREN ""
1440 #undef TARGET_ASM_ALIGNED_HI_OP
1441 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
1442 #undef TARGET_ASM_ALIGNED_SI_OP
1443 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
1444 #ifdef ASM_QUAD
1445 #undef TARGET_ASM_ALIGNED_DI_OP
1446 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
1447 #endif
1449 #undef TARGET_ASM_UNALIGNED_HI_OP
1450 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
1451 #undef TARGET_ASM_UNALIGNED_SI_OP
1452 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
1453 #undef TARGET_ASM_UNALIGNED_DI_OP
1454 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
1456 #undef TARGET_SCHED_ADJUST_COST
1457 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
1458 #undef TARGET_SCHED_ISSUE_RATE
1459 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
1460 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1461 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1462 ia32_multipass_dfa_lookahead
1464 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1465 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
1467 #ifdef HAVE_AS_TLS
1468 #undef TARGET_HAVE_TLS
1469 #define TARGET_HAVE_TLS true
1470 #endif
1471 #undef TARGET_CANNOT_FORCE_CONST_MEM
1472 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
1473 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1474 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_rtx_true
1476 #undef TARGET_DELEGITIMIZE_ADDRESS
1477 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
1479 #undef TARGET_MS_BITFIELD_LAYOUT_P
1480 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
1482 #if TARGET_MACHO
1483 #undef TARGET_BINDS_LOCAL_P
1484 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1485 #endif
1487 #undef TARGET_ASM_OUTPUT_MI_THUNK
1488 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
1489 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1490 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
1492 #undef TARGET_ASM_FILE_START
1493 #define TARGET_ASM_FILE_START x86_file_start
1495 #undef TARGET_DEFAULT_TARGET_FLAGS
1496 #define TARGET_DEFAULT_TARGET_FLAGS \
1497 (TARGET_DEFAULT \
1498 | TARGET_64BIT_DEFAULT \
1499 | TARGET_SUBTARGET_DEFAULT \
1500 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
1502 #undef TARGET_HANDLE_OPTION
1503 #define TARGET_HANDLE_OPTION ix86_handle_option
1505 #undef TARGET_RTX_COSTS
1506 #define TARGET_RTX_COSTS ix86_rtx_costs
1507 #undef TARGET_ADDRESS_COST
1508 #define TARGET_ADDRESS_COST ix86_address_cost
1510 #undef TARGET_FIXED_CONDITION_CODE_REGS
1511 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
1512 #undef TARGET_CC_MODES_COMPATIBLE
1513 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
1515 #undef TARGET_MACHINE_DEPENDENT_REORG
1516 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
1518 #undef TARGET_BUILD_BUILTIN_VA_LIST
1519 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
1521 #undef TARGET_MD_ASM_CLOBBERS
1522 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
1524 #undef TARGET_PROMOTE_PROTOTYPES
1525 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1526 #undef TARGET_STRUCT_VALUE_RTX
1527 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
1528 #undef TARGET_SETUP_INCOMING_VARARGS
1529 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
1530 #undef TARGET_MUST_PASS_IN_STACK
1531 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
1532 #undef TARGET_PASS_BY_REFERENCE
1533 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
1534 #undef TARGET_INTERNAL_ARG_POINTER
1535 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
1536 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
1537 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
1539 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1540 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
1542 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1543 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
1545 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1546 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
1548 #ifdef HAVE_AS_TLS
1549 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1550 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
1551 #endif
1553 #ifdef SUBTARGET_INSERT_ATTRIBUTES
1554 #undef TARGET_INSERT_ATTRIBUTES
1555 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
1556 #endif
1558 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
1559 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ix86_mangle_fundamental_type
1561 #undef TARGET_STACK_PROTECT_FAIL
1562 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
1564 #undef TARGET_FUNCTION_VALUE
1565 #define TARGET_FUNCTION_VALUE ix86_function_value
1567 struct gcc_target targetm = TARGET_INITIALIZER;
1570 /* The svr4 ABI for the i386 says that records and unions are returned
1571 in memory. */
1572 #ifndef DEFAULT_PCC_STRUCT_RETURN
1573 #define DEFAULT_PCC_STRUCT_RETURN 1
1574 #endif
1576 /* Implement TARGET_HANDLE_OPTION. */
1578 static bool
1579 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1581 switch (code)
1583 case OPT_m3dnow:
1584 if (!value)
1586 target_flags &= ~MASK_3DNOW_A;
1587 target_flags_explicit |= MASK_3DNOW_A;
1589 return true;
1591 case OPT_mmmx:
1592 if (!value)
1594 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1595 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1597 return true;
1599 case OPT_msse:
1600 if (!value)
1602 target_flags &= ~(MASK_SSE2 | MASK_SSE3);
1603 target_flags_explicit |= MASK_SSE2 | MASK_SSE3;
1605 return true;
1607 case OPT_msse2:
1608 if (!value)
1610 target_flags &= ~MASK_SSE3;
1611 target_flags_explicit |= MASK_SSE3;
1613 return true;
1615 default:
1616 return true;
1620 /* Sometimes certain combinations of command options do not make
1621 sense on a particular target machine. You can define a macro
1622 `OVERRIDE_OPTIONS' to take account of this. This macro, if
1623 defined, is executed once just after all the command options have
1624 been parsed.
1626 Don't use this macro to turn on various extra optimizations for
1627 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
1629 void
1630 override_options (void)
1632 int i;
1633 int ix86_tune_defaulted = 0;
1635 /* Comes from final.c -- no real reason to change it. */
1636 #define MAX_CODE_ALIGN 16
1638 static struct ptt
1640 const struct processor_costs *cost; /* Processor costs */
1641 const int target_enable; /* Target flags to enable. */
1642 const int target_disable; /* Target flags to disable. */
1643 const int align_loop; /* Default alignments. */
1644 const int align_loop_max_skip;
1645 const int align_jump;
1646 const int align_jump_max_skip;
1647 const int align_func;
1649 const processor_target_table[PROCESSOR_max] =
1651 {&i386_cost, 0, 0, 4, 3, 4, 3, 4},
1652 {&i486_cost, 0, 0, 16, 15, 16, 15, 16},
1653 {&pentium_cost, 0, 0, 16, 7, 16, 7, 16},
1654 {&pentiumpro_cost, 0, 0, 16, 15, 16, 7, 16},
1655 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1656 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1657 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1658 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1659 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1660 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1661 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1662 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1663 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1666 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1667 static struct pta
1669 const char *const name; /* processor name or nickname. */
1670 const enum processor_type processor;
1671 const enum pta_flags
1673 PTA_SSE = 1,
1674 PTA_SSE2 = 2,
1675 PTA_SSE3 = 4,
1676 PTA_MMX = 8,
1677 PTA_PREFETCH_SSE = 16,
1678 PTA_3DNOW = 32,
1679 PTA_3DNOW_A = 64,
1680 PTA_64BIT = 128,
1681 PTA_SSSE3 = 256
1682 } flags;
1684 const processor_alias_table[] =
1686 {"i386", PROCESSOR_I386, 0},
1687 {"i486", PROCESSOR_I486, 0},
1688 {"i586", PROCESSOR_PENTIUM, 0},
1689 {"pentium", PROCESSOR_PENTIUM, 0},
1690 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
1691 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
1692 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1693 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
1694 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_PREFETCH_SSE | PTA_SSE},
1695 {"i686", PROCESSOR_PENTIUMPRO, 0},
1696 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
1697 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
1698 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1699 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE},
1700 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1701 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1702 | PTA_MMX | PTA_PREFETCH_SSE},
1703 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1704 | PTA_MMX | PTA_PREFETCH_SSE},
1705 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1706 | PTA_MMX | PTA_PREFETCH_SSE},
1707 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1708 | PTA_MMX | PTA_PREFETCH_SSE},
1709 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3
1710 | PTA_64BIT | PTA_MMX
1711 | PTA_PREFETCH_SSE},
1712 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1713 | PTA_3DNOW_A},
1714 {"k6", PROCESSOR_K6, PTA_MMX},
1715 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1716 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1717 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1718 | PTA_3DNOW_A},
1719 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE
1720 | PTA_3DNOW | PTA_3DNOW_A},
1721 {"athlon-4", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1722 | PTA_3DNOW_A | PTA_SSE},
1723 {"athlon-xp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1724 | PTA_3DNOW_A | PTA_SSE},
1725 {"athlon-mp", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1726 | PTA_3DNOW_A | PTA_SSE},
1727 {"x86-64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_64BIT
1728 | PTA_SSE | PTA_SSE2 },
1729 {"k8", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1730 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1731 {"opteron", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1732 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1733 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1734 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1735 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1736 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1737 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1738 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1741 int const pta_size = ARRAY_SIZE (processor_alias_table);
1743 #ifdef SUBTARGET_OVERRIDE_OPTIONS
1744 SUBTARGET_OVERRIDE_OPTIONS;
1745 #endif
1747 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
1748 SUBSUBTARGET_OVERRIDE_OPTIONS;
1749 #endif
1751 /* -fPIC is the default for x86_64 Darwin (Mach-O). */
1752 if (TARGET_MACHO && TARGET_64BIT)
1753 flag_pic = 2;
1755 /* Set the default values for switches whose default depends on TARGET_64BIT
1756 in case they weren't overwritten by command line options. */
1757 if (TARGET_64BIT)
1759 /* Mach-O doesn't support omitting the frame pointer for now. */
1760 if (flag_omit_frame_pointer == 2)
1761 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
1762 if (flag_asynchronous_unwind_tables == 2)
1763 flag_asynchronous_unwind_tables = 1;
1764 if (flag_pcc_struct_return == 2)
1765 flag_pcc_struct_return = 0;
1767 else
1769 if (flag_omit_frame_pointer == 2)
1770 flag_omit_frame_pointer = 0;
1771 if (flag_asynchronous_unwind_tables == 2)
1772 flag_asynchronous_unwind_tables = 0;
1773 if (flag_pcc_struct_return == 2)
1774 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
1777 /* Need to check -mtune=generic first. */
1778 if (ix86_tune_string)
1780 if (!strcmp (ix86_tune_string, "generic")
1781 || !strcmp (ix86_tune_string, "i686")
1782 /* As special support for cross compilers we read -mtune=native
1783 as -mtune=generic. With native compilers we won't see the
1784 -mtune=native, as it was changed by the driver. */
1785 || !strcmp (ix86_tune_string, "native"))
1787 if (TARGET_64BIT)
1788 ix86_tune_string = "generic64";
1789 else
1790 ix86_tune_string = "generic32";
1792 else if (!strncmp (ix86_tune_string, "generic", 7))
1793 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1795 else
1797 if (ix86_arch_string)
1798 ix86_tune_string = ix86_arch_string;
1799 if (!ix86_tune_string)
1801 ix86_tune_string = cpu_names [TARGET_CPU_DEFAULT];
1802 ix86_tune_defaulted = 1;
1805 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
1806 need to use a sensible tune option. */
1807 if (!strcmp (ix86_tune_string, "generic")
1808 || !strcmp (ix86_tune_string, "x86-64")
1809 || !strcmp (ix86_tune_string, "i686"))
1811 if (TARGET_64BIT)
1812 ix86_tune_string = "generic64";
1813 else
1814 ix86_tune_string = "generic32";
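/* For illustration: assuming a compiler configured with a 64-bit default,
   "-mtune=generic" and "-mtune=native" (and plain "-mtune=i686") all end up
   as "generic64" here, while a 32-bit default picks "generic32".  Spelling
   the internal names out explicitly, e.g. "-mtune=generic64", is rejected
   above with the "bad value ... for -mtune= switch" error.  */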
1817 if (ix86_stringop_string)
1819 if (!strcmp (ix86_stringop_string, "rep_byte"))
1820 stringop_alg = rep_prefix_1_byte;
1821 else if (!strcmp (ix86_stringop_string, "libcall"))
1822 stringop_alg = libcall;
1823 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
1824 stringop_alg = rep_prefix_4_byte;
1825 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
1826 stringop_alg = rep_prefix_8_byte;
1827 else if (!strcmp (ix86_stringop_string, "byte_loop"))
1828 stringop_alg = loop_1_byte;
1829 else if (!strcmp (ix86_stringop_string, "loop"))
1830 stringop_alg = loop;
1831 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
1832 stringop_alg = unrolled_loop;
1833 else
1834 error ("bad value (%s) for -mstringop-strategy= switch", ix86_stringop_string);
1836 if (!strcmp (ix86_tune_string, "x86-64"))
1837 warning (OPT_Wdeprecated, "-mtune=x86-64 is deprecated. Use -mtune=k8 or "
1838 "-mtune=generic instead as appropriate.");
1840 if (!ix86_arch_string)
1841 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
1842 if (!strcmp (ix86_arch_string, "generic"))
1843 error ("generic CPU can be used only for -mtune= switch");
1844 if (!strncmp (ix86_arch_string, "generic", 7))
1845 error ("bad value (%s) for -march= switch", ix86_arch_string);
1847 if (ix86_cmodel_string != 0)
1849 if (!strcmp (ix86_cmodel_string, "small"))
1850 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
1851 else if (!strcmp (ix86_cmodel_string, "medium"))
1852 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
1853 else if (flag_pic)
1854 sorry ("code model %s not supported in PIC mode", ix86_cmodel_string);
1855 else if (!strcmp (ix86_cmodel_string, "32"))
1856 ix86_cmodel = CM_32;
1857 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
1858 ix86_cmodel = CM_KERNEL;
1859 else if (!strcmp (ix86_cmodel_string, "large") && !flag_pic)
1860 ix86_cmodel = CM_LARGE;
1861 else
1862 error ("bad value (%s) for -mcmodel= switch", ix86_cmodel_string);
1864 else
1866 ix86_cmodel = CM_32;
1867 if (TARGET_64BIT)
1868 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
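/* For illustration: on a 64-bit target "-mcmodel=medium -fPIC" yields
   CM_MEDIUM_PIC and plain "-mcmodel=small" yields CM_SMALL (CM_SMALL_PIC
   when -fPIC is in effect); with no -mcmodel= option the default is CM_32
   for 32-bit code and CM_SMALL or CM_SMALL_PIC for 64-bit code.  */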
1870 if (ix86_asm_string != 0)
1872 if (! TARGET_MACHO
1873 && !strcmp (ix86_asm_string, "intel"))
1874 ix86_asm_dialect = ASM_INTEL;
1875 else if (!strcmp (ix86_asm_string, "att"))
1876 ix86_asm_dialect = ASM_ATT;
1877 else
1878 error ("bad value (%s) for -masm= switch", ix86_asm_string);
1880 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
1881 error ("code model %qs not supported in the %s bit mode",
1882 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
1883 if (ix86_cmodel == CM_LARGE)
1884 sorry ("code model %<large%> not supported yet");
1885 if ((TARGET_64BIT != 0) != ((target_flags & MASK_64BIT) != 0))
1886 sorry ("%i-bit mode not compiled in",
1887 (target_flags & MASK_64BIT) ? 64 : 32);
1889 for (i = 0; i < pta_size; i++)
1890 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
1892 ix86_arch = processor_alias_table[i].processor;
1893 /* Default cpu tuning to the architecture. */
1894 ix86_tune = ix86_arch;
1895 if (processor_alias_table[i].flags & PTA_MMX
1896 && !(target_flags_explicit & MASK_MMX))
1897 target_flags |= MASK_MMX;
1898 if (processor_alias_table[i].flags & PTA_3DNOW
1899 && !(target_flags_explicit & MASK_3DNOW))
1900 target_flags |= MASK_3DNOW;
1901 if (processor_alias_table[i].flags & PTA_3DNOW_A
1902 && !(target_flags_explicit & MASK_3DNOW_A))
1903 target_flags |= MASK_3DNOW_A;
1904 if (processor_alias_table[i].flags & PTA_SSE
1905 && !(target_flags_explicit & MASK_SSE))
1906 target_flags |= MASK_SSE;
1907 if (processor_alias_table[i].flags & PTA_SSE2
1908 && !(target_flags_explicit & MASK_SSE2))
1909 target_flags |= MASK_SSE2;
1910 if (processor_alias_table[i].flags & PTA_SSE3
1911 && !(target_flags_explicit & MASK_SSE3))
1912 target_flags |= MASK_SSE3;
1913 if (processor_alias_table[i].flags & PTA_SSSE3
1914 && !(target_flags_explicit & MASK_SSSE3))
1915 target_flags |= MASK_SSSE3;
1916 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1917 x86_prefetch_sse = true;
1918 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1919 error ("CPU you selected does not support x86-64 "
1920 "instruction set");
1921 break;
1924 if (i == pta_size)
1925 error ("bad value (%s) for -march= switch", ix86_arch_string);
1927 for (i = 0; i < pta_size; i++)
1928 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
1930 ix86_tune = processor_alias_table[i].processor;
1931 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1933 if (ix86_tune_defaulted)
1935 ix86_tune_string = "x86-64";
1936 for (i = 0; i < pta_size; i++)
1937 if (! strcmp (ix86_tune_string,
1938 processor_alias_table[i].name))
1939 break;
1940 ix86_tune = processor_alias_table[i].processor;
1942 else
1943 error ("CPU you selected does not support x86-64 "
1944 "instruction set");
1946 /* Intel CPUs have always interpreted SSE prefetch instructions as
1947 NOPs; so, we can enable SSE prefetch instructions even when
1948 -mtune (rather than -march) points us to a processor that has them.
1949 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
1950 higher processors. */
1951 if (TARGET_CMOVE && (processor_alias_table[i].flags & PTA_PREFETCH_SSE))
1952 x86_prefetch_sse = true;
1953 break;
1955 if (i == pta_size)
1956 error ("bad value (%s) for -mtune= switch", ix86_tune_string);
1958 if (optimize_size)
1959 ix86_cost = &size_cost;
1960 else
1961 ix86_cost = processor_target_table[ix86_tune].cost;
1962 target_flags |= processor_target_table[ix86_tune].target_enable;
1963 target_flags &= ~processor_target_table[ix86_tune].target_disable;
1965 /* Arrange to set up i386_stack_locals for all functions. */
1966 init_machine_status = ix86_init_machine_status;
1968 /* Validate -mregparm= value. */
1969 if (ix86_regparm_string)
1971 i = atoi (ix86_regparm_string);
1972 if (i < 0 || i > REGPARM_MAX)
1973 error ("-mregparm=%d is not between 0 and %d", i, REGPARM_MAX);
1974 else
1975 ix86_regparm = i;
1977 else
1978 if (TARGET_64BIT)
1979 ix86_regparm = REGPARM_MAX;
1981 /* If the user has provided any of the -malign-* options,
1982 warn and use that value only if -falign-* is not set.
1983 Remove this code in GCC 3.2 or later. */
1984 if (ix86_align_loops_string)
1986 warning (0, "-malign-loops is obsolete, use -falign-loops");
1987 if (align_loops == 0)
1989 i = atoi (ix86_align_loops_string);
1990 if (i < 0 || i > MAX_CODE_ALIGN)
1991 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
1992 else
1993 align_loops = 1 << i;
1997 if (ix86_align_jumps_string)
1999 warning (0, "-malign-jumps is obsolete, use -falign-jumps");
2000 if (align_jumps == 0)
2002 i = atoi (ix86_align_jumps_string);
2003 if (i < 0 || i > MAX_CODE_ALIGN)
2004 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2005 else
2006 align_jumps = 1 << i;
2010 if (ix86_align_funcs_string)
2012 warning (0, "-malign-functions is obsolete, use -falign-functions");
2013 if (align_functions == 0)
2015 i = atoi (ix86_align_funcs_string);
2016 if (i < 0 || i > MAX_CODE_ALIGN)
2017 error ("-malign-loops=%d is not between 0 and %d", i, MAX_CODE_ALIGN);
2018 else
2019 align_functions = 1 << i;
2023 /* Default align_* from the processor table. */
2024 if (align_loops == 0)
2026 align_loops = processor_target_table[ix86_tune].align_loop;
2027 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2029 if (align_jumps == 0)
2031 align_jumps = processor_target_table[ix86_tune].align_jump;
2032 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2034 if (align_functions == 0)
2036 align_functions = processor_target_table[ix86_tune].align_func;
2039 /* Validate -mbranch-cost= value, or provide default. */
2040 ix86_branch_cost = ix86_cost->branch_cost;
2041 if (ix86_branch_cost_string)
2043 i = atoi (ix86_branch_cost_string);
2044 if (i < 0 || i > 5)
2045 error ("-mbranch-cost=%d is not between 0 and 5", i);
2046 else
2047 ix86_branch_cost = i;
2049 if (ix86_section_threshold_string)
2051 i = atoi (ix86_section_threshold_string);
2052 if (i < 0)
2053 error ("-mlarge-data-threshold=%d is negative", i);
2054 else
2055 ix86_section_threshold = i;
2058 if (ix86_tls_dialect_string)
2060 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2061 ix86_tls_dialect = TLS_DIALECT_GNU;
2062 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2063 ix86_tls_dialect = TLS_DIALECT_GNU2;
2064 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2065 ix86_tls_dialect = TLS_DIALECT_SUN;
2066 else
2067 error ("bad value (%s) for -mtls-dialect= switch",
2068 ix86_tls_dialect_string);
2071 /* Keep nonleaf frame pointers. */
2072 if (flag_omit_frame_pointer)
2073 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2074 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2075 flag_omit_frame_pointer = 1;
2077 /* If we're doing fast math, we don't care about comparison order
2078 wrt NaNs. This lets us use a shorter comparison sequence. */
2079 if (flag_finite_math_only)
2080 target_flags &= ~MASK_IEEE_FP;
2082 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2083 since the insns won't need emulation. */
2084 if (x86_arch_always_fancy_math_387 & (1 << ix86_arch))
2085 target_flags &= ~MASK_NO_FANCY_MATH_387;
2087 /* Likewise, if the target doesn't have a 387, or we've specified
2088 software floating point, don't use 387 inline intrinsics. */
2089 if (!TARGET_80387)
2090 target_flags |= MASK_NO_FANCY_MATH_387;
2092 /* Turn on SSE3 builtins for -mssse3. */
2093 if (TARGET_SSSE3)
2094 target_flags |= MASK_SSE3;
2096 /* Turn on SSE2 builtins for -msse3. */
2097 if (TARGET_SSE3)
2098 target_flags |= MASK_SSE2;
2100 /* Turn on SSE builtins for -msse2. */
2101 if (TARGET_SSE2)
2102 target_flags |= MASK_SSE;
2104 /* Turn on MMX builtins for -msse. */
2105 if (TARGET_SSE)
2107 target_flags |= MASK_MMX & ~target_flags_explicit;
2108 x86_prefetch_sse = true;
2111 /* Turn on MMX builtins for 3Dnow. */
2112 if (TARGET_3DNOW)
2113 target_flags |= MASK_MMX;
2115 if (TARGET_64BIT)
2117 if (TARGET_ALIGN_DOUBLE)
2118 error ("-malign-double makes no sense in the 64bit mode");
2119 if (TARGET_RTD)
2120 error ("-mrtd calling convention not supported in the 64bit mode");
2122 /* Enable by default the SSE and MMX builtins. Do allow the user to
2123 explicitly disable any of these. In particular, disabling SSE and
2124 MMX for kernel code is extremely useful. */
2125 target_flags
2126 |= ((MASK_SSE2 | MASK_SSE | MASK_MMX | MASK_128BIT_LONG_DOUBLE)
2127 & ~target_flags_explicit);
2129 else
2131 /* The i386 ABI does not specify a red zone. It still makes sense to use it
2132 when the programmer takes care to keep the stack from being destroyed. */
2133 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2134 target_flags |= MASK_NO_RED_ZONE;
2137 /* Validate -mpreferred-stack-boundary= value, or provide default.
2138 The default of 128 bits is for Pentium III's SSE __m128. We can't
2139 change it because of optimize_size. Otherwise, we can't mix object
2140 files compiled with -Os and -On. */
2141 ix86_preferred_stack_boundary = 128;
2142 if (ix86_preferred_stack_boundary_string)
2144 i = atoi (ix86_preferred_stack_boundary_string);
2145 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
2146 error ("-mpreferred-stack-boundary=%d is not between %d and 12", i,
2147 TARGET_64BIT ? 4 : 2);
2148 else
2149 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
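/* For illustration: the option value is log2 of the alignment in bytes,
   so "-mpreferred-stack-boundary=4" requests (1 << 4) * BITS_PER_UNIT
   = 128 bits (16 bytes), matching the default above, while
   "-mpreferred-stack-boundary=2" on a 32-bit target requests 32 bits.  */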
2152 /* Accept -msseregparm only if at least SSE support is enabled. */
2153 if (TARGET_SSEREGPARM
2154 && ! TARGET_SSE)
2155 error ("-msseregparm used without SSE enabled");
2157 ix86_fpmath = TARGET_FPMATH_DEFAULT;
2159 if (ix86_fpmath_string != 0)
2161 if (! strcmp (ix86_fpmath_string, "387"))
2162 ix86_fpmath = FPMATH_387;
2163 else if (! strcmp (ix86_fpmath_string, "sse"))
2165 if (!TARGET_SSE)
2167 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2168 ix86_fpmath = FPMATH_387;
2170 else
2171 ix86_fpmath = FPMATH_SSE;
2173 else if (! strcmp (ix86_fpmath_string, "387,sse")
2174 || ! strcmp (ix86_fpmath_string, "sse,387"))
2176 if (!TARGET_SSE)
2178 warning (0, "SSE instruction set disabled, using 387 arithmetics");
2179 ix86_fpmath = FPMATH_387;
2181 else if (!TARGET_80387)
2183 warning (0, "387 instruction set disabled, using SSE arithmetics");
2184 ix86_fpmath = FPMATH_SSE;
2186 else
2187 ix86_fpmath = FPMATH_SSE | FPMATH_387;
2189 else
2190 error ("bad value (%s) for -mfpmath= switch", ix86_fpmath_string);
2193 /* If the i387 is disabled, then do not return values in it. */
2194 if (!TARGET_80387)
2195 target_flags &= ~MASK_FLOAT_RETURNS;
2197 if ((x86_accumulate_outgoing_args & TUNEMASK)
2198 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2199 && !optimize_size)
2200 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2202 /* ??? Unwind info is not correct around the CFG unless either a frame
2203 pointer is present or M_A_O_A is set. Fixing this requires rewriting
2204 unwind info generation to be aware of the CFG and propagating states
2205 around edges. */
2206 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
2207 || flag_exceptions || flag_non_call_exceptions)
2208 && flag_omit_frame_pointer
2209 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
2211 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
2212 warning (0, "unwind tables currently require either a frame pointer "
2213 "or -maccumulate-outgoing-args for correctness");
2214 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
2217 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
2219 char *p;
2220 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
2221 p = strchr (internal_label_prefix, 'X');
2222 internal_label_prefix_len = p - internal_label_prefix;
2223 *p = '\0';
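/* For illustration: if ASM_GENERATE_INTERNAL_LABEL produces something like
   "*.LX0" for ("LX", 0), as the usual ELF definition does, then everything
   before the 'X' ("*.L") becomes internal_label_prefix and
   internal_label_prefix_len is its length.  */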
2226 /* When the scheduling description is not available, disable the scheduler
2227 pass so it won't slow down compilation and make x87 code slower. */
2228 if (!TARGET_SCHEDULE)
2229 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
2231 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
2232 set_param_value ("simultaneous-prefetches",
2233 ix86_cost->simultaneous_prefetches);
2234 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
2235 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
2238 /* Switch to the appropriate section for output of DECL.
2239 DECL is either a `VAR_DECL' node or a constant of some sort.
2240 RELOC indicates whether forming the initial value of DECL requires
2241 link-time relocations. */
2243 static section *
2244 x86_64_elf_select_section (tree decl, int reloc,
2245 unsigned HOST_WIDE_INT align)
2247 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2248 && ix86_in_large_data_p (decl))
2250 const char *sname = NULL;
2251 unsigned int flags = SECTION_WRITE;
2252 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2254 case SECCAT_DATA:
2255 sname = ".ldata";
2256 break;
2257 case SECCAT_DATA_REL:
2258 sname = ".ldata.rel";
2259 break;
2260 case SECCAT_DATA_REL_LOCAL:
2261 sname = ".ldata.rel.local";
2262 break;
2263 case SECCAT_DATA_REL_RO:
2264 sname = ".ldata.rel.ro";
2265 break;
2266 case SECCAT_DATA_REL_RO_LOCAL:
2267 sname = ".ldata.rel.ro.local";
2268 break;
2269 case SECCAT_BSS:
2270 sname = ".lbss";
2271 flags |= SECTION_BSS;
2272 break;
2273 case SECCAT_RODATA:
2274 case SECCAT_RODATA_MERGE_STR:
2275 case SECCAT_RODATA_MERGE_STR_INIT:
2276 case SECCAT_RODATA_MERGE_CONST:
2277 sname = ".lrodata";
2278 flags = 0;
2279 break;
2280 case SECCAT_SRODATA:
2281 case SECCAT_SDATA:
2282 case SECCAT_SBSS:
2283 gcc_unreachable ();
2284 case SECCAT_TEXT:
2285 case SECCAT_TDATA:
2286 case SECCAT_TBSS:
2287 /* We don't split these for medium model. Place them into
2288 default sections and hope for the best.  */
2289 break;
2291 if (sname)
2293 /* We might get called with string constants, but get_named_section
2294 doesn't like them as they are not DECLs. Also, we need to set
2295 flags in that case. */
2296 if (!DECL_P (decl))
2297 return get_section (sname, flags, NULL);
2298 return get_named_section (decl, sname, reloc);
2301 return default_elf_select_section (decl, reloc, align);
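/* For illustration: with -mcmodel=medium and the default
   -mlarge-data-threshold of 65536, a writable global object bigger than
   the threshold is categorized as SECCAT_DATA and placed in ".ldata"
   above (an uninitialized one goes to ".lbss"), while smaller objects
   fall through to default_elf_select_section.  */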
2304 /* Build up a unique section name, expressed as a
2305 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
2306 RELOC indicates whether the initial value of EXP requires
2307 link-time relocations. */
2309 static void
2310 x86_64_elf_unique_section (tree decl, int reloc)
2312 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2313 && ix86_in_large_data_p (decl))
2315 const char *prefix = NULL;
2316 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
2317 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
2319 switch (categorize_decl_for_section (decl, reloc, flag_pic))
2321 case SECCAT_DATA:
2322 case SECCAT_DATA_REL:
2323 case SECCAT_DATA_REL_LOCAL:
2324 case SECCAT_DATA_REL_RO:
2325 case SECCAT_DATA_REL_RO_LOCAL:
2326 prefix = one_only ? ".gnu.linkonce.ld." : ".ldata.";
2327 break;
2328 case SECCAT_BSS:
2329 prefix = one_only ? ".gnu.linkonce.lb." : ".lbss.";
2330 break;
2331 case SECCAT_RODATA:
2332 case SECCAT_RODATA_MERGE_STR:
2333 case SECCAT_RODATA_MERGE_STR_INIT:
2334 case SECCAT_RODATA_MERGE_CONST:
2335 prefix = one_only ? ".gnu.linkonce.lr." : ".lrodata.";
2336 break;
2337 case SECCAT_SRODATA:
2338 case SECCAT_SDATA:
2339 case SECCAT_SBSS:
2340 gcc_unreachable ();
2341 case SECCAT_TEXT:
2342 case SECCAT_TDATA:
2343 case SECCAT_TBSS:
2344 /* We don't split these for medium model. Place them into
2345 default sections and hope for the best.  */
2346 break;
2348 if (prefix)
2350 const char *name;
2351 size_t nlen, plen;
2352 char *string;
2353 plen = strlen (prefix);
2355 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
2356 name = targetm.strip_name_encoding (name);
2357 nlen = strlen (name);
2359 string = alloca (nlen + plen + 1);
2360 memcpy (string, prefix, plen);
2361 memcpy (string + plen, name, nlen + 1);
2363 DECL_SECTION_NAME (decl) = build_string (nlen + plen, string);
2364 return;
2367 default_unique_section (decl, reloc);
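/* For illustration: in the medium code model a large one-only variable
   "foo" (when COMDAT groups are unavailable) gets the section name
   ".gnu.linkonce.ld.foo" built above, while an ordinary large
   uninitialized variable gets ".lbss.foo".  */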
2370 #ifdef COMMON_ASM_OP
2371 /* This says how to output assembler code to declare an
2372 uninitialized external linkage data object.
2374 For medium model x86-64 we need to use the .largecomm pseudo-op for
2375 large objects. */
2376 void
2377 x86_elf_aligned_common (FILE *file,
2378 const char *name, unsigned HOST_WIDE_INT size,
2379 int align)
2381 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2382 && size > (unsigned int)ix86_section_threshold)
2383 fprintf (file, ".largecomm\t");
2384 else
2385 fprintf (file, "%s", COMMON_ASM_OP);
2386 assemble_name (file, name);
2387 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
2388 size, align / BITS_PER_UNIT);
2390 #endif
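/* For illustration, x86_elf_aligned_common emits either
	.comm	foo,100,4
   or, for objects above the large-data threshold in the medium model,
	.largecomm	foo,100000,32
   where the last field is the alignment converted from bits to bytes.  */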
2391 /* Utility function for targets to use in implementing
2392 ASM_OUTPUT_ALIGNED_BSS. */
2394 void
2395 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
2396 const char *name, unsigned HOST_WIDE_INT size,
2397 int align)
2399 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
2400 && size > (unsigned int)ix86_section_threshold)
2401 switch_to_section (get_named_section (decl, ".lbss", 0));
2402 else
2403 switch_to_section (bss_section);
2404 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
2405 #ifdef ASM_DECLARE_OBJECT_NAME
2406 last_assemble_variable_decl = decl;
2407 ASM_DECLARE_OBJECT_NAME (file, name, decl);
2408 #else
2409 /* The standard thing is just to output a label for the object. */
2410 ASM_OUTPUT_LABEL (file, name);
2411 #endif /* ASM_DECLARE_OBJECT_NAME */
2412 ASM_OUTPUT_SKIP (file, size ? size : 1);
2415 void
2416 optimization_options (int level, int size ATTRIBUTE_UNUSED)
2418 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
2419 make the problem with not enough registers even worse. */
2420 #ifdef INSN_SCHEDULING
2421 if (level > 1)
2422 flag_schedule_insns = 0;
2423 #endif
2425 if (TARGET_MACHO)
2426 /* The Darwin libraries never set errno, so we might as well
2427 avoid calling them when that's the only reason we would. */
2428 flag_errno_math = 0;
2430 /* The default values of these switches depend on TARGET_64BIT,
2431 which is not known at this moment. Mark these values with 2 and
2432 let the user override them. In case there is no command line option
2433 specifying them, we will set the defaults in override_options.  */
2434 if (optimize >= 1)
2435 flag_omit_frame_pointer = 2;
2436 flag_pcc_struct_return = 2;
2437 flag_asynchronous_unwind_tables = 2;
2438 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
2439 SUBTARGET_OPTIMIZATION_OPTIONS;
2440 #endif
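/* For illustration of the "2" convention above: optimization_options runs
   before the -m options are parsed, so it cannot know TARGET_64BIT;
   override_options later replaces the sentinel, e.g. it turns
   flag_omit_frame_pointer == 2 into 1 for 64-bit (non-Mach-O) targets and
   into 0 for 32-bit targets.  */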
2443 /* Table of valid machine attributes. */
2444 const struct attribute_spec ix86_attribute_table[] =
2446 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2447 /* Stdcall attribute says callee is responsible for popping arguments
2448 if they are not variable. */
2449 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2450 /* Fastcall attribute says callee is responsible for popping arguments
2451 if they are not variable. */
2452 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2453 /* Cdecl attribute says the callee is a normal C declaration */
2454 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2455 /* Regparm attribute specifies how many integer arguments are to be
2456 passed in registers. */
2457 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
2458 /* Sseregparm attribute says we are using x86_64 calling conventions
2459 for FP arguments. */
2460 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
2461 /* force_align_arg_pointer says this function realigns the stack at entry. */
2462 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
2463 false, true, true, ix86_handle_cconv_attribute },
2464 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2465 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2466 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2467 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
2468 #endif
2469 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2470 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
2471 #ifdef SUBTARGET_ATTRIBUTE_TABLE
2472 SUBTARGET_ATTRIBUTE_TABLE,
2473 #endif
2474 { NULL, 0, 0, false, false, false, NULL }
2475 };
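/* A minimal usage sketch for the calling-convention attributes handled by
   this table (illustrative only, hence not compiled):  */
#if 0
int __attribute__ ((stdcall)) f (int a, int b);		/* callee pops args */
int __attribute__ ((fastcall)) g (int a, int b);	/* args in %ecx/%edx */
int __attribute__ ((regparm (3))) h (int a, int b, int c); /* args in regs */
#endif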
2477 /* Decide whether we can make a sibling call to a function. DECL is the
2478 declaration of the function being targeted by the call and EXP is the
2479 CALL_EXPR representing the call. */
2481 static bool
2482 ix86_function_ok_for_sibcall (tree decl, tree exp)
2484 tree func;
2485 rtx a, b;
2487 /* If we are generating position-independent code, we cannot sibcall
2488 optimize any indirect call, or a direct call to a global function,
2489 as the PLT requires %ebx be live. */
2490 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
2491 return false;
2493 if (decl)
2494 func = decl;
2495 else
2497 func = TREE_TYPE (TREE_OPERAND (exp, 0));
2498 if (POINTER_TYPE_P (func))
2499 func = TREE_TYPE (func);
2502 /* Check that the return value locations are the same. For example,
2503 if we are returning floats on the 80387 register stack, we cannot
2504 make a sibcall from a function that doesn't return a float to a
2505 function that does or, conversely, from a function that does return
2506 a float to a function that doesn't; the necessary stack adjustment
2507 would not be executed. This is also the place we notice
2508 differences in the return value ABI. Note that it is ok for one
2509 of the functions to have void return type as long as the return
2510 value of the other is passed in a register. */
2511 a = ix86_function_value (TREE_TYPE (exp), func, false);
2512 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
2513 cfun->decl, false);
2514 if (STACK_REG_P (a) || STACK_REG_P (b))
2516 if (!rtx_equal_p (a, b))
2517 return false;
2519 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
2521 else if (!rtx_equal_p (a, b))
2522 return false;
2524 /* If this call is indirect, we'll need to be able to use a call-clobbered
2525 register for the address of the target function. Make sure that all
2526 such registers are not used for passing parameters. */
2527 if (!decl && !TARGET_64BIT)
2529 tree type;
2531 /* We're looking at the CALL_EXPR, we need the type of the function. */
2532 type = TREE_OPERAND (exp, 0); /* pointer expression */
2533 type = TREE_TYPE (type); /* pointer type */
2534 type = TREE_TYPE (type); /* function type */
2536 if (ix86_function_regparm (type, NULL) >= 3)
2538 /* ??? Need to count the actual number of registers to be used,
2539 not the possible number of registers. Fix later. */
2540 return false;
2544 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
2545 /* Dllimport'd functions are also called indirectly. */
2546 if (decl && DECL_DLLIMPORT_P (decl)
2547 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
2548 return false;
2549 #endif
2551 /* If we force-aligned the stack, then sibcalling would unalign the
2552 stack, which may break the called function. */
2553 if (cfun->machine->force_align_arg_pointer)
2554 return false;
2556 /* Otherwise okay. That also includes certain types of indirect calls. */
2557 return true;
2560 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
2561 calling convention attributes;
2562 arguments as in struct attribute_spec.handler. */
2564 static tree
2565 ix86_handle_cconv_attribute (tree *node, tree name,
2566 tree args,
2567 int flags ATTRIBUTE_UNUSED,
2568 bool *no_add_attrs)
2570 if (TREE_CODE (*node) != FUNCTION_TYPE
2571 && TREE_CODE (*node) != METHOD_TYPE
2572 && TREE_CODE (*node) != FIELD_DECL
2573 && TREE_CODE (*node) != TYPE_DECL)
2575 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2576 IDENTIFIER_POINTER (name));
2577 *no_add_attrs = true;
2578 return NULL_TREE;
2581 /* Can combine regparm with all attributes but fastcall. */
2582 if (is_attribute_p ("regparm", name))
2584 tree cst;
2586 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2588 error ("fastcall and regparm attributes are not compatible");
2591 cst = TREE_VALUE (args);
2592 if (TREE_CODE (cst) != INTEGER_CST)
2594 warning (OPT_Wattributes,
2595 "%qs attribute requires an integer constant argument",
2596 IDENTIFIER_POINTER (name));
2597 *no_add_attrs = true;
2599 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
2601 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
2602 IDENTIFIER_POINTER (name), REGPARM_MAX);
2603 *no_add_attrs = true;
2606 if (!TARGET_64BIT
2607 && lookup_attribute (ix86_force_align_arg_pointer_string,
2608 TYPE_ATTRIBUTES (*node))
2609 && compare_tree_int (cst, REGPARM_MAX-1))
2611 error ("%s functions limited to %d register parameters",
2612 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
2615 return NULL_TREE;
2618 if (TARGET_64BIT)
2620 warning (OPT_Wattributes, "%qs attribute ignored",
2621 IDENTIFIER_POINTER (name));
2622 *no_add_attrs = true;
2623 return NULL_TREE;
2626 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
2627 if (is_attribute_p ("fastcall", name))
2629 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2631 error ("fastcall and cdecl attributes are not compatible");
2633 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2635 error ("fastcall and stdcall attributes are not compatible");
2637 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
2639 error ("fastcall and regparm attributes are not compatible");
2643 /* Can combine stdcall with fastcall (redundant), regparm and
2644 sseregparm. */
2645 else if (is_attribute_p ("stdcall", name))
2647 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
2649 error ("stdcall and cdecl attributes are not compatible");
2651 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2653 error ("stdcall and fastcall attributes are not compatible");
2657 /* Can combine cdecl with regparm and sseregparm. */
2658 else if (is_attribute_p ("cdecl", name))
2660 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
2662 error ("stdcall and cdecl attributes are not compatible");
2664 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
2666 error ("fastcall and cdecl attributes are not compatible");
2670 /* Can combine sseregparm with all attributes. */
2672 return NULL_TREE;
2675 /* Return 0 if the attributes for two types are incompatible, 1 if they
2676 are compatible, and 2 if they are nearly compatible (which causes a
2677 warning to be generated). */
2679 static int
2680 ix86_comp_type_attributes (tree type1, tree type2)
2682 /* Check for mismatch of non-default calling convention. */
2683 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
2685 if (TREE_CODE (type1) != FUNCTION_TYPE)
2686 return 1;
2688 /* Check for mismatched fastcall/regparm types. */
2689 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
2690 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
2691 || (ix86_function_regparm (type1, NULL)
2692 != ix86_function_regparm (type2, NULL)))
2693 return 0;
2695 /* Check for mismatched sseregparm types. */
2696 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
2697 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
2698 return 0;
2700 /* Check for mismatched return types (cdecl vs stdcall). */
2701 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
2702 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
2703 return 0;
2705 return 1;
2708 /* Return the regparm value for a function with the indicated TYPE and DECL.
2709 DECL may be NULL when calling function indirectly
2710 or considering a libcall. */
2712 static int
2713 ix86_function_regparm (tree type, tree decl)
2715 tree attr;
2716 int regparm = ix86_regparm;
2717 bool user_convention = false;
2719 if (!TARGET_64BIT)
2721 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
2722 if (attr)
2724 regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
2725 user_convention = true;
2728 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
2730 regparm = 2;
2731 user_convention = true;
2734 /* Use register calling convention for local functions when possible. */
2735 if (!TARGET_64BIT && !user_convention && decl
2736 && flag_unit_at_a_time && !profile_flag)
2738 struct cgraph_local_info *i = cgraph_local_info (decl);
2739 if (i && i->local)
2741 int local_regparm, globals = 0, regno;
2743 /* Make sure no regparm register is taken by a global register
2744 variable. */
2745 for (local_regparm = 0; local_regparm < 3; local_regparm++)
2746 if (global_regs[local_regparm])
2747 break;
2748 /* We can't use regparm(3) for nested functions as these use
2749 static chain pointer in third argument. */
2750 if (local_regparm == 3
2751 && decl_function_context (decl)
2752 && !DECL_NO_STATIC_CHAIN (decl))
2753 local_regparm = 2;
2754 /* If the function realigns its stack pointer, the
2755 prologue will clobber %ecx. If we've already
2756 generated code for the callee, the callee
2757 DECL_STRUCT_FUNCTION is gone, so we fall back to
2758 scanning the attributes for the self-realigning
2759 property. */
2760 if ((DECL_STRUCT_FUNCTION (decl)
2761 && DECL_STRUCT_FUNCTION (decl)->machine->force_align_arg_pointer)
2762 || (!DECL_STRUCT_FUNCTION (decl)
2763 && lookup_attribute (ix86_force_align_arg_pointer_string,
2764 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
2765 local_regparm = 2;
2766 /* Each global register variable increases register pressure, so the
2767 more global register variables there are, the less the regparm
2768 optimization can be used, unless requested by the user explicitly.  */
2769 for (regno = 0; regno < 6; regno++)
2770 if (global_regs[regno])
2771 globals++;
2772 local_regparm
2773 = globals < local_regparm ? local_regparm - globals : 0;
2775 if (local_regparm > regparm)
2776 regparm = local_regparm;
2780 return regparm;
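/* For illustration: a function that cgraph considers local (roughly: file
   scope, never called indirectly), compiled with -funit-at-a-time and
   without profiling, may be silently promoted to regparm(3) by the code
   above -- or to fewer registers when global register variables, a static
   chain, or stack realignment get in the way.  */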
2783 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
2784 DFmode (2) arguments in SSE registers for a function with the
2785 indicated TYPE and DECL. DECL may be NULL when calling function
2786 indirectly or considering a libcall. Otherwise return 0. */
2788 static int
2789 ix86_function_sseregparm (tree type, tree decl)
2791 /* Use SSE registers to pass SFmode and DFmode arguments if requested
2792 by the sseregparm attribute. */
2793 if (TARGET_SSEREGPARM
2794 || (type
2795 && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
2797 if (!TARGET_SSE)
2799 if (decl)
2800 error ("Calling %qD with attribute sseregparm without "
2801 "SSE/SSE2 enabled", decl);
2802 else
2803 error ("Calling %qT with attribute sseregparm without "
2804 "SSE/SSE2 enabled", type);
2805 return 0;
2808 return 2;
2811 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
2812 (and DFmode for SSE2) arguments in SSE registers,
2813 even for 32-bit targets. */
2814 if (!TARGET_64BIT && decl
2815 && TARGET_SSE_MATH && flag_unit_at_a_time && !profile_flag)
2817 struct cgraph_local_info *i = cgraph_local_info (decl);
2818 if (i && i->local)
2819 return TARGET_SSE2 ? 2 : 1;
2822 return 0;
2825 /* Return true if EAX is live at the start of the function. Used by
2826 ix86_expand_prologue to determine if we need special help before
2827 calling allocate_stack_worker. */
2829 static bool
2830 ix86_eax_live_at_start_p (void)
2832 /* Cheat. Don't bother working forward from ix86_function_regparm
2833 to the function type to whether an actual argument is located in
2834 eax. Instead just look at cfg info, which is still close enough
2835 to correct at this point. This gives false positives for broken
2836 functions that might use uninitialized data that happens to be
2837 allocated in eax, but who cares? */
2838 return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->il.rtl->global_live_at_end, 0);
2841 /* Value is the number of bytes of arguments automatically
2842 popped when returning from a subroutine call.
2843 FUNDECL is the declaration node of the function (as a tree),
2844 FUNTYPE is the data type of the function (as a tree),
2845 or for a library call it is an identifier node for the subroutine name.
2846 SIZE is the number of bytes of arguments passed on the stack.
2848 On the 80386, the RTD insn may be used to pop them if the number
2849 of args is fixed, but if the number is variable then the caller
2850 must pop them all. RTD can't be used for library calls now
2851 because the library is compiled with the Unix compiler.
2852 Use of RTD is a selectable option, since it is incompatible with
2853 standard Unix calling sequences. If the option is not selected,
2854 the caller must always pop the args.
2856 The attribute stdcall is equivalent to RTD on a per module basis. */
2858 int
2859 ix86_return_pops_args (tree fundecl, tree funtype, int size)
2861 int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
2863 /* Cdecl functions override -mrtd, and never pop the stack. */
2864 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
2866 /* Stdcall and fastcall functions will pop the stack if not
2867 variable args. */
2868 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
2869 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
2870 rtd = 1;
2872 if (rtd
2873 && (TYPE_ARG_TYPES (funtype) == NULL_TREE
2874 || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
2875 == void_type_node)))
2876 return size;
2879 /* Lose any fake structure return argument if it is passed on the stack. */
2880 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
2881 && !TARGET_64BIT
2882 && !KEEP_AGGREGATE_RETURN_POINTER)
2884 int nregs = ix86_function_regparm (funtype, fundecl);
2886 if (!nregs)
2887 return GET_MODE_SIZE (Pmode);
2890 return 0;
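/* For illustration: for "void __attribute__ ((stdcall)) f (int, int)" the
   callee pops the 8 bytes of arguments (SIZE is returned above), whereas a
   cdecl or variable-argument function pops nothing; -mrtd makes the
   stdcall behaviour the default for fixed-argument functions.  */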
2893 /* Argument support functions. */
2895 /* Return true when register may be used to pass function parameters. */
2896 bool
2897 ix86_function_arg_regno_p (int regno)
2899 int i;
2900 if (!TARGET_64BIT)
2901 return (regno < REGPARM_MAX
2902 || (TARGET_MMX && MMX_REGNO_P (regno)
2903 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
2904 || (TARGET_SSE && SSE_REGNO_P (regno)
2905 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
2907 if (TARGET_SSE && SSE_REGNO_P (regno)
2908 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
2909 return true;
2910 /* RAX is used as hidden argument to va_arg functions. */
2911 if (!regno)
2912 return true;
2913 for (i = 0; i < REGPARM_MAX; i++)
2914 if (regno == x86_64_int_parameter_registers[i])
2915 return true;
2916 return false;
2919 /* Return true if we do not know how to pass TYPE solely in registers. */
2921 static bool
2922 ix86_must_pass_in_stack (enum machine_mode mode, tree type)
2924 if (must_pass_in_stack_var_size_or_pad (mode, type))
2925 return true;
2927 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
2928 The layout_type routine is crafty and tries to trick us into passing
2929 currently unsupported vector types on the stack by using TImode. */
2930 return (!TARGET_64BIT && mode == TImode
2931 && type && TREE_CODE (type) != VECTOR_TYPE);
2934 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2935 for a call to a function whose data type is FNTYPE.
2936 For a library call, FNTYPE is 0. */
2938 void
2939 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
2940 tree fntype, /* tree ptr for function decl */
2941 rtx libname, /* SYMBOL_REF of library name or 0 */
2942 tree fndecl)
2944 static CUMULATIVE_ARGS zero_cum;
2945 tree param, next_param;
2947 if (TARGET_DEBUG_ARG)
2949 fprintf (stderr, "\ninit_cumulative_args (");
2950 if (fntype)
2951 fprintf (stderr, "fntype code = %s, ret code = %s",
2952 tree_code_name[(int) TREE_CODE (fntype)],
2953 tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
2954 else
2955 fprintf (stderr, "no fntype");
2957 if (libname)
2958 fprintf (stderr, ", libname = %s", XSTR (libname, 0));
2961 *cum = zero_cum;
2963 /* Set up the number of registers to use for passing arguments. */
2964 cum->nregs = ix86_regparm;
2965 if (TARGET_SSE)
2966 cum->sse_nregs = SSE_REGPARM_MAX;
2967 if (TARGET_MMX)
2968 cum->mmx_nregs = MMX_REGPARM_MAX;
2969 cum->warn_sse = true;
2970 cum->warn_mmx = true;
2971 cum->maybe_vaarg = false;
2973 /* Use ecx and edx registers if function has fastcall attribute,
2974 else look for regparm information. */
2975 if (fntype && !TARGET_64BIT)
2977 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
2979 cum->nregs = 2;
2980 cum->fastcall = 1;
2982 else
2983 cum->nregs = ix86_function_regparm (fntype, fndecl);
2986 /* Set up the number of SSE registers used for passing SFmode
2987 and DFmode arguments. Warn for mismatching ABI. */
2988 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl);
2990 /* Determine if this function has variable arguments. This is
2991 indicated by the last argument being 'void_type_node' if there are no
2992 are no variable arguments. If there are variable arguments, then
2993 we won't pass anything in registers in 32-bit mode. */
2995 if (cum->nregs || cum->mmx_nregs || cum->sse_nregs)
2997 for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
2998 param != 0; param = next_param)
3000 next_param = TREE_CHAIN (param);
3001 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3003 if (!TARGET_64BIT)
3005 cum->nregs = 0;
3006 cum->sse_nregs = 0;
3007 cum->mmx_nregs = 0;
3008 cum->warn_sse = 0;
3009 cum->warn_mmx = 0;
3010 cum->fastcall = 0;
3011 cum->float_in_sse = 0;
3013 cum->maybe_vaarg = true;
3017 if ((!fntype && !libname)
3018 || (fntype && !TYPE_ARG_TYPES (fntype)))
3019 cum->maybe_vaarg = true;
3021 if (TARGET_DEBUG_ARG)
3022 fprintf (stderr, ", nregs=%d )\n", cum->nregs);
3024 return;
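/* A small illustration of the 32-bit setup above (hypothetical
   declarations, not from this file):
       void __attribute__ ((fastcall)) f (int a, int b, int c);
   gives cum->nregs == 2, so A and B are passed in %ecx/%edx and C on the
   stack, while a prototype ending in "..." clears all of the register
   counts so every argument goes to the stack.  */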
3027 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
3028 But in the case of vector types, it is some vector mode.
3030 When we have only some of our vector isa extensions enabled, then there
3031 are some modes for which vector_mode_supported_p is false. For these
3032 modes, the generic vector support in gcc will choose some non-vector mode
3033 in order to implement the type. By computing the natural mode, we'll
3034 select the proper ABI location for the operand and not depend on whatever
3035 the middle-end decides to do with these vector types. */
3037 static enum machine_mode
3038 type_natural_mode (tree type)
3040 enum machine_mode mode = TYPE_MODE (type);
3042 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
3044 HOST_WIDE_INT size = int_size_in_bytes (type);
3045 if ((size == 8 || size == 16)
3046 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
3047 && TYPE_VECTOR_SUBPARTS (type) > 1)
3049 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
3051 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
3052 mode = MIN_MODE_VECTOR_FLOAT;
3053 else
3054 mode = MIN_MODE_VECTOR_INT;
3056 /* Get the mode which has this inner mode and number of units. */
3057 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
3058 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
3059 && GET_MODE_INNER (mode) == innermode)
3060 return mode;
3062 gcc_unreachable ();
3066 return mode;
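/* Example of the effect above (sketch): for
       typedef int v2si __attribute__ ((vector_size (8)));
   TYPE_MODE may degrade to DImode when MMX is disabled, but the natural
   mode computed here is still V2SImode, so the argument-passing decision
   does not silently change with -mmmx/-mno-mmx.  */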
3069 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
3070 this may not agree with the mode that the type system has chosen for the
3071 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
3072 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
3074 static rtx
3075 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
3076 unsigned int regno)
3078 rtx tmp;
3080 if (orig_mode != BLKmode)
3081 tmp = gen_rtx_REG (orig_mode, regno);
3082 else
3084 tmp = gen_rtx_REG (mode, regno);
3085 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
3086 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
3089 return tmp;
3092 /* x86-64 register passing implementation. See x86-64 ABI for details. Goal
3093 of this code is to classify each 8bytes of incoming argument by the register
3094 class and assign registers accordingly. */
3096 /* Return the union class of CLASS1 and CLASS2.
3097 See the x86-64 PS ABI for details. */
3099 static enum x86_64_reg_class
3100 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
3102 /* Rule #1: If both classes are equal, this is the resulting class. */
3103 if (class1 == class2)
3104 return class1;
3106 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
3107 the other class. */
3108 if (class1 == X86_64_NO_CLASS)
3109 return class2;
3110 if (class2 == X86_64_NO_CLASS)
3111 return class1;
3113 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
3114 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
3115 return X86_64_MEMORY_CLASS;
3117 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
3118 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
3119 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
3120 return X86_64_INTEGERSI_CLASS;
3121 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
3122 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
3123 return X86_64_INTEGER_CLASS;
3125 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
3126 MEMORY is used. */
3127 if (class1 == X86_64_X87_CLASS
3128 || class1 == X86_64_X87UP_CLASS
3129 || class1 == X86_64_COMPLEX_X87_CLASS
3130 || class2 == X86_64_X87_CLASS
3131 || class2 == X86_64_X87UP_CLASS
3132 || class2 == X86_64_COMPLEX_X87_CLASS)
3133 return X86_64_MEMORY_CLASS;
3135 /* Rule #6: Otherwise class SSE is used. */
3136 return X86_64_SSE_CLASS;
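/* For instance (sketch), the single eightbyte of
       struct { float f; int i; }
   collects SSESF from the float and INTEGER from the int; rule #4 makes
   the merged class INTEGER, so the pair travels in a general purpose
   register rather than an SSE one.  */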
3139 /* Classify the argument of type TYPE and mode MODE.
3140 CLASSES will be filled by the register class used to pass each word
3141 of the operand. The number of words is returned. In case the parameter
3142 should be passed in memory, 0 is returned. As a special case for zero
3143 sized containers, classes[0] will be NO_CLASS and 1 is returned.
3145 BIT_OFFSET is used internally for handling records and specifies the
3146 offset in bits modulo 256 to avoid overflow cases.
3148 See the x86-64 PS ABI for details.
3151 static int
3152 classify_argument (enum machine_mode mode, tree type,
3153 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
3155 HOST_WIDE_INT bytes =
3156 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3157 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3159 /* Variable sized entities are always passed/returned in memory. */
3160 if (bytes < 0)
3161 return 0;
3163 if (mode != VOIDmode
3164 && targetm.calls.must_pass_in_stack (mode, type))
3165 return 0;
3167 if (type && AGGREGATE_TYPE_P (type))
3169 int i;
3170 tree field;
3171 enum x86_64_reg_class subclasses[MAX_CLASSES];
3173 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
3174 if (bytes > 16)
3175 return 0;
3177 for (i = 0; i < words; i++)
3178 classes[i] = X86_64_NO_CLASS;
3180 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
3181 signal the memory class, so handle it as a special case. */
3182 if (!words)
3184 classes[0] = X86_64_NO_CLASS;
3185 return 1;
3188 /* Classify each field of record and merge classes. */
3189 switch (TREE_CODE (type))
3191 case RECORD_TYPE:
3192 /* And now merge the fields of structure. */
3193 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3195 if (TREE_CODE (field) == FIELD_DECL)
3197 int num;
3199 if (TREE_TYPE (field) == error_mark_node)
3200 continue;
3202 /* Bitfields are always classified as integer. Handle them
3203 early, since later code would consider them to be
3204 misaligned integers. */
3205 if (DECL_BIT_FIELD (field))
3207 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3208 i < ((int_bit_position (field) + (bit_offset % 64))
3209 + tree_low_cst (DECL_SIZE (field), 0)
3210 + 63) / 8 / 8; i++)
3211 classes[i] =
3212 merge_classes (X86_64_INTEGER_CLASS,
3213 classes[i]);
3215 else
3217 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3218 TREE_TYPE (field), subclasses,
3219 (int_bit_position (field)
3220 + bit_offset) % 256);
3221 if (!num)
3222 return 0;
3223 for (i = 0; i < num; i++)
3225 int pos =
3226 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
3227 classes[i + pos] =
3228 merge_classes (subclasses[i], classes[i + pos]);
3233 break;
3235 case ARRAY_TYPE:
3236 /* Arrays are handled as small records. */
3238 int num;
3239 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
3240 TREE_TYPE (type), subclasses, bit_offset);
3241 if (!num)
3242 return 0;
3244 /* The partial classes are now full classes. */
3245 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
3246 subclasses[0] = X86_64_SSE_CLASS;
3247 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
3248 subclasses[0] = X86_64_INTEGER_CLASS;
3250 for (i = 0; i < words; i++)
3251 classes[i] = subclasses[i % num];
3253 break;
3255 case UNION_TYPE:
3256 case QUAL_UNION_TYPE:
3257 /* Unions are similar to RECORD_TYPE but the offset is always 0.
3259 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3261 if (TREE_CODE (field) == FIELD_DECL)
3263 int num;
3265 if (TREE_TYPE (field) == error_mark_node)
3266 continue;
3268 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
3269 TREE_TYPE (field), subclasses,
3270 bit_offset);
3271 if (!num)
3272 return 0;
3273 for (i = 0; i < num; i++)
3274 classes[i] = merge_classes (subclasses[i], classes[i]);
3277 break;
3279 default:
3280 gcc_unreachable ();
3283 /* Final merger cleanup. */
3284 for (i = 0; i < words; i++)
3286 /* If one class is MEMORY, everything should be passed in
3287 memory. */
3288 if (classes[i] == X86_64_MEMORY_CLASS)
3289 return 0;
3291 /* The X86_64_SSEUP_CLASS should be always preceded by
3292 X86_64_SSE_CLASS. */
3293 if (classes[i] == X86_64_SSEUP_CLASS
3294 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
3295 classes[i] = X86_64_SSE_CLASS;
3297 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
3298 if (classes[i] == X86_64_X87UP_CLASS
3299 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
3300 classes[i] = X86_64_SSE_CLASS;
3302 return words;
3305 /* Compute alignment needed. We align all types to natural boundaries with
3306 the exception of XFmode, which is aligned to 128 bits. */
3307 if (mode != VOIDmode && mode != BLKmode)
3309 int mode_alignment = GET_MODE_BITSIZE (mode);
3311 if (mode == XFmode)
3312 mode_alignment = 128;
3313 else if (mode == XCmode)
3314 mode_alignment = 256;
3315 if (COMPLEX_MODE_P (mode))
3316 mode_alignment /= 2;
3317 /* Misaligned fields are always returned in memory. */
3318 if (bit_offset % mode_alignment)
3319 return 0;
3322 /* for V1xx modes, just use the base mode */
3323 if (VECTOR_MODE_P (mode)
3324 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
3325 mode = GET_MODE_INNER (mode);
3327 /* Classification of atomic types. */
3328 switch (mode)
3330 case SDmode:
3331 case DDmode:
3332 classes[0] = X86_64_SSE_CLASS;
3333 return 1;
3334 case TDmode:
3335 classes[0] = X86_64_SSE_CLASS;
3336 classes[1] = X86_64_SSEUP_CLASS;
3337 return 2;
3338 case DImode:
3339 case SImode:
3340 case HImode:
3341 case QImode:
3342 case CSImode:
3343 case CHImode:
3344 case CQImode:
3345 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3346 classes[0] = X86_64_INTEGERSI_CLASS;
3347 else
3348 classes[0] = X86_64_INTEGER_CLASS;
3349 return 1;
3350 case CDImode:
3351 case TImode:
3352 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
3353 return 2;
3354 case CTImode:
3355 return 0;
3356 case SFmode:
3357 if (!(bit_offset % 64))
3358 classes[0] = X86_64_SSESF_CLASS;
3359 else
3360 classes[0] = X86_64_SSE_CLASS;
3361 return 1;
3362 case DFmode:
3363 classes[0] = X86_64_SSEDF_CLASS;
3364 return 1;
3365 case XFmode:
3366 classes[0] = X86_64_X87_CLASS;
3367 classes[1] = X86_64_X87UP_CLASS;
3368 return 2;
3369 case TFmode:
3370 classes[0] = X86_64_SSE_CLASS;
3371 classes[1] = X86_64_SSEUP_CLASS;
3372 return 2;
3373 case SCmode:
3374 classes[0] = X86_64_SSE_CLASS;
3375 return 1;
3376 case DCmode:
3377 classes[0] = X86_64_SSEDF_CLASS;
3378 classes[1] = X86_64_SSEDF_CLASS;
3379 return 2;
3380 case XCmode:
3381 classes[0] = X86_64_COMPLEX_X87_CLASS;
3382 return 1;
3383 case TCmode:
3384 /* This mode is larger than 16 bytes. */
3385 return 0;
3386 case V4SFmode:
3387 case V4SImode:
3388 case V16QImode:
3389 case V8HImode:
3390 case V2DFmode:
3391 case V2DImode:
3392 classes[0] = X86_64_SSE_CLASS;
3393 classes[1] = X86_64_SSEUP_CLASS;
3394 return 2;
3395 case V2SFmode:
3396 case V2SImode:
3397 case V4HImode:
3398 case V8QImode:
3399 classes[0] = X86_64_SSE_CLASS;
3400 return 1;
3401 case BLKmode:
3402 case VOIDmode:
3403 return 0;
3404 default:
3405 gcc_assert (VECTOR_MODE_P (mode));
3407 if (bytes > 16)
3408 return 0;
3410 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
3412 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
3413 classes[0] = X86_64_INTEGERSI_CLASS;
3414 else
3415 classes[0] = X86_64_INTEGER_CLASS;
3416 classes[1] = X86_64_INTEGER_CLASS;
3417 return 1 + (bytes > 8);
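/* A worked example of the classification above (illustrative only):
       struct s { double d; long l; };
   spans two eightbytes and classify_argument returns 2 with
   classes[0] == X86_64_SSEDF_CLASS and classes[1] == X86_64_INTEGER_CLASS,
   i.e. one SSE register plus one integer register.  */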
3421 /* Examine the argument and set the number of registers required in each
3422 class. Return 0 iff the parameter should be passed in memory. */
3423 static int
3424 examine_argument (enum machine_mode mode, tree type, int in_return,
3425 int *int_nregs, int *sse_nregs)
3427 enum x86_64_reg_class class[MAX_CLASSES];
3428 int n = classify_argument (mode, type, class, 0);
3430 *int_nregs = 0;
3431 *sse_nregs = 0;
3432 if (!n)
3433 return 0;
3434 for (n--; n >= 0; n--)
3435 switch (class[n])
3437 case X86_64_INTEGER_CLASS:
3438 case X86_64_INTEGERSI_CLASS:
3439 (*int_nregs)++;
3440 break;
3441 case X86_64_SSE_CLASS:
3442 case X86_64_SSESF_CLASS:
3443 case X86_64_SSEDF_CLASS:
3444 (*sse_nregs)++;
3445 break;
3446 case X86_64_NO_CLASS:
3447 case X86_64_SSEUP_CLASS:
3448 break;
3449 case X86_64_X87_CLASS:
3450 case X86_64_X87UP_CLASS:
3451 if (!in_return)
3452 return 0;
3453 break;
3454 case X86_64_COMPLEX_X87_CLASS:
3455 return in_return ? 2 : 0;
3456 case X86_64_MEMORY_CLASS:
3457 gcc_unreachable ();
3459 return 1;
3462 /* Construct container for the argument used by GCC interface. See
3463 FUNCTION_ARG for the detailed description. */
3465 static rtx
3466 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
3467 tree type, int in_return, int nintregs, int nsseregs,
3468 const int *intreg, int sse_regno)
3470 /* The following variables hold the static issued_error state. */
3471 static bool issued_sse_arg_error;
3472 static bool issued_sse_ret_error;
3473 static bool issued_x87_ret_error;
3475 enum machine_mode tmpmode;
3476 int bytes =
3477 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3478 enum x86_64_reg_class class[MAX_CLASSES];
3479 int n;
3480 int i;
3481 int nexps = 0;
3482 int needed_sseregs, needed_intregs;
3483 rtx exp[MAX_CLASSES];
3484 rtx ret;
3486 n = classify_argument (mode, type, class, 0);
3487 if (TARGET_DEBUG_ARG)
3489 if (!n)
3490 fprintf (stderr, "Memory class\n");
3491 else
3493 fprintf (stderr, "Classes:");
3494 for (i = 0; i < n; i++)
3496 fprintf (stderr, " %s", x86_64_reg_class_name[class[i]]);
3498 fprintf (stderr, "\n");
3501 if (!n)
3502 return NULL;
3503 if (!examine_argument (mode, type, in_return, &needed_intregs,
3504 &needed_sseregs))
3505 return NULL;
3506 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
3507 return NULL;
3509 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
3510 some less clueful developer tries to use floating-point anyway. */
3511 if (needed_sseregs && !TARGET_SSE)
3513 if (in_return)
3515 if (!issued_sse_ret_error)
3517 error ("SSE register return with SSE disabled");
3518 issued_sse_ret_error = true;
3521 else if (!issued_sse_arg_error)
3523 error ("SSE register argument with SSE disabled");
3524 issued_sse_arg_error = true;
3526 return NULL;
3529 /* Likewise, error if the ABI requires us to return values in the
3530 x87 registers and the user specified -mno-80387. */
3531 if (!TARGET_80387 && in_return)
3532 for (i = 0; i < n; i++)
3533 if (class[i] == X86_64_X87_CLASS
3534 || class[i] == X86_64_X87UP_CLASS
3535 || class[i] == X86_64_COMPLEX_X87_CLASS)
3537 if (!issued_x87_ret_error)
3539 error ("x87 register return with x87 disabled");
3540 issued_x87_ret_error = true;
3542 return NULL;
3545 /* First construct simple cases. Avoid SCmode, since we want to use
3546 a single register to pass this type. */
3547 if (n == 1 && mode != SCmode)
3548 switch (class[0])
3550 case X86_64_INTEGER_CLASS:
3551 case X86_64_INTEGERSI_CLASS:
3552 return gen_rtx_REG (mode, intreg[0]);
3553 case X86_64_SSE_CLASS:
3554 case X86_64_SSESF_CLASS:
3555 case X86_64_SSEDF_CLASS:
3556 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
3557 case X86_64_X87_CLASS:
3558 case X86_64_COMPLEX_X87_CLASS:
3559 return gen_rtx_REG (mode, FIRST_STACK_REG);
3560 case X86_64_NO_CLASS:
3561 /* Zero sized array, struct or class. */
3562 return NULL;
3563 default:
3564 gcc_unreachable ();
3566 if (n == 2 && class[0] == X86_64_SSE_CLASS && class[1] == X86_64_SSEUP_CLASS
3567 && mode != BLKmode)
3568 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
3569 if (n == 2
3570 && class[0] == X86_64_X87_CLASS && class[1] == X86_64_X87UP_CLASS)
3571 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
3572 if (n == 2 && class[0] == X86_64_INTEGER_CLASS
3573 && class[1] == X86_64_INTEGER_CLASS
3574 && (mode == CDImode || mode == TImode || mode == TFmode)
3575 && intreg[0] + 1 == intreg[1])
3576 return gen_rtx_REG (mode, intreg[0]);
3578 /* Otherwise figure out the entries of the PARALLEL. */
3579 for (i = 0; i < n; i++)
3581 switch (class[i])
3583 case X86_64_NO_CLASS:
3584 break;
3585 case X86_64_INTEGER_CLASS:
3586 case X86_64_INTEGERSI_CLASS:
3587 /* Merge TImodes on aligned occasions here too. */
3588 if (i * 8 + 8 > bytes)
3589 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
3590 else if (class[i] == X86_64_INTEGERSI_CLASS)
3591 tmpmode = SImode;
3592 else
3593 tmpmode = DImode;
3594 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
3595 if (tmpmode == BLKmode)
3596 tmpmode = DImode;
3597 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3598 gen_rtx_REG (tmpmode, *intreg),
3599 GEN_INT (i*8));
3600 intreg++;
3601 break;
3602 case X86_64_SSESF_CLASS:
3603 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3604 gen_rtx_REG (SFmode,
3605 SSE_REGNO (sse_regno)),
3606 GEN_INT (i*8));
3607 sse_regno++;
3608 break;
3609 case X86_64_SSEDF_CLASS:
3610 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3611 gen_rtx_REG (DFmode,
3612 SSE_REGNO (sse_regno)),
3613 GEN_INT (i*8));
3614 sse_regno++;
3615 break;
3616 case X86_64_SSE_CLASS:
3617 if (i < n - 1 && class[i + 1] == X86_64_SSEUP_CLASS)
3618 tmpmode = TImode;
3619 else
3620 tmpmode = DImode;
3621 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
3622 gen_rtx_REG (tmpmode,
3623 SSE_REGNO (sse_regno)),
3624 GEN_INT (i*8));
3625 if (tmpmode == TImode)
3626 i++;
3627 sse_regno++;
3628 break;
3629 default:
3630 gcc_unreachable ();
3634 /* Empty aligned struct, union or class. */
3635 if (nexps == 0)
3636 return NULL;
3638 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
3639 for (i = 0; i < nexps; i++)
3640 XVECEXP (ret, 0, i) = exp [i];
3641 return ret;
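/* Continuing the struct { double d; long l; } sketch, as a first argument
   construct_container builds roughly
       (parallel [(expr_list (reg:DF xmm0) (const_int 0))
                  (expr_list (reg:DI di) (const_int 8))])
   so the low eightbyte is loaded into %xmm0 and the high one into %rdi.  */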
3644 /* Update the data in CUM to advance over an argument
3645 of mode MODE and data type TYPE.
3646 (TYPE is null for libcalls where that information may not be available.) */
3648 void
3649 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3650 tree type, int named)
3652 int bytes =
3653 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3654 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3656 if (type)
3657 mode = type_natural_mode (type);
3659 if (TARGET_DEBUG_ARG)
3660 fprintf (stderr, "function_adv (sz=%d, wds=%2d, nregs=%d, ssenregs=%d, "
3661 "mode=%s, named=%d)\n\n",
3662 words, cum->words, cum->nregs, cum->sse_nregs,
3663 GET_MODE_NAME (mode), named);
3665 if (TARGET_64BIT)
3667 int int_nregs, sse_nregs;
3668 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
3669 cum->words += words;
3670 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
3672 cum->nregs -= int_nregs;
3673 cum->sse_nregs -= sse_nregs;
3674 cum->regno += int_nregs;
3675 cum->sse_regno += sse_nregs;
3677 else
3678 cum->words += words;
3680 else
3682 switch (mode)
3684 default:
3685 break;
3687 case BLKmode:
3688 if (bytes < 0)
3689 break;
3690 /* FALLTHRU */
3692 case DImode:
3693 case SImode:
3694 case HImode:
3695 case QImode:
3696 cum->words += words;
3697 cum->nregs -= words;
3698 cum->regno += words;
3700 if (cum->nregs <= 0)
3702 cum->nregs = 0;
3703 cum->regno = 0;
3705 break;
3707 case DFmode:
3708 if (cum->float_in_sse < 2)
3709 break;
3710 case SFmode:
3711 if (cum->float_in_sse < 1)
3712 break;
3713 /* FALLTHRU */
3715 case TImode:
3716 case V16QImode:
3717 case V8HImode:
3718 case V4SImode:
3719 case V2DImode:
3720 case V4SFmode:
3721 case V2DFmode:
3722 if (!type || !AGGREGATE_TYPE_P (type))
3724 cum->sse_words += words;
3725 cum->sse_nregs -= 1;
3726 cum->sse_regno += 1;
3727 if (cum->sse_nregs <= 0)
3729 cum->sse_nregs = 0;
3730 cum->sse_regno = 0;
3733 break;
3735 case V8QImode:
3736 case V4HImode:
3737 case V2SImode:
3738 case V2SFmode:
3739 if (!type || !AGGREGATE_TYPE_P (type))
3741 cum->mmx_words += words;
3742 cum->mmx_nregs -= 1;
3743 cum->mmx_regno += 1;
3744 if (cum->mmx_nregs <= 0)
3746 cum->mmx_nregs = 0;
3747 cum->mmx_regno = 0;
3750 break;
3755 /* Define where to put the arguments to a function.
3756 Value is zero to push the argument on the stack,
3757 or a hard register in which to store the argument.
3759 MODE is the argument's machine mode.
3760 TYPE is the data type of the argument (as a tree).
3761 This is null for libcalls where that information may
3762 not be available.
3763 CUM is a variable of type CUMULATIVE_ARGS which gives info about
3764 the preceding args and about the function being called.
3765 NAMED is nonzero if this argument is a named parameter
3766 (otherwise it is an extra parameter matching an ellipsis). */
3769 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode orig_mode,
3770 tree type, int named)
3772 enum machine_mode mode = orig_mode;
3773 rtx ret = NULL_RTX;
3774 int bytes =
3775 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
3776 int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3777 static bool warnedsse, warnedmmx;
3779 /* To simplify the code below, represent vector types with a vector mode
3780 even if MMX/SSE are not active. */
3781 if (type && TREE_CODE (type) == VECTOR_TYPE)
3782 mode = type_natural_mode (type);
3784 /* Handle a hidden AL argument containing the number of registers for varargs
3785 x86-64 functions. For i386 ABI just return constm1_rtx to avoid
3786 any AL settings. */
3787 if (mode == VOIDmode)
3789 if (TARGET_64BIT)
3790 return GEN_INT (cum->maybe_vaarg
3791 ? (cum->sse_nregs < 0
3792 ? SSE_REGPARM_MAX
3793 : cum->sse_regno)
3794 : -1);
3795 else
3796 return constm1_rtx;
3798 if (TARGET_64BIT)
3799 ret = construct_container (mode, orig_mode, type, 0, cum->nregs,
3800 cum->sse_nregs,
3801 &x86_64_int_parameter_registers [cum->regno],
3802 cum->sse_regno);
3803 else
3804 switch (mode)
3806 /* For now, pass fp/complex values on the stack. */
3807 default:
3808 break;
3810 case BLKmode:
3811 if (bytes < 0)
3812 break;
3813 /* FALLTHRU */
3814 case DImode:
3815 case SImode:
3816 case HImode:
3817 case QImode:
3818 if (words <= cum->nregs)
3820 int regno = cum->regno;
3822 /* Fastcall allocates the first two DWORD (SImode) or
3823 smaller arguments to ECX and EDX. */
3824 if (cum->fastcall)
3826 if (mode == BLKmode || mode == DImode)
3827 break;
3829 /* ECX, not EAX, is the first allocated register. */
3830 if (regno == 0)
3831 regno = 2;
3833 ret = gen_rtx_REG (mode, regno);
3835 break;
3836 case DFmode:
3837 if (cum->float_in_sse < 2)
3838 break;
3839 case SFmode:
3840 if (cum->float_in_sse < 1)
3841 break;
3842 /* FALLTHRU */
3843 case TImode:
3844 case V16QImode:
3845 case V8HImode:
3846 case V4SImode:
3847 case V2DImode:
3848 case V4SFmode:
3849 case V2DFmode:
3850 if (!type || !AGGREGATE_TYPE_P (type))
3852 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
3854 warnedsse = true;
3855 warning (0, "SSE vector argument without SSE enabled "
3856 "changes the ABI");
3858 if (cum->sse_nregs)
3859 ret = gen_reg_or_parallel (mode, orig_mode,
3860 cum->sse_regno + FIRST_SSE_REG);
3862 break;
3863 case V8QImode:
3864 case V4HImode:
3865 case V2SImode:
3866 case V2SFmode:
3867 if (!type || !AGGREGATE_TYPE_P (type))
3869 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
3871 warnedmmx = true;
3872 warning (0, "MMX vector argument without MMX enabled "
3873 "changes the ABI");
3875 if (cum->mmx_nregs)
3876 ret = gen_reg_or_parallel (mode, orig_mode,
3877 cum->mmx_regno + FIRST_MMX_REG);
3879 break;
3882 if (TARGET_DEBUG_ARG)
3884 fprintf (stderr,
3885 "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d, ",
3886 words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
3888 if (ret)
3889 print_simple_rtl (stderr, ret);
3890 else
3891 fprintf (stderr, ", stack");
3893 fprintf (stderr, " )\n");
3896 return ret;
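/* Note on the VOIDmode case above (sketch): for a 64-bit variadic call
   such as printf ("%f", x) the value returned there is later loaded into
   %al, telling the callee how many SSE registers actually carry
   arguments (1 here), so its prologue can skip the unused SSE saves.  */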
3899 /* A C expression that indicates when an argument must be passed by
3900 reference. If nonzero for an argument, a copy of that argument is
3901 made in memory and a pointer to the argument is passed instead of
3902 the argument itself. The pointer is passed in whatever way is
3903 appropriate for passing a pointer to that type. */
3905 static bool
3906 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3907 enum machine_mode mode ATTRIBUTE_UNUSED,
3908 tree type, bool named ATTRIBUTE_UNUSED)
3910 if (!TARGET_64BIT)
3911 return 0;
3913 if (type && int_size_in_bytes (type) == -1)
3915 if (TARGET_DEBUG_ARG)
3916 fprintf (stderr, "function_arg_pass_by_reference\n");
3917 return 1;
3920 return 0;
3923 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
3924 ABI. Only called if TARGET_SSE. */
3925 static bool
3926 contains_128bit_aligned_vector_p (tree type)
3928 enum machine_mode mode = TYPE_MODE (type);
3929 if (SSE_REG_MODE_P (mode)
3930 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
3931 return true;
3932 if (TYPE_ALIGN (type) < 128)
3933 return false;
3935 if (AGGREGATE_TYPE_P (type))
3937 /* Walk the aggregates recursively. */
3938 switch (TREE_CODE (type))
3940 case RECORD_TYPE:
3941 case UNION_TYPE:
3942 case QUAL_UNION_TYPE:
3944 tree field;
3946 /* Walk all the structure fields. */
3947 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3949 if (TREE_CODE (field) == FIELD_DECL
3950 && contains_128bit_aligned_vector_p (TREE_TYPE (field)))
3951 return true;
3953 break;
3956 case ARRAY_TYPE:
3957 /* Just for use if some languages pass arrays by value. */
3958 if (contains_128bit_aligned_vector_p (TREE_TYPE (type)))
3959 return true;
3960 break;
3962 default:
3963 gcc_unreachable ();
3966 return false;
3969 /* Gives the alignment boundary, in bits, of an argument with the
3970 specified mode and type. */
3973 ix86_function_arg_boundary (enum machine_mode mode, tree type)
3975 int align;
3976 if (type)
3977 align = TYPE_ALIGN (type);
3978 else
3979 align = GET_MODE_ALIGNMENT (mode);
3980 if (align < PARM_BOUNDARY)
3981 align = PARM_BOUNDARY;
3982 if (!TARGET_64BIT)
3984 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
3985 make an exception for SSE modes since these require 128bit
3986 alignment.
3988 The handling here differs from field_alignment. ICC aligns MMX
3989 arguments to 4 byte boundaries, while structure fields are aligned
3990 to 8 byte boundaries. */
3991 if (!TARGET_SSE)
3992 align = PARM_BOUNDARY;
3993 else if (!type)
3995 if (!SSE_REG_MODE_P (mode))
3996 align = PARM_BOUNDARY;
3998 else
4000 if (!contains_128bit_aligned_vector_p (type))
4001 align = PARM_BOUNDARY;
4004 if (align > 128)
4005 align = 128;
4006 return align;
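/* Rough 32-bit examples of the rules above: scalar arguments keep the
   PARM_BOUNDARY (32-bit) alignment; with SSE enabled an __m128 argument,
   or a struct containing one, is aligned to 128 bits on the stack; and
   no argument is ever aligned beyond 128 bits.  */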
4009 /* Return true if REGNO is a possible register number of a function value. */
4010 bool
4011 ix86_function_value_regno_p (int regno)
4013 if (regno == 0
4014 || (regno == FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387)
4015 || (regno == FIRST_SSE_REG && TARGET_SSE))
4016 return true;
4018 if (!TARGET_64BIT
4019 && (regno == FIRST_MMX_REG && TARGET_MMX))
4020 return true;
4022 return false;
4025 /* Define how to find the value returned by a function.
4026 VALTYPE is the data type of the value (as a tree).
4027 If the precise function being called is known, FUNC is its FUNCTION_DECL;
4028 otherwise, FUNC is 0. */
4030 ix86_function_value (tree valtype, tree fntype_or_decl,
4031 bool outgoing ATTRIBUTE_UNUSED)
4033 enum machine_mode natmode = type_natural_mode (valtype);
4035 if (TARGET_64BIT)
4037 rtx ret = construct_container (natmode, TYPE_MODE (valtype), valtype,
4038 1, REGPARM_MAX, SSE_REGPARM_MAX,
4039 x86_64_int_return_registers, 0);
4040 /* For zero sized structures, construct_container returns NULL, but we
4041 need to keep the rest of the compiler happy by returning a meaningful value. */
4042 if (!ret)
4043 ret = gen_rtx_REG (TYPE_MODE (valtype), 0);
4044 return ret;
4046 else
4048 tree fn = NULL_TREE, fntype;
4049 if (fntype_or_decl
4050 && DECL_P (fntype_or_decl))
4051 fn = fntype_or_decl;
4052 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
4053 return gen_rtx_REG (TYPE_MODE (valtype),
4054 ix86_value_regno (natmode, fn, fntype));
4058 /* Return true iff type is returned in memory. */
4060 ix86_return_in_memory (tree type)
4062 int needed_intregs, needed_sseregs, size;
4063 enum machine_mode mode = type_natural_mode (type);
4065 if (TARGET_64BIT)
4066 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
4068 if (mode == BLKmode)
4069 return 1;
4071 size = int_size_in_bytes (type);
4073 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
4074 return 0;
4076 if (VECTOR_MODE_P (mode) || mode == TImode)
4078 /* User-created vectors small enough to fit in EAX. */
4079 if (size < 8)
4080 return 0;
4082 /* MMX/3dNow values are returned in MM0,
4083 except when it doesn't exist. */
4084 if (size == 8)
4085 return (TARGET_MMX ? 0 : 1);
4087 /* SSE values are returned in XMM0, except when it doesn't exist. */
4088 if (size == 16)
4089 return (TARGET_SSE ? 0 : 1);
4092 if (mode == XFmode)
4093 return 0;
4095 if (mode == TDmode)
4096 return 1;
4098 if (size > 12)
4099 return 1;
4100 return 0;
4103 /* When returning SSE vector types, we have a choice of either
4104 (1) being abi incompatible with a -march switch, or
4105 (2) generating an error.
4106 Given no good solution, I think the safest thing is one warning.
4107 The user won't be able to use -Werror, but....
4109 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
4110 called in response to actually generating a caller or callee that
4111 uses such a type. As opposed to RETURN_IN_MEMORY, which is called
4112 via aggregate_value_p for general type probing from tree-ssa. */
4114 static rtx
4115 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
4117 static bool warnedsse, warnedmmx;
4119 if (type)
4121 /* Look at the return type of the function, not the function type. */
4122 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
4124 if (!TARGET_SSE && !warnedsse)
4126 if (mode == TImode
4127 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4129 warnedsse = true;
4130 warning (0, "SSE vector return without SSE enabled "
4131 "changes the ABI");
4135 if (!TARGET_MMX && !warnedmmx)
4137 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4139 warnedmmx = true;
4140 warning (0, "MMX vector return without MMX enabled "
4141 "changes the ABI");
4146 return NULL;
4149 /* Define how to find the value returned by a library function
4150 assuming the value has mode MODE. */
4152 ix86_libcall_value (enum machine_mode mode)
4154 if (TARGET_64BIT)
4156 switch (mode)
4158 case SFmode:
4159 case SCmode:
4160 case DFmode:
4161 case DCmode:
4162 case TFmode:
4163 case SDmode:
4164 case DDmode:
4165 case TDmode:
4166 return gen_rtx_REG (mode, FIRST_SSE_REG);
4167 case XFmode:
4168 case XCmode:
4169 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
4170 case TCmode:
4171 return NULL;
4172 default:
4173 return gen_rtx_REG (mode, 0);
4176 else
4177 return gen_rtx_REG (mode, ix86_value_regno (mode, NULL, NULL));
4180 /* Given a mode, return the register to use for a return value. */
4182 static int
4183 ix86_value_regno (enum machine_mode mode, tree func, tree fntype)
4185 gcc_assert (!TARGET_64BIT);
4187 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
4188 we normally prevent this case when mmx is not available. However
4189 some ABIs may require the result to be returned like DImode. */
4190 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
4191 return TARGET_MMX ? FIRST_MMX_REG : 0;
4193 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
4194 we prevent this case when sse is not available. However some ABIs
4195 may require the result to be returned like integer TImode. */
4196 if (mode == TImode || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
4197 return TARGET_SSE ? FIRST_SSE_REG : 0;
4199 /* Decimal floating point values can go in %eax, unlike other float modes. */
4200 if (DECIMAL_FLOAT_MODE_P (mode))
4201 return 0;
4203 /* Most things go in %eax, except (unless -mno-fp-ret-in-387) fp values. */
4204 if (!SCALAR_FLOAT_MODE_P (mode) || !TARGET_FLOAT_RETURNS_IN_80387)
4205 return 0;
4207 /* Floating point return values in %st(0), except for local functions when
4208 SSE math is enabled or for functions with sseregparm attribute. */
4209 if ((func || fntype)
4210 && (mode == SFmode || mode == DFmode))
4212 int sse_level = ix86_function_sseregparm (fntype, func);
4213 if ((sse_level >= 1 && mode == SFmode)
4214 || (sse_level == 2 && mode == DFmode))
4215 return FIRST_SSE_REG;
4218 return FIRST_FLOAT_REG;
4221 /* Create the va_list data type. */
4223 static tree
4224 ix86_build_builtin_va_list (void)
4226 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
4228 /* For i386 we use plain pointer to argument area. */
4229 if (!TARGET_64BIT)
4230 return build_pointer_type (char_type_node);
4232 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4233 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
4235 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
4236 unsigned_type_node);
4237 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
4238 unsigned_type_node);
4239 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
4240 ptr_type_node);
4241 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
4242 ptr_type_node);
4244 va_list_gpr_counter_field = f_gpr;
4245 va_list_fpr_counter_field = f_fpr;
4247 DECL_FIELD_CONTEXT (f_gpr) = record;
4248 DECL_FIELD_CONTEXT (f_fpr) = record;
4249 DECL_FIELD_CONTEXT (f_ovf) = record;
4250 DECL_FIELD_CONTEXT (f_sav) = record;
4252 TREE_CHAIN (record) = type_decl;
4253 TYPE_NAME (record) = type_decl;
4254 TYPE_FIELDS (record) = f_gpr;
4255 TREE_CHAIN (f_gpr) = f_fpr;
4256 TREE_CHAIN (f_fpr) = f_ovf;
4257 TREE_CHAIN (f_ovf) = f_sav;
4259 layout_type (record);
4261 /* The correct type is an array type of one element. */
4262 return build_array_type (record, build_index_type (size_zero_node));
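/* The record built above corresponds to the psABI layout, roughly:
       typedef struct {
         unsigned int gp_offset;
         unsigned int fp_offset;
         void *overflow_arg_area;
         void *reg_save_area;
       } __va_list_tag, va_list[1];   */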
4265 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
4267 static void
4268 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4269 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4270 int no_rtl)
4272 CUMULATIVE_ARGS next_cum;
4273 rtx save_area = NULL_RTX, mem;
4274 rtx label;
4275 rtx label_ref;
4276 rtx tmp_reg;
4277 rtx nsse_reg;
4278 int set;
4279 tree fntype;
4280 int stdarg_p;
4281 int i;
4283 if (!TARGET_64BIT)
4284 return;
4286 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
4287 return;
4289 /* Indicate that we need to allocate stack space for the varargs save area. */
4290 ix86_save_varrargs_registers = 1;
4292 cfun->stack_alignment_needed = 128;
4294 fntype = TREE_TYPE (current_function_decl);
4295 stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
4296 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
4297 != void_type_node));
4299 /* For varargs, we do not want to skip the dummy va_dcl argument.
4300 For stdargs, we do want to skip the last named argument. */
4301 next_cum = *cum;
4302 if (stdarg_p)
4303 function_arg_advance (&next_cum, mode, type, 1);
4305 if (!no_rtl)
4306 save_area = frame_pointer_rtx;
4308 set = get_varargs_alias_set ();
4310 for (i = next_cum.regno;
4311 i < ix86_regparm
4312 && i < next_cum.regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
4313 i++)
4315 mem = gen_rtx_MEM (Pmode,
4316 plus_constant (save_area, i * UNITS_PER_WORD));
4317 MEM_NOTRAP_P (mem) = 1;
4318 set_mem_alias_set (mem, set);
4319 emit_move_insn (mem, gen_rtx_REG (Pmode,
4320 x86_64_int_parameter_registers[i]));
4323 if (next_cum.sse_nregs && cfun->va_list_fpr_size)
4325 /* Now emit code to save SSE registers. The AX parameter contains the
4326 number of SSE parameter registers used to call this function. We use
4327 the sse_prologue_save insn template, which produces a computed jump
4328 across the SSE saves. We need some preparation work to get this working. */
4330 label = gen_label_rtx ();
4331 label_ref = gen_rtx_LABEL_REF (Pmode, label);
4333 /* Compute address to jump to :
4334 label - 4*eax + nnamed_sse_arguments*4 */
4335 tmp_reg = gen_reg_rtx (Pmode);
4336 nsse_reg = gen_reg_rtx (Pmode);
4337 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, 0)));
4338 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4339 gen_rtx_MULT (Pmode, nsse_reg,
4340 GEN_INT (4))));
4341 if (next_cum.sse_regno)
4342 emit_move_insn
4343 (nsse_reg,
4344 gen_rtx_CONST (DImode,
4345 gen_rtx_PLUS (DImode,
4346 label_ref,
4347 GEN_INT (next_cum.sse_regno * 4))));
4348 else
4349 emit_move_insn (nsse_reg, label_ref);
4350 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
4352 /* Compute address of memory block we save into. We always use pointer
4353 pointing 127 bytes after the first byte to store - this is needed to keep
4354 the instruction size limited to 4 bytes. */
4355 tmp_reg = gen_reg_rtx (Pmode);
4356 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
4357 plus_constant (save_area,
4358 8 * REGPARM_MAX + 127)));
4359 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
4360 MEM_NOTRAP_P (mem) = 1;
4361 set_mem_alias_set (mem, set);
4362 set_mem_align (mem, BITS_PER_WORD);
4364 /* And finally do the dirty job! */
4365 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
4366 GEN_INT (next_cum.sse_regno), label));
4371 /* Implement va_start. */
4373 void
4374 ix86_va_start (tree valist, rtx nextarg)
4376 HOST_WIDE_INT words, n_gpr, n_fpr;
4377 tree f_gpr, f_fpr, f_ovf, f_sav;
4378 tree gpr, fpr, ovf, sav, t;
4379 tree type;
4381 /* Only 64bit target needs something special. */
4382 if (!TARGET_64BIT)
4384 std_expand_builtin_va_start (valist, nextarg);
4385 return;
4388 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4389 f_fpr = TREE_CHAIN (f_gpr);
4390 f_ovf = TREE_CHAIN (f_fpr);
4391 f_sav = TREE_CHAIN (f_ovf);
4393 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
4394 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4395 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4396 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4397 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4399 /* Count number of gp and fp argument registers used. */
4400 words = current_function_args_info.words;
4401 n_gpr = current_function_args_info.regno;
4402 n_fpr = current_function_args_info.sse_regno;
4404 if (TARGET_DEBUG_ARG)
4405 fprintf (stderr, "va_start: words = %d, n_gpr = %d, n_fpr = %d\n",
4406 (int) words, (int) n_gpr, (int) n_fpr);
4408 if (cfun->va_list_gpr_size)
4410 type = TREE_TYPE (gpr);
4411 t = build2 (GIMPLE_MODIFY_STMT, type, gpr,
4412 build_int_cst (type, n_gpr * 8));
4413 TREE_SIDE_EFFECTS (t) = 1;
4414 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4417 if (cfun->va_list_fpr_size)
4419 type = TREE_TYPE (fpr);
4420 t = build2 (GIMPLE_MODIFY_STMT, type, fpr,
4421 build_int_cst (type, n_fpr * 16 + 8*REGPARM_MAX));
4422 TREE_SIDE_EFFECTS (t) = 1;
4423 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4426 /* Find the overflow area. */
4427 type = TREE_TYPE (ovf);
4428 t = make_tree (type, virtual_incoming_args_rtx);
4429 if (words != 0)
4430 t = build2 (PLUS_EXPR, type, t,
4431 build_int_cst (type, words * UNITS_PER_WORD));
4432 t = build2 (GIMPLE_MODIFY_STMT, type, ovf, t);
4433 TREE_SIDE_EFFECTS (t) = 1;
4434 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4436 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
4438 /* Find the register save area.
4439 The function prologue saves it right above the stack frame. */
4440 type = TREE_TYPE (sav);
4441 t = make_tree (type, frame_pointer_rtx);
4442 t = build2 (GIMPLE_MODIFY_STMT, type, sav, t);
4443 TREE_SIDE_EFFECTS (t) = 1;
4444 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
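/* Small example of the values set above: in  void f (int a, ...)  the one
   named integer argument gives n_gpr == 1 and n_fpr == 0, so gp_offset
   starts at 8, fp_offset at 8 * REGPARM_MAX == 48 (the SSE slots follow
   the six GP slots in the save area), and overflow_arg_area points at
   the first stack-passed argument.  */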
4448 /* Implement va_arg. */
4450 tree
4451 ix86_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4453 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
4454 tree f_gpr, f_fpr, f_ovf, f_sav;
4455 tree gpr, fpr, ovf, sav, t;
4456 int size, rsize;
4457 tree lab_false, lab_over = NULL_TREE;
4458 tree addr, t2;
4459 rtx container;
4460 int indirect_p = 0;
4461 tree ptrtype;
4462 enum machine_mode nat_mode;
4464 /* Only 64bit target needs something special. */
4465 if (!TARGET_64BIT)
4466 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4468 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
4469 f_fpr = TREE_CHAIN (f_gpr);
4470 f_ovf = TREE_CHAIN (f_fpr);
4471 f_sav = TREE_CHAIN (f_ovf);
4473 valist = build_va_arg_indirect_ref (valist);
4474 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
4475 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
4476 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
4477 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
4479 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
4480 if (indirect_p)
4481 type = build_pointer_type (type);
4482 size = int_size_in_bytes (type);
4483 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4485 nat_mode = type_natural_mode (type);
4486 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
4487 REGPARM_MAX, SSE_REGPARM_MAX, intreg, 0);
4489 /* Pull the value out of the saved registers. */
4491 addr = create_tmp_var (ptr_type_node, "addr");
4492 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
4494 if (container)
4496 int needed_intregs, needed_sseregs;
4497 bool need_temp;
4498 tree int_addr, sse_addr;
4500 lab_false = create_artificial_label ();
4501 lab_over = create_artificial_label ();
4503 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
4505 need_temp = (!REG_P (container)
4506 && ((needed_intregs && TYPE_ALIGN (type) > 64)
4507 || TYPE_ALIGN (type) > 128));
4509 /* In case we are passing a structure, verify that it is a consecutive block
4510 in the register save area. If not, we need to do moves. */
4511 if (!need_temp && !REG_P (container))
4513 /* Verify that all registers are strictly consecutive */
4514 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
4516 int i;
4518 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4520 rtx slot = XVECEXP (container, 0, i);
4521 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
4522 || INTVAL (XEXP (slot, 1)) != i * 16)
4523 need_temp = 1;
4526 else
4528 int i;
4530 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
4532 rtx slot = XVECEXP (container, 0, i);
4533 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
4534 || INTVAL (XEXP (slot, 1)) != i * 8)
4535 need_temp = 1;
4539 if (!need_temp)
4541 int_addr = addr;
4542 sse_addr = addr;
4544 else
4546 int_addr = create_tmp_var (ptr_type_node, "int_addr");
4547 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
4548 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
4549 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
4552 /* First ensure that we fit completely in registers. */
4553 if (needed_intregs)
4555 t = build_int_cst (TREE_TYPE (gpr),
4556 (REGPARM_MAX - needed_intregs + 1) * 8);
4557 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
4558 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4559 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4560 gimplify_and_add (t, pre_p);
4562 if (needed_sseregs)
4564 t = build_int_cst (TREE_TYPE (fpr),
4565 (SSE_REGPARM_MAX - needed_sseregs + 1) * 16
4566 + REGPARM_MAX * 8);
4567 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
4568 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
4569 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
4570 gimplify_and_add (t, pre_p);
4573 /* Compute index to start of area used for integer regs. */
4574 if (needed_intregs)
4576 /* int_addr = gpr + sav; */
4577 t = fold_convert (ptr_type_node, gpr);
4578 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4579 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, int_addr, t);
4580 gimplify_and_add (t, pre_p);
4582 if (needed_sseregs)
4584 /* sse_addr = fpr + sav; */
4585 t = fold_convert (ptr_type_node, fpr);
4586 t = build2 (PLUS_EXPR, ptr_type_node, sav, t);
4587 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, sse_addr, t);
4588 gimplify_and_add (t, pre_p);
4590 if (need_temp)
4592 int i;
4593 tree temp = create_tmp_var (type, "va_arg_tmp");
4595 /* addr = &temp; */
4596 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
4597 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4598 gimplify_and_add (t, pre_p);
4600 for (i = 0; i < XVECLEN (container, 0); i++)
4602 rtx slot = XVECEXP (container, 0, i);
4603 rtx reg = XEXP (slot, 0);
4604 enum machine_mode mode = GET_MODE (reg);
4605 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
4606 tree addr_type = build_pointer_type (piece_type);
4607 tree src_addr, src;
4608 int src_offset;
4609 tree dest_addr, dest;
4611 if (SSE_REGNO_P (REGNO (reg)))
4613 src_addr = sse_addr;
4614 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
4616 else
4618 src_addr = int_addr;
4619 src_offset = REGNO (reg) * 8;
4621 src_addr = fold_convert (addr_type, src_addr);
4622 src_addr = fold (build2 (PLUS_EXPR, addr_type, src_addr,
4623 size_int (src_offset)));
4624 src = build_va_arg_indirect_ref (src_addr);
4626 dest_addr = fold_convert (addr_type, addr);
4627 dest_addr = fold (build2 (PLUS_EXPR, addr_type, dest_addr,
4628 size_int (INTVAL (XEXP (slot, 1)))));
4629 dest = build_va_arg_indirect_ref (dest_addr);
4631 t = build2 (GIMPLE_MODIFY_STMT, void_type_node, dest, src);
4632 gimplify_and_add (t, pre_p);
4636 if (needed_intregs)
4638 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
4639 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
4640 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gpr), gpr, t);
4641 gimplify_and_add (t, pre_p);
4643 if (needed_sseregs)
4645 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
4646 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
4647 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (fpr), fpr, t);
4648 gimplify_and_add (t, pre_p);
4651 t = build1 (GOTO_EXPR, void_type_node, lab_over);
4652 gimplify_and_add (t, pre_p);
4654 t = build1 (LABEL_EXPR, void_type_node, lab_false);
4655 append_to_statement_list (t, pre_p);
4658 /* ... otherwise out of the overflow area. */
4660 /* Care for on-stack alignment if needed. */
4661 if (FUNCTION_ARG_BOUNDARY (VOIDmode, type) <= 64
4662 || integer_zerop (TYPE_SIZE (type)))
4663 t = ovf;
4664 else
4666 HOST_WIDE_INT align = FUNCTION_ARG_BOUNDARY (VOIDmode, type) / 8;
4667 t = build2 (PLUS_EXPR, TREE_TYPE (ovf), ovf,
4668 build_int_cst (TREE_TYPE (ovf), align - 1));
4669 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4670 build_int_cst (TREE_TYPE (t), -align));
4672 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
4674 t2 = build2 (GIMPLE_MODIFY_STMT, void_type_node, addr, t);
4675 gimplify_and_add (t2, pre_p);
4677 t = build2 (PLUS_EXPR, TREE_TYPE (t), t,
4678 build_int_cst (TREE_TYPE (t), rsize * UNITS_PER_WORD));
4679 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovf), ovf, t);
4680 gimplify_and_add (t, pre_p);
4682 if (container)
4684 t = build1 (LABEL_EXPR, void_type_node, lab_over);
4685 append_to_statement_list (t, pre_p);
4688 ptrtype = build_pointer_type (type);
4689 addr = fold_convert (ptrtype, addr);
4691 if (indirect_p)
4692 addr = build_va_arg_indirect_ref (addr);
4693 return build_va_arg_indirect_ref (addr);
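/* The GIMPLE emitted above amounts to roughly this pseudo-C for a single
   integer argument (the SSE and stack-only cases are analogous):
       if (ap->gp_offset < 6 * 8)
         {
           addr = ap->reg_save_area + ap->gp_offset;
           ap->gp_offset += 8;
         }
       else
         {
           addr = ap->overflow_arg_area;
           ap->overflow_arg_area += 8;
         }
       result = *(type *) addr;   */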
4696 /* Return nonzero if OPNUM's MEM should be matched
4697 in movabs* patterns. */
4700 ix86_check_movabs (rtx insn, int opnum)
4702 rtx set, mem;
4704 set = PATTERN (insn);
4705 if (GET_CODE (set) == PARALLEL)
4706 set = XVECEXP (set, 0, 0);
4707 gcc_assert (GET_CODE (set) == SET);
4708 mem = XEXP (set, opnum);
4709 while (GET_CODE (mem) == SUBREG)
4710 mem = SUBREG_REG (mem);
4711 gcc_assert (GET_CODE (mem) == MEM);
4712 return (volatile_ok || !MEM_VOLATILE_P (mem));
4715 /* Initialize the table of extra 80387 mathematical constants. */
4717 static void
4718 init_ext_80387_constants (void)
4720 static const char * cst[5] =
4722 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
4723 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
4724 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
4725 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
4726 "3.1415926535897932385128089594061862044", /* 4: fldpi */
4728 int i;
4730 for (i = 0; i < 5; i++)
4732 real_from_string (&ext_80387_constants_table[i], cst[i]);
4733 /* Ensure each constant is rounded to XFmode precision. */
4734 real_convert (&ext_80387_constants_table[i],
4735 XFmode, &ext_80387_constants_table[i]);
4738 ext_80387_constants_init = 1;
4741 /* Return true if the constant is something that can be loaded with
4742 a special instruction. */
4745 standard_80387_constant_p (rtx x)
4747 REAL_VALUE_TYPE r;
4749 if (GET_CODE (x) != CONST_DOUBLE || !FLOAT_MODE_P (GET_MODE (x)))
4750 return -1;
4752 if (x == CONST0_RTX (GET_MODE (x)))
4753 return 1;
4754 if (x == CONST1_RTX (GET_MODE (x)))
4755 return 2;
4757 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
4759 /* For XFmode constants, try to find a special 80387 instruction when
4760 optimizing for size or on those CPUs that benefit from them. */
4761 if (GET_MODE (x) == XFmode
4762 && (optimize_size || x86_ext_80387_constants & TUNEMASK))
4764 int i;
4766 if (! ext_80387_constants_init)
4767 init_ext_80387_constants ();
4769 for (i = 0; i < 5; i++)
4770 if (real_identical (&r, &ext_80387_constants_table[i]))
4771 return i + 3;
4774 /* Load of the constant -0.0 or -1.0 will be split as
4775 fldz;fchs or fld1;fchs sequence. */
4776 if (real_isnegzero (&r))
4777 return 8;
4778 if (real_identical (&r, &dconstm1))
4779 return 9;
4781 return 0;
4784 /* Return the opcode of the special instruction to be used to load
4785 the constant X. */
4787 const char *
4788 standard_80387_constant_opcode (rtx x)
4790 switch (standard_80387_constant_p (x))
4792 case 1:
4793 return "fldz";
4794 case 2:
4795 return "fld1";
4796 case 3:
4797 return "fldlg2";
4798 case 4:
4799 return "fldln2";
4800 case 5:
4801 return "fldl2e";
4802 case 6:
4803 return "fldl2t";
4804 case 7:
4805 return "fldpi";
4806 case 8:
4807 case 9:
4808 return "#";
4809 default:
4810 gcc_unreachable ();
4814 /* Return the CONST_DOUBLE representing the 80387 constant that is
4815 loaded by the specified special instruction. The argument IDX
4816 matches the return value from standard_80387_constant_p. */
4819 standard_80387_constant_rtx (int idx)
4821 int i;
4823 if (! ext_80387_constants_init)
4824 init_ext_80387_constants ();
4826 switch (idx)
4828 case 3:
4829 case 4:
4830 case 5:
4831 case 6:
4832 case 7:
4833 i = idx - 3;
4834 break;
4836 default:
4837 gcc_unreachable ();
4840 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
4841 XFmode);
4844 /* Return 1 if MODE is a valid mode for SSE. */
4845 static int
4846 standard_sse_mode_p (enum machine_mode mode)
4848 switch (mode)
4850 case V16QImode:
4851 case V8HImode:
4852 case V4SImode:
4853 case V2DImode:
4854 case V4SFmode:
4855 case V2DFmode:
4856 return 1;
4858 default:
4859 return 0;
4863 /* Return 1 if X is an FP constant we can load into an SSE register w/o using memory.
4866 standard_sse_constant_p (rtx x)
4868 enum machine_mode mode = GET_MODE (x);
4870 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
4871 return 1;
4872 if (vector_all_ones_operand (x, mode)
4873 && standard_sse_mode_p (mode))
4874 return TARGET_SSE2 ? 2 : -1;
4876 return 0;
4879 /* Return the opcode of the special instruction to be used to load
4880 the constant X. */
4882 const char *
4883 standard_sse_constant_opcode (rtx insn, rtx x)
4885 switch (standard_sse_constant_p (x))
4887 case 1:
4888 if (get_attr_mode (insn) == MODE_V4SF)
4889 return "xorps\t%0, %0";
4890 else if (get_attr_mode (insn) == MODE_V2DF)
4891 return "xorpd\t%0, %0";
4892 else
4893 return "pxor\t%0, %0";
4894 case 2:
4895 return "pcmpeqd\t%0, %0";
4897 gcc_unreachable ();
4900 /* Returns 1 if OP contains a symbol reference */
4903 symbolic_reference_mentioned_p (rtx op)
4905 const char *fmt;
4906 int i;
4908 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
4909 return 1;
4911 fmt = GET_RTX_FORMAT (GET_CODE (op));
4912 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
4914 if (fmt[i] == 'E')
4916 int j;
4918 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
4919 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
4920 return 1;
4923 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
4924 return 1;
4927 return 0;
4930 /* Return 1 if it is appropriate to emit `ret' instructions in the
4931 body of a function. Do this only if the epilogue is simple, needing a
4932 couple of insns. Prior to reloading, we can't tell how many registers
4933 must be saved, so return 0 then. Return 0 if there is no frame
4934 marker to de-allocate. */
4937 ix86_can_use_return_insn_p (void)
4939 struct ix86_frame frame;
4941 if (! reload_completed || frame_pointer_needed)
4942 return 0;
4944 /* Don't allow more than 32K bytes of arguments to be popped, since that's all we can do
4945 with one instruction. */
4946 if (current_function_pops_args
4947 && current_function_args_size >= 32768)
4948 return 0;
4950 ix86_compute_frame_layout (&frame);
4951 return frame.to_allocate == 0 && frame.nregs == 0;
4954 /* Value should be nonzero if functions must have frame pointers.
4955 Zero means the frame pointer need not be set up (and parms may
4956 be accessed via the stack pointer) in functions that seem suitable. */
4959 ix86_frame_pointer_required (void)
4961 /* If we accessed previous frames, then the generated code expects
4962 to be able to access the saved ebp value in our frame. */
4963 if (cfun->machine->accesses_prev_frame)
4964 return 1;
4966 /* Several x86 os'es need a frame pointer for other reasons,
4967 usually pertaining to setjmp. */
4968 if (SUBTARGET_FRAME_POINTER_REQUIRED)
4969 return 1;
4971 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
4972 the frame pointer by default. Turn it back on now if we've not
4973 got a leaf function. */
4974 if (TARGET_OMIT_LEAF_FRAME_POINTER
4975 && (!current_function_is_leaf
4976 || ix86_current_function_calls_tls_descriptor))
4977 return 1;
4979 if (current_function_profile)
4980 return 1;
4982 return 0;
4985 /* Record that the current function accesses previous call frames. */
4987 void
4988 ix86_setup_frame_addresses (void)
4990 cfun->machine->accesses_prev_frame = 1;
4993 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
4994 # define USE_HIDDEN_LINKONCE 1
4995 #else
4996 # define USE_HIDDEN_LINKONCE 0
4997 #endif
4999 static int pic_labels_used;
5001 /* Fills in the label name that should be used for a pc thunk for
5002 the given register. */
5004 static void
5005 get_pc_thunk_name (char name[32], unsigned int regno)
5007 gcc_assert (!TARGET_64BIT);
5009 if (USE_HIDDEN_LINKONCE)
5010 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
5011 else
5012 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
5016 /* This function generates code for -fpic that loads %ebx with
5017 the return address of the caller and then returns. */
5019 void
5020 ix86_file_end (void)
5022 rtx xops[2];
5023 int regno;
5025 for (regno = 0; regno < 8; ++regno)
5027 char name[32];
5029 if (! ((pic_labels_used >> regno) & 1))
5030 continue;
5032 get_pc_thunk_name (name, regno);
5034 #if TARGET_MACHO
5035 if (TARGET_MACHO)
5037 switch_to_section (darwin_sections[text_coal_section]);
5038 fputs ("\t.weak_definition\t", asm_out_file);
5039 assemble_name (asm_out_file, name);
5040 fputs ("\n\t.private_extern\t", asm_out_file);
5041 assemble_name (asm_out_file, name);
5042 fputs ("\n", asm_out_file);
5043 ASM_OUTPUT_LABEL (asm_out_file, name);
5045 else
5046 #endif
5047 if (USE_HIDDEN_LINKONCE)
5049 tree decl;
5051 decl = build_decl (FUNCTION_DECL, get_identifier (name),
5052 error_mark_node);
5053 TREE_PUBLIC (decl) = 1;
5054 TREE_STATIC (decl) = 1;
5055 DECL_ONE_ONLY (decl) = 1;
5057 (*targetm.asm_out.unique_section) (decl, 0);
5058 switch_to_section (get_named_section (decl, NULL, 0));
5060 (*targetm.asm_out.globalize_label) (asm_out_file, name);
5061 fputs ("\t.hidden\t", asm_out_file);
5062 assemble_name (asm_out_file, name);
5063 fputc ('\n', asm_out_file);
5064 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
5066 else
5068 switch_to_section (text_section);
5069 ASM_OUTPUT_LABEL (asm_out_file, name);
5072 xops[0] = gen_rtx_REG (SImode, regno);
5073 xops[1] = gen_rtx_MEM (SImode, stack_pointer_rtx);
5074 output_asm_insn ("mov{l}\t{%1, %0|%0, %1}", xops);
5075 output_asm_insn ("ret", xops);
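/* For illustration: on an ELF/ia32 target the thunk emitted by the loop
   above for %ebx would look roughly like this (the exact directives depend
   on USE_HIDDEN_LINKONCE and the assembler in use):

	__i686.get_pc_thunk.bx:
		movl	(%esp), %ebx
		ret

   i.e. it copies its own return address (the caller's pc) into the
   requested register and returns.  */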
5078 if (NEED_INDICATE_EXEC_STACK)
5079 file_end_indicate_exec_stack ();
5082 /* Emit code for the SET_GOT patterns. */
5084 const char *
5085 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
5087 rtx xops[3];
5089 xops[0] = dest;
5090 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
5092 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
5094 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
5096 if (!flag_pic)
5097 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
5098 else
5099 output_asm_insn ("call\t%a2", xops);
5101 #if TARGET_MACHO
5102 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5103 is what will be referenced by the Mach-O PIC subsystem. */
5104 if (!label)
5105 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5106 #endif
5108 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5109 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
5111 if (flag_pic)
5112 output_asm_insn ("pop{l}\t%0", xops);
5114 else
5116 char name[32];
5117 get_pc_thunk_name (name, REGNO (dest));
5118 pic_labels_used |= 1 << REGNO (dest);
5120 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
5121 xops[2] = gen_rtx_MEM (QImode, xops[2]);
5122 output_asm_insn ("call\t%X2", xops);
5123 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
5124 is what will be referenced by the Mach-O PIC subsystem. */
5125 #if TARGET_MACHO
5126 if (!label)
5127 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
5128 else
5129 targetm.asm_out.internal_label (asm_out_file, "L",
5130 CODE_LABEL_NUMBER (label));
5131 #endif
5134 if (TARGET_MACHO)
5135 return "";
5137 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
5138 output_asm_insn ("add{l}\t{%1, %0|%0, %1}", xops);
5139 else
5140 output_asm_insn ("add{l}\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
5142 return "";
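/* For illustration: with -fpic on ia32 and TARGET_DEEP_BRANCH_PREDICTION the
   sequence above typically comes out as something like

	call	__i686.get_pc_thunk.bx
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx

   and without deep branch prediction as the older form

	call	.L2
   .L2:
	popl	%ebx
	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L2], %ebx

   (register and label names are examples only).  */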
5145 /* Generate a "push" pattern for input ARG. */
5147 static rtx
5148 gen_push (rtx arg)
5150 return gen_rtx_SET (VOIDmode,
5151 gen_rtx_MEM (Pmode,
5152 gen_rtx_PRE_DEC (Pmode,
5153 stack_pointer_rtx)),
5154 arg);
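/* For illustration: for ARG = (reg:SI bp) this builds the RTL

	(set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI bp))

   which the insn patterns emit as a plain "pushl %ebp" (or "pushq" in
   64bit mode, since Pmode is DImode there).  */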
5157 /* Return >= 0 if there is an unused call-clobbered register available
5158 for the entire function. */
5160 static unsigned int
5161 ix86_select_alt_pic_regnum (void)
5163 if (current_function_is_leaf && !current_function_profile
5164 && !ix86_current_function_calls_tls_descriptor)
5166 int i;
5167 for (i = 2; i >= 0; --i)
5168 if (!regs_ever_live[i])
5169 return i;
5172 return INVALID_REGNUM;
5175 /* Return 1 if we need to save REGNO. */
5176 static int
5177 ix86_save_reg (unsigned int regno, int maybe_eh_return)
5179 if (pic_offset_table_rtx
5180 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
5181 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5182 || current_function_profile
5183 || current_function_calls_eh_return
5184 || current_function_uses_const_pool))
5186 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
5187 return 0;
5188 return 1;
5191 if (current_function_calls_eh_return && maybe_eh_return)
5193 unsigned i;
5194 for (i = 0; ; i++)
5196 unsigned test = EH_RETURN_DATA_REGNO (i);
5197 if (test == INVALID_REGNUM)
5198 break;
5199 if (test == regno)
5200 return 1;
5204 if (cfun->machine->force_align_arg_pointer
5205 && regno == REGNO (cfun->machine->force_align_arg_pointer))
5206 return 1;
5208 return (regs_ever_live[regno]
5209 && !call_used_regs[regno]
5210 && !fixed_regs[regno]
5211 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
5214 /* Return number of registers to be saved on the stack. */
5216 static int
5217 ix86_nsaved_regs (void)
5219 int nregs = 0;
5220 int regno;
5222 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
5223 if (ix86_save_reg (regno, true))
5224 nregs++;
5225 return nregs;
5228 /* Return the offset between two registers, one to be eliminated, and the other
5229 its replacement, at the start of a routine. */
5231 HOST_WIDE_INT
5232 ix86_initial_elimination_offset (int from, int to)
5234 struct ix86_frame frame;
5235 ix86_compute_frame_layout (&frame);
5237 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
5238 return frame.hard_frame_pointer_offset;
5239 else if (from == FRAME_POINTER_REGNUM
5240 && to == HARD_FRAME_POINTER_REGNUM)
5241 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
5242 else
5244 gcc_assert (to == STACK_POINTER_REGNUM);
5246 if (from == ARG_POINTER_REGNUM)
5247 return frame.stack_pointer_offset;
5249 gcc_assert (from == FRAME_POINTER_REGNUM);
5250 return frame.stack_pointer_offset - frame.frame_pointer_offset;
5254 /* Fill structure ix86_frame about frame of currently computed function. */
5256 static void
5257 ix86_compute_frame_layout (struct ix86_frame *frame)
5259 HOST_WIDE_INT total_size;
5260 unsigned int stack_alignment_needed;
5261 HOST_WIDE_INT offset;
5262 unsigned int preferred_alignment;
5263 HOST_WIDE_INT size = get_frame_size ();
5265 frame->nregs = ix86_nsaved_regs ();
5266 total_size = size;
5268 stack_alignment_needed = cfun->stack_alignment_needed / BITS_PER_UNIT;
5269 preferred_alignment = cfun->preferred_stack_boundary / BITS_PER_UNIT;
5271 /* During reload iteration the number of registers saved can change.
5272 Recompute the value as needed. Do not recompute when the number of registers
5273 didn't change, as reload makes multiple calls to this function and does not
5274 expect the decision to change within a single iteration. */
5275 if (!optimize_size
5276 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
5278 int count = frame->nregs;
5280 cfun->machine->use_fast_prologue_epilogue_nregs = count;
5281 /* The fast prologue uses move instead of push to save registers. This
5282 is significantly longer, but also executes faster as modern hardware
5283 can execute the moves in parallel, but can't do that for push/pop.
5285 Be careful about choosing which prologue to emit: when the function takes
5286 many instructions to execute we may as well use the slow version, and
5287 likewise when the function is known to be outside a hot spot (known
5288 with feedback only). Weight the size of the function by the number of
5289 registers to save, as it is cheap to use one or two push instructions
5290 but very slow to use many of them. */
5291 if (count)
5292 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
5293 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
5294 || (flag_branch_probabilities
5295 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
5296 cfun->machine->use_fast_prologue_epilogue = false;
5297 else
5298 cfun->machine->use_fast_prologue_epilogue
5299 = !expensive_function_p (count);
5301 if (TARGET_PROLOGUE_USING_MOVE
5302 && cfun->machine->use_fast_prologue_epilogue)
5303 frame->save_regs_using_mov = true;
5304 else
5305 frame->save_regs_using_mov = false;
5308 /* Skip return address and saved base pointer. */
5309 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
5311 frame->hard_frame_pointer_offset = offset;
5313 /* Do some sanity checking of stack_alignment_needed and
5314 preferred_alignment, since the i386 port is the only one using these
5315 features, and they may break easily. */
5317 gcc_assert (!size || stack_alignment_needed);
5318 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
5319 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5320 gcc_assert (stack_alignment_needed
5321 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
5323 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
5324 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
5326 /* Register save area */
5327 offset += frame->nregs * UNITS_PER_WORD;
5329 /* Va-arg area */
5330 if (ix86_save_varrargs_registers)
5332 offset += X86_64_VARARGS_SIZE;
5333 frame->va_arg_size = X86_64_VARARGS_SIZE;
5335 else
5336 frame->va_arg_size = 0;
5338 /* Align start of frame for local function. */
5339 frame->padding1 = ((offset + stack_alignment_needed - 1)
5340 & -stack_alignment_needed) - offset;
5342 offset += frame->padding1;
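/* The rounding above is the usual round-up-to-alignment idiom: for a
   power-of-two alignment A, ((offset + A - 1) & -A) is offset rounded
   up to the next multiple of A.  E.g. offset = 20, A = 16 gives
   ((20 + 15) & -16) - 20 = 32 - 20 = 12 bytes of padding.  */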
5344 /* Frame pointer points here. */
5345 frame->frame_pointer_offset = offset;
5347 offset += size;
5349 /* Add outgoing arguments area. Can be skipped if we eliminated
5350 all the function calls as dead code.
5351 Skipping is however impossible when the function calls alloca, since the
5352 alloca expander assumes that the last current_function_outgoing_args_size
5353 bytes of the stack frame are unused. */
5354 if (ACCUMULATE_OUTGOING_ARGS
5355 && (!current_function_is_leaf || current_function_calls_alloca
5356 || ix86_current_function_calls_tls_descriptor))
5358 offset += current_function_outgoing_args_size;
5359 frame->outgoing_arguments_size = current_function_outgoing_args_size;
5361 else
5362 frame->outgoing_arguments_size = 0;
5364 /* Align stack boundary. Only needed if we're calling another function
5365 or using alloca. */
5366 if (!current_function_is_leaf || current_function_calls_alloca
5367 || ix86_current_function_calls_tls_descriptor)
5368 frame->padding2 = ((offset + preferred_alignment - 1)
5369 & -preferred_alignment) - offset;
5370 else
5371 frame->padding2 = 0;
5373 offset += frame->padding2;
5375 /* We've reached end of stack frame. */
5376 frame->stack_pointer_offset = offset;
5378 /* Size prologue needs to allocate. */
5379 frame->to_allocate =
5380 (size + frame->padding1 + frame->padding2
5381 + frame->outgoing_arguments_size + frame->va_arg_size);
5383 if ((!frame->to_allocate && frame->nregs <= 1)
5384 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
5385 frame->save_regs_using_mov = false;
5387 if (TARGET_RED_ZONE && current_function_sp_is_unchanging
5388 && current_function_is_leaf
5389 && !ix86_current_function_calls_tls_descriptor)
5391 frame->red_zone_size = frame->to_allocate;
5392 if (frame->save_regs_using_mov)
5393 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
5394 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
5395 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
5397 else
5398 frame->red_zone_size = 0;
5399 frame->to_allocate -= frame->red_zone_size;
5400 frame->stack_pointer_offset -= frame->red_zone_size;
5401 #if 0
5402 fprintf (stderr, "\n");
5403 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
5404 fprintf (stderr, "size: %ld\n", (long)size);
5405 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
5406 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
5407 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
5408 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
5409 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
5410 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
5411 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
5412 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
5413 (long)frame->hard_frame_pointer_offset);
5414 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
5415 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
5416 fprintf (stderr, "current_function_calls_alloca: %ld\n", (long)current_function_calls_alloca);
5417 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
5418 #endif
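/* A worked example of the layout computed above (one plausible case:
   ia32 with a frame pointer, two call-saved registers pushed, 40 bytes
   of locals, a non-leaf body and a 16 byte preferred boundary):

	offset = 8			return address + saved %ebp
	+ 2*4 register save area	hard_frame_pointer_offset = 8
	padding1 = 0			frame_pointer_offset = 16
	+ 40 bytes of locals		offset = 56
	padding2 = 8			stack_pointer_offset = 64
	to_allocate = 40 + 0 + 8 = 48

   so the prologue pushes %ebp and the two registers and then subtracts
   48 from %esp.  */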
5421 /* Emit code to save registers in the prologue. */
5423 static void
5424 ix86_emit_save_regs (void)
5426 unsigned int regno;
5427 rtx insn;
5429 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
5430 if (ix86_save_reg (regno, true))
5432 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
5433 RTX_FRAME_RELATED_P (insn) = 1;
5437 /* Emit code to save registers using MOV insns. The first register
5438 is saved at POINTER + OFFSET. */
5439 static void
5440 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
5442 unsigned int regno;
5443 rtx insn;
5445 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5446 if (ix86_save_reg (regno, true))
5448 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
5449 Pmode, offset),
5450 gen_rtx_REG (Pmode, regno));
5451 RTX_FRAME_RELATED_P (insn) = 1;
5452 offset += UNITS_PER_WORD;
5456 /* Expand prologue or epilogue stack adjustment.
5457 The pattern exists to put a dependency on all ebp-based memory accesses.
5458 STYLE should be negative if instructions should be marked as frame related,
5459 zero if the %r11 register is live and cannot be freely used, and positive
5460 otherwise. */
5462 static void
5463 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
5465 rtx insn;
5467 if (! TARGET_64BIT)
5468 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
5469 else if (x86_64_immediate_operand (offset, DImode))
5470 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
5471 else
5473 rtx r11;
5474 /* r11 is used by indirect sibcall return as well, set before the
5475 epilogue and used after the epilogue. ATM indirect sibcall
5476 shouldn't be used together with huge frame sizes in one
5477 function because of the frame_size check in sibcall.c. */
5478 gcc_assert (style);
5479 r11 = gen_rtx_REG (DImode, R11_REG);
5480 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
5481 if (style < 0)
5482 RTX_FRAME_RELATED_P (insn) = 1;
5483 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
5484 offset));
5486 if (style < 0)
5487 RTX_FRAME_RELATED_P (insn) = 1;
5490 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
5492 static rtx
5493 ix86_internal_arg_pointer (void)
5495 bool has_force_align_arg_pointer =
5496 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
5497 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
5498 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
5499 && DECL_NAME (current_function_decl)
5500 && MAIN_NAME_P (DECL_NAME (current_function_decl))
5501 && DECL_FILE_SCOPE_P (current_function_decl))
5502 || ix86_force_align_arg_pointer
5503 || has_force_align_arg_pointer)
5505 /* Nested functions can't realign the stack due to a register
5506 conflict. */
5507 if (DECL_CONTEXT (current_function_decl)
5508 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
5510 if (ix86_force_align_arg_pointer)
5511 warning (0, "-mstackrealign ignored for nested functions");
5512 if (has_force_align_arg_pointer)
5513 error ("%s not supported for nested functions",
5514 ix86_force_align_arg_pointer_string);
5515 return virtual_incoming_args_rtx;
5517 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, 2);
5518 return copy_to_reg (cfun->machine->force_align_arg_pointer);
5520 else
5521 return virtual_incoming_args_rtx;
5524 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
5525 This is called from dwarf2out.c to emit call frame instructions
5526 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
5527 static void
5528 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
5530 rtx unspec = SET_SRC (pattern);
5531 gcc_assert (GET_CODE (unspec) == UNSPEC);
5533 switch (index)
5535 case UNSPEC_REG_SAVE:
5536 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
5537 SET_DEST (pattern));
5538 break;
5539 case UNSPEC_DEF_CFA:
5540 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
5541 INTVAL (XVECEXP (unspec, 0, 0)));
5542 break;
5543 default:
5544 gcc_unreachable ();
5548 /* Expand the prologue into a bunch of separate insns. */
5550 void
5551 ix86_expand_prologue (void)
5553 rtx insn;
5554 bool pic_reg_used;
5555 struct ix86_frame frame;
5556 HOST_WIDE_INT allocate;
5558 ix86_compute_frame_layout (&frame);
5560 if (cfun->machine->force_align_arg_pointer)
5562 rtx x, y;
5564 /* Grab the argument pointer. */
5565 x = plus_constant (stack_pointer_rtx, 4);
5566 y = cfun->machine->force_align_arg_pointer;
5567 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
5568 RTX_FRAME_RELATED_P (insn) = 1;
5570 /* The unwind info consists of two parts: install the fafp as the cfa,
5571 and record the fafp as the "save register" of the stack pointer.
5572 The latter is there so that the unwinder can see where it
5573 should restore the stack pointer across the "and" insn below. */
5574 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
5575 x = gen_rtx_SET (VOIDmode, y, x);
5576 RTX_FRAME_RELATED_P (x) = 1;
5577 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
5578 UNSPEC_REG_SAVE);
5579 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
5580 RTX_FRAME_RELATED_P (y) = 1;
5581 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
5582 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5583 REG_NOTES (insn) = x;
5585 /* Align the stack. */
5586 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
5587 GEN_INT (-16)));
5589 /* And here we cheat like madmen with the unwind info. We force the
5590 cfa register back to sp+4, which is exactly what it was at the
5591 start of the function. Re-pushing the return address results in
5592 the return at the same spot relative to the cfa, and thus is
5593 correct wrt the unwind info. */
5594 x = cfun->machine->force_align_arg_pointer;
5595 x = gen_frame_mem (Pmode, plus_constant (x, -4));
5596 insn = emit_insn (gen_push (x));
5597 RTX_FRAME_RELATED_P (insn) = 1;
5599 x = GEN_INT (4);
5600 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
5601 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
5602 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
5603 REG_NOTES (insn) = x;
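/* For illustration: with -mstackrealign (or the force_align_arg_pointer
   attribute) the code above emits, roughly,

	leal	4(%esp), %ecx		grab the incoming argument pointer
	andl	$-16, %esp		align the stack
	pushl	-4(%ecx)		re-push the return address

   before the normal frame setup, together with the unwind notes built
   above.  */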
5606 /* Note: AT&T enter does NOT have reversed args. Enter is probably
5607 slower on all targets. Also sdb doesn't like it. */
5609 if (frame_pointer_needed)
5611 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
5612 RTX_FRAME_RELATED_P (insn) = 1;
5614 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
5615 RTX_FRAME_RELATED_P (insn) = 1;
5618 allocate = frame.to_allocate;
5620 if (!frame.save_regs_using_mov)
5621 ix86_emit_save_regs ();
5622 else
5623 allocate += frame.nregs * UNITS_PER_WORD;
5625 /* When using the red zone we may start register saving before allocating
5626 the stack frame, saving one cycle of the prologue. */
5627 if (TARGET_RED_ZONE && frame.save_regs_using_mov)
5628 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
5629 : stack_pointer_rtx,
5630 -frame.nregs * UNITS_PER_WORD);
5632 if (allocate == 0)
5634 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
5635 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5636 GEN_INT (-allocate), -1);
5637 else
5639 /* Only valid for Win32. */
5640 rtx eax = gen_rtx_REG (SImode, 0);
5641 bool eax_live = ix86_eax_live_at_start_p ();
5642 rtx t;
5644 gcc_assert (!TARGET_64BIT);
5646 if (eax_live)
5648 emit_insn (gen_push (eax));
5649 allocate -= 4;
5652 emit_move_insn (eax, GEN_INT (allocate));
5654 insn = emit_insn (gen_allocate_stack_worker (eax));
5655 RTX_FRAME_RELATED_P (insn) = 1;
5656 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
5657 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
5658 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
5659 t, REG_NOTES (insn));
5661 if (eax_live)
5663 if (frame_pointer_needed)
5664 t = plus_constant (hard_frame_pointer_rtx,
5665 allocate
5666 - frame.to_allocate
5667 - frame.nregs * UNITS_PER_WORD);
5668 else
5669 t = plus_constant (stack_pointer_rtx, allocate);
5670 emit_move_insn (eax, gen_rtx_MEM (SImode, t));
5674 if (frame.save_regs_using_mov && !TARGET_RED_ZONE)
5676 if (!frame_pointer_needed || !frame.to_allocate)
5677 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
5678 else
5679 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
5680 -frame.nregs * UNITS_PER_WORD);
5683 pic_reg_used = false;
5684 if (pic_offset_table_rtx
5685 && (regs_ever_live[REAL_PIC_OFFSET_TABLE_REGNUM]
5686 || current_function_profile))
5688 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
5690 if (alt_pic_reg_used != INVALID_REGNUM)
5691 REGNO (pic_offset_table_rtx) = alt_pic_reg_used;
5693 pic_reg_used = true;
5696 if (pic_reg_used)
5698 if (TARGET_64BIT)
5699 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
5700 else
5701 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
5703 /* Even with accurate pre-reload life analysis, we can wind up
5704 deleting all references to the pic register after reload.
5705 Consider if cross-jumping unifies two sides of a branch
5706 controlled by a comparison vs the only read from a global.
5707 In which case, allow the set_got to be deleted, though we're
5708 too late to do anything about the ebx save in the prologue. */
5709 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, NULL);
5712 /* Prevent function calls from being scheduled before the call to mcount.
5713 In the pic_reg_used case, make sure that the got load isn't deleted. */
5714 if (current_function_profile)
5715 emit_insn (gen_blockage (pic_reg_used ? pic_offset_table_rtx : const0_rtx));
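/* Putting the pieces together, a typical ia32 prologue produced by this
   function (frame pointer needed, registers saved with push, no stack
   probing or realignment) looks like

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx			saved registers, if any
	subl	$frame.to_allocate, %esp

   with a set_got sequence appended when the PIC register is needed.  */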
5718 /* Emit code to restore saved registers using MOV insns. First register
5719 is restored from POINTER + OFFSET. */
5720 static void
5721 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
5722 int maybe_eh_return)
5724 int regno;
5725 rtx base_address = gen_rtx_MEM (Pmode, pointer);
5727 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5728 if (ix86_save_reg (regno, maybe_eh_return))
5730 /* Ensure that adjust_address won't be forced to produce pointer
5731 out of range allowed by x86-64 instruction set. */
5732 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
5734 rtx r11;
5736 r11 = gen_rtx_REG (DImode, R11_REG);
5737 emit_move_insn (r11, GEN_INT (offset));
5738 emit_insn (gen_adddi3 (r11, r11, pointer));
5739 base_address = gen_rtx_MEM (Pmode, r11);
5740 offset = 0;
5742 emit_move_insn (gen_rtx_REG (Pmode, regno),
5743 adjust_address (base_address, Pmode, offset));
5744 offset += UNITS_PER_WORD;
5748 /* Restore function stack, frame, and registers. */
5750 void
5751 ix86_expand_epilogue (int style)
5753 int regno;
5754 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
5755 struct ix86_frame frame;
5756 HOST_WIDE_INT offset;
5758 ix86_compute_frame_layout (&frame);
5760 /* Calculate start of saved registers relative to ebp. Special care
5761 must be taken for the normal return case of a function using
5762 eh_return: the eax and edx registers are marked as saved, but not
5763 restored along this path. */
5764 offset = frame.nregs;
5765 if (current_function_calls_eh_return && style != 2)
5766 offset -= 2;
5767 offset *= -UNITS_PER_WORD;
5769 /* If we're only restoring one register and sp is not valid then
5770 use a move instruction to restore the register, since it's
5771 less work than reloading sp and popping the register.
5773 The default code results in a stack adjustment using an add/lea instruction,
5774 while this code results in a LEAVE instruction (or discrete equivalent),
5775 so it is profitable in some other cases as well, especially when there
5776 are no registers to restore. We also use this code when TARGET_USE_LEAVE
5777 and there is exactly one register to pop. This heuristic may need some
5778 tuning in the future. */
5779 if ((!sp_valid && frame.nregs <= 1)
5780 || (TARGET_EPILOGUE_USING_MOVE
5781 && cfun->machine->use_fast_prologue_epilogue
5782 && (frame.nregs > 1 || frame.to_allocate))
5783 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
5784 || (frame_pointer_needed && TARGET_USE_LEAVE
5785 && cfun->machine->use_fast_prologue_epilogue
5786 && frame.nregs == 1)
5787 || current_function_calls_eh_return)
5789 /* Restore registers. We can use ebp or esp to address the memory
5790 locations. If both are available, default to ebp, since offsets
5791 are known to be small. The only exception is esp pointing directly to the
5792 end of the block of saved registers, where we may simplify the addressing
5793 mode. */
5795 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
5796 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
5797 frame.to_allocate, style == 2);
5798 else
5799 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
5800 offset, style == 2);
5802 /* eh_return epilogues need %ecx added to the stack pointer. */
5803 if (style == 2)
5805 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
5807 if (frame_pointer_needed)
5809 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
5810 tmp = plus_constant (tmp, UNITS_PER_WORD);
5811 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
5813 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
5814 emit_move_insn (hard_frame_pointer_rtx, tmp);
5816 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
5817 const0_rtx, style);
5819 else
5821 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
5822 tmp = plus_constant (tmp, (frame.to_allocate
5823 + frame.nregs * UNITS_PER_WORD));
5824 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
5827 else if (!frame_pointer_needed)
5828 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5829 GEN_INT (frame.to_allocate
5830 + frame.nregs * UNITS_PER_WORD),
5831 style);
5832 /* If not an i386, mov & pop is faster than "leave". */
5833 else if (TARGET_USE_LEAVE || optimize_size
5834 || !cfun->machine->use_fast_prologue_epilogue)
5835 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5836 else
5838 pro_epilogue_adjust_stack (stack_pointer_rtx,
5839 hard_frame_pointer_rtx,
5840 const0_rtx, style);
5841 if (TARGET_64BIT)
5842 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5843 else
5844 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5847 else
5849 /* First step is to deallocate the stack frame so that we can
5850 pop the registers. */
5851 if (!sp_valid)
5853 gcc_assert (frame_pointer_needed);
5854 pro_epilogue_adjust_stack (stack_pointer_rtx,
5855 hard_frame_pointer_rtx,
5856 GEN_INT (offset), style);
5858 else if (frame.to_allocate)
5859 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
5860 GEN_INT (frame.to_allocate), style);
5862 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5863 if (ix86_save_reg (regno, false))
5865 if (TARGET_64BIT)
5866 emit_insn (gen_popdi1 (gen_rtx_REG (Pmode, regno)));
5867 else
5868 emit_insn (gen_popsi1 (gen_rtx_REG (Pmode, regno)));
5870 if (frame_pointer_needed)
5872 /* Leave results in shorter dependency chains on CPUs that are
5873 able to grok it fast. */
5874 if (TARGET_USE_LEAVE)
5875 emit_insn (TARGET_64BIT ? gen_leave_rex64 () : gen_leave ());
5876 else if (TARGET_64BIT)
5877 emit_insn (gen_popdi1 (hard_frame_pointer_rtx));
5878 else
5879 emit_insn (gen_popsi1 (hard_frame_pointer_rtx));
5883 if (cfun->machine->force_align_arg_pointer)
5885 emit_insn (gen_addsi3 (stack_pointer_rtx,
5886 cfun->machine->force_align_arg_pointer,
5887 GEN_INT (-4)));
5890 /* Sibcall epilogues don't want a return instruction. */
5891 if (style == 0)
5892 return;
5894 if (current_function_pops_args && current_function_args_size)
5896 rtx popc = GEN_INT (current_function_pops_args);
5898 /* i386 can only pop 64K bytes. If asked to pop more, pop
5899 return address, do explicit add, and jump indirectly to the
5900 caller. */
5902 if (current_function_pops_args >= 65536)
5904 rtx ecx = gen_rtx_REG (SImode, 2);
5906 /* There is no "pascal" calling convention in 64bit ABI. */
5907 gcc_assert (!TARGET_64BIT);
5909 emit_insn (gen_popsi1 (ecx));
5910 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
5911 emit_jump_insn (gen_return_indirect_internal (ecx));
5913 else
5914 emit_jump_insn (gen_return_pop_internal (popc));
5916 else
5917 emit_jump_insn (gen_return_internal ());
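/* For illustration, the two common epilogue shapes selected above are

	leave				frame pointer + TARGET_USE_LEAVE
	ret

   and

	addl	$N, %esp		N = frame.to_allocate
	popl	%ebx			saved registers, if any
	popl	%ebp
	ret

   with "ret $n" substituted when the callee pops its own arguments.  */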
5920 /* Reset from the function's potential modifications. */
5922 static void
5923 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
5924 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5926 if (pic_offset_table_rtx)
5927 REGNO (pic_offset_table_rtx) = REAL_PIC_OFFSET_TABLE_REGNUM;
5928 #if TARGET_MACHO
5929 /* Mach-O doesn't support labels at the end of objects, so if
5930 it looks like we might want one, insert a NOP. */
5932 rtx insn = get_last_insn ();
5933 while (insn
5934 && NOTE_P (insn)
5935 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_DELETED_LABEL)
5936 insn = PREV_INSN (insn);
5937 if (insn
5938 && (LABEL_P (insn)
5939 || (NOTE_P (insn)
5940 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_DELETED_LABEL)))
5941 fputs ("\tnop\n", file);
5943 #endif
5947 /* Extract the parts of an RTL expression that is a valid memory address
5948 for an instruction. Return 0 if the structure of the address is
5949 grossly off. Return -1 if the address contains ASHIFT, so it is not
5950 strictly valid, but still used for computing length of lea instruction. */
5953 ix86_decompose_address (rtx addr, struct ix86_address *out)
5955 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
5956 rtx base_reg, index_reg;
5957 HOST_WIDE_INT scale = 1;
5958 rtx scale_rtx = NULL_RTX;
5959 int retval = 1;
5960 enum ix86_address_seg seg = SEG_DEFAULT;
5962 if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
5963 base = addr;
5964 else if (GET_CODE (addr) == PLUS)
5966 rtx addends[4], op;
5967 int n = 0, i;
5969 op = addr;
5972 if (n >= 4)
5973 return 0;
5974 addends[n++] = XEXP (op, 1);
5975 op = XEXP (op, 0);
5977 while (GET_CODE (op) == PLUS);
5978 if (n >= 4)
5979 return 0;
5980 addends[n] = op;
5982 for (i = n; i >= 0; --i)
5984 op = addends[i];
5985 switch (GET_CODE (op))
5987 case MULT:
5988 if (index)
5989 return 0;
5990 index = XEXP (op, 0);
5991 scale_rtx = XEXP (op, 1);
5992 break;
5994 case UNSPEC:
5995 if (XINT (op, 1) == UNSPEC_TP
5996 && TARGET_TLS_DIRECT_SEG_REFS
5997 && seg == SEG_DEFAULT)
5998 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
5999 else
6000 return 0;
6001 break;
6003 case REG:
6004 case SUBREG:
6005 if (!base)
6006 base = op;
6007 else if (!index)
6008 index = op;
6009 else
6010 return 0;
6011 break;
6013 case CONST:
6014 case CONST_INT:
6015 case SYMBOL_REF:
6016 case LABEL_REF:
6017 if (disp)
6018 return 0;
6019 disp = op;
6020 break;
6022 default:
6023 return 0;
6027 else if (GET_CODE (addr) == MULT)
6029 index = XEXP (addr, 0); /* index*scale */
6030 scale_rtx = XEXP (addr, 1);
6032 else if (GET_CODE (addr) == ASHIFT)
6034 rtx tmp;
6036 /* We're called for lea too, which implements ashift on occasion. */
6037 index = XEXP (addr, 0);
6038 tmp = XEXP (addr, 1);
6039 if (GET_CODE (tmp) != CONST_INT)
6040 return 0;
6041 scale = INTVAL (tmp);
6042 if ((unsigned HOST_WIDE_INT) scale > 3)
6043 return 0;
6044 scale = 1 << scale;
6045 retval = -1;
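/* For illustration: lea can present a scaled index as a shift, e.g.
   (ashift (reg:SI ax) (const_int 2)) decomposes to index = %eax,
   scale = 4, giving an address like 0(,%eax,4) once the zero
   displacement is supplied below.  */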
6047 else
6048 disp = addr; /* displacement */
6050 /* Extract the integral value of scale. */
6051 if (scale_rtx)
6053 if (GET_CODE (scale_rtx) != CONST_INT)
6054 return 0;
6055 scale = INTVAL (scale_rtx);
6058 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
6059 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
6061 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
6062 if (base_reg && index_reg && scale == 1
6063 && (index_reg == arg_pointer_rtx
6064 || index_reg == frame_pointer_rtx
6065 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
6067 rtx tmp;
6068 tmp = base, base = index, index = tmp;
6069 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
6072 /* Special case: %ebp cannot be encoded as a base without a displacement. */
6073 if ((base_reg == hard_frame_pointer_rtx
6074 || base_reg == frame_pointer_rtx
6075 || base_reg == arg_pointer_rtx) && !disp)
6076 disp = const0_rtx;
6078 /* Special case: on K6, [%esi] causes the instruction to be vector decoded.
6079 Avoid this by transforming to [%esi+0]. */
6080 if (ix86_tune == PROCESSOR_K6 && !optimize_size
6081 && base_reg && !index_reg && !disp
6082 && REG_P (base_reg)
6083 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
6084 disp = const0_rtx;
6086 /* Special case: encode reg+reg instead of reg*2. */
6087 if (!base && index && scale && scale == 2)
6088 base = index, base_reg = index_reg, scale = 1;
6090 /* Special case: scaling cannot be encoded without base or displacement. */
6091 if (!base && !disp && index && scale != 1)
6092 disp = const0_rtx;
6094 out->base = base;
6095 out->index = index;
6096 out->disp = disp;
6097 out->scale = scale;
6098 out->seg = seg;
6100 return retval;
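/* A worked example of the decomposition: for

	(plus:SI (plus:SI (reg:SI bx)
			  (mult:SI (reg:SI si) (const_int 4)))
		 (const_int 12))

   we get base = %ebx, index = %esi, scale = 4, disp = 12, i.e. the
   operand printed as 12(%ebx,%esi,4).  */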
6103 /* Return cost of the memory address x.
6104 For i386, it is better to use a complex address than let gcc copy
6105 the address into a reg and make a new pseudo. But not if the address
6106 requires two regs - that would mean more pseudos with longer
6107 lifetimes. */
6108 static int
6109 ix86_address_cost (rtx x)
6111 struct ix86_address parts;
6112 int cost = 1;
6113 int ok = ix86_decompose_address (x, &parts);
6115 gcc_assert (ok);
6117 if (parts.base && GET_CODE (parts.base) == SUBREG)
6118 parts.base = SUBREG_REG (parts.base);
6119 if (parts.index && GET_CODE (parts.index) == SUBREG)
6120 parts.index = SUBREG_REG (parts.index);
6122 /* More complex memory references are better. */
6123 if (parts.disp && parts.disp != const0_rtx)
6124 cost--;
6125 if (parts.seg != SEG_DEFAULT)
6126 cost--;
6128 /* Attempt to minimize number of registers in the address. */
6129 if ((parts.base
6130 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
6131 || (parts.index
6132 && (!REG_P (parts.index)
6133 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
6134 cost++;
6136 if (parts.base
6137 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
6138 && parts.index
6139 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
6140 && parts.base != parts.index)
6141 cost++;
6143 /* The AMD K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
6144 since its predecode logic can't detect the length of such instructions
6145 and they degenerate to vector decoding. Increase the cost of such
6146 addresses here. The penalty is minimally 2 cycles. It may be worthwhile
6147 to split such addresses or even refuse them at all.
6149 Following addressing modes are affected:
6150 [base+scale*index]
6151 [scale*index+disp]
6152 [base+index]
6154 The first and last case may be avoidable by explicitly coding the zero in
6155 the memory address, but I don't have an AMD K6 machine handy to check this
6156 theory. */
6158 if (TARGET_K6
6159 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
6160 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
6161 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
6162 cost += 10;
6164 return cost;
6167 /* If X is a machine specific address (i.e. a symbol or label being
6168 referenced as a displacement from the GOT implemented using an
6169 UNSPEC), then return the base term. Otherwise return X. */
6172 ix86_find_base_term (rtx x)
6174 rtx term;
6176 if (TARGET_64BIT)
6178 if (GET_CODE (x) != CONST)
6179 return x;
6180 term = XEXP (x, 0);
6181 if (GET_CODE (term) == PLUS
6182 && (GET_CODE (XEXP (term, 1)) == CONST_INT
6183 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
6184 term = XEXP (term, 0);
6185 if (GET_CODE (term) != UNSPEC
6186 || XINT (term, 1) != UNSPEC_GOTPCREL)
6187 return x;
6189 term = XVECEXP (term, 0, 0);
6191 if (GET_CODE (term) != SYMBOL_REF
6192 && GET_CODE (term) != LABEL_REF)
6193 return x;
6195 return term;
6198 term = ix86_delegitimize_address (x);
6200 if (GET_CODE (term) != SYMBOL_REF
6201 && GET_CODE (term) != LABEL_REF)
6202 return x;
6204 return term;
6207 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O as
6208 this is used to form addresses of local data when -fPIC is in
6209 use. */
6211 static bool
6212 darwin_local_data_pic (rtx disp)
6214 if (GET_CODE (disp) == MINUS)
6216 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
6217 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
6218 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
6220 const char *sym_name = XSTR (XEXP (disp, 1), 0);
6221 if (! strcmp (sym_name, "<pic base>"))
6222 return true;
6226 return false;
6229 /* Determine if a given RTX is a valid constant. We already know this
6230 satisfies CONSTANT_P. */
6232 bool
6233 legitimate_constant_p (rtx x)
6235 switch (GET_CODE (x))
6237 case CONST:
6238 x = XEXP (x, 0);
6240 if (GET_CODE (x) == PLUS)
6242 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6243 return false;
6244 x = XEXP (x, 0);
6247 if (TARGET_MACHO && darwin_local_data_pic (x))
6248 return true;
6250 /* Only some unspecs are valid as "constants". */
6251 if (GET_CODE (x) == UNSPEC)
6252 switch (XINT (x, 1))
6254 case UNSPEC_GOTOFF:
6255 return TARGET_64BIT;
6256 case UNSPEC_TPOFF:
6257 case UNSPEC_NTPOFF:
6258 x = XVECEXP (x, 0, 0);
6259 return (GET_CODE (x) == SYMBOL_REF
6260 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6261 case UNSPEC_DTPOFF:
6262 x = XVECEXP (x, 0, 0);
6263 return (GET_CODE (x) == SYMBOL_REF
6264 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
6265 default:
6266 return false;
6269 /* We must have drilled down to a symbol. */
6270 if (GET_CODE (x) == LABEL_REF)
6271 return true;
6272 if (GET_CODE (x) != SYMBOL_REF)
6273 return false;
6274 /* FALLTHRU */
6276 case SYMBOL_REF:
6277 /* TLS symbols are never valid. */
6278 if (SYMBOL_REF_TLS_MODEL (x))
6279 return false;
6280 break;
6282 case CONST_DOUBLE:
6283 if (GET_MODE (x) == TImode
6284 && x != CONST0_RTX (TImode)
6285 && !TARGET_64BIT)
6286 return false;
6287 break;
6289 case CONST_VECTOR:
6290 if (x == CONST0_RTX (GET_MODE (x)))
6291 return true;
6292 return false;
6294 default:
6295 break;
6298 /* Otherwise we handle everything else in the move patterns. */
6299 return true;
6302 /* Determine if it's legal to put X into the constant pool. This
6303 is not possible for the address of thread-local symbols, which
6304 is checked above. */
6306 static bool
6307 ix86_cannot_force_const_mem (rtx x)
6309 /* We can always put integral constants and vectors in memory. */
6310 switch (GET_CODE (x))
6312 case CONST_INT:
6313 case CONST_DOUBLE:
6314 case CONST_VECTOR:
6315 return false;
6317 default:
6318 break;
6320 return !legitimate_constant_p (x);
6323 /* Determine if a given RTX is a valid constant address. */
6325 bool
6326 constant_address_p (rtx x)
6328 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
6331 /* Nonzero if the constant value X is a legitimate general operand
6332 when generating PIC code. It is given that flag_pic is on and
6333 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
6335 bool
6336 legitimate_pic_operand_p (rtx x)
6338 rtx inner;
6340 switch (GET_CODE (x))
6342 case CONST:
6343 inner = XEXP (x, 0);
6344 if (GET_CODE (inner) == PLUS
6345 && GET_CODE (XEXP (inner, 1)) == CONST_INT)
6346 inner = XEXP (inner, 0);
6348 /* Only some unspecs are valid as "constants". */
6349 if (GET_CODE (inner) == UNSPEC)
6350 switch (XINT (inner, 1))
6352 case UNSPEC_GOTOFF:
6353 return TARGET_64BIT;
6354 case UNSPEC_TPOFF:
6355 x = XVECEXP (inner, 0, 0);
6356 return (GET_CODE (x) == SYMBOL_REF
6357 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
6358 default:
6359 return false;
6361 /* FALLTHRU */
6363 case SYMBOL_REF:
6364 case LABEL_REF:
6365 return legitimate_pic_address_disp_p (x);
6367 default:
6368 return true;
6372 /* Determine if a given CONST RTX is a valid memory displacement
6373 in PIC mode. */
6376 legitimate_pic_address_disp_p (rtx disp)
6378 bool saw_plus;
6380 /* In 64bit mode we can allow direct addresses of symbols and labels
6381 when they are not dynamic symbols. */
6382 if (TARGET_64BIT)
6384 rtx op0 = disp, op1;
6386 switch (GET_CODE (disp))
6388 case LABEL_REF:
6389 return true;
6391 case CONST:
6392 if (GET_CODE (XEXP (disp, 0)) != PLUS)
6393 break;
6394 op0 = XEXP (XEXP (disp, 0), 0);
6395 op1 = XEXP (XEXP (disp, 0), 1);
6396 if (GET_CODE (op1) != CONST_INT
6397 || INTVAL (op1) >= 16*1024*1024
6398 || INTVAL (op1) < -16*1024*1024)
6399 break;
6400 if (GET_CODE (op0) == LABEL_REF)
6401 return true;
6402 if (GET_CODE (op0) != SYMBOL_REF)
6403 break;
6404 /* FALLTHRU */
6406 case SYMBOL_REF:
6407 /* TLS references should always be enclosed in UNSPEC. */
6408 if (SYMBOL_REF_TLS_MODEL (op0))
6409 return false;
6410 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0))
6411 return true;
6412 break;
6414 default:
6415 break;
6418 if (GET_CODE (disp) != CONST)
6419 return 0;
6420 disp = XEXP (disp, 0);
6422 if (TARGET_64BIT)
6424 /* It is unsafe to allow PLUS expressions here; this limits the allowed
6425 displacement of GOT references, but we should not need those anyway. */
6426 if (GET_CODE (disp) != UNSPEC
6427 || (XINT (disp, 1) != UNSPEC_GOTPCREL
6428 && XINT (disp, 1) != UNSPEC_GOTOFF))
6429 return 0;
6431 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
6432 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
6433 return 0;
6434 return 1;
6437 saw_plus = false;
6438 if (GET_CODE (disp) == PLUS)
6440 if (GET_CODE (XEXP (disp, 1)) != CONST_INT)
6441 return 0;
6442 disp = XEXP (disp, 0);
6443 saw_plus = true;
6446 if (TARGET_MACHO && darwin_local_data_pic (disp))
6447 return 1;
6449 if (GET_CODE (disp) != UNSPEC)
6450 return 0;
6452 switch (XINT (disp, 1))
6454 case UNSPEC_GOT:
6455 if (saw_plus)
6456 return false;
6457 return GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF;
6458 case UNSPEC_GOTOFF:
6459 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
6460 While the ABI also specifies a 32bit relocation, we don't produce it in
6461 the small PIC model at all. */
6462 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
6463 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
6464 && !TARGET_64BIT)
6465 return local_symbolic_operand (XVECEXP (disp, 0, 0), Pmode);
6466 return false;
6467 case UNSPEC_GOTTPOFF:
6468 case UNSPEC_GOTNTPOFF:
6469 case UNSPEC_INDNTPOFF:
6470 if (saw_plus)
6471 return false;
6472 disp = XVECEXP (disp, 0, 0);
6473 return (GET_CODE (disp) == SYMBOL_REF
6474 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
6475 case UNSPEC_NTPOFF:
6476 disp = XVECEXP (disp, 0, 0);
6477 return (GET_CODE (disp) == SYMBOL_REF
6478 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
6479 case UNSPEC_DTPOFF:
6480 disp = XVECEXP (disp, 0, 0);
6481 return (GET_CODE (disp) == SYMBOL_REF
6482 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
6485 return 0;
6488 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
6489 memory address for an instruction. The MODE argument is the machine mode
6490 for the MEM expression that wants to use this address.
6492 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
6493 convert common non-canonical forms to canonical form so that they will
6494 be recognized. */
6497 legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
6499 struct ix86_address parts;
6500 rtx base, index, disp;
6501 HOST_WIDE_INT scale;
6502 const char *reason = NULL;
6503 rtx reason_rtx = NULL_RTX;
6505 if (TARGET_DEBUG_ADDR)
6507 fprintf (stderr,
6508 "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
6509 GET_MODE_NAME (mode), strict);
6510 debug_rtx (addr);
6513 if (ix86_decompose_address (addr, &parts) <= 0)
6515 reason = "decomposition failed";
6516 goto report_error;
6519 base = parts.base;
6520 index = parts.index;
6521 disp = parts.disp;
6522 scale = parts.scale;
6524 /* Validate base register.
6526 Don't allow SUBREG's that span more than a word here. It can lead to spill
6527 failures when the base is one word out of a two word structure, which is
6528 represented internally as a DImode int. */
6530 if (base)
6532 rtx reg;
6533 reason_rtx = base;
6535 if (REG_P (base))
6536 reg = base;
6537 else if (GET_CODE (base) == SUBREG
6538 && REG_P (SUBREG_REG (base))
6539 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
6540 <= UNITS_PER_WORD)
6541 reg = SUBREG_REG (base);
6542 else
6544 reason = "base is not a register";
6545 goto report_error;
6548 if (GET_MODE (base) != Pmode)
6550 reason = "base is not in Pmode";
6551 goto report_error;
6554 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
6555 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
6557 reason = "base is not valid";
6558 goto report_error;
6562 /* Validate index register.
6564 Don't allow SUBREG's that span more than a word here -- same as above. */
6566 if (index)
6568 rtx reg;
6569 reason_rtx = index;
6571 if (REG_P (index))
6572 reg = index;
6573 else if (GET_CODE (index) == SUBREG
6574 && REG_P (SUBREG_REG (index))
6575 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
6576 <= UNITS_PER_WORD)
6577 reg = SUBREG_REG (index);
6578 else
6580 reason = "index is not a register";
6581 goto report_error;
6584 if (GET_MODE (index) != Pmode)
6586 reason = "index is not in Pmode";
6587 goto report_error;
6590 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
6591 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
6593 reason = "index is not valid";
6594 goto report_error;
6598 /* Validate scale factor. */
6599 if (scale != 1)
6601 reason_rtx = GEN_INT (scale);
6602 if (!index)
6604 reason = "scale without index";
6605 goto report_error;
6608 if (scale != 2 && scale != 4 && scale != 8)
6610 reason = "scale is not a valid multiplier";
6611 goto report_error;
6615 /* Validate displacement. */
6616 if (disp)
6618 reason_rtx = disp;
6620 if (GET_CODE (disp) == CONST
6621 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
6622 switch (XINT (XEXP (disp, 0), 1))
6624 /* Refuse GOTOFF and GOT in 64bit mode since it is always 64bit when
6625 used. While the ABI also specifies 32bit relocations, we don't produce
6626 them at all and use IP-relative addressing instead. */
6627 case UNSPEC_GOT:
6628 case UNSPEC_GOTOFF:
6629 gcc_assert (flag_pic);
6630 if (!TARGET_64BIT)
6631 goto is_legitimate_pic;
6632 reason = "64bit address unspec";
6633 goto report_error;
6635 case UNSPEC_GOTPCREL:
6636 gcc_assert (flag_pic);
6637 goto is_legitimate_pic;
6639 case UNSPEC_GOTTPOFF:
6640 case UNSPEC_GOTNTPOFF:
6641 case UNSPEC_INDNTPOFF:
6642 case UNSPEC_NTPOFF:
6643 case UNSPEC_DTPOFF:
6644 break;
6646 default:
6647 reason = "invalid address unspec";
6648 goto report_error;
6651 else if (SYMBOLIC_CONST (disp)
6652 && (flag_pic
6653 || (TARGET_MACHO
6654 #if TARGET_MACHO
6655 && MACHOPIC_INDIRECT
6656 && !machopic_operand_p (disp)
6657 #endif
6661 is_legitimate_pic:
6662 if (TARGET_64BIT && (index || base))
6664 /* foo@dtpoff(%rX) is ok. */
6665 if (GET_CODE (disp) != CONST
6666 || GET_CODE (XEXP (disp, 0)) != PLUS
6667 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
6668 || GET_CODE (XEXP (XEXP (disp, 0), 1)) != CONST_INT
6669 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
6670 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
6672 reason = "non-constant pic memory reference";
6673 goto report_error;
6676 else if (! legitimate_pic_address_disp_p (disp))
6678 reason = "displacement is an invalid pic construct";
6679 goto report_error;
6682 /* This code used to verify that a symbolic pic displacement
6683 includes the pic_offset_table_rtx register.
6685 While this is a good idea, unfortunately these constructs may
6686 be created by "adds using lea" optimization for incorrect
6687 code like:
6689 int a;
6690 int foo(int i)
6692 return *(&a+i);
6695 This code is nonsensical, but results in addressing the
6696 GOT table with a pic_offset_table_rtx base. We can't
6697 just refuse it easily, since it gets matched by the
6698 "addsi3" pattern, which later gets split to lea when the
6699 output register differs from the input. While this
6700 could be handled by a separate addsi pattern for this case
6701 that never results in lea, disabling this test seems to be
6702 the easier and correct fix for the crash. */
6704 else if (GET_CODE (disp) != LABEL_REF
6705 && GET_CODE (disp) != CONST_INT
6706 && (GET_CODE (disp) != CONST
6707 || !legitimate_constant_p (disp))
6708 && (GET_CODE (disp) != SYMBOL_REF
6709 || !legitimate_constant_p (disp)))
6711 reason = "displacement is not constant";
6712 goto report_error;
6714 else if (TARGET_64BIT
6715 && !x86_64_immediate_operand (disp, VOIDmode))
6717 reason = "displacement is out of range";
6718 goto report_error;
6722 /* Everything looks valid. */
6723 if (TARGET_DEBUG_ADDR)
6724 fprintf (stderr, "Success.\n");
6725 return TRUE;
6727 report_error:
6728 if (TARGET_DEBUG_ADDR)
6730 fprintf (stderr, "Error: %s\n", reason);
6731 debug_rtx (reason_rtx);
6733 return FALSE;
6736 /* Return a unique alias set for the GOT. */
6738 static HOST_WIDE_INT
6739 ix86_GOT_alias_set (void)
6741 static HOST_WIDE_INT set = -1;
6742 if (set == -1)
6743 set = new_alias_set ();
6744 return set;
6747 /* Return a legitimate reference for ORIG (an address) using the
6748 register REG. If REG is 0, a new pseudo is generated.
6750 There are two types of references that must be handled:
6752 1. Global data references must load the address from the GOT, via
6753 the PIC reg. An insn is emitted to do this load, and the reg is
6754 returned.
6756 2. Static data references, constant pool addresses, and code labels
6757 compute the address as an offset from the GOT, whose base is in
6758 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
6759 differentiate them from global data objects. The returned
6760 address is the PIC reg + an unspec constant.
6762 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
6763 reg also appears in the address. */
6765 static rtx
6766 legitimize_pic_address (rtx orig, rtx reg)
6768 rtx addr = orig;
6769 rtx new = orig;
6770 rtx base;
6772 #if TARGET_MACHO
6773 if (TARGET_MACHO && !TARGET_64BIT)
6775 if (reg == 0)
6776 reg = gen_reg_rtx (Pmode);
6777 /* Use the generic Mach-O PIC machinery. */
6778 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
6780 #endif
6782 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
6783 new = addr;
6784 else if (TARGET_64BIT
6785 && ix86_cmodel != CM_SMALL_PIC
6786 && local_symbolic_operand (addr, Pmode))
6788 rtx tmpreg;
6789 /* This symbol may be referenced via a displacement from the PIC
6790 base address (@GOTOFF). */
6792 if (reload_in_progress)
6793 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6794 if (GET_CODE (addr) == CONST)
6795 addr = XEXP (addr, 0);
6796 if (GET_CODE (addr) == PLUS)
6798 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6799 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6801 else
6802 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6803 new = gen_rtx_CONST (Pmode, new);
6804 if (!reg)
6805 tmpreg = gen_reg_rtx (Pmode);
6806 else
6807 tmpreg = reg;
6808 emit_move_insn (tmpreg, new);
6810 if (reg != 0)
6812 new = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
6813 tmpreg, 1, OPTAB_DIRECT);
6814 new = reg;
6816 else new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
6818 else if (!TARGET_64BIT && local_symbolic_operand (addr, Pmode))
6820 /* This symbol may be referenced via a displacement from the PIC
6821 base address (@GOTOFF). */
6823 if (reload_in_progress)
6824 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6825 if (GET_CODE (addr) == CONST)
6826 addr = XEXP (addr, 0);
6827 if (GET_CODE (addr) == PLUS)
6829 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)), UNSPEC_GOTOFF);
6830 new = gen_rtx_PLUS (Pmode, new, XEXP (addr, 1));
6832 else
6833 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
6834 new = gen_rtx_CONST (Pmode, new);
6835 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6837 if (reg != 0)
6839 emit_move_insn (reg, new);
6840 new = reg;
6843 else if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
6845 if (TARGET_64BIT)
6847 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
6848 new = gen_rtx_CONST (Pmode, new);
6849 new = gen_const_mem (Pmode, new);
6850 set_mem_alias_set (new, ix86_GOT_alias_set ());
6852 if (reg == 0)
6853 reg = gen_reg_rtx (Pmode);
6854 /* Use gen_movsi directly, otherwise the address is loaded
6855 into a register for CSE. We don't want to CSE these addresses;
6856 instead we CSE addresses from the GOT table, so skip this. */
6857 emit_insn (gen_movsi (reg, new));
6858 new = reg;
6860 else
6862 /* This symbol must be referenced via a load from the
6863 Global Offset Table (@GOT). */
6865 if (reload_in_progress)
6866 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6867 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
6868 new = gen_rtx_CONST (Pmode, new);
6869 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6870 new = gen_const_mem (Pmode, new);
6871 set_mem_alias_set (new, ix86_GOT_alias_set ());
6873 if (reg == 0)
6874 reg = gen_reg_rtx (Pmode);
6875 emit_move_insn (reg, new);
6876 new = reg;
6879 else
6881 if (GET_CODE (addr) == CONST_INT
6882 && !x86_64_immediate_operand (addr, VOIDmode))
6884 if (reg)
6886 emit_move_insn (reg, addr);
6887 new = reg;
6889 else
6890 new = force_reg (Pmode, addr);
6892 else if (GET_CODE (addr) == CONST)
6894 addr = XEXP (addr, 0);
6896 /* We must match stuff we generated before. Assume the only
6897 unspecs that can get here are ours. Not that we could do
6898 anything with them anyway.... */
6899 if (GET_CODE (addr) == UNSPEC
6900 || (GET_CODE (addr) == PLUS
6901 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
6902 return orig;
6903 gcc_assert (GET_CODE (addr) == PLUS);
6905 if (GET_CODE (addr) == PLUS)
6907 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
6909 /* Check first to see if this is a constant offset from a @GOTOFF
6910 symbol reference. */
6911 if (local_symbolic_operand (op0, Pmode)
6912 && GET_CODE (op1) == CONST_INT)
6914 if (!TARGET_64BIT)
6916 if (reload_in_progress)
6917 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
6918 new = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
6919 UNSPEC_GOTOFF);
6920 new = gen_rtx_PLUS (Pmode, new, op1);
6921 new = gen_rtx_CONST (Pmode, new);
6922 new = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new);
6924 if (reg != 0)
6926 emit_move_insn (reg, new);
6927 new = reg;
6930 else
6932 if (INTVAL (op1) < -16*1024*1024
6933 || INTVAL (op1) >= 16*1024*1024)
6935 if (!x86_64_immediate_operand (op1, Pmode))
6936 op1 = force_reg (Pmode, op1);
6937 new = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
6941 else
6943 base = legitimize_pic_address (XEXP (addr, 0), reg);
6944 new = legitimize_pic_address (XEXP (addr, 1),
6945 base == reg ? NULL_RTX : reg);
6947 if (GET_CODE (new) == CONST_INT)
6948 new = plus_constant (base, INTVAL (new));
6949 else
6951 if (GET_CODE (new) == PLUS && CONSTANT_P (XEXP (new, 1)))
6953 base = gen_rtx_PLUS (Pmode, base, XEXP (new, 0));
6954 new = XEXP (new, 1);
6956 new = gen_rtx_PLUS (Pmode, base, new);
6961 return new;
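/* For illustration, on ia32 the two main cases above come out as:
   a local symbol becomes %pic_reg + foo@GOTOFF, usable directly as an
   address (e.g. "movl foo@GOTOFF(%ebx), %eax"), while a global symbol
   becomes a load of its address from the GOT, roughly
   "movl foo@GOT(%ebx), %eax", with the GOT alias set attached above.  */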
6964 /* Load the thread pointer. If TO_REG is true, force it into a register. */
6966 static rtx
6967 get_thread_pointer (int to_reg)
6969 rtx tp, reg, insn;
6971 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
6972 if (!to_reg)
6973 return tp;
6975 reg = gen_reg_rtx (Pmode);
6976 insn = gen_rtx_SET (VOIDmode, reg, tp);
6977 insn = emit_insn (insn);
6979 return reg;
6982 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
6983 false if we expect this to be used for a memory address and true if
6984 we expect to load the address into a register. */
6986 static rtx
6987 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
6989 rtx dest, base, off, pic, tp;
6990 int type;
6992 switch (model)
6994 case TLS_MODEL_GLOBAL_DYNAMIC:
6995 dest = gen_reg_rtx (Pmode);
6996 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
6998 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7000 rtx rax = gen_rtx_REG (Pmode, 0), insns;
7002 start_sequence ();
7003 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
7004 insns = get_insns ();
7005 end_sequence ();
7007 emit_libcall_block (insns, dest, rax, x);
7009 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7010 emit_insn (gen_tls_global_dynamic_64 (dest, x));
7011 else
7012 emit_insn (gen_tls_global_dynamic_32 (dest, x));
7014 if (TARGET_GNU2_TLS)
7016 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
7018 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7020 break;
7022 case TLS_MODEL_LOCAL_DYNAMIC:
7023 base = gen_reg_rtx (Pmode);
7024 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
7026 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
7028 rtx rax = gen_rtx_REG (Pmode, 0), insns, note;
7030 start_sequence ();
7031 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
7032 insns = get_insns ();
7033 end_sequence ();
7035 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
7036 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
7037 emit_libcall_block (insns, base, rax, note);
7039 else if (TARGET_64BIT && TARGET_GNU2_TLS)
7040 emit_insn (gen_tls_local_dynamic_base_64 (base));
7041 else
7042 emit_insn (gen_tls_local_dynamic_base_32 (base));
7044 if (TARGET_GNU2_TLS)
7046 rtx x = ix86_tls_module_base ();
7048 set_unique_reg_note (get_last_insn (), REG_EQUIV,
7049 gen_rtx_MINUS (Pmode, x, tp));
7052 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
7053 off = gen_rtx_CONST (Pmode, off);
7055 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
7057 if (TARGET_GNU2_TLS)
7059 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
7061 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
7064 break;
7066 case TLS_MODEL_INITIAL_EXEC:
7067 if (TARGET_64BIT)
7069 pic = NULL;
7070 type = UNSPEC_GOTNTPOFF;
7072 else if (flag_pic)
7074 if (reload_in_progress)
7075 regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
7076 pic = pic_offset_table_rtx;
7077 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
7079 else if (!TARGET_ANY_GNU_TLS)
7081 pic = gen_reg_rtx (Pmode);
7082 emit_insn (gen_set_got (pic));
7083 type = UNSPEC_GOTTPOFF;
7085 else
7087 pic = NULL;
7088 type = UNSPEC_INDNTPOFF;
7091 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
7092 off = gen_rtx_CONST (Pmode, off);
7093 if (pic)
7094 off = gen_rtx_PLUS (Pmode, pic, off);
7095 off = gen_const_mem (Pmode, off);
7096 set_mem_alias_set (off, ix86_GOT_alias_set ());
7098 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7100 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7101 off = force_reg (Pmode, off);
7102 return gen_rtx_PLUS (Pmode, base, off);
7104 else
7106 base = get_thread_pointer (true);
7107 dest = gen_reg_rtx (Pmode);
7108 emit_insn (gen_subsi3 (dest, base, off));
7110 break;
7112 case TLS_MODEL_LOCAL_EXEC:
7113 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
7114 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7115 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
7116 off = gen_rtx_CONST (Pmode, off);
7118 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
7120 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
7121 return gen_rtx_PLUS (Pmode, base, off);
7123 else
7125 base = get_thread_pointer (true);
7126 dest = gen_reg_rtx (Pmode);
7127 emit_insn (gen_subsi3 (dest, base, off));
7129 break;
7131 default:
7132 gcc_unreachable ();
7135 return dest;
7138 /* Try machine-dependent ways of modifying an illegitimate address
7139 to be legitimate. If we find one, return the new, valid address.
7140 This macro is used in only one place: `memory_address' in explow.c.
7142 OLDX is the address as it was before break_out_memory_refs was called.
7143 In some cases it is useful to look at this to decide what needs to be done.
7145 MODE and WIN are passed so that this macro can use
7146 GO_IF_LEGITIMATE_ADDRESS.
7148 It is always safe for this macro to do nothing. It exists to recognize
7149 opportunities to optimize the output.
7151 For the 80386, we handle X+REG by loading X into a register R and
7152 using R+REG. R will go in a general reg and indexing will be used.
7153 However, if REG is a broken-out memory address or multiplication,
7154 nothing needs to be done because REG can certainly go in a general reg.
7156 When -fpic is used, special handling is needed for symbolic references.
7157 See comments by legitimize_pic_address in i386.c for details. */
7159 rtx
7160 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
7162 int changed = 0;
7163 unsigned log;
7165 if (TARGET_DEBUG_ADDR)
7167 fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
7168 GET_MODE_NAME (mode));
7169 debug_rtx (x);
7172 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
7173 if (log)
7174 return legitimize_tls_address (x, log, false);
7175 if (GET_CODE (x) == CONST
7176 && GET_CODE (XEXP (x, 0)) == PLUS
7177 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7178 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
7180 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0), log, false);
7181 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
7184 if (flag_pic && SYMBOLIC_CONST (x))
7185 return legitimize_pic_address (x, 0);
7187 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
7188 if (GET_CODE (x) == ASHIFT
7189 && GET_CODE (XEXP (x, 1)) == CONST_INT
7190 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
7192 changed = 1;
7193 log = INTVAL (XEXP (x, 1));
7194 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
7195 GEN_INT (1 << log));
7198 if (GET_CODE (x) == PLUS)
7200 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
7202 if (GET_CODE (XEXP (x, 0)) == ASHIFT
7203 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7204 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
7206 changed = 1;
7207 log = INTVAL (XEXP (XEXP (x, 0), 1));
7208 XEXP (x, 0) = gen_rtx_MULT (Pmode,
7209 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
7210 GEN_INT (1 << log));
7213 if (GET_CODE (XEXP (x, 1)) == ASHIFT
7214 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
7215 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
7217 changed = 1;
7218 log = INTVAL (XEXP (XEXP (x, 1), 1));
7219 XEXP (x, 1) = gen_rtx_MULT (Pmode,
7220 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
7221 GEN_INT (1 << log));
7224 /* Put multiply first if it isn't already. */
7225 if (GET_CODE (XEXP (x, 1)) == MULT)
7227 rtx tmp = XEXP (x, 0);
7228 XEXP (x, 0) = XEXP (x, 1);
7229 XEXP (x, 1) = tmp;
7230 changed = 1;
7233 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
7234 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
7235 created by virtual register instantiation, register elimination, and
7236 similar optimizations. */
7237 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
7239 changed = 1;
7240 x = gen_rtx_PLUS (Pmode,
7241 gen_rtx_PLUS (Pmode, XEXP (x, 0),
7242 XEXP (XEXP (x, 1), 0)),
7243 XEXP (XEXP (x, 1), 1));
7246 /* Canonicalize
7247 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
7248 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
7249 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
7250 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
7251 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
7252 && CONSTANT_P (XEXP (x, 1)))
7254 rtx constant;
7255 rtx other = NULL_RTX;
7257 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7259 constant = XEXP (x, 1);
7260 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
7262 else if (GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 1)) == CONST_INT)
7264 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
7265 other = XEXP (x, 1);
7267 else
7268 constant = 0;
7270 if (constant)
7272 changed = 1;
7273 x = gen_rtx_PLUS (Pmode,
7274 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
7275 XEXP (XEXP (XEXP (x, 0), 1), 0)),
7276 plus_constant (other, INTVAL (constant)));
7280 if (changed && legitimate_address_p (mode, x, FALSE))
7281 return x;
7283 if (GET_CODE (XEXP (x, 0)) == MULT)
7285 changed = 1;
7286 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
7289 if (GET_CODE (XEXP (x, 1)) == MULT)
7291 changed = 1;
7292 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
7295 if (changed
7296 && GET_CODE (XEXP (x, 1)) == REG
7297 && GET_CODE (XEXP (x, 0)) == REG)
7298 return x;
7300 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
7302 changed = 1;
7303 x = legitimize_pic_address (x, 0);
7306 if (changed && legitimate_address_p (mode, x, FALSE))
7307 return x;
7309 if (GET_CODE (XEXP (x, 0)) == REG)
7311 rtx temp = gen_reg_rtx (Pmode);
7312 rtx val = force_operand (XEXP (x, 1), temp);
7313 if (val != temp)
7314 emit_move_insn (temp, val);
7316 XEXP (x, 1) = temp;
7317 return x;
7320 else if (GET_CODE (XEXP (x, 1)) == REG)
7322 rtx temp = gen_reg_rtx (Pmode);
7323 rtx val = force_operand (XEXP (x, 0), temp);
7324 if (val != temp)
7325 emit_move_insn (temp, val);
7327 XEXP (x, 0) = temp;
7328 return x;
7332 return x;
7335 /* Print an integer constant expression in assembler syntax. Addition
7336 and subtraction are the only arithmetic that may appear in these
7337 expressions. FILE is the stdio stream to write to, X is the rtx, and
7338 CODE is the operand print code from the output string. */
7340 static void
7341 output_pic_addr_const (FILE *file, rtx x, int code)
7343 char buf[256];
7345 switch (GET_CODE (x))
7347 case PC:
7348 gcc_assert (flag_pic);
7349 putc ('.', file);
7350 break;
7352 case SYMBOL_REF:
7353 output_addr_const (file, x);
7354 if (!TARGET_MACHO && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
7355 fputs ("@PLT", file);
7356 break;
7358 case LABEL_REF:
7359 x = XEXP (x, 0);
7360 /* FALLTHRU */
7361 case CODE_LABEL:
7362 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
7363 assemble_name (asm_out_file, buf);
7364 break;
7366 case CONST_INT:
7367 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
7368 break;
7370 case CONST:
7371 /* This used to output parentheses around the expression,
7372 but that does not work on the 386 (either ATT or BSD assembler). */
7373 output_pic_addr_const (file, XEXP (x, 0), code);
7374 break;
7376 case CONST_DOUBLE:
7377 if (GET_MODE (x) == VOIDmode)
7379 /* We can use %d if the number is <32 bits and positive. */
7380 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
7381 fprintf (file, "0x%lx%08lx",
7382 (unsigned long) CONST_DOUBLE_HIGH (x),
7383 (unsigned long) CONST_DOUBLE_LOW (x));
7384 else
7385 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
7387 else
7388 /* We can't handle floating point constants;
7389 PRINT_OPERAND must handle them. */
7390 output_operand_lossage ("floating constant misused");
7391 break;
7393 case PLUS:
7394 /* Some assemblers need integer constants to appear first. */
7395 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
7397 output_pic_addr_const (file, XEXP (x, 0), code);
7398 putc ('+', file);
7399 output_pic_addr_const (file, XEXP (x, 1), code);
7401 else
7403 gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
7404 output_pic_addr_const (file, XEXP (x, 1), code);
7405 putc ('+', file);
7406 output_pic_addr_const (file, XEXP (x, 0), code);
7408 break;
7410 case MINUS:
7411 if (!TARGET_MACHO)
7412 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
7413 output_pic_addr_const (file, XEXP (x, 0), code);
7414 putc ('-', file);
7415 output_pic_addr_const (file, XEXP (x, 1), code);
7416 if (!TARGET_MACHO)
7417 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
7418 break;
7420 case UNSPEC:
7421 gcc_assert (XVECLEN (x, 0) == 1);
7422 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
7423 switch (XINT (x, 1))
7425 case UNSPEC_GOT:
7426 fputs ("@GOT", file);
7427 break;
7428 case UNSPEC_GOTOFF:
7429 fputs ("@GOTOFF", file);
7430 break;
7431 case UNSPEC_GOTPCREL:
7432 fputs ("@GOTPCREL(%rip)", file);
7433 break;
7434 case UNSPEC_GOTTPOFF:
7435 /* FIXME: This might be @TPOFF in Sun ld too. */
7436 fputs ("@GOTTPOFF", file);
7437 break;
7438 case UNSPEC_TPOFF:
7439 fputs ("@TPOFF", file);
7440 break;
7441 case UNSPEC_NTPOFF:
7442 if (TARGET_64BIT)
7443 fputs ("@TPOFF", file);
7444 else
7445 fputs ("@NTPOFF", file);
7446 break;
7447 case UNSPEC_DTPOFF:
7448 fputs ("@DTPOFF", file);
7449 break;
7450 case UNSPEC_GOTNTPOFF:
7451 if (TARGET_64BIT)
7452 fputs ("@GOTTPOFF(%rip)", file);
7453 else
7454 fputs ("@GOTNTPOFF", file);
7455 break;
7456 case UNSPEC_INDNTPOFF:
7457 fputs ("@INDNTPOFF", file);
7458 break;
7459 default:
7460 output_operand_lossage ("invalid UNSPEC as operand");
7461 break;
7463 break;
7465 default:
7466 output_operand_lossage ("invalid expression as operand");
7470 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
7471 We need to emit DTP-relative relocations. */
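/* For example, for SIZE == 4 this typically emits ".long sym@DTPOFF"
   (ASM_LONG usually expands to the .long directive); for SIZE == 8 a
   ", 0" is appended to pad the relocation out to eight bytes.  */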
7473 static void
7474 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
7476 fputs (ASM_LONG, file);
7477 output_addr_const (file, x);
7478 fputs ("@DTPOFF", file);
7479 switch (size)
7481 case 4:
7482 break;
7483 case 8:
7484 fputs (", 0", file);
7485 break;
7486 default:
7487 gcc_unreachable ();
7491 /* In the name of slightly smaller debug output, and to cater to
7492 general assembler lossage, recognize PIC+GOTOFF and turn it back
7493 into a direct symbol reference.
7495 On Darwin, this is necessary to avoid a crash, because Darwin
7496 has a different PIC label for each routine but the DWARF debugging
7497 information is not associated with any particular routine, so it's
7498 necessary to remove references to the PIC label from RTL stored by
7499 the DWARF output code. */
7501 static rtx
7502 ix86_delegitimize_address (rtx orig_x)
7504 rtx x = orig_x;
7505 /* reg_addend is NULL or a multiple of some register. */
7506 rtx reg_addend = NULL_RTX;
7507 /* const_addend is NULL or a const_int. */
7508 rtx const_addend = NULL_RTX;
7509 /* This is the result, or NULL. */
7510 rtx result = NULL_RTX;
7512 if (GET_CODE (x) == MEM)
7513 x = XEXP (x, 0);
7515 if (TARGET_64BIT)
7517 if (GET_CODE (x) != CONST
7518 || GET_CODE (XEXP (x, 0)) != UNSPEC
7519 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
7520 || GET_CODE (orig_x) != MEM)
7521 return orig_x;
7522 return XVECEXP (XEXP (x, 0), 0, 0);
7525 if (GET_CODE (x) != PLUS
7526 || GET_CODE (XEXP (x, 1)) != CONST)
7527 return orig_x;
7529 if (GET_CODE (XEXP (x, 0)) == REG
7530 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
7531 /* %ebx + GOT/GOTOFF */
7533 else if (GET_CODE (XEXP (x, 0)) == PLUS)
7535 /* %ebx + %reg * scale + GOT/GOTOFF */
7536 reg_addend = XEXP (x, 0);
7537 if (GET_CODE (XEXP (reg_addend, 0)) == REG
7538 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
7539 reg_addend = XEXP (reg_addend, 1);
7540 else if (GET_CODE (XEXP (reg_addend, 1)) == REG
7541 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
7542 reg_addend = XEXP (reg_addend, 0);
7543 else
7544 return orig_x;
7545 if (GET_CODE (reg_addend) != REG
7546 && GET_CODE (reg_addend) != MULT
7547 && GET_CODE (reg_addend) != ASHIFT)
7548 return orig_x;
7550 else
7551 return orig_x;
7553 x = XEXP (XEXP (x, 1), 0);
7554 if (GET_CODE (x) == PLUS
7555 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7557 const_addend = XEXP (x, 1);
7558 x = XEXP (x, 0);
7561 if (GET_CODE (x) == UNSPEC
7562 && ((XINT (x, 1) == UNSPEC_GOT && GET_CODE (orig_x) == MEM)
7563 || (XINT (x, 1) == UNSPEC_GOTOFF && GET_CODE (orig_x) != MEM)))
7564 result = XVECEXP (x, 0, 0);
7566 if (TARGET_MACHO && darwin_local_data_pic (x)
7567 && GET_CODE (orig_x) != MEM)
7568 result = XEXP (x, 0);
7570 if (! result)
7571 return orig_x;
7573 if (const_addend)
7574 result = gen_rtx_PLUS (Pmode, result, const_addend);
7575 if (reg_addend)
7576 result = gen_rtx_PLUS (Pmode, reg_addend, result);
7577 return result;
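/* Print to FILE the condition-code suffix (e, ne, g, a, ...) corresponding
   to comparison CODE in mode MODE.  If REVERSE is nonzero, print the suffix
   for the reversed condition.  FP nonzero selects the fcmov-style spellings
   ("nbe"/"nb") that some assemblers require instead of "a"/"ae".  */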
7580 static void
7581 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
7582 int fp, FILE *file)
7584 const char *suffix;
7586 if (mode == CCFPmode || mode == CCFPUmode)
7588 enum rtx_code second_code, bypass_code;
7589 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
7590 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
7591 code = ix86_fp_compare_code_to_integer (code);
7592 mode = CCmode;
7594 if (reverse)
7595 code = reverse_condition (code);
7597 switch (code)
7599 case EQ:
7600 suffix = "e";
7601 break;
7602 case NE:
7603 suffix = "ne";
7604 break;
7605 case GT:
7606 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
7607 suffix = "g";
7608 break;
7609 case GTU:
7610 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
7611 Those same assemblers have the same but opposite lossage on cmov. */
7612 gcc_assert (mode == CCmode);
7613 suffix = fp ? "nbe" : "a";
7614 break;
7615 case LT:
7616 switch (mode)
7618 case CCNOmode:
7619 case CCGOCmode:
7620 suffix = "s";
7621 break;
7623 case CCmode:
7624 case CCGCmode:
7625 suffix = "l";
7626 break;
7628 default:
7629 gcc_unreachable ();
7631 break;
7632 case LTU:
7633 gcc_assert (mode == CCmode);
7634 suffix = "b";
7635 break;
7636 case GE:
7637 switch (mode)
7639 case CCNOmode:
7640 case CCGOCmode:
7641 suffix = "ns";
7642 break;
7644 case CCmode:
7645 case CCGCmode:
7646 suffix = "ge";
7647 break;
7649 default:
7650 gcc_unreachable ();
7652 break;
7653 case GEU:
7654 /* ??? As above. */
7655 gcc_assert (mode == CCmode);
7656 suffix = fp ? "nb" : "ae";
7657 break;
7658 case LE:
7659 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
7660 suffix = "le";
7661 break;
7662 case LEU:
7663 gcc_assert (mode == CCmode);
7664 suffix = "be";
7665 break;
7666 case UNORDERED:
7667 suffix = fp ? "u" : "p";
7668 break;
7669 case ORDERED:
7670 suffix = fp ? "nu" : "np";
7671 break;
7672 default:
7673 gcc_unreachable ();
7675 fputs (suffix, file);
7678 /* Print the name of register X to FILE based on its machine mode and number.
7679 If CODE is 'w', pretend the mode is HImode.
7680 If CODE is 'b', pretend the mode is QImode.
7681 If CODE is 'k', pretend the mode is SImode.
7682 If CODE is 'q', pretend the mode is DImode.
7683 If CODE is 'h', pretend the reg is the 'high' byte register.
7684 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack register. */
7686 void
7687 print_reg (rtx x, int code, FILE *file)
7689 gcc_assert (REGNO (x) != ARG_POINTER_REGNUM
7690 && REGNO (x) != FRAME_POINTER_REGNUM
7691 && REGNO (x) != FLAGS_REG
7692 && REGNO (x) != FPSR_REG
7693 && REGNO (x) != FPCR_REG);
7695 if (ASSEMBLER_DIALECT == ASM_ATT || USER_LABEL_PREFIX[0] == 0)
7696 putc ('%', file);
7698 if (code == 'w' || MMX_REG_P (x))
7699 code = 2;
7700 else if (code == 'b')
7701 code = 1;
7702 else if (code == 'k')
7703 code = 4;
7704 else if (code == 'q')
7705 code = 8;
7706 else if (code == 'y')
7707 code = 3;
7708 else if (code == 'h')
7709 code = 0;
7710 else
7711 code = GET_MODE_SIZE (GET_MODE (x));
7713 /* Irritatingly, AMD extended registers use a different naming convention
7714 from the normal registers. */
7715 if (REX_INT_REG_P (x))
7717 gcc_assert (TARGET_64BIT);
7718 switch (code)
7720 case 0:
7721 error ("extended registers have no high halves");
7722 break;
7723 case 1:
7724 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
7725 break;
7726 case 2:
7727 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
7728 break;
7729 case 4:
7730 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
7731 break;
7732 case 8:
7733 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
7734 break;
7735 default:
7736 error ("unsupported operand size for extended register");
7737 break;
7739 return;
7741 switch (code)
7743 case 3:
7744 if (STACK_TOP_P (x))
7746 fputs ("st(0)", file);
7747 break;
7749 /* FALLTHRU */
7750 case 8:
7751 case 4:
7752 case 12:
7753 if (! ANY_FP_REG_P (x))
7754 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
7755 /* FALLTHRU */
7756 case 16:
7757 case 2:
7758 normal:
7759 fputs (hi_reg_name[REGNO (x)], file);
7760 break;
7761 case 1:
7762 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
7763 goto normal;
7764 fputs (qi_reg_name[REGNO (x)], file);
7765 break;
7766 case 0:
7767 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
7768 goto normal;
7769 fputs (qi_high_reg_name[REGNO (x)], file);
7770 break;
7771 default:
7772 gcc_unreachable ();
7776 /* Locate some local-dynamic symbol still in use by this function
7777 so that we can print its name in some tls_local_dynamic_base
7778 pattern. */
7780 static const char *
7781 get_some_local_dynamic_name (void)
7783 rtx insn;
7785 if (cfun->machine->some_ld_name)
7786 return cfun->machine->some_ld_name;
7788 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
7789 if (INSN_P (insn)
7790 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
7791 return cfun->machine->some_ld_name;
7793 gcc_unreachable ();
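/* Callback for for_each_rtx, used by get_some_local_dynamic_name above:
   if *PX is a local-dynamic TLS SYMBOL_REF, record its name in
   cfun->machine->some_ld_name and stop the traversal.  */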
7796 static int
7797 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
7799 rtx x = *px;
7801 if (GET_CODE (x) == SYMBOL_REF
7802 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
7804 cfun->machine->some_ld_name = XSTR (x, 0);
7805 return 1;
7808 return 0;
7811 /* Meaning of CODE:
7812 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
7813 C -- print opcode suffix for set/cmov insn.
7814 c -- like C, but print reversed condition
7815 F,f -- likewise, but for floating-point.
7816 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
7817 otherwise nothing
7818 R -- print the prefix for register names.
7819 z -- print the opcode suffix for the size of the current operand.
7820 * -- print a star (in certain assembler syntax)
7821 A -- print an absolute memory reference.
7822 w -- print the operand as if it's a "word" (HImode) even if it isn't.
7823 s -- print a shift double count, followed by the assembler's argument
7824 delimiter.
7825 b -- print the QImode name of the register for the indicated operand.
7826 %b0 would print %al if operands[0] is reg 0.
7827 w -- likewise, print the HImode name of the register.
7828 k -- likewise, print the SImode name of the register.
7829 q -- likewise, print the DImode name of the register.
7830 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
7831 y -- print "st(0)" instead of "st" as a register.
7832 D -- print condition for SSE cmp instruction.
7833 P -- if PIC, print an @PLT suffix.
7834 X -- don't print any sort of PIC '@' suffix for a symbol.
7835 & -- print some in-use local-dynamic symbol name.
7836 H -- print a memory address offset by 8; used for sse high-parts
7839 void
7840 print_operand (FILE *file, rtx x, int code)
7842 if (code)
7844 switch (code)
7846 case '*':
7847 if (ASSEMBLER_DIALECT == ASM_ATT)
7848 putc ('*', file);
7849 return;
7851 case '&':
7852 assemble_name (file, get_some_local_dynamic_name ());
7853 return;
7855 case 'A':
7856 switch (ASSEMBLER_DIALECT)
7858 case ASM_ATT:
7859 putc ('*', file);
7860 break;
7862 case ASM_INTEL:
7863 /* Intel syntax. For absolute addresses, registers should not
7864 be surrounded by brackets. */
7865 if (GET_CODE (x) != REG)
7867 putc ('[', file);
7868 PRINT_OPERAND (file, x, 0);
7869 putc (']', file);
7870 return;
7872 break;
7874 default:
7875 gcc_unreachable ();
7878 PRINT_OPERAND (file, x, 0);
7879 return;
7882 case 'L':
7883 if (ASSEMBLER_DIALECT == ASM_ATT)
7884 putc ('l', file);
7885 return;
7887 case 'W':
7888 if (ASSEMBLER_DIALECT == ASM_ATT)
7889 putc ('w', file);
7890 return;
7892 case 'B':
7893 if (ASSEMBLER_DIALECT == ASM_ATT)
7894 putc ('b', file);
7895 return;
7897 case 'Q':
7898 if (ASSEMBLER_DIALECT == ASM_ATT)
7899 putc ('l', file);
7900 return;
7902 case 'S':
7903 if (ASSEMBLER_DIALECT == ASM_ATT)
7904 putc ('s', file);
7905 return;
7907 case 'T':
7908 if (ASSEMBLER_DIALECT == ASM_ATT)
7909 putc ('t', file);
7910 return;
7912 case 'z':
7913 /* 387 opcodes don't get size suffixes if the operands are
7914 registers. */
7915 if (STACK_REG_P (x))
7916 return;
7918 /* Likewise if using Intel opcodes. */
7919 if (ASSEMBLER_DIALECT == ASM_INTEL)
7920 return;
7922 /* Derive the opcode suffix from the size of the operand. */
7923 switch (GET_MODE_SIZE (GET_MODE (x)))
7925 case 1:
7926 putc ('b', file);
7927 return;
7929 case 2:
7930 #ifdef HAVE_GAS_FILDS_FISTS
7931 putc ('s', file);
7932 #endif
7933 return;
7935 case 4:
7936 if (GET_MODE (x) == SFmode)
7938 putc ('s', file);
7939 return;
7941 else
7942 putc ('l', file);
7943 return;
7945 case 12:
7946 case 16:
7947 putc ('t', file);
7948 return;
7950 case 8:
7951 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
7953 #ifdef GAS_MNEMONICS
7954 putc ('q', file);
7955 #else
7956 putc ('l', file);
7957 putc ('l', file);
7958 #endif
7960 else
7961 putc ('l', file);
7962 return;
7964 default:
7965 gcc_unreachable ();
7968 case 'b':
7969 case 'w':
7970 case 'k':
7971 case 'q':
7972 case 'h':
7973 case 'y':
7974 case 'X':
7975 case 'P':
7976 break;
7978 case 's':
7979 if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
7981 PRINT_OPERAND (file, x, 0);
7982 putc (',', file);
7984 return;
7986 case 'D':
7987 /* Little bit of braindamage here. The SSE compare instructions
7988 use completely different names for the comparisons than the
7989 fp conditional moves do. */
7990 switch (GET_CODE (x))
7992 case EQ:
7993 case UNEQ:
7994 fputs ("eq", file);
7995 break;
7996 case LT:
7997 case UNLT:
7998 fputs ("lt", file);
7999 break;
8000 case LE:
8001 case UNLE:
8002 fputs ("le", file);
8003 break;
8004 case UNORDERED:
8005 fputs ("unord", file);
8006 break;
8007 case NE:
8008 case LTGT:
8009 fputs ("neq", file);
8010 break;
8011 case UNGE:
8012 case GE:
8013 fputs ("nlt", file);
8014 break;
8015 case UNGT:
8016 case GT:
8017 fputs ("nle", file);
8018 break;
8019 case ORDERED:
8020 fputs ("ord", file);
8021 break;
8022 default:
8023 gcc_unreachable ();
8025 return;
8026 case 'O':
8027 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8028 if (ASSEMBLER_DIALECT == ASM_ATT)
8030 switch (GET_MODE (x))
8032 case HImode: putc ('w', file); break;
8033 case SImode:
8034 case SFmode: putc ('l', file); break;
8035 case DImode:
8036 case DFmode: putc ('q', file); break;
8037 default: gcc_unreachable ();
8039 putc ('.', file);
8041 #endif
8042 return;
8043 case 'C':
8044 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
8045 return;
8046 case 'F':
8047 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8048 if (ASSEMBLER_DIALECT == ASM_ATT)
8049 putc ('.', file);
8050 #endif
8051 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
8052 return;
8054 /* Like above, but reverse condition */
8055 case 'c':
8056 /* Check to see if argument to %c is really a constant
8057 and not a condition code which needs to be reversed. */
8058 if (!COMPARISON_P (x))
8060 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
8061 return;
8063 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
8064 return;
8065 case 'f':
8066 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
8067 if (ASSEMBLER_DIALECT == ASM_ATT)
8068 putc ('.', file);
8069 #endif
8070 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
8071 return;
8073 case 'H':
8074 /* It doesn't actually matter what mode we use here, as we're
8075 only going to use this for printing. */
8076 x = adjust_address_nv (x, DImode, 8);
8077 break;
8079 case '+':
8081 rtx x;
8083 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
8084 return;
8086 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
8087 if (x)
8089 int pred_val = INTVAL (XEXP (x, 0));
8091 if (pred_val < REG_BR_PROB_BASE * 45 / 100
8092 || pred_val > REG_BR_PROB_BASE * 55 / 100)
8094 int taken = pred_val > REG_BR_PROB_BASE / 2;
8095 int cputaken = final_forward_branch_p (current_output_insn) == 0;
8097 /* Emit hints only in the case where the default branch prediction
8098 heuristics would fail. */
8099 if (taken != cputaken)
8101 /* We use 3e (DS) prefix for taken branches and
8102 2e (CS) prefix for not taken branches. */
8103 if (taken)
8104 fputs ("ds ; ", file);
8105 else
8106 fputs ("cs ; ", file);
8110 return;
8112 default:
8113 output_operand_lossage ("invalid operand code '%c'", code);
8117 if (GET_CODE (x) == REG)
8118 print_reg (x, code, file);
8120 else if (GET_CODE (x) == MEM)
8122 /* No `byte ptr' prefix for call instructions. */
8123 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P')
8125 const char * size;
8126 switch (GET_MODE_SIZE (GET_MODE (x)))
8128 case 1: size = "BYTE"; break;
8129 case 2: size = "WORD"; break;
8130 case 4: size = "DWORD"; break;
8131 case 8: size = "QWORD"; break;
8132 case 12: size = "XWORD"; break;
8133 case 16: size = "XMMWORD"; break;
8134 default:
8135 gcc_unreachable ();
8138 /* Check for explicit size override (codes 'b', 'w' and 'k') */
8139 if (code == 'b')
8140 size = "BYTE";
8141 else if (code == 'w')
8142 size = "WORD";
8143 else if (code == 'k')
8144 size = "DWORD";
8146 fputs (size, file);
8147 fputs (" PTR ", file);
8150 x = XEXP (x, 0);
8151 /* Avoid (%rip) for call operands. */
8152 if (CONSTANT_ADDRESS_P (x) && code == 'P'
8153 && GET_CODE (x) != CONST_INT)
8154 output_addr_const (file, x);
8155 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
8156 output_operand_lossage ("invalid constraints for operand");
8157 else
8158 output_address (x);
8161 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
8163 REAL_VALUE_TYPE r;
8164 long l;
8166 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8167 REAL_VALUE_TO_TARGET_SINGLE (r, l);
8169 if (ASSEMBLER_DIALECT == ASM_ATT)
8170 putc ('$', file);
8171 fprintf (file, "0x%08lx", l);
8174 /* These float cases don't actually occur as immediate operands. */
8175 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
8177 char dstr[30];
8179 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8180 fprintf (file, "%s", dstr);
8183 else if (GET_CODE (x) == CONST_DOUBLE
8184 && GET_MODE (x) == XFmode)
8186 char dstr[30];
8188 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
8189 fprintf (file, "%s", dstr);
8192 else
8194 /* We have patterns that allow zero sets of memory, for instance.
8195 In 64-bit mode, we should probably support all 8-byte vectors,
8196 since we can in fact encode that into an immediate. */
8197 if (GET_CODE (x) == CONST_VECTOR)
8199 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
8200 x = const0_rtx;
8203 if (code != 'P')
8205 if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
8207 if (ASSEMBLER_DIALECT == ASM_ATT)
8208 putc ('$', file);
8210 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
8211 || GET_CODE (x) == LABEL_REF)
8213 if (ASSEMBLER_DIALECT == ASM_ATT)
8214 putc ('$', file);
8215 else
8216 fputs ("OFFSET FLAT:", file);
8219 if (GET_CODE (x) == CONST_INT)
8220 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
8221 else if (flag_pic)
8222 output_pic_addr_const (file, x, code);
8223 else
8224 output_addr_const (file, x);
8228 /* Print a memory operand whose address is ADDR. */
8230 void
8231 print_operand_address (FILE *file, rtx addr)
8233 struct ix86_address parts;
8234 rtx base, index, disp;
8235 int scale;
8236 int ok = ix86_decompose_address (addr, &parts);
8238 gcc_assert (ok);
8240 base = parts.base;
8241 index = parts.index;
8242 disp = parts.disp;
8243 scale = parts.scale;
8245 switch (parts.seg)
8247 case SEG_DEFAULT:
8248 break;
8249 case SEG_FS:
8250 case SEG_GS:
8251 if (USER_LABEL_PREFIX[0] == 0)
8252 putc ('%', file);
8253 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
8254 break;
8255 default:
8256 gcc_unreachable ();
8259 if (!base && !index)
8261 /* Displacement only requires special attention. */
8263 if (GET_CODE (disp) == CONST_INT)
8265 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
8267 if (USER_LABEL_PREFIX[0] == 0)
8268 putc ('%', file);
8269 fputs ("ds:", file);
8271 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
8273 else if (flag_pic)
8274 output_pic_addr_const (file, disp, 0);
8275 else
8276 output_addr_const (file, disp);
8278 /* Use the one byte shorter RIP-relative addressing for 64-bit mode. */
8279 if (TARGET_64BIT)
8281 if (GET_CODE (disp) == CONST
8282 && GET_CODE (XEXP (disp, 0)) == PLUS
8283 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8284 disp = XEXP (XEXP (disp, 0), 0);
8285 if (GET_CODE (disp) == LABEL_REF
8286 || (GET_CODE (disp) == SYMBOL_REF
8287 && SYMBOL_REF_TLS_MODEL (disp) == 0))
8288 fputs ("(%rip)", file);
8291 else
8293 if (ASSEMBLER_DIALECT == ASM_ATT)
8295 if (disp)
8297 if (flag_pic)
8298 output_pic_addr_const (file, disp, 0);
8299 else if (GET_CODE (disp) == LABEL_REF)
8300 output_asm_label (disp);
8301 else
8302 output_addr_const (file, disp);
8305 putc ('(', file);
8306 if (base)
8307 print_reg (base, 0, file);
8308 if (index)
8310 putc (',', file);
8311 print_reg (index, 0, file);
8312 if (scale != 1)
8313 fprintf (file, ",%d", scale);
8315 putc (')', file);
8317 else
8319 rtx offset = NULL_RTX;
8321 if (disp)
8323 /* Pull out the offset of a symbol; print any symbol itself. */
8324 if (GET_CODE (disp) == CONST
8325 && GET_CODE (XEXP (disp, 0)) == PLUS
8326 && GET_CODE (XEXP (XEXP (disp, 0), 1)) == CONST_INT)
8328 offset = XEXP (XEXP (disp, 0), 1);
8329 disp = gen_rtx_CONST (VOIDmode,
8330 XEXP (XEXP (disp, 0), 0));
8333 if (flag_pic)
8334 output_pic_addr_const (file, disp, 0);
8335 else if (GET_CODE (disp) == LABEL_REF)
8336 output_asm_label (disp);
8337 else if (GET_CODE (disp) == CONST_INT)
8338 offset = disp;
8339 else
8340 output_addr_const (file, disp);
8343 putc ('[', file);
8344 if (base)
8346 print_reg (base, 0, file);
8347 if (offset)
8349 if (INTVAL (offset) >= 0)
8350 putc ('+', file);
8351 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8354 else if (offset)
8355 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
8356 else
8357 putc ('0', file);
8359 if (index)
8361 putc ('+', file);
8362 print_reg (index, 0, file);
8363 if (scale != 1)
8364 fprintf (file, "*%d", scale);
8366 putc (']', file);
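/* Output the @-relocation suffix for a TLS-related UNSPEC X to FILE
   (e.g. sym@GOTTPOFF or sym@NTPOFF).  Return true if X was handled,
   false otherwise so the caller can fall back or report an error.  */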
8371 bool
8372 output_addr_const_extra (FILE *file, rtx x)
8374 rtx op;
8376 if (GET_CODE (x) != UNSPEC)
8377 return false;
8379 op = XVECEXP (x, 0, 0);
8380 switch (XINT (x, 1))
8382 case UNSPEC_GOTTPOFF:
8383 output_addr_const (file, op);
8384 /* FIXME: This might be @TPOFF in Sun ld. */
8385 fputs ("@GOTTPOFF", file);
8386 break;
8387 case UNSPEC_TPOFF:
8388 output_addr_const (file, op);
8389 fputs ("@TPOFF", file);
8390 break;
8391 case UNSPEC_NTPOFF:
8392 output_addr_const (file, op);
8393 if (TARGET_64BIT)
8394 fputs ("@TPOFF", file);
8395 else
8396 fputs ("@NTPOFF", file);
8397 break;
8398 case UNSPEC_DTPOFF:
8399 output_addr_const (file, op);
8400 fputs ("@DTPOFF", file);
8401 break;
8402 case UNSPEC_GOTNTPOFF:
8403 output_addr_const (file, op);
8404 if (TARGET_64BIT)
8405 fputs ("@GOTTPOFF(%rip)", file);
8406 else
8407 fputs ("@GOTNTPOFF", file);
8408 break;
8409 case UNSPEC_INDNTPOFF:
8410 output_addr_const (file, op);
8411 fputs ("@INDNTPOFF", file);
8412 break;
8414 default:
8415 return false;
8418 return true;
8421 /* Split one or more DImode RTL references into pairs of SImode
8422 references. The RTL can be REG, offsettable MEM, integer constant, or
8423 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
8424 split and "num" is its length. lo_half and hi_half are output arrays
8425 that parallel "operands". */
8427 void
8428 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8430 while (num--)
8432 rtx op = operands[num];
8434 /* simplify_subreg refuses to split volatile memory addresses,
8435 but we still have to handle them. */
8436 if (GET_CODE (op) == MEM)
8438 lo_half[num] = adjust_address (op, SImode, 0);
8439 hi_half[num] = adjust_address (op, SImode, 4);
8441 else
8443 lo_half[num] = simplify_gen_subreg (SImode, op,
8444 GET_MODE (op) == VOIDmode
8445 ? DImode : GET_MODE (op), 0);
8446 hi_half[num] = simplify_gen_subreg (SImode, op,
8447 GET_MODE (op) == VOIDmode
8448 ? DImode : GET_MODE (op), 4);
8452 /* Split one or more TImode RTL references into pairs of DImode
8453 references. The RTL can be REG, offsettable MEM, integer constant, or
8454 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
8455 split and "num" is its length. lo_half and hi_half are output arrays
8456 that parallel "operands". */
8458 void
8459 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
8461 while (num--)
8463 rtx op = operands[num];
8465 /* simplify_subreg refuses to split volatile memory addresses, but we
8466 still have to handle them. */
8467 if (GET_CODE (op) == MEM)
8469 lo_half[num] = adjust_address (op, DImode, 0);
8470 hi_half[num] = adjust_address (op, DImode, 8);
8472 else
8474 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
8475 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
8480 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
8481 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
8482 is the expression of the binary operation. The output may either be
8483 emitted here, or returned to the caller, like all output_* functions.
8485 There is no guarantee that the operands are the same mode, as they
8486 might be within FLOAT or FLOAT_EXTEND expressions. */
8488 #ifndef SYSV386_COMPAT
8489 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
8490 wants to fix the assemblers because that causes incompatibility
8491 with gcc. No-one wants to fix gcc because that causes
8492 incompatibility with assemblers... You can use the option of
8493 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
8494 #define SYSV386_COMPAT 1
8495 #endif
8497 const char *
8498 output_387_binary_op (rtx insn, rtx *operands)
8500 static char buf[30];
8501 const char *p;
8502 const char *ssep;
8503 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
8505 #ifdef ENABLE_CHECKING
8506 /* Even if we do not want to check the inputs, this documents the input
8507 constraints, which helps in understanding the following code. */
8508 if (STACK_REG_P (operands[0])
8509 && ((REG_P (operands[1])
8510 && REGNO (operands[0]) == REGNO (operands[1])
8511 && (STACK_REG_P (operands[2]) || GET_CODE (operands[2]) == MEM))
8512 || (REG_P (operands[2])
8513 && REGNO (operands[0]) == REGNO (operands[2])
8514 && (STACK_REG_P (operands[1]) || GET_CODE (operands[1]) == MEM)))
8515 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
8516 ; /* ok */
8517 else
8518 gcc_assert (is_sse);
8519 #endif
8521 switch (GET_CODE (operands[3]))
8523 case PLUS:
8524 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8525 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8526 p = "fiadd";
8527 else
8528 p = "fadd";
8529 ssep = "add";
8530 break;
8532 case MINUS:
8533 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8534 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8535 p = "fisub";
8536 else
8537 p = "fsub";
8538 ssep = "sub";
8539 break;
8541 case MULT:
8542 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8543 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8544 p = "fimul";
8545 else
8546 p = "fmul";
8547 ssep = "mul";
8548 break;
8550 case DIV:
8551 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
8552 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
8553 p = "fidiv";
8554 else
8555 p = "fdiv";
8556 ssep = "div";
8557 break;
8559 default:
8560 gcc_unreachable ();
8563 if (is_sse)
8565 strcpy (buf, ssep);
8566 if (GET_MODE (operands[0]) == SFmode)
8567 strcat (buf, "ss\t{%2, %0|%0, %2}");
8568 else
8569 strcat (buf, "sd\t{%2, %0|%0, %2}");
8570 return buf;
8572 strcpy (buf, p);
8574 switch (GET_CODE (operands[3]))
8576 case MULT:
8577 case PLUS:
8578 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
8580 rtx temp = operands[2];
8581 operands[2] = operands[1];
8582 operands[1] = temp;
8585 /* know operands[0] == operands[1]. */
8587 if (GET_CODE (operands[2]) == MEM)
8589 p = "%z2\t%2";
8590 break;
8593 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8595 if (STACK_TOP_P (operands[0]))
8596 /* How is it that we are storing to a dead operand[2]?
8597 Well, presumably operands[1] is dead too. We can't
8598 store the result to st(0) as st(0) gets popped on this
8599 instruction. Instead store to operands[2] (which I
8600 think has to be st(1)). st(1) will be popped later.
8601 gcc <= 2.8.1 didn't have this check and generated
8602 assembly code that the Unixware assembler rejected. */
8603 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8604 else
8605 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8606 break;
8609 if (STACK_TOP_P (operands[0]))
8610 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8611 else
8612 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8613 break;
8615 case MINUS:
8616 case DIV:
8617 if (GET_CODE (operands[1]) == MEM)
8619 p = "r%z1\t%1";
8620 break;
8623 if (GET_CODE (operands[2]) == MEM)
8625 p = "%z2\t%2";
8626 break;
8629 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
8631 #if SYSV386_COMPAT
8632 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
8633 derived assemblers, confusingly reverse the direction of
8634 the operation for fsub{r} and fdiv{r} when the
8635 destination register is not st(0). The Intel assembler
8636 doesn't have this brain damage. Read !SYSV386_COMPAT to
8637 figure out what the hardware really does. */
8638 if (STACK_TOP_P (operands[0]))
8639 p = "{p\t%0, %2|rp\t%2, %0}";
8640 else
8641 p = "{rp\t%2, %0|p\t%0, %2}";
8642 #else
8643 if (STACK_TOP_P (operands[0]))
8644 /* As above for fmul/fadd, we can't store to st(0). */
8645 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
8646 else
8647 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
8648 #endif
8649 break;
8652 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
8654 #if SYSV386_COMPAT
8655 if (STACK_TOP_P (operands[0]))
8656 p = "{rp\t%0, %1|p\t%1, %0}";
8657 else
8658 p = "{p\t%1, %0|rp\t%0, %1}";
8659 #else
8660 if (STACK_TOP_P (operands[0]))
8661 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
8662 else
8663 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
8664 #endif
8665 break;
8668 if (STACK_TOP_P (operands[0]))
8670 if (STACK_TOP_P (operands[1]))
8671 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
8672 else
8673 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
8674 break;
8676 else if (STACK_TOP_P (operands[1]))
8678 #if SYSV386_COMPAT
8679 p = "{\t%1, %0|r\t%0, %1}";
8680 #else
8681 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
8682 #endif
8684 else
8686 #if SYSV386_COMPAT
8687 p = "{r\t%2, %0|\t%0, %2}";
8688 #else
8689 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
8690 #endif
8692 break;
8694 default:
8695 gcc_unreachable ();
8698 strcat (buf, p);
8699 return buf;
8702 /* Return needed mode for entity in optimize_mode_switching pass. */
8704 int
8705 ix86_mode_needed (int entity, rtx insn)
8707 enum attr_i387_cw mode;
8709 /* The mode UNINITIALIZED is used to store the control word after a
8710 function call or ASM pattern. The mode ANY specifies that the function
8711 has no requirements on the control word and makes no changes in the
8712 bits we are interested in. */
8714 if (CALL_P (insn)
8715 || (NONJUMP_INSN_P (insn)
8716 && (asm_noperands (PATTERN (insn)) >= 0
8717 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
8718 return I387_CW_UNINITIALIZED;
8720 if (recog_memoized (insn) < 0)
8721 return I387_CW_ANY;
8723 mode = get_attr_i387_cw (insn);
8725 switch (entity)
8727 case I387_TRUNC:
8728 if (mode == I387_CW_TRUNC)
8729 return mode;
8730 break;
8732 case I387_FLOOR:
8733 if (mode == I387_CW_FLOOR)
8734 return mode;
8735 break;
8737 case I387_CEIL:
8738 if (mode == I387_CW_CEIL)
8739 return mode;
8740 break;
8742 case I387_MASK_PM:
8743 if (mode == I387_CW_MASK_PM)
8744 return mode;
8745 break;
8747 default:
8748 gcc_unreachable ();
8751 return I387_CW_ANY;
8754 /* Output code to initialize the control word copies used by the
8755 trunc?f?i and rounding patterns. MODE selects which modified copy
8756 of the control word to set up and store in its stack slot. */
8758 void
8759 emit_i387_cw_initialization (int mode)
8761 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
8762 rtx new_mode;
8764 int slot;
8766 rtx reg = gen_reg_rtx (HImode);
8768 emit_insn (gen_x86_fnstcw_1 (stored_mode));
8769 emit_move_insn (reg, copy_rtx (stored_mode));
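/* In the 387 control word, bits 10-11 select the rounding mode
   (00 = to nearest, 01 = down, 10 = up, 11 = toward zero/truncate)
   and bit 5 (0x20) masks the precision exception; the constants
   below set those bits in the saved copy.  */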
8771 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
8773 switch (mode)
8775 case I387_CW_TRUNC:
8776 /* round toward zero (truncate) */
8777 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
8778 slot = SLOT_CW_TRUNC;
8779 break;
8781 case I387_CW_FLOOR:
8782 /* round down toward -oo */
8783 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8784 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
8785 slot = SLOT_CW_FLOOR;
8786 break;
8788 case I387_CW_CEIL:
8789 /* round up toward +oo */
8790 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
8791 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
8792 slot = SLOT_CW_CEIL;
8793 break;
8795 case I387_CW_MASK_PM:
8796 /* mask precision exception for nearbyint() */
8797 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8798 slot = SLOT_CW_MASK_PM;
8799 break;
8801 default:
8802 gcc_unreachable ();
8805 else
8807 switch (mode)
8809 case I387_CW_TRUNC:
8810 /* round toward zero (truncate) */
8811 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
8812 slot = SLOT_CW_TRUNC;
8813 break;
8815 case I387_CW_FLOOR:
8816 /* round down toward -oo */
8817 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
8818 slot = SLOT_CW_FLOOR;
8819 break;
8821 case I387_CW_CEIL:
8822 /* round up toward +oo */
8823 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
8824 slot = SLOT_CW_CEIL;
8825 break;
8827 case I387_CW_MASK_PM:
8828 /* mask precision exception for nearbyint() */
8829 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
8830 slot = SLOT_CW_MASK_PM;
8831 break;
8833 default:
8834 gcc_unreachable ();
8838 gcc_assert (slot < MAX_386_STACK_LOCALS);
8840 new_mode = assign_386_stack_local (HImode, slot);
8841 emit_move_insn (new_mode, reg);
8844 /* Output code for INSN to convert a float to a signed int. OPERANDS
8845 are the insn operands. The output may be [HSD]Imode and the input
8846 operand may be [SDX]Fmode. */
8848 const char *
8849 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
8851 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8852 int dimode_p = GET_MODE (operands[0]) == DImode;
8853 int round_mode = get_attr_i387_cw (insn);
8855 /* Jump through a hoop or two for DImode, since the hardware has no
8856 non-popping instruction. We used to do this a different way, but
8857 that was somewhat fragile and broke with post-reload splitters. */
8858 if ((dimode_p || fisttp) && !stack_top_dies)
8859 output_asm_insn ("fld\t%y1", operands);
8861 gcc_assert (STACK_TOP_P (operands[1]));
8862 gcc_assert (GET_CODE (operands[0]) == MEM);
8864 if (fisttp)
8865 output_asm_insn ("fisttp%z0\t%0", operands);
8866 else
8868 if (round_mode != I387_CW_ANY)
8869 output_asm_insn ("fldcw\t%3", operands);
8870 if (stack_top_dies || dimode_p)
8871 output_asm_insn ("fistp%z0\t%0", operands);
8872 else
8873 output_asm_insn ("fist%z0\t%0", operands);
8874 if (round_mode != I387_CW_ANY)
8875 output_asm_insn ("fldcw\t%2", operands);
8878 return "";
8881 /* Output code for x87 ffreep insn. The OPNO argument, which may only
8882 have the values zero or one, indicates the ffreep insn's operand
8883 from the OPERANDS array. */
8885 static const char *
8886 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
8888 if (TARGET_USE_FFREEP)
8889 #if HAVE_AS_IX86_FFREEP
8890 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
8891 #else
8893 static char retval[] = ".word\t0xc_df";
8894 int regno = REGNO (operands[opno]);
8896 gcc_assert (FP_REGNO_P (regno));
8898 retval[9] = '0' + (regno - FIRST_STACK_REG);
8899 return retval;
8901 #endif
8903 return opno ? "fstp\t%y1" : "fstp\t%y0";
8907 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
8908 should be used. UNORDERED_P is true when fucom should be used. */
8910 const char *
8911 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
8913 int stack_top_dies;
8914 rtx cmp_op0, cmp_op1;
8915 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
8917 if (eflags_p)
8919 cmp_op0 = operands[0];
8920 cmp_op1 = operands[1];
8922 else
8924 cmp_op0 = operands[1];
8925 cmp_op1 = operands[2];
8928 if (is_sse)
8930 if (GET_MODE (operands[0]) == SFmode)
8931 if (unordered_p)
8932 return "ucomiss\t{%1, %0|%0, %1}";
8933 else
8934 return "comiss\t{%1, %0|%0, %1}";
8935 else
8936 if (unordered_p)
8937 return "ucomisd\t{%1, %0|%0, %1}";
8938 else
8939 return "comisd\t{%1, %0|%0, %1}";
8942 gcc_assert (STACK_TOP_P (cmp_op0));
8944 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
8946 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
8948 if (stack_top_dies)
8950 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
8951 return output_387_ffreep (operands, 1);
8953 else
8954 return "ftst\n\tfnstsw\t%0";
8957 if (STACK_REG_P (cmp_op1)
8958 && stack_top_dies
8959 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
8960 && REGNO (cmp_op1) != FIRST_STACK_REG)
8962 /* If the top of the 387 stack dies, and the other operand is
8963 also a stack register that dies, then this must be a
8964 `fcompp' float compare. */
8966 if (eflags_p)
8968 /* There is no double popping fcomi variant. Fortunately,
8969 eflags is immune from the fstp's cc clobbering. */
8970 if (unordered_p)
8971 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
8972 else
8973 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
8974 return output_387_ffreep (operands, 0);
8976 else
8978 if (unordered_p)
8979 return "fucompp\n\tfnstsw\t%0";
8980 else
8981 return "fcompp\n\tfnstsw\t%0";
8984 else
8986 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
8988 static const char * const alt[16] =
8990 "fcom%z2\t%y2\n\tfnstsw\t%0",
8991 "fcomp%z2\t%y2\n\tfnstsw\t%0",
8992 "fucom%z2\t%y2\n\tfnstsw\t%0",
8993 "fucomp%z2\t%y2\n\tfnstsw\t%0",
8995 "ficom%z2\t%y2\n\tfnstsw\t%0",
8996 "ficomp%z2\t%y2\n\tfnstsw\t%0",
8997 NULL,
8998 NULL,
9000 "fcomi\t{%y1, %0|%0, %y1}",
9001 "fcomip\t{%y1, %0|%0, %y1}",
9002 "fucomi\t{%y1, %0|%0, %y1}",
9003 "fucomip\t{%y1, %0|%0, %y1}",
9005 NULL,
9006 NULL,
9007 NULL,
9008 NULL
9011 int mask;
9012 const char *ret;
9014 mask = eflags_p << 3;
9015 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
9016 mask |= unordered_p << 1;
9017 mask |= stack_top_dies;
9019 gcc_assert (mask < 16);
9020 ret = alt[mask];
9021 gcc_assert (ret);
9023 return ret;
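/* Output an element of an absolute jump table: a .long (or .quad, when
   available for 64-bit targets) reference to the local label numbered
   VALUE.  */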
9027 void
9028 ix86_output_addr_vec_elt (FILE *file, int value)
9030 const char *directive = ASM_LONG;
9032 #ifdef ASM_QUAD
9033 if (TARGET_64BIT)
9034 directive = ASM_QUAD;
9035 #else
9036 gcc_assert (!TARGET_64BIT);
9037 #endif
9039 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
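/* Output an element of a relative jump table: the distance from label REL
   to label VALUE on 64-bit targets, or a @GOTOFF/PIC-base-relative
   reference to label VALUE for 32-bit PIC code.  */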
9042 void
9043 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
9045 if (TARGET_64BIT)
9046 fprintf (file, "%s%s%d-%s%d\n",
9047 ASM_LONG, LPREFIX, value, LPREFIX, rel);
9048 else if (HAVE_AS_GOTOFF_IN_DATA)
9049 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
9050 #if TARGET_MACHO
9051 else if (TARGET_MACHO)
9053 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
9054 machopic_output_function_base_name (file);
9055 fprintf(file, "\n");
9057 #endif
9058 else
9059 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
9060 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
9063 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
9064 for the target. */
9066 void
9067 ix86_expand_clear (rtx dest)
9069 rtx tmp;
9071 /* We play register width games, which are only valid after reload. */
9072 gcc_assert (reload_completed);
9074 /* Avoid HImode and its attendant prefix byte. */
9075 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
9076 dest = gen_rtx_REG (SImode, REGNO (dest));
9078 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
9080 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
9081 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
9083 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, 17));
9084 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
9087 emit_insn (tmp);
9090 /* X is an unchanging MEM. If it is a constant pool reference, return
9091 the constant pool rtx, else NULL. */
9093 rtx
9094 maybe_get_pool_constant (rtx x)
9096 x = ix86_delegitimize_address (XEXP (x, 0));
9098 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9099 return get_pool_constant (x);
9101 return NULL_RTX;
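/* Expand a move of MODE from operands[1] to operands[0].  TLS and PIC
   symbolic operands are legitimized first, and operand combinations the
   move patterns cannot handle (mem-to-mem copies, awkward pushes, some
   large or floating-point constants) are forced through registers or the
   constant pool.  */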
9104 void
9105 ix86_expand_move (enum machine_mode mode, rtx operands[])
9107 int strict = (reload_in_progress || reload_completed);
9108 rtx op0, op1;
9109 enum tls_model model;
9111 op0 = operands[0];
9112 op1 = operands[1];
9114 if (GET_CODE (op1) == SYMBOL_REF)
9116 model = SYMBOL_REF_TLS_MODEL (op1);
9117 if (model)
9119 op1 = legitimize_tls_address (op1, model, true);
9120 op1 = force_operand (op1, op0);
9121 if (op1 == op0)
9122 return;
9125 else if (GET_CODE (op1) == CONST
9126 && GET_CODE (XEXP (op1, 0)) == PLUS
9127 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
9129 model = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (op1, 0), 0));
9130 if (model)
9132 rtx addend = XEXP (XEXP (op1, 0), 1);
9133 op1 = legitimize_tls_address (XEXP (XEXP (op1, 0), 0), model, true);
9134 op1 = force_operand (op1, NULL);
9135 op1 = expand_simple_binop (Pmode, PLUS, op1, addend,
9136 op0, 1, OPTAB_DIRECT);
9137 if (op1 == op0)
9138 return;
9142 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
9144 if (TARGET_MACHO && !TARGET_64BIT)
9146 #if TARGET_MACHO
9147 if (MACHOPIC_PURE)
9149 rtx temp = ((reload_in_progress
9150 || ((op0 && GET_CODE (op0) == REG)
9151 && mode == Pmode))
9152 ? op0 : gen_reg_rtx (Pmode));
9153 op1 = machopic_indirect_data_reference (op1, temp);
9154 op1 = machopic_legitimize_pic_address (op1, mode,
9155 temp == op1 ? 0 : temp);
9157 else if (MACHOPIC_INDIRECT)
9158 op1 = machopic_indirect_data_reference (op1, 0);
9159 if (op0 == op1)
9160 return;
9161 #endif
9163 else
9165 if (GET_CODE (op0) == MEM)
9166 op1 = force_reg (Pmode, op1);
9167 else
9168 op1 = legitimize_address (op1, op1, Pmode);
9171 else
9173 if (GET_CODE (op0) == MEM
9174 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
9175 || !push_operand (op0, mode))
9176 && GET_CODE (op1) == MEM)
9177 op1 = force_reg (mode, op1);
9179 if (push_operand (op0, mode)
9180 && ! general_no_elim_operand (op1, mode))
9181 op1 = copy_to_mode_reg (mode, op1);
9183 /* Force large constants in 64-bit compilation into a register
9184 to get them CSEed. */
9185 if (TARGET_64BIT && mode == DImode
9186 && immediate_operand (op1, mode)
9187 && !x86_64_zext_immediate_operand (op1, VOIDmode)
9188 && !register_operand (op0, mode)
9189 && optimize && !reload_completed && !reload_in_progress)
9190 op1 = copy_to_mode_reg (mode, op1);
9192 if (FLOAT_MODE_P (mode))
9194 /* If we are loading a floating point constant to a register,
9195 force the value to memory now, since we'll get better code
9196 out the back end. */
9198 if (strict)
9200 else if (GET_CODE (op1) == CONST_DOUBLE)
9202 op1 = validize_mem (force_const_mem (mode, op1));
9203 if (!register_operand (op0, mode))
9205 rtx temp = gen_reg_rtx (mode);
9206 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
9207 emit_move_insn (op0, temp);
9208 return;
9214 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
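/* Expand a vector move in MODE.  Constants other than the easy SSE
   constants are forced into the constant pool, and when neither operand
   is a register (and new pseudos are allowed) operand 1 is copied
   through a register first.  */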
9217 void
9218 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
9220 rtx op0 = operands[0], op1 = operands[1];
9222 /* Force constants other than zero into memory. We do not know how
9223 the instructions used to build constants modify the upper 64 bits
9224 of the register; once we have that information we may be able
9225 to handle some of them more efficiently. */
9226 if ((reload_in_progress | reload_completed) == 0
9227 && register_operand (op0, mode)
9228 && CONSTANT_P (op1)
9229 && standard_sse_constant_p (op1) <= 0)
9230 op1 = validize_mem (force_const_mem (mode, op1));
9232 /* Make operand1 a register if it isn't already. */
9233 if (!no_new_pseudos
9234 && !register_operand (op0, mode)
9235 && !register_operand (op1, mode))
9237 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
9238 return;
9241 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
9244 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
9245 straight to ix86_expand_vector_move. */
9247 void
9248 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
9250 rtx op0, op1, m;
9252 op0 = operands[0];
9253 op1 = operands[1];
9255 if (MEM_P (op1))
9257 /* If we're optimizing for size, movups is the smallest. */
9258 if (optimize_size)
9260 op0 = gen_lowpart (V4SFmode, op0);
9261 op1 = gen_lowpart (V4SFmode, op1);
9262 emit_insn (gen_sse_movups (op0, op1));
9263 return;
9266 /* ??? If we have typed data, then it would appear that using
9267 movdqu is the only way to get unaligned data loaded with
9268 integer type. */
9269 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9271 op0 = gen_lowpart (V16QImode, op0);
9272 op1 = gen_lowpart (V16QImode, op1);
9273 emit_insn (gen_sse2_movdqu (op0, op1));
9274 return;
9277 if (TARGET_SSE2 && mode == V2DFmode)
9279 rtx zero;
9281 /* When SSE registers are split into halves, we can avoid
9282 writing to the top half twice. */
9283 if (TARGET_SSE_SPLIT_REGS)
9285 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9286 zero = op0;
9288 else
9290 /* ??? Not sure about the best option for the Intel chips.
9291 The following would seem to satisfy; the register is
9292 entirely cleared, breaking the dependency chain. We
9293 then store to the upper half, with a dependency depth
9294 of one. A rumor has it that Intel recommends two movsd
9295 followed by an unpacklpd, but this is unconfirmed. And
9296 given that the dependency depth of the unpacklpd would
9297 still be one, I'm not sure why this would be better. */
9298 zero = CONST0_RTX (V2DFmode);
9301 m = adjust_address (op1, DFmode, 0);
9302 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9303 m = adjust_address (op1, DFmode, 8);
9304 emit_insn (gen_sse2_loadhpd (op0, op0, m));
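/* i.e. the unaligned V2DF load is assembled from two 64-bit halves: the
   low quadword is loaded first (with the upper half supplied by ZERO),
   then the high quadword is loaded on top while the low half is kept.  */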
9306 else
9308 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9309 emit_move_insn (op0, CONST0_RTX (mode));
9310 else
9311 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9313 if (mode != V4SFmode)
9314 op0 = gen_lowpart (V4SFmode, op0);
9315 m = adjust_address (op1, V2SFmode, 0);
9316 emit_insn (gen_sse_loadlps (op0, op0, m));
9317 m = adjust_address (op1, V2SFmode, 8);
9318 emit_insn (gen_sse_loadhps (op0, op0, m));
9321 else if (MEM_P (op0))
9323 /* If we're optimizing for size, movups is the smallest. */
9324 if (optimize_size)
9326 op0 = gen_lowpart (V4SFmode, op0);
9327 op1 = gen_lowpart (V4SFmode, op1);
9328 emit_insn (gen_sse_movups (op0, op1));
9329 return;
9332 /* ??? Similar to above, only less clear because of quote
9333 typeless stores unquote. */
9334 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
9335 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9337 op0 = gen_lowpart (V16QImode, op0);
9338 op1 = gen_lowpart (V16QImode, op1);
9339 emit_insn (gen_sse2_movdqu (op0, op1));
9340 return;
9343 if (TARGET_SSE2 && mode == V2DFmode)
9345 m = adjust_address (op0, DFmode, 0);
9346 emit_insn (gen_sse2_storelpd (m, op1));
9347 m = adjust_address (op0, DFmode, 8);
9348 emit_insn (gen_sse2_storehpd (m, op1));
9350 else
9352 if (mode != V4SFmode)
9353 op1 = gen_lowpart (V4SFmode, op1);
9354 m = adjust_address (op0, V2SFmode, 0);
9355 emit_insn (gen_sse_storelps (m, op1));
9356 m = adjust_address (op0, V2SFmode, 8);
9357 emit_insn (gen_sse_storehps (m, op1));
9360 else
9361 gcc_unreachable ();
9364 /* Expand a push in MODE. This is some mode for which we do not support
9365 proper push instructions, at least from the registers that we expect
9366 the value to live in. */
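/* The expansion below is deliberately simple: decrement the stack pointer
   by the size of MODE and store X into the newly allocated slot, i.e. the
   moral equivalent of "sub $size, %sp" followed by a move to (%sp).  */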
9368 void
9369 ix86_expand_push (enum machine_mode mode, rtx x)
9371 rtx tmp;
9373 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
9374 GEN_INT (-GET_MODE_SIZE (mode)),
9375 stack_pointer_rtx, 1, OPTAB_DIRECT);
9376 if (tmp != stack_pointer_rtx)
9377 emit_move_insn (stack_pointer_rtx, tmp);
9379 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
9380 emit_move_insn (tmp, x);
9383 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
9384 destination to use for the operation. If different from the true
9385 destination in operands[0], a copy operation will be required. */
9387 rtx
9388 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
9389 rtx operands[])
9391 int matching_memory;
9392 rtx src1, src2, dst;
9394 dst = operands[0];
9395 src1 = operands[1];
9396 src2 = operands[2];
9398 /* Recognize <var1> = <value> <op> <var1> for commutative operators */
9399 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9400 && (rtx_equal_p (dst, src2)
9401 || immediate_operand (src1, mode)))
9403 rtx temp = src1;
9404 src1 = src2;
9405 src2 = temp;
9408 /* If the destination is memory, and we do not have matching source
9409 operands, do things in registers. */
9410 matching_memory = 0;
9411 if (GET_CODE (dst) == MEM)
9413 if (rtx_equal_p (dst, src1))
9414 matching_memory = 1;
9415 else if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9416 && rtx_equal_p (dst, src2))
9417 matching_memory = 2;
9418 else
9419 dst = gen_reg_rtx (mode);
9422 /* Both source operands cannot be in memory. */
9423 if (GET_CODE (src1) == MEM && GET_CODE (src2) == MEM)
9425 if (matching_memory != 2)
9426 src2 = force_reg (mode, src2);
9427 else
9428 src1 = force_reg (mode, src1);
9431 /* If the operation is not commutable, source 1 cannot be a constant
9432 or non-matching memory. */
9433 if ((CONSTANT_P (src1)
9434 || (!matching_memory && GET_CODE (src1) == MEM))
9435 && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9436 src1 = force_reg (mode, src1);
9438 src1 = operands[1] = src1;
9439 src2 = operands[2] = src2;
9440 return dst;
9443 /* Similarly, but assume that the destination has already been
9444 set up properly. */
9446 void
9447 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
9448 enum machine_mode mode, rtx operands[])
9450 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
9451 gcc_assert (dst == operands[0]);
9454 /* Attempt to expand a binary operator. Make the expansion closer to the
9455 actual machine, than just general_operand, which will allow 3 separate
9456 memory references (one output, two input) in a single insn. */
9458 void
9459 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
9460 rtx operands[])
9462 rtx src1, src2, dst, op, clob;
9464 dst = ix86_fixup_binary_operands (code, mode, operands);
9465 src1 = operands[1];
9466 src2 = operands[2];
9468 /* Emit the instruction. */
9470 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
9471 if (reload_in_progress)
9473 /* Reload doesn't know about the flags register, and doesn't know that
9474 it doesn't want to clobber it. We can only do this with PLUS. */
9475 gcc_assert (code == PLUS);
9476 emit_insn (op);
9478 else
9480 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9481 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9484 /* Fix up the destination if needed. */
9485 if (dst != operands[0])
9486 emit_move_insn (operands[0], dst);
9489 /* Return TRUE or FALSE depending on whether the binary operator meets the
9490 appropriate constraints. */
9492 int
9493 ix86_binary_operator_ok (enum rtx_code code,
9494 enum machine_mode mode ATTRIBUTE_UNUSED,
9495 rtx operands[3])
9497 /* Both source operands cannot be in memory. */
9498 if (GET_CODE (operands[1]) == MEM && GET_CODE (operands[2]) == MEM)
9499 return 0;
9500 /* If the operation is not commutable, source 1 cannot be a constant. */
9501 if (CONSTANT_P (operands[1]) && GET_RTX_CLASS (code) != RTX_COMM_ARITH)
9502 return 0;
9503 /* If the destination is memory, we must have a matching source operand. */
9504 if (GET_CODE (operands[0]) == MEM
9505 && ! (rtx_equal_p (operands[0], operands[1])
9506 || (GET_RTX_CLASS (code) == RTX_COMM_ARITH
9507 && rtx_equal_p (operands[0], operands[2]))))
9508 return 0;
9509 /* If the operation is not commutable and the source 1 is memory, we must
9510 have a matching destination. */
9511 if (GET_CODE (operands[1]) == MEM
9512 && GET_RTX_CLASS (code) != RTX_COMM_ARITH
9513 && ! rtx_equal_p (operands[0], operands[1]))
9514 return 0;
9515 return 1;
9518 /* Attempt to expand a unary operator. Make the expansion closer to the
10519 actual machine, than just general_operand, which will allow 2 separate
9520 memory references (one output, one input) in a single insn. */
9522 void
9523 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
9524 rtx operands[])
9526 int matching_memory;
9527 rtx src, dst, op, clob;
9529 dst = operands[0];
9530 src = operands[1];
9532 /* If the destination is memory, and we do not have matching source
9533 operands, do things in registers. */
9534 matching_memory = 0;
9535 if (MEM_P (dst))
9537 if (rtx_equal_p (dst, src))
9538 matching_memory = 1;
9539 else
9540 dst = gen_reg_rtx (mode);
9543 /* When source operand is memory, destination must match. */
9544 if (MEM_P (src) && !matching_memory)
9545 src = force_reg (mode, src);
9547 /* Emit the instruction. */
9549 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
9550 if (reload_in_progress || code == NOT)
9552 /* Reload doesn't know about the flags register, and doesn't know that
9553 it doesn't want to clobber it. */
9554 gcc_assert (code == NOT);
9555 emit_insn (op);
9557 else
9559 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9560 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
9563 /* Fix up the destination if needed. */
9564 if (dst != operands[0])
9565 emit_move_insn (operands[0], dst);
9568 /* Return TRUE or FALSE depending on whether the unary operator meets the
9569 appropriate constraints. */
9571 int
9572 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
9573 enum machine_mode mode ATTRIBUTE_UNUSED,
9574 rtx operands[2] ATTRIBUTE_UNUSED)
9576 /* If one of operands is memory, source and destination must match. */
9577 if ((GET_CODE (operands[0]) == MEM
9578 || GET_CODE (operands[1]) == MEM)
9579 && ! rtx_equal_p (operands[0], operands[1]))
9580 return FALSE;
9581 return TRUE;
9584 /* A subroutine of ix86_expand_fp_absneg_operator and copysign expanders.
9585 Create a mask for the sign bit in MODE for an SSE register. If VECT is
9586 true, then replicate the mask for all elements of the vector register.
9587 If INVERT is true, then create a mask excluding the sign bit. */
9589 rtx
9590 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
9592 enum machine_mode vec_mode;
9593 HOST_WIDE_INT hi, lo;
9594 int shift = 63;
9595 rtvec v;
9596 rtx mask;
9598 /* Find the sign bit, sign extended to 2*HWI. */
9599 if (mode == SFmode)
9600 lo = 0x80000000, hi = lo < 0;
9601 else if (HOST_BITS_PER_WIDE_INT >= 64)
9602 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
9603 else
9604 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
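  /* Concretely: SFmode yields lo = 0x80000000, the single-precision sign
     bit, while DFmode on a host with 64-bit HOST_WIDE_INT yields
     lo = (HOST_WIDE_INT) 1 << 63 and hi = -1, i.e. bit 63 sign-extended
     into the double-width constant.  */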
9606 if (invert)
9607 lo = ~lo, hi = ~hi;
9609 /* Force this value into the low part of a fp vector constant. */
9610 mask = immed_double_const (lo, hi, mode == SFmode ? SImode : DImode);
9611 mask = gen_lowpart (mode, mask);
9613 if (mode == SFmode)
9615 if (vect)
9616 v = gen_rtvec (4, mask, mask, mask, mask);
9617 else
9618 v = gen_rtvec (4, mask, CONST0_RTX (SFmode),
9619 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9620 vec_mode = V4SFmode;
9622 else
9624 if (vect)
9625 v = gen_rtvec (2, mask, mask);
9626 else
9627 v = gen_rtvec (2, mask, CONST0_RTX (DFmode));
9628 vec_mode = V2DFmode;
9631 return force_reg (vec_mode, gen_rtx_CONST_VECTOR (vec_mode, v));
9634 /* Generate code for floating point ABS or NEG. */
9636 void
9637 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
9638 rtx operands[])
9640 rtx mask, set, use, clob, dst, src;
9641 bool matching_memory;
9642 bool use_sse = false;
9643 bool vector_mode = VECTOR_MODE_P (mode);
9644 enum machine_mode elt_mode = mode;
9646 if (vector_mode)
9648 elt_mode = GET_MODE_INNER (mode);
9649 use_sse = true;
9651 else if (TARGET_SSE_MATH)
9652 use_sse = SSE_FLOAT_MODE_P (mode);
9654 /* NEG and ABS performed with SSE use bitwise mask operations.
9655 Create the appropriate mask now. */
9656 if (use_sse)
9657 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
9658 else
9659 mask = NULL_RTX;
9661 dst = operands[0];
9662 src = operands[1];
9664 /* If the destination is memory, and we don't have matching source
9665 operands or we're using the x87, do things in registers. */
9666 matching_memory = false;
9667 if (MEM_P (dst))
9669 if (use_sse && rtx_equal_p (dst, src))
9670 matching_memory = true;
9671 else
9672 dst = gen_reg_rtx (mode);
9674 if (MEM_P (src) && !matching_memory)
9675 src = force_reg (mode, src);
9677 if (vector_mode)
9679 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
9680 set = gen_rtx_SET (VOIDmode, dst, set);
9681 emit_insn (set);
9683 else
9685 set = gen_rtx_fmt_e (code, mode, src);
9686 set = gen_rtx_SET (VOIDmode, dst, set);
9687 if (mask)
9689 use = gen_rtx_USE (VOIDmode, mask);
9690 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
9691 emit_insn (gen_rtx_PARALLEL (VOIDmode,
9692 gen_rtvec (3, set, use, clob)));
9694 else
9695 emit_insn (set);
9698 if (dst != operands[0])
9699 emit_move_insn (operands[0], dst);
9702 /* Expand a copysign operation. Special case operand 0 being a constant. */
9704 void
9705 ix86_expand_copysign (rtx operands[])
9707 enum machine_mode mode, vmode;
9708 rtx dest, op0, op1, mask, nmask;
9710 dest = operands[0];
9711 op0 = operands[1];
9712 op1 = operands[2];
9714 mode = GET_MODE (dest);
9715 vmode = mode == SFmode ? V4SFmode : V2DFmode;
9717 if (GET_CODE (op0) == CONST_DOUBLE)
9719 rtvec v;
9721 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
9722 op0 = simplify_unary_operation (ABS, mode, op0, mode);
9724 if (op0 == CONST0_RTX (mode))
9725 op0 = CONST0_RTX (vmode);
9726 else
9728 if (mode == SFmode)
9729 v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
9730 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
9731 else
9732 v = gen_rtvec (2, op0, CONST0_RTX (DFmode));
9733 op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
9736 mask = ix86_build_signbit_mask (mode, 0, 0);
9738 if (mode == SFmode)
9739 emit_insn (gen_copysignsf3_const (dest, op0, op1, mask));
9740 else
9741 emit_insn (gen_copysigndf3_const (dest, op0, op1, mask));
9743 else
9745 nmask = ix86_build_signbit_mask (mode, 0, 1);
9746 mask = ix86_build_signbit_mask (mode, 0, 0);
9748 if (mode == SFmode)
9749 emit_insn (gen_copysignsf3_var (dest, NULL, op0, op1, nmask, mask));
9750 else
9751 emit_insn (gen_copysigndf3_var (dest, NULL, op0, op1, nmask, mask));
9755 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
9756 be a constant, and so has already been expanded into a vector constant. */
9758 void
9759 ix86_split_copysign_const (rtx operands[])
9761 enum machine_mode mode, vmode;
9762 rtx dest, op0, op1, mask, x;
9764 dest = operands[0];
9765 op0 = operands[1];
9766 op1 = operands[2];
9767 mask = operands[3];
9769 mode = GET_MODE (dest);
9770 vmode = GET_MODE (mask);
9772 dest = simplify_gen_subreg (vmode, dest, mode, 0);
9773 x = gen_rtx_AND (vmode, dest, mask);
9774 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9776 if (op0 != CONST0_RTX (vmode))
9778 x = gen_rtx_IOR (vmode, dest, op0);
9779 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9783 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
9784 so we have to do two masks. */
9786 void
9787 ix86_split_copysign_var (rtx operands[])
9789 enum machine_mode mode, vmode;
9790 rtx dest, scratch, op0, op1, mask, nmask, x;
9792 dest = operands[0];
9793 scratch = operands[1];
9794 op0 = operands[2];
9795 op1 = operands[3];
9796 nmask = operands[4];
9797 mask = operands[5];
9799 mode = GET_MODE (dest);
9800 vmode = GET_MODE (mask);
9802 if (rtx_equal_p (op0, op1))
9804 /* Shouldn't happen often (it's useless, obviously), but when it does
9805 we'd generate incorrect code if we continue below. */
9806 emit_move_insn (dest, op0);
9807 return;
9810 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
9812 gcc_assert (REGNO (op1) == REGNO (scratch));
9814 x = gen_rtx_AND (vmode, scratch, mask);
9815 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9817 dest = mask;
9818 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9819 x = gen_rtx_NOT (vmode, dest);
9820 x = gen_rtx_AND (vmode, x, op0);
9821 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9823 else
9825 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
9827 x = gen_rtx_AND (vmode, scratch, mask);
9829 else /* alternative 2,4 */
9831 gcc_assert (REGNO (mask) == REGNO (scratch));
9832 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
9833 x = gen_rtx_AND (vmode, scratch, op1);
9835 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
9837 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
9839 dest = simplify_gen_subreg (vmode, op0, mode, 0);
9840 x = gen_rtx_AND (vmode, dest, nmask);
9842 else /* alternative 3,4 */
9844 gcc_assert (REGNO (nmask) == REGNO (dest));
9845 dest = nmask;
9846 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
9847 x = gen_rtx_AND (vmode, dest, op0);
9849 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9852 x = gen_rtx_IOR (vmode, dest, scratch);
9853 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
9856 /* Return TRUE or FALSE depending on whether the first SET in INSN
9857 has source and destination with matching CC modes, and that the
9858 CC mode is at least as constrained as REQ_MODE. */
9860 int
9861 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
9863 rtx set;
9864 enum machine_mode set_mode;
9866 set = PATTERN (insn);
9867 if (GET_CODE (set) == PARALLEL)
9868 set = XVECEXP (set, 0, 0);
9869 gcc_assert (GET_CODE (set) == SET);
9870 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
9872 set_mode = GET_MODE (SET_DEST (set));
9873 switch (set_mode)
9875 case CCNOmode:
9876 if (req_mode != CCNOmode
9877 && (req_mode != CCmode
9878 || XEXP (SET_SRC (set), 1) != const0_rtx))
9879 return 0;
9880 break;
9881 case CCmode:
9882 if (req_mode == CCGCmode)
9883 return 0;
9884 /* FALLTHRU */
9885 case CCGCmode:
9886 if (req_mode == CCGOCmode || req_mode == CCNOmode)
9887 return 0;
9888 /* FALLTHRU */
9889 case CCGOCmode:
9890 if (req_mode == CCZmode)
9891 return 0;
9892 /* FALLTHRU */
9893 case CCZmode:
9894 break;
9896 default:
9897 gcc_unreachable ();
9900 return (GET_MODE (SET_SRC (set)) == set_mode);
9903 /* Generate insn patterns to do an integer compare of OPERANDS. */
9905 static rtx
9906 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
9908 enum machine_mode cmpmode;
9909 rtx tmp, flags;
9911 cmpmode = SELECT_CC_MODE (code, op0, op1);
9912 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
9914 /* This is very simple, but making the interface the same as in the
9915 FP case makes the rest of the code easier. */
9916 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
9917 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
9919 /* Return the test that should be put into the flags user, i.e.
9920 the bcc, scc, or cmov instruction. */
9921 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
9924 /* Figure out whether to use ordered or unordered fp comparisons.
9925 Return the appropriate mode to use. */
9927 enum machine_mode
9928 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
9930 /* ??? In order to make all comparisons reversible, we do all comparisons
9931 non-trapping when compiling for IEEE. Once gcc is able to distinguish
9932 all forms of trapping and nontrapping comparisons, we can make inequality
9933 comparisons trapping again, since it results in better code when using
9934 FCOM based compares. */
9935 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
9938 enum machine_mode
9939 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
9941 if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
9942 return ix86_fp_compare_mode (code);
9943 switch (code)
9945 /* Only zero flag is needed. */
9946 case EQ: /* ZF=0 */
9947 case NE: /* ZF!=0 */
9948 return CCZmode;
9949 /* Codes needing carry flag. */
9950 case GEU: /* CF=0 */
9951 case GTU: /* CF=0 & ZF=0 */
9952 case LTU: /* CF=1 */
9953 case LEU: /* CF=1 | ZF=1 */
9954 return CCmode;
9955 /* Codes possibly doable only with sign flag when
9956 comparing against zero. */
9957 case GE: /* SF=OF or SF=0 */
9958 case LT: /* SF<>OF or SF=1 */
9959 if (op1 == const0_rtx)
9960 return CCGOCmode;
9961 else
9962 /* For other cases Carry flag is not required. */
9963 return CCGCmode;
9964 /* Codes doable only with the sign flag when comparing
9965 against zero, but we lack a jump instruction for them,
9966 so we need to use relational tests against the overflow
9967 flag, which thus needs to be zero. */
9968 case GT: /* ZF=0 & SF=OF */
9969 case LE: /* ZF=1 | SF<>OF */
9970 if (op1 == const0_rtx)
9971 return CCNOmode;
9972 else
9973 return CCGCmode;
9974 /* The strcmp pattern does a (use flags), and combine may ask us for the
9975 proper mode. */
9976 case USE:
9977 return CCmode;
9978 default:
9979 gcc_unreachable ();
9983 /* Return the fixed registers used for condition codes. */
9985 static bool
9986 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
9988 *p1 = FLAGS_REG;
9989 *p2 = FPSR_REG;
9990 return true;
9993 /* If two condition code modes are compatible, return a condition code
9994 mode which is compatible with both. Otherwise, return
9995 VOIDmode. */
9997 static enum machine_mode
9998 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
10000 if (m1 == m2)
10001 return m1;
10003 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
10004 return VOIDmode;
10006 if ((m1 == CCGCmode && m2 == CCGOCmode)
10007 || (m1 == CCGOCmode && m2 == CCGCmode))
10008 return CCGCmode;
10010 switch (m1)
10012 default:
10013 gcc_unreachable ();
10015 case CCmode:
10016 case CCGCmode:
10017 case CCGOCmode:
10018 case CCNOmode:
10019 case CCZmode:
10020 switch (m2)
10022 default:
10023 return VOIDmode;
10025 case CCmode:
10026 case CCGCmode:
10027 case CCGOCmode:
10028 case CCNOmode:
10029 case CCZmode:
10030 return CCmode;
10033 case CCFPmode:
10034 case CCFPUmode:
10035 /* These are only compatible with themselves, which we already
10036 checked above. */
10037 return VOIDmode;
10041 /* Return true if we should use an FCOMI instruction for this fp comparison. */
10043 int
10044 ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
10046 enum rtx_code swapped_code = swap_condition (code);
10047 return ((ix86_fp_comparison_cost (code) == ix86_fp_comparison_fcomi_cost (code))
10048 || (ix86_fp_comparison_cost (swapped_code)
10049 == ix86_fp_comparison_fcomi_cost (swapped_code)));
10052 /* Swap, force into registers, or otherwise massage the two operands
10053 to a fp comparison. The operands are updated in place; the new
10054 comparison code is returned. */
10056 static enum rtx_code
10057 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
10059 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
10060 rtx op0 = *pop0, op1 = *pop1;
10061 enum machine_mode op_mode = GET_MODE (op0);
10062 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
10064 /* All of the unordered compare instructions only work on registers.
10065 The same is true of the fcomi compare instructions. The XFmode
10066 compare instructions require registers except when comparing
10067 against zero or when converting operand 1 from fixed point to
10068 floating point. */
10070 if (!is_sse
10071 && (fpcmp_mode == CCFPUmode
10072 || (op_mode == XFmode
10073 && ! (standard_80387_constant_p (op0) == 1
10074 || standard_80387_constant_p (op1) == 1)
10075 && GET_CODE (op1) != FLOAT)
10076 || ix86_use_fcomi_compare (code)))
10078 op0 = force_reg (op_mode, op0);
10079 op1 = force_reg (op_mode, op1);
10081 else
10083 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
10084 things around if they appear profitable, otherwise force op0
10085 into a register. */
10087 if (standard_80387_constant_p (op0) == 0
10088 || (GET_CODE (op0) == MEM
10089 && ! (standard_80387_constant_p (op1) == 0
10090 || GET_CODE (op1) == MEM)))
10092 rtx tmp;
10093 tmp = op0, op0 = op1, op1 = tmp;
10094 code = swap_condition (code);
10097 if (GET_CODE (op0) != REG)
10098 op0 = force_reg (op_mode, op0);
10100 if (CONSTANT_P (op1))
10102 int tmp = standard_80387_constant_p (op1);
10103 if (tmp == 0)
10104 op1 = validize_mem (force_const_mem (op_mode, op1));
10105 else if (tmp == 1)
10107 if (TARGET_CMOVE)
10108 op1 = force_reg (op_mode, op1);
10110 else
10111 op1 = force_reg (op_mode, op1);
10115 /* Try to rearrange the comparison to make it cheaper. */
10116 if (ix86_fp_comparison_cost (code)
10117 > ix86_fp_comparison_cost (swap_condition (code))
10118 && (GET_CODE (op1) == REG || !no_new_pseudos))
10120 rtx tmp;
10121 tmp = op0, op0 = op1, op1 = tmp;
10122 code = swap_condition (code);
10123 if (GET_CODE (op0) != REG)
10124 op0 = force_reg (op_mode, op0);
10127 *pop0 = op0;
10128 *pop1 = op1;
10129 return code;
10132 /* Convert comparison codes we use to represent FP comparison to integer
10133 code that will result in proper branch. Return UNKNOWN if no such code
10134 is available. */
10136 enum rtx_code
10137 ix86_fp_compare_code_to_integer (enum rtx_code code)
10139 switch (code)
10141 case GT:
10142 return GTU;
10143 case GE:
10144 return GEU;
10145 case ORDERED:
10146 case UNORDERED:
10147 return code;
10148 break;
10149 case UNEQ:
10150 return EQ;
10151 break;
10152 case UNLT:
10153 return LTU;
10154 break;
10155 case UNLE:
10156 return LEU;
10157 break;
10158 case LTGT:
10159 return NE;
10160 break;
10161 default:
10162 return UNKNOWN;
10166 /* Split comparison code CODE into comparisons we can do using branch
10167 instructions. BYPASS_CODE is the comparison code for the branch that will
10168 branch around FIRST_CODE and SECOND_CODE. If one of the branches
10169 is not required, its value is set to UNKNOWN.
10170 We never require more than two branches. */
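/* For example, under IEEE math EQ is emitted as UNEQ guarded by an
   UNORDERED bypass branch around it, while NE is emitted as LTGT plus a
   second UNORDERED branch to the same target; codes such as GT, GE, UNLT
   and UNLE need only a single branch.  */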
10172 void
10173 ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
10174 enum rtx_code *first_code,
10175 enum rtx_code *second_code)
10177 *first_code = code;
10178 *bypass_code = UNKNOWN;
10179 *second_code = UNKNOWN;
10181 /* The fcomi comparison sets flags as follows:
10183 cmp ZF PF CF
10184 > 0 0 0
10185 < 0 0 1
10186 = 1 0 0
10187 un 1 1 1 */
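/* Reading the table: once these flags are in EFLAGS, the ordered
   comparisons map directly onto the unsigned integer conditions, e.g.
   GT becomes "above" (CF=0 & ZF=0) and GE becomes "above or equal"
   (CF=0), which is why most cases in the switch below pass the code
   through unchanged.  */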
10189 switch (code)
10191 case GT: /* GTU - CF=0 & ZF=0 */
10192 case GE: /* GEU - CF=0 */
10193 case ORDERED: /* PF=0 */
10194 case UNORDERED: /* PF=1 */
10195 case UNEQ: /* EQ - ZF=1 */
10196 case UNLT: /* LTU - CF=1 */
10197 case UNLE: /* LEU - CF=1 | ZF=1 */
10198 case LTGT: /* EQ - ZF=0 */
10199 break;
10200 case LT: /* LTU - CF=1 - fails on unordered */
10201 *first_code = UNLT;
10202 *bypass_code = UNORDERED;
10203 break;
10204 case LE: /* LEU - CF=1 | ZF=1 - fails on unordered */
10205 *first_code = UNLE;
10206 *bypass_code = UNORDERED;
10207 break;
10208 case EQ: /* EQ - ZF=1 - fails on unordered */
10209 *first_code = UNEQ;
10210 *bypass_code = UNORDERED;
10211 break;
10212 case NE: /* NE - ZF=0 - fails on unordered */
10213 *first_code = LTGT;
10214 *second_code = UNORDERED;
10215 break;
10216 case UNGE: /* GEU - CF=0 - fails on unordered */
10217 *first_code = GE;
10218 *second_code = UNORDERED;
10219 break;
10220 case UNGT: /* GTU - CF=0 & ZF=0 - fails on unordered */
10221 *first_code = GT;
10222 *second_code = UNORDERED;
10223 break;
10224 default:
10225 gcc_unreachable ();
10227 if (!TARGET_IEEE_FP)
10229 *second_code = UNKNOWN;
10230 *bypass_code = UNKNOWN;
10234 /* Return the cost of a comparison done with fcom + arithmetic operations on AX.
10235 All of the following functions use the number of instructions as the cost metric.
10236 In the future this should be tweaked to compute bytes for optimize_size and
10237 take into account the performance of various instructions on various CPUs. */
10238 static int
10239 ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
10241 if (!TARGET_IEEE_FP)
10242 return 4;
10243 /* The cost of code output by ix86_expand_fp_compare. */
10244 switch (code)
10246 case UNLE:
10247 case UNLT:
10248 case LTGT:
10249 case GT:
10250 case GE:
10251 case UNORDERED:
10252 case ORDERED:
10253 case UNEQ:
10254 return 4;
10255 break;
10256 case LT:
10257 case NE:
10258 case EQ:
10259 case UNGE:
10260 return 5;
10261 break;
10262 case LE:
10263 case UNGT:
10264 return 6;
10265 break;
10266 default:
10267 gcc_unreachable ();
10271 /* Return cost of comparison done using fcomi operation.
10272 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10273 static int
10274 ix86_fp_comparison_fcomi_cost (enum rtx_code code)
10276 enum rtx_code bypass_code, first_code, second_code;
10277 /* Return an arbitrarily high cost when the instruction is not supported - this
10278 prevents gcc from using it. */
10279 if (!TARGET_CMOVE)
10280 return 1024;
10281 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10282 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
10285 /* Return cost of comparison done using sahf operation.
10286 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10287 static int
10288 ix86_fp_comparison_sahf_cost (enum rtx_code code)
10290 enum rtx_code bypass_code, first_code, second_code;
10291 /* Return an arbitrarily high cost when the instruction is not preferred - this
10292 prevents gcc from using it. */
10293 if (!TARGET_USE_SAHF && !optimize_size)
10294 return 1024;
10295 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10296 return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
10299 /* Compute cost of the comparison done using any method.
10300 See ix86_fp_comparison_arithmetics_cost for the metrics. */
10301 static int
10302 ix86_fp_comparison_cost (enum rtx_code code)
10304 int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
10305 int min;
10307 fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
10308 sahf_cost = ix86_fp_comparison_sahf_cost (code);
10310 min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
10311 if (min > sahf_cost)
10312 min = sahf_cost;
10313 if (min > fcomi_cost)
10314 min = fcomi_cost;
10315 return min;
10318 /* Generate insn patterns to do a floating point compare of OPERANDS. */
10320 static rtx
10321 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
10322 rtx *second_test, rtx *bypass_test)
10324 enum machine_mode fpcmp_mode, intcmp_mode;
10325 rtx tmp, tmp2;
10326 int cost = ix86_fp_comparison_cost (code);
10327 enum rtx_code bypass_code, first_code, second_code;
10329 fpcmp_mode = ix86_fp_compare_mode (code);
10330 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
10332 if (second_test)
10333 *second_test = NULL_RTX;
10334 if (bypass_test)
10335 *bypass_test = NULL_RTX;
10337 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10339 /* Do fcomi/sahf based test when profitable. */
10340 if ((bypass_code == UNKNOWN || bypass_test)
10341 && (second_code == UNKNOWN || second_test)
10342 && ix86_fp_comparison_arithmetics_cost (code) > cost)
10344 if (TARGET_CMOVE)
10346 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10347 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
10348 tmp);
10349 emit_insn (tmp);
10351 else
10353 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10354 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10355 if (!scratch)
10356 scratch = gen_reg_rtx (HImode);
10357 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10358 emit_insn (gen_x86_sahf_1 (scratch));
10361 /* The FP codes work out to act like unsigned. */
10362 intcmp_mode = fpcmp_mode;
10363 code = first_code;
10364 if (bypass_code != UNKNOWN)
10365 *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
10366 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10367 const0_rtx);
10368 if (second_code != UNKNOWN)
10369 *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
10370 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10371 const0_rtx);
10373 else
10375 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
10376 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
10377 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
10378 if (!scratch)
10379 scratch = gen_reg_rtx (HImode);
10380 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
10382 /* In the unordered case, we have to check C2 for NaNs, which
10383 doesn't happen to work out to anything nice combination-wise.
10384 So do some bit twiddling on the value we've got in AH to come
10385 up with an appropriate set of condition codes. */
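/* After the fnstsw/move above, AH holds the high byte of the FPU status
   word, so C0 appears as 0x01, C2 as 0x04 and C3 as 0x40; masks such as
   0x45 below therefore test C3|C2|C0, and 0x44 tests C3|C2.  */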
10387 intcmp_mode = CCNOmode;
10388 switch (code)
10390 case GT:
10391 case UNGT:
10392 if (code == GT || !TARGET_IEEE_FP)
10394 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10395 code = EQ;
10397 else
10399 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10400 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10401 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
10402 intcmp_mode = CCmode;
10403 code = GEU;
10405 break;
10406 case LT:
10407 case UNLT:
10408 if (code == LT && TARGET_IEEE_FP)
10410 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10411 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
10412 intcmp_mode = CCmode;
10413 code = EQ;
10415 else
10417 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
10418 code = NE;
10420 break;
10421 case GE:
10422 case UNGE:
10423 if (code == GE || !TARGET_IEEE_FP)
10425 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
10426 code = EQ;
10428 else
10430 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10431 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10432 GEN_INT (0x01)));
10433 code = NE;
10435 break;
10436 case LE:
10437 case UNLE:
10438 if (code == LE && TARGET_IEEE_FP)
10440 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10441 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
10442 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10443 intcmp_mode = CCmode;
10444 code = LTU;
10446 else
10448 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
10449 code = NE;
10451 break;
10452 case EQ:
10453 case UNEQ:
10454 if (code == EQ && TARGET_IEEE_FP)
10456 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10457 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
10458 intcmp_mode = CCmode;
10459 code = EQ;
10461 else
10463 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10464 code = NE;
10465 break;
10467 break;
10468 case NE:
10469 case LTGT:
10470 if (code == NE && TARGET_IEEE_FP)
10472 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
10473 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
10474 GEN_INT (0x40)));
10475 code = NE;
10477 else
10479 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
10480 code = EQ;
10482 break;
10484 case UNORDERED:
10485 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10486 code = NE;
10487 break;
10488 case ORDERED:
10489 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
10490 code = EQ;
10491 break;
10493 default:
10494 gcc_unreachable ();
10498 /* Return the test that should be put into the flags user, i.e.
10499 the bcc, scc, or cmov instruction. */
10500 return gen_rtx_fmt_ee (code, VOIDmode,
10501 gen_rtx_REG (intcmp_mode, FLAGS_REG),
10502 const0_rtx);
10505 rtx
10506 ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
10508 rtx op0, op1, ret;
10509 op0 = ix86_compare_op0;
10510 op1 = ix86_compare_op1;
10512 if (second_test)
10513 *second_test = NULL_RTX;
10514 if (bypass_test)
10515 *bypass_test = NULL_RTX;
10517 if (ix86_compare_emitted)
10519 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
10520 ix86_compare_emitted = NULL_RTX;
10522 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
10523 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10524 second_test, bypass_test);
10525 else
10526 ret = ix86_expand_int_compare (code, op0, op1);
10528 return ret;
10531 /* Return true if the CODE will result in nontrivial jump sequence. */
10532 bool
10533 ix86_fp_jump_nontrivial_p (enum rtx_code code)
10535 enum rtx_code bypass_code, first_code, second_code;
10536 if (!TARGET_CMOVE)
10537 return true;
10538 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10539 return bypass_code != UNKNOWN || second_code != UNKNOWN;
10542 void
10543 ix86_expand_branch (enum rtx_code code, rtx label)
10545 rtx tmp;
10547 /* If we have emitted a compare insn, go straight to simple.
10548 ix86_expand_compare won't emit anything if ix86_compare_emitted
10549 is non NULL. */
10550 if (ix86_compare_emitted)
10551 goto simple;
10553 switch (GET_MODE (ix86_compare_op0))
10555 case QImode:
10556 case HImode:
10557 case SImode:
10558 simple:
10559 tmp = ix86_expand_compare (code, NULL, NULL);
10560 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10561 gen_rtx_LABEL_REF (VOIDmode, label),
10562 pc_rtx);
10563 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
10564 return;
10566 case SFmode:
10567 case DFmode:
10568 case XFmode:
10570 rtvec vec;
10571 int use_fcomi;
10572 enum rtx_code bypass_code, first_code, second_code;
10574 code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
10575 &ix86_compare_op1);
10577 ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
10579 /* Check whether we will use the natural sequence with one jump. If
10580 so, we can expand the jump early. Otherwise delay expansion by
10581 creating a compound insn so as not to confuse the optimizers. */
10582 if (bypass_code == UNKNOWN && second_code == UNKNOWN
10583 && TARGET_CMOVE)
10585 ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
10586 gen_rtx_LABEL_REF (VOIDmode, label),
10587 pc_rtx, NULL_RTX, NULL_RTX);
10589 else
10591 tmp = gen_rtx_fmt_ee (code, VOIDmode,
10592 ix86_compare_op0, ix86_compare_op1);
10593 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
10594 gen_rtx_LABEL_REF (VOIDmode, label),
10595 pc_rtx);
10596 tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);
10598 use_fcomi = ix86_use_fcomi_compare (code);
10599 vec = rtvec_alloc (3 + !use_fcomi);
10600 RTVEC_ELT (vec, 0) = tmp;
10601 RTVEC_ELT (vec, 1)
10602 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 18));
10603 RTVEC_ELT (vec, 2)
10604 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, 17));
10605 if (! use_fcomi)
10606 RTVEC_ELT (vec, 3)
10607 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));
10609 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
10611 return;
10614 case DImode:
10615 if (TARGET_64BIT)
10616 goto simple;
10617 case TImode:
10618 /* Expand DImode branch into multiple compare+branch. */
10620 rtx lo[2], hi[2], label2;
10621 enum rtx_code code1, code2, code3;
10622 enum machine_mode submode;
10624 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
10626 tmp = ix86_compare_op0;
10627 ix86_compare_op0 = ix86_compare_op1;
10628 ix86_compare_op1 = tmp;
10629 code = swap_condition (code);
10631 if (GET_MODE (ix86_compare_op0) == DImode)
10633 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
10634 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
10635 submode = SImode;
10637 else
10639 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
10640 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
10641 submode = DImode;
10644 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
10645 avoid two branches. This costs one extra insn, so disable when
10646 optimizing for size. */
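/* That is, the two double-word values are equal exactly when both the
   high and the low halves XOR to zero, so OR-ing the two XOR results
   gives a single value that can be compared against zero.  */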
10648 if ((code == EQ || code == NE)
10649 && (!optimize_size
10650 || hi[1] == const0_rtx || lo[1] == const0_rtx))
10652 rtx xor0, xor1;
10654 xor1 = hi[0];
10655 if (hi[1] != const0_rtx)
10656 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
10657 NULL_RTX, 0, OPTAB_WIDEN);
10659 xor0 = lo[0];
10660 if (lo[1] != const0_rtx)
10661 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
10662 NULL_RTX, 0, OPTAB_WIDEN);
10664 tmp = expand_binop (submode, ior_optab, xor1, xor0,
10665 NULL_RTX, 0, OPTAB_WIDEN);
10667 ix86_compare_op0 = tmp;
10668 ix86_compare_op1 = const0_rtx;
10669 ix86_expand_branch (code, label);
10670 return;
10673 /* Otherwise, if we are doing less-than or greater-or-equal-than,
10674 op1 is a constant and the low word is zero, then we can just
10675 examine the high word. */
10677 if (GET_CODE (hi[1]) == CONST_INT && lo[1] == const0_rtx)
10678 switch (code)
10680 case LT: case LTU: case GE: case GEU:
10681 ix86_compare_op0 = hi[0];
10682 ix86_compare_op1 = hi[1];
10683 ix86_expand_branch (code, label);
10684 return;
10685 default:
10686 break;
10689 /* Otherwise, we need two or three jumps. */
10691 label2 = gen_label_rtx ();
10693 code1 = code;
10694 code2 = swap_condition (code);
10695 code3 = unsigned_condition (code);
10697 switch (code)
10699 case LT: case GT: case LTU: case GTU:
10700 break;
10702 case LE: code1 = LT; code2 = GT; break;
10703 case GE: code1 = GT; code2 = LT; break;
10704 case LEU: code1 = LTU; code2 = GTU; break;
10705 case GEU: code1 = GTU; code2 = LTU; break;
10707 case EQ: code1 = UNKNOWN; code2 = NE; break;
10708 case NE: code2 = UNKNOWN; break;
10710 default:
10711 gcc_unreachable ();
10715 * a < b =>
10716 * if (hi(a) < hi(b)) goto true;
10717 * if (hi(a) > hi(b)) goto false;
10718 * if (lo(a) < lo(b)) goto true;
10719 * false:
10722 ix86_compare_op0 = hi[0];
10723 ix86_compare_op1 = hi[1];
10725 if (code1 != UNKNOWN)
10726 ix86_expand_branch (code1, label);
10727 if (code2 != UNKNOWN)
10728 ix86_expand_branch (code2, label2);
10730 ix86_compare_op0 = lo[0];
10731 ix86_compare_op1 = lo[1];
10732 ix86_expand_branch (code3, label);
10734 if (code2 != UNKNOWN)
10735 emit_label (label2);
10736 return;
10739 default:
10740 gcc_unreachable ();
10744 /* Split branch based on floating point condition. */
10745 void
10746 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
10747 rtx target1, rtx target2, rtx tmp, rtx pushed)
10749 rtx second, bypass;
10750 rtx label = NULL_RTX;
10751 rtx condition;
10752 int bypass_probability = -1, second_probability = -1, probability = -1;
10753 rtx i;
10755 if (target2 != pc_rtx)
10757 rtx tmp = target2;
10758 code = reverse_condition_maybe_unordered (code);
10759 target2 = target1;
10760 target1 = tmp;
10763 condition = ix86_expand_fp_compare (code, op1, op2,
10764 tmp, &second, &bypass);
10766 /* Remove pushed operand from stack. */
10767 if (pushed)
10768 ix86_free_from_memory (GET_MODE (pushed));
10770 if (split_branch_probability >= 0)
10772 /* Distribute the probabilities across the jumps.
10773 Assume that BYPASS and SECOND always test
10774 for UNORDERED. */
10775 probability = split_branch_probability;
10777 /* A value of 1 is low enough that there is no need for the probability
10778 to be updated. Later we may run some experiments and see
10779 if unordered values are more frequent in practice. */
10780 if (bypass)
10781 bypass_probability = 1;
10782 if (second)
10783 second_probability = 1;
10785 if (bypass != NULL_RTX)
10787 label = gen_label_rtx ();
10788 i = emit_jump_insn (gen_rtx_SET
10789 (VOIDmode, pc_rtx,
10790 gen_rtx_IF_THEN_ELSE (VOIDmode,
10791 bypass,
10792 gen_rtx_LABEL_REF (VOIDmode,
10793 label),
10794 pc_rtx)));
10795 if (bypass_probability >= 0)
10796 REG_NOTES (i)
10797 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10798 GEN_INT (bypass_probability),
10799 REG_NOTES (i));
10801 i = emit_jump_insn (gen_rtx_SET
10802 (VOIDmode, pc_rtx,
10803 gen_rtx_IF_THEN_ELSE (VOIDmode,
10804 condition, target1, target2)));
10805 if (probability >= 0)
10806 REG_NOTES (i)
10807 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10808 GEN_INT (probability),
10809 REG_NOTES (i));
10810 if (second != NULL_RTX)
10812 i = emit_jump_insn (gen_rtx_SET
10813 (VOIDmode, pc_rtx,
10814 gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
10815 target2)));
10816 if (second_probability >= 0)
10817 REG_NOTES (i)
10818 = gen_rtx_EXPR_LIST (REG_BR_PROB,
10819 GEN_INT (second_probability),
10820 REG_NOTES (i));
10822 if (label != NULL_RTX)
10823 emit_label (label);
10826 int
10827 ix86_expand_setcc (enum rtx_code code, rtx dest)
10829 rtx ret, tmp, tmpreg, equiv;
10830 rtx second_test, bypass_test;
10832 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
10833 return 0; /* FAIL */
10835 gcc_assert (GET_MODE (dest) == QImode);
10837 ret = ix86_expand_compare (code, &second_test, &bypass_test);
10838 PUT_MODE (ret, QImode);
10840 tmp = dest;
10841 tmpreg = dest;
10843 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
10844 if (bypass_test || second_test)
10846 rtx test = second_test;
10847 int bypass = 0;
10848 rtx tmp2 = gen_reg_rtx (QImode);
10849 if (bypass_test)
10851 gcc_assert (!second_test);
10852 test = bypass_test;
10853 bypass = 1;
10854 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
10856 PUT_MODE (test, QImode);
10857 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
10859 if (bypass)
10860 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
10861 else
10862 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
10865 /* Attach a REG_EQUAL note describing the comparison result. */
10866 if (ix86_compare_op0 && ix86_compare_op1)
10868 equiv = simplify_gen_relational (code, QImode,
10869 GET_MODE (ix86_compare_op0),
10870 ix86_compare_op0, ix86_compare_op1);
10871 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
10874 return 1; /* DONE */
10877 /* Expand a comparison setting or clearing the carry flag. Return true when
10878 successful, and set *POP to the comparison operation. */
10879 static bool
10880 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
10882 enum machine_mode mode =
10883 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
10885 /* Do not handle double-word compares, which go through a special path.
10886 Also we can't deal with FP compares yet. This is possible to add. */
10887 if (mode == (TARGET_64BIT ? TImode : DImode))
10888 return false;
10889 if (FLOAT_MODE_P (mode))
10891 rtx second_test = NULL, bypass_test = NULL;
10892 rtx compare_op, compare_seq;
10894 /* Shortcut: the following common codes never translate into carry flag compares. */
10895 if (code == EQ || code == NE || code == UNEQ || code == LTGT
10896 || code == ORDERED || code == UNORDERED)
10897 return false;
10899 /* These comparisons require the zero flag; swap the operands so that they do not. */
10900 if ((code == GT || code == UNLE || code == LE || code == UNGT)
10901 && !TARGET_IEEE_FP)
10903 rtx tmp = op0;
10904 op0 = op1;
10905 op1 = tmp;
10906 code = swap_condition (code);
10909 /* Try to expand the comparison and verify that we end up with a carry-flag-
10910 based comparison. This fails to be true only when we decide to expand the
10911 comparison using arithmetic, which is not a common scenario. */
10912 start_sequence ();
10913 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
10914 &second_test, &bypass_test);
10915 compare_seq = get_insns ();
10916 end_sequence ();
10918 if (second_test || bypass_test)
10919 return false;
10920 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
10921 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
10922 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
10923 else
10924 code = GET_CODE (compare_op);
10925 if (code != LTU && code != GEU)
10926 return false;
10927 emit_insn (compare_seq);
10928 *pop = compare_op;
10929 return true;
10931 if (!INTEGRAL_MODE_P (mode))
10932 return false;
10933 switch (code)
10935 case LTU:
10936 case GEU:
10937 break;
10939 /* Convert a==0 into (unsigned)a<1. */
10940 case EQ:
10941 case NE:
10942 if (op1 != const0_rtx)
10943 return false;
10944 op1 = const1_rtx;
10945 code = (code == EQ ? LTU : GEU);
10946 break;
10948 /* Convert a>b into b<a or a>=b-1. */
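/* With a constant second operand, a >u b becomes a >=u b+1, which is a
   carry-flag test (GEU); with a variable second operand we instead swap
   the operands and use LTU.  */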
10949 case GTU:
10950 case LEU:
10951 if (GET_CODE (op1) == CONST_INT)
10953 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
10954 /* Bail out on overflow. We could still swap the operands, but that
10955 would force loading of the constant into a register. */
10956 if (op1 == const0_rtx
10957 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
10958 return false;
10959 code = (code == GTU ? GEU : LTU);
10961 else
10963 rtx tmp = op1;
10964 op1 = op0;
10965 op0 = tmp;
10966 code = (code == GTU ? LTU : GEU);
10968 break;
10970 /* Convert a>=0 into (unsigned)a<0x80000000. */
10971 case LT:
10972 case GE:
10973 if (mode == DImode || op1 != const0_rtx)
10974 return false;
10975 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10976 code = (code == LT ? GEU : LTU);
10977 break;
10978 case LE:
10979 case GT:
10980 if (mode == DImode || op1 != constm1_rtx)
10981 return false;
10982 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
10983 code = (code == LE ? GEU : LTU);
10984 break;
10986 default:
10987 return false;
10989 /* Swapping the operands may cause the constant to appear as the first operand. */
10990 if (!nonimmediate_operand (op0, VOIDmode))
10992 if (no_new_pseudos)
10993 return false;
10994 op0 = force_reg (mode, op0);
10996 ix86_compare_op0 = op0;
10997 ix86_compare_op1 = op1;
10998 *pop = ix86_expand_compare (code, NULL, NULL);
10999 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
11000 return true;
11003 int
11004 ix86_expand_int_movcc (rtx operands[])
11006 enum rtx_code code = GET_CODE (operands[1]), compare_code;
11007 rtx compare_seq, compare_op;
11008 rtx second_test, bypass_test;
11009 enum machine_mode mode = GET_MODE (operands[0]);
11010 bool sign_bit_compare_p = false;
11012 start_sequence ();
11013 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11014 compare_seq = get_insns ();
11015 end_sequence ();
11017 compare_code = GET_CODE (compare_op);
11019 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
11020 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
11021 sign_bit_compare_p = true;
11023 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
11024 HImode insns, we'd be swallowed in word prefix ops. */
11026 if ((mode != HImode || TARGET_FAST_PREFIX)
11027 && (mode != (TARGET_64BIT ? TImode : DImode))
11028 && GET_CODE (operands[2]) == CONST_INT
11029 && GET_CODE (operands[3]) == CONST_INT)
11031 rtx out = operands[0];
11032 HOST_WIDE_INT ct = INTVAL (operands[2]);
11033 HOST_WIDE_INT cf = INTVAL (operands[3]);
11034 HOST_WIDE_INT diff;
11036 diff = ct - cf;
11037 /* Sign bit compares are better done using shifts than by using
11038 sbb. */
11039 if (sign_bit_compare_p
11040 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11041 ix86_compare_op1, &compare_op))
11043 /* Detect overlap between destination and compare sources. */
11044 rtx tmp = out;
11046 if (!sign_bit_compare_p)
11048 bool fpcmp = false;
11050 compare_code = GET_CODE (compare_op);
11052 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11053 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11055 fpcmp = true;
11056 compare_code = ix86_fp_compare_code_to_integer (compare_code);
11059 /* To simplify rest of code, restrict to the GEU case. */
11060 if (compare_code == LTU)
11062 HOST_WIDE_INT tmp = ct;
11063 ct = cf;
11064 cf = tmp;
11065 compare_code = reverse_condition (compare_code);
11066 code = reverse_condition (code);
11068 else
11070 if (fpcmp)
11071 PUT_CODE (compare_op,
11072 reverse_condition_maybe_unordered
11073 (GET_CODE (compare_op)));
11074 else
11075 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11077 diff = ct - cf;
11079 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
11080 || reg_overlap_mentioned_p (out, ix86_compare_op1))
11081 tmp = gen_reg_rtx (mode);
11083 if (mode == DImode)
11084 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
11085 else
11086 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
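/* The x86_mov*cc_0_m1 patterns emit "sbb reg,reg", computing
   reg - reg - CF, so the register becomes all ones when the carry flag
   is set and zero otherwise.  */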
11088 else
11090 if (code == GT || code == GE)
11091 code = reverse_condition (code);
11092 else
11094 HOST_WIDE_INT tmp = ct;
11095 ct = cf;
11096 cf = tmp;
11097 diff = ct - cf;
11099 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
11100 ix86_compare_op1, VOIDmode, 0, -1);
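/* Passing -1 as the last argument asks emit_store_flag for the 0/-1 form
   of the result, which for these sign-bit comparisons is typically an
   arithmetic right shift of the operand by 31 (or 63), possibly preceded
   by a not.  */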
11103 if (diff == 1)
11106 * cmpl op0,op1
11107 * sbbl dest,dest
11108 * [addl dest, ct]
11110 * Size 5 - 8.
11112 if (ct)
11113 tmp = expand_simple_binop (mode, PLUS,
11114 tmp, GEN_INT (ct),
11115 copy_rtx (tmp), 1, OPTAB_DIRECT);
11117 else if (cf == -1)
11120 * cmpl op0,op1
11121 * sbbl dest,dest
11122 * orl $ct, dest
11124 * Size 8.
11126 tmp = expand_simple_binop (mode, IOR,
11127 tmp, GEN_INT (ct),
11128 copy_rtx (tmp), 1, OPTAB_DIRECT);
11130 else if (diff == -1 && ct)
11133 * cmpl op0,op1
11134 * sbbl dest,dest
11135 * notl dest
11136 * [addl dest, cf]
11138 * Size 8 - 11.
11140 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11141 if (cf)
11142 tmp = expand_simple_binop (mode, PLUS,
11143 copy_rtx (tmp), GEN_INT (cf),
11144 copy_rtx (tmp), 1, OPTAB_DIRECT);
11146 else
11149 * cmpl op0,op1
11150 * sbbl dest,dest
11151 * [notl dest]
11152 * andl cf - ct, dest
11153 * [addl dest, ct]
11155 * Size 8 - 11.
11158 if (cf == 0)
11160 cf = ct;
11161 ct = 0;
11162 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
11165 tmp = expand_simple_binop (mode, AND,
11166 copy_rtx (tmp),
11167 gen_int_mode (cf - ct, mode),
11168 copy_rtx (tmp), 1, OPTAB_DIRECT);
11169 if (ct)
11170 tmp = expand_simple_binop (mode, PLUS,
11171 copy_rtx (tmp), GEN_INT (ct),
11172 copy_rtx (tmp), 1, OPTAB_DIRECT);
11175 if (!rtx_equal_p (tmp, out))
11176 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
11178 return 1; /* DONE */
11181 if (diff < 0)
11183 HOST_WIDE_INT tmp;
11184 tmp = ct, ct = cf, cf = tmp;
11185 diff = -diff;
11186 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11188 /* We may be reversing an unordered compare to a normal compare, which
11189 is not valid in general (we may convert a non-trapping condition
11190 to a trapping one); however, on i386 we currently emit all
11191 comparisons unordered. */
11192 compare_code = reverse_condition_maybe_unordered (compare_code);
11193 code = reverse_condition_maybe_unordered (code);
11195 else
11197 compare_code = reverse_condition (compare_code);
11198 code = reverse_condition (code);
11202 compare_code = UNKNOWN;
11203 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
11204 && GET_CODE (ix86_compare_op1) == CONST_INT)
11206 if (ix86_compare_op1 == const0_rtx
11207 && (code == LT || code == GE))
11208 compare_code = code;
11209 else if (ix86_compare_op1 == constm1_rtx)
11211 if (code == LE)
11212 compare_code = LT;
11213 else if (code == GT)
11214 compare_code = GE;
11218 /* Optimize dest = (op0 < 0) ? -1 : cf. */
11219 if (compare_code != UNKNOWN
11220 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
11221 && (cf == -1 || ct == -1))
11223 /* If lea code below could be used, only optimize
11224 if it results in a 2 insn sequence. */
11226 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
11227 || diff == 3 || diff == 5 || diff == 9)
11228 || (compare_code == LT && ct == -1)
11229 || (compare_code == GE && cf == -1))
11232 * notl op1 (if necessary)
11233 * sarl $31, op1
11234 * orl cf, op1
11236 if (ct != -1)
11238 cf = ct;
11239 ct = -1;
11240 code = reverse_condition (code);
11243 out = emit_store_flag (out, code, ix86_compare_op0,
11244 ix86_compare_op1, VOIDmode, 0, -1);
11246 out = expand_simple_binop (mode, IOR,
11247 out, GEN_INT (cf),
11248 out, 1, OPTAB_DIRECT);
11249 if (out != operands[0])
11250 emit_move_insn (operands[0], out);
11252 return 1; /* DONE */
11257 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
11258 || diff == 3 || diff == 5 || diff == 9)
11259 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
11260 && (mode != DImode
11261 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
11264 * xorl dest,dest
11265 * cmpl op1,op2
11266 * setcc dest
11267 * lea cf(dest*(ct-cf)),dest
11269 * Size 14.
11271 * This also catches the degenerate setcc-only case.
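/* For example, with ct = 5 and cf = 1 we have diff = 4, and the value is
   formed as 1 + setcc*4, i.e. a single "lea 0x1(,%reg,4), %dest" following
   the setcc.  */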
11274 rtx tmp;
11275 int nops;
11277 out = emit_store_flag (out, code, ix86_compare_op0,
11278 ix86_compare_op1, VOIDmode, 0, 1);
11280 nops = 0;
11281 /* On x86_64 the lea instruction operates on Pmode, so we need
11282 to get the arithmetic done in the proper mode to match. */
11283 if (diff == 1)
11284 tmp = copy_rtx (out);
11285 else
11287 rtx out1;
11288 out1 = copy_rtx (out);
11289 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
11290 nops++;
11291 if (diff & 1)
11293 tmp = gen_rtx_PLUS (mode, tmp, out1);
11294 nops++;
11297 if (cf != 0)
11299 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
11300 nops++;
11302 if (!rtx_equal_p (tmp, out))
11304 if (nops == 1)
11305 out = force_operand (tmp, copy_rtx (out));
11306 else
11307 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
11309 if (!rtx_equal_p (out, operands[0]))
11310 emit_move_insn (operands[0], copy_rtx (out));
11312 return 1; /* DONE */
11316 * General case: Jumpful:
11317 * xorl dest,dest cmpl op1, op2
11318 * cmpl op1, op2 movl ct, dest
11319 * setcc dest jcc 1f
11320 * decl dest movl cf, dest
11321 * andl (cf-ct),dest 1:
11322 * addl ct,dest
11324 * Size 20. Size 14.
11326 * This is reasonably steep, but branch mispredict costs are
11327 * high on modern cpus, so consider failing only if optimizing
11328 * for space.
11331 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11332 && BRANCH_COST >= 2)
11334 if (cf == 0)
11336 cf = ct;
11337 ct = 0;
11338 if (FLOAT_MODE_P (GET_MODE (ix86_compare_op0)))
11339 /* We may be reversing an unordered compare to a normal compare,
11340 which is not valid in general (we may convert a non-trapping
11341 condition to a trapping one); however, on i386 we currently
11342 emit all comparisons unordered. */
11343 code = reverse_condition_maybe_unordered (code);
11344 else
11346 code = reverse_condition (code);
11347 if (compare_code != UNKNOWN)
11348 compare_code = reverse_condition (compare_code);
11352 if (compare_code != UNKNOWN)
11354 /* notl op1 (if needed)
11355 sarl $31, op1
11356 andl (cf-ct), op1
11357 addl ct, op1
11359 For x < 0 (resp. x <= -1) there will be no notl,
11360 so if possible swap the constants to get rid of the
11361 complement.
11362 True/false will be -1/0 while code below (store flag
11363 followed by decrement) is 0/-1, so the constants need
11364 to be exchanged once more. */
11366 if (compare_code == GE || !cf)
11368 code = reverse_condition (code);
11369 compare_code = LT;
11371 else
11373 HOST_WIDE_INT tmp = cf;
11374 cf = ct;
11375 ct = tmp;
11378 out = emit_store_flag (out, code, ix86_compare_op0,
11379 ix86_compare_op1, VOIDmode, 0, -1);
11381 else
11383 out = emit_store_flag (out, code, ix86_compare_op0,
11384 ix86_compare_op1, VOIDmode, 0, 1);
11386 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
11387 copy_rtx (out), 1, OPTAB_DIRECT);
11390 out = expand_simple_binop (mode, AND, copy_rtx (out),
11391 gen_int_mode (cf - ct, mode),
11392 copy_rtx (out), 1, OPTAB_DIRECT);
11393 if (ct)
11394 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
11395 copy_rtx (out), 1, OPTAB_DIRECT);
11396 if (!rtx_equal_p (out, operands[0]))
11397 emit_move_insn (operands[0], copy_rtx (out));
11399 return 1; /* DONE */
11403 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
11405 /* Try a few things more with specific constants and a variable. */
11407 optab op;
11408 rtx var, orig_out, out, tmp;
11410 if (BRANCH_COST <= 2)
11411 return 0; /* FAIL */
11413 /* If one of the two operands is an interesting constant, load a
11414 constant with the above and mask it in with a logical operation. */
11416 if (GET_CODE (operands[2]) == CONST_INT)
11418 var = operands[3];
11419 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
11420 operands[3] = constm1_rtx, op = and_optab;
11421 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
11422 operands[3] = const0_rtx, op = ior_optab;
11423 else
11424 return 0; /* FAIL */
11426 else if (GET_CODE (operands[3]) == CONST_INT)
11428 var = operands[2];
11429 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
11430 operands[2] = constm1_rtx, op = and_optab;
11431 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
11432 operands[2] = const0_rtx, op = ior_optab;
11433 else
11434 return 0; /* FAIL */
11436 else
11437 return 0; /* FAIL */
11439 orig_out = operands[0];
11440 tmp = gen_reg_rtx (mode);
11441 operands[0] = tmp;
11443 /* Recurse to get the constant loaded. */
11444 if (ix86_expand_int_movcc (operands) == 0)
11445 return 0; /* FAIL */
11447 /* Mask in the interesting variable. */
11448 out = expand_binop (mode, op, var, tmp, orig_out, 0,
11449 OPTAB_WIDEN);
11450 if (!rtx_equal_p (out, orig_out))
11451 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
11453 return 1; /* DONE */
11457 * For comparison with above,
11459 * movl cf,dest
11460 * movl ct,tmp
11461 * cmpl op1,op2
11462 * cmovcc tmp,dest
11464 * Size 15.
11467 if (! nonimmediate_operand (operands[2], mode))
11468 operands[2] = force_reg (mode, operands[2]);
11469 if (! nonimmediate_operand (operands[3], mode))
11470 operands[3] = force_reg (mode, operands[3]);
11472 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11474 rtx tmp = gen_reg_rtx (mode);
11475 emit_move_insn (tmp, operands[3]);
11476 operands[3] = tmp;
11478 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11480 rtx tmp = gen_reg_rtx (mode);
11481 emit_move_insn (tmp, operands[2]);
11482 operands[2] = tmp;
11485 if (! register_operand (operands[2], VOIDmode)
11486 && (mode == QImode
11487 || ! register_operand (operands[3], VOIDmode)))
11488 operands[2] = force_reg (mode, operands[2]);
11490 if (mode == QImode
11491 && ! register_operand (operands[3], VOIDmode))
11492 operands[3] = force_reg (mode, operands[3]);
11494 emit_insn (compare_seq);
11495 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11496 gen_rtx_IF_THEN_ELSE (mode,
11497 compare_op, operands[2],
11498 operands[3])));
11499 if (bypass_test)
11500 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11501 gen_rtx_IF_THEN_ELSE (mode,
11502 bypass_test,
11503 copy_rtx (operands[3]),
11504 copy_rtx (operands[0]))));
11505 if (second_test)
11506 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
11507 gen_rtx_IF_THEN_ELSE (mode,
11508 second_test,
11509 copy_rtx (operands[2]),
11510 copy_rtx (operands[0]))));
11512 return 1; /* DONE */
11515 /* Swap, force into registers, or otherwise massage the two operands
11516 to an sse comparison with a mask result. Thus we differ a bit from
11517 ix86_prepare_fp_compare_args which expects to produce a flags result.
11519 The DEST operand exists to help determine whether to commute commutative
11520 operators. The POP0/POP1 operands are updated in place. The new
11521 comparison code is returned, or UNKNOWN if not implementable. */
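/* For example (illustrative): "a > b" (GT) is rewritten as "b < a" by
   swapping *POP0/*POP1 and returning LT, since LT/LE/UNGT/UNGE are
   directly encodable SSE compare predicates while GT/GE/UNLT/UNLE are
   not.  */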
11523 static enum rtx_code
11524 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
11525 rtx *pop0, rtx *pop1)
11527 rtx tmp;
11529 switch (code)
11531 case LTGT:
11532 case UNEQ:
11533 /* We have no LTGT as an operator. We could implement it with
11534 NE & ORDERED, but this requires an extra temporary. It's
11535 not clear that it's worth it. */
11536 return UNKNOWN;
11538 case LT:
11539 case LE:
11540 case UNGT:
11541 case UNGE:
11542 /* These are supported directly. */
11543 break;
11545 case EQ:
11546 case NE:
11547 case UNORDERED:
11548 case ORDERED:
11549 /* For commutative operators, try to canonicalize the destination
11550 operand to be first in the comparison - this helps reload to
11551 avoid extra moves. */
11552 if (!dest || !rtx_equal_p (dest, *pop1))
11553 break;
11554 /* FALLTHRU */
11556 case GE:
11557 case GT:
11558 case UNLE:
11559 case UNLT:
11560 /* These are not supported directly. Swap the comparison operands
11561 to transform into something that is supported. */
11562 tmp = *pop0;
11563 *pop0 = *pop1;
11564 *pop1 = tmp;
11565 code = swap_condition (code);
11566 break;
11568 default:
11569 gcc_unreachable ();
11572 return code;
11575 /* Detect conditional moves that exactly match min/max operational
11576 semantics. Note that this is IEEE safe, as long as we don't
11577 interchange the operands.
11579 Returns FALSE if this conditional move doesn't match a MIN/MAX,
11580 and TRUE if the operation is successful and instructions are emitted. */
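/* Illustrative matches: "dest = a < b ? a : b" is a MIN and
   "dest = a < b ? b : a" is a MAX.  Roughly, with -ffinite-math-only and
   -funsafe-math-optimizations these become plain SMIN/SMAX patterns;
   otherwise the IEEE-safe UNSPEC variants are emitted.  */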
11582 static bool
11583 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
11584 rtx cmp_op1, rtx if_true, rtx if_false)
11586 enum machine_mode mode;
11587 bool is_min;
11588 rtx tmp;
11590 if (code == LT)
11592 else if (code == UNGE)
11594 tmp = if_true;
11595 if_true = if_false;
11596 if_false = tmp;
11598 else
11599 return false;
11601 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
11602 is_min = true;
11603 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
11604 is_min = false;
11605 else
11606 return false;
11608 mode = GET_MODE (dest);
11610 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
11611 but MODE may be a vector mode and thus not appropriate. */
11612 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
11614 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
11615 rtvec v;
11617 if_true = force_reg (mode, if_true);
11618 v = gen_rtvec (2, if_true, if_false);
11619 tmp = gen_rtx_UNSPEC (mode, v, u);
11621 else
11623 code = is_min ? SMIN : SMAX;
11624 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
11627 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
11628 return true;
11631 /* Expand an sse vector comparison. Return the register with the result. */
11633 static rtx
11634 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
11635 rtx op_true, rtx op_false)
11637 enum machine_mode mode = GET_MODE (dest);
11638 rtx x;
11640 cmp_op0 = force_reg (mode, cmp_op0);
11641 if (!nonimmediate_operand (cmp_op1, mode))
11642 cmp_op1 = force_reg (mode, cmp_op1);
11644 if (optimize
11645 || reg_overlap_mentioned_p (dest, op_true)
11646 || reg_overlap_mentioned_p (dest, op_false))
11647 dest = gen_reg_rtx (mode);
11649 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
11650 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11652 return dest;
11655 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
11656 operations. This is used for both scalar and vector conditional moves. */
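/* The expansion relies on the usual mask identity (illustrative):
     dest = (cmp & op_true) | (~cmp & op_false)
   where CMP is the all-ones/all-zeros mask produced by the SSE compare;
   the special cases below save one operation when either arm is zero.  */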
11658 static void
11659 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
11661 enum machine_mode mode = GET_MODE (dest);
11662 rtx t2, t3, x;
11664 if (op_false == CONST0_RTX (mode))
11666 op_true = force_reg (mode, op_true);
11667 x = gen_rtx_AND (mode, cmp, op_true);
11668 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11670 else if (op_true == CONST0_RTX (mode))
11672 op_false = force_reg (mode, op_false);
11673 x = gen_rtx_NOT (mode, cmp);
11674 x = gen_rtx_AND (mode, x, op_false);
11675 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11677 else
11679 op_true = force_reg (mode, op_true);
11680 op_false = force_reg (mode, op_false);
11682 t2 = gen_reg_rtx (mode);
11683 if (optimize)
11684 t3 = gen_reg_rtx (mode);
11685 else
11686 t3 = dest;
11688 x = gen_rtx_AND (mode, op_true, cmp);
11689 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
11691 x = gen_rtx_NOT (mode, cmp);
11692 x = gen_rtx_AND (mode, x, op_false);
11693 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
11695 x = gen_rtx_IOR (mode, t3, t2);
11696 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
11700 /* Expand a floating-point conditional move. Return true if successful. */
11703 ix86_expand_fp_movcc (rtx operands[])
11705 enum machine_mode mode = GET_MODE (operands[0]);
11706 enum rtx_code code = GET_CODE (operands[1]);
11707 rtx tmp, compare_op, second_test, bypass_test;
11709 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
11711 enum machine_mode cmode;
11713 /* Since we've no cmove for sse registers, don't force bad register
11714 allocation just to gain access to it. Deny movcc when the
11715 comparison mode doesn't match the move mode. */
11716 cmode = GET_MODE (ix86_compare_op0);
11717 if (cmode == VOIDmode)
11718 cmode = GET_MODE (ix86_compare_op1);
11719 if (cmode != mode)
11720 return 0;
11722 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11723 &ix86_compare_op0,
11724 &ix86_compare_op1);
11725 if (code == UNKNOWN)
11726 return 0;
11728 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
11729 ix86_compare_op1, operands[2],
11730 operands[3]))
11731 return 1;
11733 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
11734 ix86_compare_op1, operands[2], operands[3]);
11735 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
11736 return 1;
11739 /* The floating point conditional move instructions don't directly
11740 support conditions resulting from a signed integer comparison. */
11742 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11747 if (!fcmov_comparison_operator (compare_op, VOIDmode))
11749 gcc_assert (!second_test && !bypass_test);
11750 tmp = gen_reg_rtx (QImode);
11751 ix86_expand_setcc (code, tmp);
11752 code = NE;
11753 ix86_compare_op0 = tmp;
11754 ix86_compare_op1 = const0_rtx;
11755 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
11757 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
11759 tmp = gen_reg_rtx (mode);
11760 emit_move_insn (tmp, operands[3]);
11761 operands[3] = tmp;
11763 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
11765 tmp = gen_reg_rtx (mode);
11766 emit_move_insn (tmp, operands[2]);
11767 operands[2] = tmp;
11770 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11771 gen_rtx_IF_THEN_ELSE (mode, compare_op,
11772 operands[2], operands[3])));
11773 if (bypass_test)
11774 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11775 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
11776 operands[3], operands[0])));
11777 if (second_test)
11778 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
11779 gen_rtx_IF_THEN_ELSE (mode, second_test,
11780 operands[2], operands[0])));
11782 return 1;
11785 /* Expand a floating-point vector conditional move; a vcond operation
11786 rather than a movcc operation. */
11788 bool
11789 ix86_expand_fp_vcond (rtx operands[])
11791 enum rtx_code code = GET_CODE (operands[3]);
11792 rtx cmp;
11794 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
11795 &operands[4], &operands[5]);
11796 if (code == UNKNOWN)
11797 return false;
11799 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
11800 operands[5], operands[1], operands[2]))
11801 return true;
11803 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
11804 operands[1], operands[2]);
11805 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
11806 return true;
11809 /* Expand a signed integral vector conditional move. */
11811 bool
11812 ix86_expand_int_vcond (rtx operands[])
11814 enum machine_mode mode = GET_MODE (operands[0]);
11815 enum rtx_code code = GET_CODE (operands[3]);
11816 bool negate = false;
11817 rtx x, cop0, cop1;
11819 cop0 = operands[4];
11820 cop1 = operands[5];
11822 /* Canonicalize the comparison to EQ, GT, GTU. */
11823 switch (code)
11825 case EQ:
11826 case GT:
11827 case GTU:
11828 break;
11830 case NE:
11831 case LE:
11832 case LEU:
11833 code = reverse_condition (code);
11834 negate = true;
11835 break;
11837 case GE:
11838 case GEU:
11839 code = reverse_condition (code);
11840 negate = true;
11841 /* FALLTHRU */
11843 case LT:
11844 case LTU:
11845 code = swap_condition (code);
11846 x = cop0, cop0 = cop1, cop1 = x;
11847 break;
11849 default:
11850 gcc_unreachable ();
11853 /* Unsigned parallel compare is not supported by the hardware. Play some
11854 tricks to turn this into a signed comparison against 0. */
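/* Sketch of the trick (illustrative): for V16QImode/V8HImode an unsigned
   saturating subtraction is used, since "a >u b" holds exactly when
   (a -us b) != 0; the code tests that result against zero with EQ and
   flips NEGATE.  For V4SImode the sign bit of the modulo subtraction is
   adjusted so that a plain signed GT against zero gives the unsigned
   result.  */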
11855 if (code == GTU)
11857 cop0 = force_reg (mode, cop0);
11859 switch (mode)
11861 case V4SImode:
11863 rtx t1, t2, mask;
11865 /* Perform a parallel modulo subtraction. */
11866 t1 = gen_reg_rtx (mode);
11867 emit_insn (gen_subv4si3 (t1, cop0, cop1));
11869 /* Extract the original sign bit of op0. */
11870 mask = GEN_INT (-0x80000000);
11871 mask = gen_rtx_CONST_VECTOR (mode,
11872 gen_rtvec (4, mask, mask, mask, mask));
11873 mask = force_reg (mode, mask);
11874 t2 = gen_reg_rtx (mode);
11875 emit_insn (gen_andv4si3 (t2, cop0, mask));
11877 /* XOR it back into the result of the subtraction. This results
11878 in the sign bit set iff we saw unsigned underflow. */
11879 x = gen_reg_rtx (mode);
11880 emit_insn (gen_xorv4si3 (x, t1, t2));
11882 code = GT;
11884 break;
11886 case V16QImode:
11887 case V8HImode:
11888 /* Perform a parallel unsigned saturating subtraction. */
11889 x = gen_reg_rtx (mode);
11890 emit_insn (gen_rtx_SET (VOIDmode, x,
11891 gen_rtx_US_MINUS (mode, cop0, cop1)));
11893 code = EQ;
11894 negate = !negate;
11895 break;
11897 default:
11898 gcc_unreachable ();
11901 cop0 = x;
11902 cop1 = CONST0_RTX (mode);
11905 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
11906 operands[1+negate], operands[2-negate]);
11908 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
11909 operands[2-negate]);
11910 return true;
11913 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
11914 true if we should do zero extension, else sign extension. HIGH_P is
11915 true if we want the N/2 high elements, else the low elements. */
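/* Sketch (illustrative): the widening interleaves the input with a vector
   of "extension" elements SE - all zeros for zero extension, or a
   compare-generated mask of the input's sign bits (0 > x) for sign
   extension - so the punpck-style interleave yields the wider elements
   directly.  */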
11917 void
11918 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
11920 enum machine_mode imode = GET_MODE (operands[1]);
11921 rtx (*unpack)(rtx, rtx, rtx);
11922 rtx se, dest;
11924 switch (imode)
11926 case V16QImode:
11927 if (high_p)
11928 unpack = gen_vec_interleave_highv16qi;
11929 else
11930 unpack = gen_vec_interleave_lowv16qi;
11931 break;
11932 case V8HImode:
11933 if (high_p)
11934 unpack = gen_vec_interleave_highv8hi;
11935 else
11936 unpack = gen_vec_interleave_lowv8hi;
11937 break;
11938 case V4SImode:
11939 if (high_p)
11940 unpack = gen_vec_interleave_highv4si;
11941 else
11942 unpack = gen_vec_interleave_lowv4si;
11943 break;
11944 default:
11945 gcc_unreachable ();
11948 dest = gen_lowpart (imode, operands[0]);
11950 if (unsigned_p)
11951 se = force_reg (imode, CONST0_RTX (imode));
11952 else
11953 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
11954 operands[1], pc_rtx, pc_rtx);
11956 emit_insn (unpack (dest, operands[1], se));
11959 /* Expand conditional increment or decrement using adc/sbb instructions.
11960 The default case using setcc followed by the conditional move can be
11961 done by generic code. */
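/* Illustrative example (register choice hypothetical):
   "dest = src + (a <u b ? 1 : 0)" can be emitted as
       cmpl  %ebx, %eax
       adcl  $0, %edx
   letting the carry produced by the compare feed an adc/sbb with 0 or -1,
   with no setcc and no conditional move.  */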
11963 ix86_expand_int_addcc (rtx operands[])
11965 enum rtx_code code = GET_CODE (operands[1]);
11966 rtx compare_op;
11967 rtx val = const0_rtx;
11968 bool fpcmp = false;
11969 enum machine_mode mode = GET_MODE (operands[0]);
11971 if (operands[3] != const1_rtx
11972 && operands[3] != constm1_rtx)
11973 return 0;
11974 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
11975 ix86_compare_op1, &compare_op))
11976 return 0;
11977 code = GET_CODE (compare_op);
11979 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
11980 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
11982 fpcmp = true;
11983 code = ix86_fp_compare_code_to_integer (code);
11986 if (code != LTU)
11988 val = constm1_rtx;
11989 if (fpcmp)
11990 PUT_CODE (compare_op,
11991 reverse_condition_maybe_unordered
11992 (GET_CODE (compare_op)));
11993 else
11994 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
11996 PUT_MODE (compare_op, mode);
11998 /* Construct either adc or sbb insn. */
11999 if ((code == LTU) == (operands[3] == constm1_rtx))
12001 switch (GET_MODE (operands[0]))
12003 case QImode:
12004 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
12005 break;
12006 case HImode:
12007 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
12008 break;
12009 case SImode:
12010 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
12011 break;
12012 case DImode:
12013 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12014 break;
12015 default:
12016 gcc_unreachable ();
12019 else
12021 switch (GET_MODE (operands[0]))
12023 case QImode:
12024 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
12025 break;
12026 case HImode:
12027 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
12028 break;
12029 case SImode:
12030 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
12031 break;
12032 case DImode:
12033 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
12034 break;
12035 default:
12036 gcc_unreachable ();
12039 return 1; /* DONE */
12043 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
12044 works for floating point parameters and non-offsettable memories.
12045 For pushes, it returns just stack offsets; the values will be saved
12046 in the right order. Maximally three parts are generated. */
12048 static int
12049 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
12051 int size;
12053 if (!TARGET_64BIT)
12054 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
12055 else
12056 size = (GET_MODE_SIZE (mode) + 4) / 8;
12058 gcc_assert (GET_CODE (operand) != REG || !MMX_REGNO_P (REGNO (operand)));
12059 gcc_assert (size >= 2 && size <= 3);
12061 /* Optimize constant pool reference to immediates. This is used by fp
12062 moves, that force all constants to memory to allow combining. */
12063 if (GET_CODE (operand) == MEM && MEM_READONLY_P (operand))
12065 rtx tmp = maybe_get_pool_constant (operand);
12066 if (tmp)
12067 operand = tmp;
12070 if (GET_CODE (operand) == MEM && !offsettable_memref_p (operand))
12072 /* The only non-offsettable memories we handle are pushes. */
12073 int ok = push_operand (operand, VOIDmode);
12075 gcc_assert (ok);
12077 operand = copy_rtx (operand);
12078 PUT_MODE (operand, Pmode);
12079 parts[0] = parts[1] = parts[2] = operand;
12080 return size;
12083 if (GET_CODE (operand) == CONST_VECTOR)
12085 enum machine_mode imode = int_mode_for_mode (mode);
12086 /* Caution: if we looked through a constant pool memory above,
12087 the operand may actually have a different mode now. That's
12088 ok, since we want to pun this all the way back to an integer. */
12089 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
12090 gcc_assert (operand != NULL);
12091 mode = imode;
12094 if (!TARGET_64BIT)
12096 if (mode == DImode)
12097 split_di (&operand, 1, &parts[0], &parts[1]);
12098 else
12100 if (REG_P (operand))
12102 gcc_assert (reload_completed);
12103 parts[0] = gen_rtx_REG (SImode, REGNO (operand) + 0);
12104 parts[1] = gen_rtx_REG (SImode, REGNO (operand) + 1);
12105 if (size == 3)
12106 parts[2] = gen_rtx_REG (SImode, REGNO (operand) + 2);
12108 else if (offsettable_memref_p (operand))
12110 operand = adjust_address (operand, SImode, 0);
12111 parts[0] = operand;
12112 parts[1] = adjust_address (operand, SImode, 4);
12113 if (size == 3)
12114 parts[2] = adjust_address (operand, SImode, 8);
12116 else if (GET_CODE (operand) == CONST_DOUBLE)
12118 REAL_VALUE_TYPE r;
12119 long l[4];
12121 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12122 switch (mode)
12124 case XFmode:
12125 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
12126 parts[2] = gen_int_mode (l[2], SImode);
12127 break;
12128 case DFmode:
12129 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
12130 break;
12131 default:
12132 gcc_unreachable ();
12134 parts[1] = gen_int_mode (l[1], SImode);
12135 parts[0] = gen_int_mode (l[0], SImode);
12137 else
12138 gcc_unreachable ();
12141 else
12143 if (mode == TImode)
12144 split_ti (&operand, 1, &parts[0], &parts[1]);
12145 if (mode == XFmode || mode == TFmode)
12147 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
12148 if (REG_P (operand))
12150 gcc_assert (reload_completed);
12151 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
12152 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
12154 else if (offsettable_memref_p (operand))
12156 operand = adjust_address (operand, DImode, 0);
12157 parts[0] = operand;
12158 parts[1] = adjust_address (operand, upper_mode, 8);
12160 else if (GET_CODE (operand) == CONST_DOUBLE)
12162 REAL_VALUE_TYPE r;
12163 long l[4];
12165 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
12166 real_to_target (l, &r, mode);
12168 /* Do not use shift by 32 to avoid warning on 32bit systems. */
12169 if (HOST_BITS_PER_WIDE_INT >= 64)
12170 parts[0]
12171 = gen_int_mode
12172 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
12173 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
12174 DImode);
12175 else
12176 parts[0] = immed_double_const (l[0], l[1], DImode);
12178 if (upper_mode == SImode)
12179 parts[1] = gen_int_mode (l[2], SImode);
12180 else if (HOST_BITS_PER_WIDE_INT >= 64)
12181 parts[1]
12182 = gen_int_mode
12183 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
12184 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
12185 DImode);
12186 else
12187 parts[1] = immed_double_const (l[2], l[3], DImode);
12189 else
12190 gcc_unreachable ();
12194 return size;
12197 /* Emit insns to perform a move or push of DI, DF, and XF values.
12198 Return false when normal moves are needed; true when all required
12199 insns have been emitted. Operands 2-4 contain the input values
12200 in the correct order; operands 5-7 contain the output values. */
12202 void
12203 ix86_split_long_move (rtx operands[])
12205 rtx part[2][3];
12206 int nparts;
12207 int push = 0;
12208 int collisions = 0;
12209 enum machine_mode mode = GET_MODE (operands[0]);
12211 /* The DFmode expanders may ask us to move a double.
12212 For a 64-bit target this is a single move. By hiding that fact
12213 here we simplify the i386.md splitters. */
12214 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
12216 /* Optimize constant pool reference to immediates. This is used by
12217 fp moves, that force all constants to memory to allow combining. */
12219 if (GET_CODE (operands[1]) == MEM
12220 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
12221 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
12222 operands[1] = get_pool_constant (XEXP (operands[1], 0));
12223 if (push_operand (operands[0], VOIDmode))
12225 operands[0] = copy_rtx (operands[0]);
12226 PUT_MODE (operands[0], Pmode);
12228 else
12229 operands[0] = gen_lowpart (DImode, operands[0]);
12230 operands[1] = gen_lowpart (DImode, operands[1]);
12231 emit_move_insn (operands[0], operands[1]);
12232 return;
12235 /* The only non-offsettable memory we handle is push. */
12236 if (push_operand (operands[0], VOIDmode))
12237 push = 1;
12238 else
12239 gcc_assert (GET_CODE (operands[0]) != MEM
12240 || offsettable_memref_p (operands[0]));
12242 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
12243 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
12245 /* When emitting push, take care for source operands on the stack. */
12246 if (push && GET_CODE (operands[1]) == MEM
12247 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
12249 if (nparts == 3)
12250 part[1][1] = change_address (part[1][1], GET_MODE (part[1][1]),
12251 XEXP (part[1][2], 0));
12252 part[1][0] = change_address (part[1][0], GET_MODE (part[1][0]),
12253 XEXP (part[1][1], 0));
12256 /* We need to do copy in the right order in case an address register
12257 of the source overlaps the destination. */
12258 if (REG_P (part[0][0]) && GET_CODE (part[1][0]) == MEM)
12260 if (reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0)))
12261 collisions++;
12262 if (reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12263 collisions++;
12264 if (nparts == 3
12265 && reg_overlap_mentioned_p (part[0][2], XEXP (part[1][0], 0)))
12266 collisions++;
12268 /* Collision in the middle part can be handled by reordering. */
12269 if (collisions == 1 && nparts == 3
12270 && reg_overlap_mentioned_p (part[0][1], XEXP (part[1][0], 0)))
12272 rtx tmp;
12273 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
12274 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
12277 /* If there are more collisions, we can't handle it by reordering.
12278 Do an lea to the last part and use only one colliding move. */
12279 else if (collisions > 1)
12281 rtx base;
12283 collisions = 1;
12285 base = part[0][nparts - 1];
12287 /* Handle the case when the last part isn't valid for lea.
12288 Happens in 64-bit mode storing the 12-byte XFmode. */
12289 if (GET_MODE (base) != Pmode)
12290 base = gen_rtx_REG (Pmode, REGNO (base));
12292 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
12293 part[1][0] = replace_equiv_address (part[1][0], base);
12294 part[1][1] = replace_equiv_address (part[1][1],
12295 plus_constant (base, UNITS_PER_WORD));
12296 if (nparts == 3)
12297 part[1][2] = replace_equiv_address (part[1][2],
12298 plus_constant (base, 8));
12302 if (push)
12304 if (!TARGET_64BIT)
12306 if (nparts == 3)
12308 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
12309 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
12310 emit_move_insn (part[0][2], part[1][2]);
12313 else
12315 /* In 64-bit mode we don't have a 32-bit push available. If this is a
12316 register, that is OK - we will just use the larger counterpart. We also
12317 retype memory - this comes from an attempt to avoid the REX prefix on
12318 moves of the second half of a TFmode value. */
12319 if (GET_MODE (part[1][1]) == SImode)
12321 switch (GET_CODE (part[1][1]))
12323 case MEM:
12324 part[1][1] = adjust_address (part[1][1], DImode, 0);
12325 break;
12327 case REG:
12328 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
12329 break;
12331 default:
12332 gcc_unreachable ();
12335 if (GET_MODE (part[1][0]) == SImode)
12336 part[1][0] = part[1][1];
12339 emit_move_insn (part[0][1], part[1][1]);
12340 emit_move_insn (part[0][0], part[1][0]);
12341 return;
12344 /* Choose correct order to not overwrite the source before it is copied. */
12345 if ((REG_P (part[0][0])
12346 && REG_P (part[1][1])
12347 && (REGNO (part[0][0]) == REGNO (part[1][1])
12348 || (nparts == 3
12349 && REGNO (part[0][0]) == REGNO (part[1][2]))))
12350 || (collisions > 0
12351 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
12353 if (nparts == 3)
12355 operands[2] = part[0][2];
12356 operands[3] = part[0][1];
12357 operands[4] = part[0][0];
12358 operands[5] = part[1][2];
12359 operands[6] = part[1][1];
12360 operands[7] = part[1][0];
12362 else
12364 operands[2] = part[0][1];
12365 operands[3] = part[0][0];
12366 operands[5] = part[1][1];
12367 operands[6] = part[1][0];
12370 else
12372 if (nparts == 3)
12374 operands[2] = part[0][0];
12375 operands[3] = part[0][1];
12376 operands[4] = part[0][2];
12377 operands[5] = part[1][0];
12378 operands[6] = part[1][1];
12379 operands[7] = part[1][2];
12381 else
12383 operands[2] = part[0][0];
12384 operands[3] = part[0][1];
12385 operands[5] = part[1][0];
12386 operands[6] = part[1][1];
12390 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
12391 if (optimize_size)
12393 if (GET_CODE (operands[5]) == CONST_INT
12394 && operands[5] != const0_rtx
12395 && REG_P (operands[2]))
12397 if (GET_CODE (operands[6]) == CONST_INT
12398 && INTVAL (operands[6]) == INTVAL (operands[5]))
12399 operands[6] = operands[2];
12401 if (nparts == 3
12402 && GET_CODE (operands[7]) == CONST_INT
12403 && INTVAL (operands[7]) == INTVAL (operands[5]))
12404 operands[7] = operands[2];
12407 if (nparts == 3
12408 && GET_CODE (operands[6]) == CONST_INT
12409 && operands[6] != const0_rtx
12410 && REG_P (operands[3])
12411 && GET_CODE (operands[7]) == CONST_INT
12412 && INTVAL (operands[7]) == INTVAL (operands[6]))
12413 operands[7] = operands[3];
12416 emit_move_insn (operands[2], operands[5]);
12417 emit_move_insn (operands[3], operands[6]);
12418 if (nparts == 3)
12419 emit_move_insn (operands[4], operands[7]);
12421 return;
12424 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
12425 left shift by a constant, either using a single shift or
12426 a sequence of add instructions. */
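/* For example (illustrative): a left shift by 2 becomes two back-to-back
   adds when 2 * ix86_cost->add <= ix86_cost->shift_const and we are not
   optimizing for size; otherwise a single shift by 2 is emitted.  */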
12428 static void
12429 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
12431 if (count == 1)
12433 emit_insn ((mode == DImode
12434 ? gen_addsi3
12435 : gen_adddi3) (operand, operand, operand));
12437 else if (!optimize_size
12438 && count * ix86_cost->add <= ix86_cost->shift_const)
12440 int i;
12441 for (i=0; i<count; i++)
12443 emit_insn ((mode == DImode
12444 ? gen_addsi3
12445 : gen_adddi3) (operand, operand, operand));
12448 else
12449 emit_insn ((mode == DImode
12450 ? gen_ashlsi3
12451 : gen_ashldi3) (operand, operand, GEN_INT (count)));
12454 void
12455 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
12457 rtx low[2], high[2];
12458 int count;
12459 const int single_width = mode == DImode ? 32 : 64;
12461 if (GET_CODE (operands[2]) == CONST_INT)
12463 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12464 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12466 if (count >= single_width)
12468 emit_move_insn (high[0], low[1]);
12469 emit_move_insn (low[0], const0_rtx);
12471 if (count > single_width)
12472 ix86_expand_ashl_const (high[0], count - single_width, mode);
12474 else
12476 if (!rtx_equal_p (operands[0], operands[1]))
12477 emit_move_insn (operands[0], operands[1]);
12478 emit_insn ((mode == DImode
12479 ? gen_x86_shld_1
12480 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
12481 ix86_expand_ashl_const (low[0], count, mode);
12483 return;
12486 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12488 if (operands[1] == const1_rtx)
12490 /* Assuming we've chosen QImode-capable registers, then 1 << N
12491 can be done with two 32/64-bit shifts, no branches, no cmoves. */
12492 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
12494 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
12496 ix86_expand_clear (low[0]);
12497 ix86_expand_clear (high[0]);
12498 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
12500 d = gen_lowpart (QImode, low[0]);
12501 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12502 s = gen_rtx_EQ (QImode, flags, const0_rtx);
12503 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12505 d = gen_lowpart (QImode, high[0]);
12506 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
12507 s = gen_rtx_NE (QImode, flags, const0_rtx);
12508 emit_insn (gen_rtx_SET (VOIDmode, d, s));
12511 /* Otherwise, we can get the same results by manually performing
12512 a bit extract operation on bit 5/6, and then performing the two
12513 shifts. The two methods of getting 0/1 into low/high are exactly
12514 the same size. Avoiding the shift in the bit extract case helps
12515 pentium4 a bit; no one else seems to care much either way. */
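/* Illustrative expansion of "1 << n" for DImode on 32-bit targets using
   the bit-extract variant: high = (n >> 5) & 1; low = high ^ 1; then both
   halves are shifted left by n, relying on the hardware masking the shift
   count to the word size.  */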
12516 else
12518 rtx x;
12520 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
12521 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
12522 else
12523 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
12524 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
12526 emit_insn ((mode == DImode
12527 ? gen_lshrsi3
12528 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
12529 emit_insn ((mode == DImode
12530 ? gen_andsi3
12531 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
12532 emit_move_insn (low[0], high[0]);
12533 emit_insn ((mode == DImode
12534 ? gen_xorsi3
12535 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
12538 emit_insn ((mode == DImode
12539 ? gen_ashlsi3
12540 : gen_ashldi3) (low[0], low[0], operands[2]));
12541 emit_insn ((mode == DImode
12542 ? gen_ashlsi3
12543 : gen_ashldi3) (high[0], high[0], operands[2]));
12544 return;
12547 if (operands[1] == constm1_rtx)
12549 /* For -1 << N, we can avoid the shld instruction, because we
12550 know that we're shifting 0...31/63 ones into a -1. */
12551 emit_move_insn (low[0], constm1_rtx);
12552 if (optimize_size)
12553 emit_move_insn (high[0], low[0]);
12554 else
12555 emit_move_insn (high[0], constm1_rtx);
12557 else
12559 if (!rtx_equal_p (operands[0], operands[1]))
12560 emit_move_insn (operands[0], operands[1]);
12562 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12563 emit_insn ((mode == DImode
12564 ? gen_x86_shld_1
12565 : gen_x86_64_shld) (high[0], low[0], operands[2]));
12568 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
12570 if (TARGET_CMOVE && scratch)
12572 ix86_expand_clear (scratch);
12573 emit_insn ((mode == DImode
12574 ? gen_x86_shift_adj_1
12575 : gen_x86_64_shift_adj) (high[0], low[0], operands[2], scratch));
12577 else
12578 emit_insn (gen_x86_shift_adj_2 (high[0], low[0], operands[2]));
12581 void
12582 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
12584 rtx low[2], high[2];
12585 int count;
12586 const int single_width = mode == DImode ? 32 : 64;
12588 if (GET_CODE (operands[2]) == CONST_INT)
12590 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12591 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12593 if (count == single_width * 2 - 1)
12595 emit_move_insn (high[0], high[1]);
12596 emit_insn ((mode == DImode
12597 ? gen_ashrsi3
12598 : gen_ashrdi3) (high[0], high[0],
12599 GEN_INT (single_width - 1)));
12600 emit_move_insn (low[0], high[0]);
12603 else if (count >= single_width)
12605 emit_move_insn (low[0], high[1]);
12606 emit_move_insn (high[0], low[0]);
12607 emit_insn ((mode == DImode
12608 ? gen_ashrsi3
12609 : gen_ashrdi3) (high[0], high[0],
12610 GEN_INT (single_width - 1)));
12611 if (count > single_width)
12612 emit_insn ((mode == DImode
12613 ? gen_ashrsi3
12614 : gen_ashrdi3) (low[0], low[0],
12615 GEN_INT (count - single_width)));
12617 else
12619 if (!rtx_equal_p (operands[0], operands[1]))
12620 emit_move_insn (operands[0], operands[1]);
12621 emit_insn ((mode == DImode
12622 ? gen_x86_shrd_1
12623 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12624 emit_insn ((mode == DImode
12625 ? gen_ashrsi3
12626 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
12629 else
12631 if (!rtx_equal_p (operands[0], operands[1]))
12632 emit_move_insn (operands[0], operands[1]);
12634 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12636 emit_insn ((mode == DImode
12637 ? gen_x86_shrd_1
12638 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12639 emit_insn ((mode == DImode
12640 ? gen_ashrsi3
12641 : gen_ashrdi3) (high[0], high[0], operands[2]));
12643 if (TARGET_CMOVE && scratch)
12645 emit_move_insn (scratch, high[0]);
12646 emit_insn ((mode == DImode
12647 ? gen_ashrsi3
12648 : gen_ashrdi3) (scratch, scratch,
12649 GEN_INT (single_width - 1)));
12650 emit_insn ((mode == DImode
12651 ? gen_x86_shift_adj_1
12652 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12653 scratch));
12655 else
12656 emit_insn (gen_x86_shift_adj_3 (low[0], high[0], operands[2]));
12660 void
12661 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
12663 rtx low[2], high[2];
12664 int count;
12665 const int single_width = mode == DImode ? 32 : 64;
12667 if (GET_CODE (operands[2]) == CONST_INT)
12669 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
12670 count = INTVAL (operands[2]) & (single_width * 2 - 1);
12672 if (count >= single_width)
12674 emit_move_insn (low[0], high[1]);
12675 ix86_expand_clear (high[0]);
12677 if (count > single_width)
12678 emit_insn ((mode == DImode
12679 ? gen_lshrsi3
12680 : gen_lshrdi3) (low[0], low[0],
12681 GEN_INT (count - single_width)));
12683 else
12685 if (!rtx_equal_p (operands[0], operands[1]))
12686 emit_move_insn (operands[0], operands[1]);
12687 emit_insn ((mode == DImode
12688 ? gen_x86_shrd_1
12689 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
12690 emit_insn ((mode == DImode
12691 ? gen_lshrsi3
12692 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
12695 else
12697 if (!rtx_equal_p (operands[0], operands[1]))
12698 emit_move_insn (operands[0], operands[1]);
12700 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
12702 emit_insn ((mode == DImode
12703 ? gen_x86_shrd_1
12704 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
12705 emit_insn ((mode == DImode
12706 ? gen_lshrsi3
12707 : gen_lshrdi3) (high[0], high[0], operands[2]));
12709 /* Heh. By reversing the arguments, we can reuse this pattern. */
12710 if (TARGET_CMOVE && scratch)
12712 ix86_expand_clear (scratch);
12713 emit_insn ((mode == DImode
12714 ? gen_x86_shift_adj_1
12715 : gen_x86_64_shift_adj) (low[0], high[0], operands[2],
12716 scratch));
12718 else
12719 emit_insn (gen_x86_shift_adj_2 (low[0], high[0], operands[2]));
12723 /* Predict just emitted jump instruction to be taken with probability PROB. */
12724 static void
12725 predict_jump (int prob)
12727 rtx insn = get_last_insn ();
12728 gcc_assert (GET_CODE (insn) == JUMP_INSN);
12729 REG_NOTES (insn)
12730 = gen_rtx_EXPR_LIST (REG_BR_PROB,
12731 GEN_INT (prob),
12732 REG_NOTES (insn));
12735 /* Helper function for the string operations below. Test VARIABLE whether
12736 it is aligned to VALUE bytes. If true, jump to the label. */
12737 static rtx
12738 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
12740 rtx label = gen_label_rtx ();
12741 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
12742 if (GET_MODE (variable) == DImode)
12743 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
12744 else
12745 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
12746 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
12747 1, label);
12748 if (epilogue)
12749 predict_jump (REG_BR_PROB_BASE * 50 / 100);
12750 else
12751 predict_jump (REG_BR_PROB_BASE * 90 / 100);
12752 return label;
12755 /* Adjust COUNTER by the VALUE. */
12756 static void
12757 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
12759 if (GET_MODE (countreg) == DImode)
12760 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
12761 else
12762 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
12765 /* Zero extend possibly SImode EXP to Pmode register. */
12767 ix86_zero_extend_to_Pmode (rtx exp)
12769 rtx r;
12770 if (GET_MODE (exp) == VOIDmode)
12771 return force_reg (Pmode, exp);
12772 if (GET_MODE (exp) == Pmode)
12773 return copy_to_mode_reg (Pmode, exp);
12774 r = gen_reg_rtx (Pmode);
12775 emit_insn (gen_zero_extendsidi2 (r, exp));
12776 return r;
12779 /* Divide COUNTREG by SCALE. */
12780 static rtx
12781 scale_counter (rtx countreg, int scale)
12783 rtx sc;
12784 rtx piece_size_mask;
12786 if (scale == 1)
12787 return countreg;
12788 if (GET_CODE (countreg) == CONST_INT)
12789 return GEN_INT (INTVAL (countreg) / scale);
12790 gcc_assert (REG_P (countreg));
12792 piece_size_mask = GEN_INT (scale - 1);
12793 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
12794 GEN_INT (exact_log2 (scale)),
12795 NULL, 1, OPTAB_DIRECT);
12796 return sc;
12799 /* When SRCPTR is non-NULL, output a simple loop to move memory from
12800 SRCPTR to DESTPTR via chunks of MODE, unrolled UNROLL times; the
12801 overall size is COUNT, specified in bytes. When SRCPTR is NULL, output
12802 the equivalent loop to set memory by VALUE (supposed to be in MODE).
12804 The size is rounded down to a whole number of chunks moved at once.
12805 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
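/* Rough shape of the emitted loop (illustrative pseudo-C, not literal RTL):

     size = count & ~(CHUNK * UNROLL - 1);
     iter = 0;
     do {
       copy (or set) CHUNK bytes at dest + iter [, src + iter], UNROLL times;
       iter += CHUNK * UNROLL;
     } while (iter < size);
     destptr += iter;  if (srcptr) srcptr += iter;

   leaving any remaining tail bytes for the epilogue code.  */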
12808 static void
12809 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
12810 rtx destptr, rtx srcptr, rtx value,
12811 rtx count, enum machine_mode mode, int unroll,
12812 int expected_size)
12814 rtx out_label, top_label, iter, tmp;
12815 enum machine_mode iter_mode;
12816 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
12817 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
12818 rtx size;
12819 rtx x_addr;
12820 rtx y_addr;
12821 int i;
12823 iter_mode = GET_MODE (count);
12824 if (iter_mode == VOIDmode)
12825 iter_mode = word_mode;
12827 top_label = gen_label_rtx ();
12828 out_label = gen_label_rtx ();
12829 iter = gen_reg_rtx (iter_mode);
12831 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
12832 NULL, 1, OPTAB_DIRECT);
12833 /* Those two should combine. */
12834 if (piece_size == const1_rtx)
12836 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
12837 true, out_label);
12838 predict_jump (REG_BR_PROB_BASE * 10 / 100);
12840 emit_move_insn (iter, const0_rtx);
12842 emit_label (top_label);
12844 tmp = convert_modes (Pmode, iter_mode, iter, true);
12845 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
12846 destmem = change_address (destmem, mode, x_addr);
12848 if (srcmem)
12850 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
12851 srcmem = change_address (srcmem, mode, y_addr);
12853 /* When unrolling for chips that reorder memory reads and writes,
12854 we can save registers by using a single temporary.
12855 Also, using 4 temporaries is overkill in 32-bit mode. */
12856 if (!TARGET_64BIT && 0)
12858 for (i = 0; i < unroll; i++)
12860 if (i)
12862 destmem =
12863 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12864 srcmem =
12865 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12867 emit_move_insn (destmem, srcmem);
12870 else
12872 rtx tmpreg[4];
12873 gcc_assert (unroll <= 4);
12874 for (i = 0; i < unroll; i++)
12876 tmpreg[i] = gen_reg_rtx (mode);
12877 if (i)
12879 srcmem =
12880 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
12882 emit_move_insn (tmpreg[i], srcmem);
12884 for (i = 0; i < unroll; i++)
12886 if (i)
12888 destmem =
12889 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12891 emit_move_insn (destmem, tmpreg[i]);
12895 else
12896 for (i = 0; i < unroll; i++)
12898 if (i)
12899 destmem =
12900 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
12901 emit_move_insn (destmem, value);
12904 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
12905 true, OPTAB_LIB_WIDEN);
12906 if (tmp != iter)
12907 emit_move_insn (iter, tmp);
12909 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
12910 true, top_label);
12911 if (expected_size != -1)
12913 expected_size /= GET_MODE_SIZE (mode) * unroll;
12914 if (expected_size == 0)
12915 predict_jump (0);
12916 else if (expected_size > REG_BR_PROB_BASE)
12917 predict_jump (REG_BR_PROB_BASE - 1);
12918 else
12919 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
12921 else
12922 predict_jump (REG_BR_PROB_BASE * 80 / 100);
12923 iter = ix86_zero_extend_to_Pmode (iter);
12924 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
12925 true, OPTAB_LIB_WIDEN);
12926 if (tmp != destptr)
12927 emit_move_insn (destptr, tmp);
12928 if (srcptr)
12930 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
12931 true, OPTAB_LIB_WIDEN);
12932 if (tmp != srcptr)
12933 emit_move_insn (srcptr, tmp);
12935 emit_label (out_label);
12938 /* Output "rep; mov" instruction.
12939 Arguments have the same meaning as for the previous function. */
12940 static void
12941 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
12942 rtx destptr, rtx srcptr,
12943 rtx count,
12944 enum machine_mode mode)
12946 rtx destexp;
12947 rtx srcexp;
12948 rtx countreg;
12950 /* If the size is known, it is shorter to use rep movs. */
12951 if (mode == QImode && GET_CODE (count) == CONST_INT
12952 && !(INTVAL (count) & 3))
12953 mode = SImode;
12955 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12956 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12957 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
12958 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
12959 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12960 if (mode != QImode)
12962 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12963 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12964 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12965 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
12966 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12967 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
12969 else
12971 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
12972 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
12974 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
12975 destexp, srcexp));
12978 /* Output "rep; stos" instruction.
12979 Arguments have the same meaning as for the previous function. */
12980 static void
12981 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
12982 rtx count,
12983 enum machine_mode mode)
12985 rtx destexp;
12986 rtx countreg;
12988 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
12989 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
12990 value = force_reg (mode, gen_lowpart (mode, value));
12991 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
12992 if (mode != QImode)
12994 destexp = gen_rtx_ASHIFT (Pmode, countreg,
12995 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
12996 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
12998 else
12999 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
13000 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
13003 static void
13004 emit_strmov (rtx destmem, rtx srcmem,
13005 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
13007 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
13008 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
13009 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13012 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
13013 static void
13014 expand_movmem_epilogue (rtx destmem, rtx srcmem,
13015 rtx destptr, rtx srcptr, rtx count, int max_size)
13017 rtx src, dest;
13018 if (GET_CODE (count) == CONST_INT)
13020 HOST_WIDE_INT countval = INTVAL (count);
13021 int offset = 0;
13023 if ((countval & 0x10) && max_size > 16)
13025 if (TARGET_64BIT)
13027 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13028 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
13030 else
13031 gcc_unreachable ();
13032 offset += 16;
13034 if ((countval & 0x08) && max_size > 8)
13036 if (TARGET_64BIT)
13037 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13038 else
13040 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
13041 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 4);
13043 offset += 8;
13045 if ((countval & 0x04) && max_size > 4)
13047 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
13048 offset += 4;
13050 if ((countval & 0x02) && max_size > 2)
13052 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
13053 offset += 2;
13055 if ((countval & 0x01) && max_size > 1)
13057 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
13058 offset += 1;
13060 return;
13062 if (max_size > 8)
13064 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13065 count, 1, OPTAB_DIRECT);
13066 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
13067 count, QImode, 1, 4);
13068 return;
13071 /* When there are stringops, we can cheaply increase dest and src pointers.
13072 Otherwise we save code size by maintaining the offset (zero is readily
13073 available from the preceding rep operation) and using x86 addressing modes.
13075 if (TARGET_SINGLE_STRINGOP)
13077 if (max_size > 4)
13079 rtx label = ix86_expand_aligntest (count, 4, true);
13080 src = change_address (srcmem, SImode, srcptr);
13081 dest = change_address (destmem, SImode, destptr);
13082 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13083 emit_label (label);
13084 LABEL_NUSES (label) = 1;
13086 if (max_size > 2)
13088 rtx label = ix86_expand_aligntest (count, 2, true);
13089 src = change_address (srcmem, HImode, srcptr);
13090 dest = change_address (destmem, HImode, destptr);
13091 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13092 emit_label (label);
13093 LABEL_NUSES (label) = 1;
13095 if (max_size > 1)
13097 rtx label = ix86_expand_aligntest (count, 1, true);
13098 src = change_address (srcmem, QImode, srcptr);
13099 dest = change_address (destmem, QImode, destptr);
13100 emit_insn (gen_strmov (destptr, dest, srcptr, src));
13101 emit_label (label);
13102 LABEL_NUSES (label) = 1;
13105 else
13107 rtx offset = force_reg (Pmode, const0_rtx);
13108 rtx tmp;
13110 if (max_size > 4)
13112 rtx label = ix86_expand_aligntest (count, 4, true);
13113 src = change_address (srcmem, SImode, srcptr);
13114 dest = change_address (destmem, SImode, destptr);
13115 emit_move_insn (dest, src);
13116 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
13117 true, OPTAB_LIB_WIDEN);
13118 if (tmp != offset)
13119 emit_move_insn (offset, tmp);
13120 emit_label (label);
13121 LABEL_NUSES (label) = 1;
13123 if (max_size > 2)
13125 rtx label = ix86_expand_aligntest (count, 2, true);
13126 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13127 src = change_address (srcmem, HImode, tmp);
13128 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13129 dest = change_address (destmem, HImode, tmp);
13130 emit_move_insn (dest, src);
13131 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
13132 true, OPTAB_LIB_WIDEN);
13133 if (tmp != offset)
13134 emit_move_insn (offset, tmp);
13135 emit_label (label);
13136 LABEL_NUSES (label) = 1;
13138 if (max_size > 1)
13140 rtx label = ix86_expand_aligntest (count, 1, true);
13141 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
13142 src = change_address (srcmem, QImode, tmp);
13143 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
13144 dest = change_address (destmem, QImode, tmp);
13145 emit_move_insn (dest, src);
13146 emit_label (label);
13147 LABEL_NUSES (label) = 1;
13152 /* Output code to set at most count & (max_size - 1) bytes starting by DEST. */
13153 static void
13154 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
13155 rtx count, int max_size)
13157 count =
13158 expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
13159 count, 1, OPTAB_DIRECT);
13160 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
13161 gen_lowpart (QImode, value), count, QImode,
13162 1, max_size / 2);
13165 /* Output code to set at most count & (max_size - 1) bytes starting by DEST. */
13166 static void
13167 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
13169 rtx dest;
13171 if (GET_CODE (count) == CONST_INT)
13173 HOST_WIDE_INT countval = INTVAL (count);
13174 int offset = 0;
13176 if ((countval & 0x10) && max_size > 16)
13178 if (TARGET_64BIT)
13180 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13181 emit_insn (gen_strset (destptr, dest, value));
13182 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
13183 emit_insn (gen_strset (destptr, dest, value));
13185 else
13186 gcc_unreachable ();
13187 offset += 16;
13189 if ((countval & 0x08) && max_size > 8)
13191 if (TARGET_64BIT)
13193 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
13194 emit_insn (gen_strset (destptr, dest, value));
13196 else
13198 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13199 emit_insn (gen_strset (destptr, dest, value));
13200 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
13201 emit_insn (gen_strset (destptr, dest, value));
13203 offset += 8;
13205 if ((countval & 0x04) && max_size > 4)
13207 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
13208 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13209 offset += 4;
13211 if ((countval & 0x02) && max_size > 2)
13213 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
13214 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13215 offset += 2;
13217 if ((countval & 0x01) && max_size > 1)
13219 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
13220 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13221 offset += 1;
13223 return;
13225 if (max_size > 32)
13227 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
13228 return;
13230 if (max_size > 16)
13232 rtx label = ix86_expand_aligntest (count, 16, true);
13233 if (TARGET_64BIT)
13235 dest = change_address (destmem, DImode, destptr);
13236 emit_insn (gen_strset (destptr, dest, value));
13237 emit_insn (gen_strset (destptr, dest, value));
13239 else
13241 dest = change_address (destmem, SImode, destptr);
13242 emit_insn (gen_strset (destptr, dest, value));
13243 emit_insn (gen_strset (destptr, dest, value));
13244 emit_insn (gen_strset (destptr, dest, value));
13245 emit_insn (gen_strset (destptr, dest, value));
13247 emit_label (label);
13248 LABEL_NUSES (label) = 1;
13250 if (max_size > 8)
13252 rtx label = ix86_expand_aligntest (count, 8, true);
13253 if (TARGET_64BIT)
13255 dest = change_address (destmem, DImode, destptr);
13256 emit_insn (gen_strset (destptr, dest, value));
13258 else
13260 dest = change_address (destmem, SImode, destptr);
13261 emit_insn (gen_strset (destptr, dest, value));
13262 emit_insn (gen_strset (destptr, dest, value));
13264 emit_label (label);
13265 LABEL_NUSES (label) = 1;
13267 if (max_size > 4)
13269 rtx label = ix86_expand_aligntest (count, 4, true);
13270 dest = change_address (destmem, SImode, destptr);
13271 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
13272 emit_label (label);
13273 LABEL_NUSES (label) = 1;
13275 if (max_size > 2)
13277 rtx label = ix86_expand_aligntest (count, 2, true);
13278 dest = change_address (destmem, HImode, destptr);
13279 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
13280 emit_label (label);
13281 LABEL_NUSES (label) = 1;
13283 if (max_size > 1)
13285 rtx label = ix86_expand_aligntest (count, 1, true);
13286 dest = change_address (destmem, QImode, destptr);
13287 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
13288 emit_label (label);
13289 LABEL_NUSES (label) = 1;
13293 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN,
13294 to DESIRED_ALIGNMENT. */
13295 static void
13296 expand_movmem_prologue (rtx destmem, rtx srcmem,
13297 rtx destptr, rtx srcptr, rtx count,
13298 int align, int desired_alignment)
13300 if (align <= 1 && desired_alignment > 1)
13302 rtx label = ix86_expand_aligntest (destptr, 1, false);
13303 srcmem = change_address (srcmem, QImode, srcptr);
13304 destmem = change_address (destmem, QImode, destptr);
13305 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13306 ix86_adjust_counter (count, 1);
13307 emit_label (label);
13308 LABEL_NUSES (label) = 1;
13310 if (align <= 2 && desired_alignment > 2)
13312 rtx label = ix86_expand_aligntest (destptr, 2, false);
13313 srcmem = change_address (srcmem, HImode, srcptr);
13314 destmem = change_address (destmem, HImode, destptr);
13315 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13316 ix86_adjust_counter (count, 2);
13317 emit_label (label);
13318 LABEL_NUSES (label) = 1;
13320 if (align <= 4 && desired_alignment > 4)
13322 rtx label = ix86_expand_aligntest (destptr, 4, false);
13323 srcmem = change_address (srcmem, SImode, srcptr);
13324 destmem = change_address (destmem, SImode, destptr);
13325 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
13326 ix86_adjust_counter (count, 4);
13327 emit_label (label);
13328 LABEL_NUSES (label) = 1;
13330 gcc_assert (desired_alignment <= 8);
13333 /* Set enough bytes of DEST to align DEST, known to be aligned to ALIGN,
13334 up to DESIRED_ALIGNMENT. */
13335 static void
13336 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
13337 int align, int desired_alignment)
13339 if (align <= 1 && desired_alignment > 1)
13341 rtx label = ix86_expand_aligntest (destptr, 1, false);
13342 destmem = change_address (destmem, QImode, destptr);
13343 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
13344 ix86_adjust_counter (count, 1);
13345 emit_label (label);
13346 LABEL_NUSES (label) = 1;
13348 if (align <= 2 && desired_alignment > 2)
13350 rtx label = ix86_expand_aligntest (destptr, 2, false);
13351 destmem = change_address (destmem, HImode, destptr);
13352 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
13353 ix86_adjust_counter (count, 2);
13354 emit_label (label);
13355 LABEL_NUSES (label) = 1;
13357 if (align <= 4 && desired_alignment > 4)
13359 rtx label = ix86_expand_aligntest (destptr, 4, false);
13360 destmem = change_address (destmem, SImode, destptr);
13361 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
13362 ix86_adjust_counter (count, 4);
13363 emit_label (label);
13364 LABEL_NUSES (label) = 1;
13366 gcc_assert (desired_alignment <= 8);
13369 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
13370 static enum stringop_alg
13371 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
13372 int *dynamic_check)
13374 const struct stringop_algs * algs;
13376 *dynamic_check = -1;
13377 if (memset)
13378 algs = &ix86_cost->memset[TARGET_64BIT != 0];
13379 else
13380 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
13381 if (stringop_alg != no_stringop)
13382 return stringop_alg;
13383 /* rep; movq or rep; movl is the smallest variant. */
13384 else if (optimize_size)
13386 if (!count || (count & 3))
13387 return rep_prefix_1_byte;
13388 else
13389 return rep_prefix_4_byte;
13391 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
13393 else if (expected_size != -1 && expected_size < 4)
13394 return loop_1_byte;
13395 else if (expected_size != -1)
13397 unsigned int i;
13398 enum stringop_alg alg = libcall;
13399 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13401 gcc_assert (algs->size[i].max);
13402 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
13404 if (algs->size[i].alg != libcall)
13405 alg = algs->size[i].alg;
13406 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
13407 last non-libcall inline algorithm. */
13408 if (TARGET_INLINE_ALL_STRINGOPS)
13410 /* When the current size is best copied by a libcall,
13411 but we are still forced to inline, run the heuristic below
13412 that will pick code for medium-sized blocks. */
13413 if (alg != libcall)
13414 return alg;
13415 break;
13417 else
13418 return algs->size[i].alg;
13421 gcc_assert (TARGET_INLINE_ALL_STRINGOPS);
13423 /* When asked to inline the call anyway, try to pick a meaningful choice.
13424 We look for the maximal size of block that is faster to copy by hand and
13425 take blocks of at most that size, guessing that the average size will
13426 be roughly half of the block.
13428 If this turns out to be bad, we might simply specify the preferred
13429 choice in ix86_costs. */
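/* Worked example with a hypothetical cost table (not taken from i386.h):
   if the stringop_algs entries are {{256, loop}, {8192, rep_prefix_4_byte},
   {-1, libcall}}, the scan below leaves max == 8192, so decide_alg is
   re-run with an expected size of 4096, which then selects
   rep_prefix_4_byte. */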
13430 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13431 && algs->unknown_size == libcall)
13433 int max = -1;
13434 enum stringop_alg alg;
13435 int i;
13437 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
13438 if (algs->size[i].alg != libcall && algs->size[i].alg)
13439 max = algs->size[i].max;
13440 if (max == -1)
13441 max = 4096;
13442 alg = decide_alg (count, max / 2, memset, dynamic_check);
13443 gcc_assert (*dynamic_check == -1);
13444 gcc_assert (alg != libcall);
13445 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
13446 *dynamic_check = max;
13447 return alg;
13449 return algs->unknown_size;
13452 /* Decide on alignment. We know that the operand is already aligned to ALIGN
13453 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
13454 static int
13455 decide_alignment (int align,
13456 enum stringop_alg alg,
13457 int expected_size)
13459 int desired_align = 0;
13460 switch (alg)
13462 case no_stringop:
13463 gcc_unreachable ();
13464 case loop:
13465 case unrolled_loop:
13466 desired_align = GET_MODE_SIZE (Pmode);
13467 break;
13468 case rep_prefix_8_byte:
13469 desired_align = 8;
13470 break;
13471 case rep_prefix_4_byte:
13472 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
13473 copying a whole cache line at once. */
13474 if (TARGET_PENTIUMPRO)
13475 desired_align = 8;
13476 else
13477 desired_align = 4;
13478 break;
13479 case rep_prefix_1_byte:
13480 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
13481 copying a whole cache line at once. */
13482 if (TARGET_PENTIUMPRO)
13483 desired_align = 8;
13484 else
13485 desired_align = 1;
13486 break;
13487 case loop_1_byte:
13488 desired_align = 1;
13489 break;
13490 case libcall:
13491 return 0;
13494 if (optimize_size)
13495 desired_align = 1;
13496 if (desired_align < align)
13497 desired_align = align;
13498 if (expected_size != -1 && expected_size < 4)
13499 desired_align = align;
13500 return desired_align;
13503 /* Return the smallest power of 2 greater than VAL. */
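/* E.g. smallest_pow2_greater_than (0) == 1, smallest_pow2_greater_than (4) == 8
   and smallest_pow2_greater_than (7) == 8; the result is always strictly
   greater than VAL. */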
13504 static int
13505 smallest_pow2_greater_than (int val)
13507 int ret = 1;
13508 while (ret <= val)
13509 ret <<= 1;
13510 return ret;
13513 /* Expand string move (memcpy) operation. Use i386 string operations when
13514 profitable. ix86_expand_setmem contains similar code. The code depends upon
13515 architecture, block size and alignment, but always has the same
13516 overall structure:
13518 1) Prologue guard: Conditional that jumps up to the epilogue for small
13519 blocks that can be handled by the epilogue alone. This is faster but
13520 also needed for correctness, since the prologue assumes the block is larger
13521 than the desired alignment.
13523 An optional dynamic check for size and a libcall for large
13524 blocks is emitted here too, with -minline-stringops-dynamically.
13526 2) Prologue: copy the first few bytes in order to get the destination aligned
13527 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
13528 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
13529 We emit either a jump tree (for power-of-two sized blocks) or a byte loop.
13531 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
13532 with the specified algorithm.
13534 4) Epilogue: code copying the tail of the block that is too small to be
13535 handled by the main body (or up to the size guarded by the prologue guard). */
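/* Purely illustrative sketch (plain C, not compiled as part of GCC): for a
   rep_prefix_4_byte copy with a desired alignment of 4, the emitted code
   corresponds roughly to the function below. rep_movsl is a hypothetical
   stand-in for the "rep movsl" string instruction, not a real helper.

     void
     inline_memcpy (unsigned char *dst, const unsigned char *src, size_t n)
     {
       if (n < 4)                    /* 1) prologue guard */
         goto epilogue;
       while ((uintptr_t) dst & 3)   /* 2) alignment prologue */
         { *dst++ = *src++; n--; }
       rep_movsl (dst, src, n / 4);  /* 3) main body, 4-byte chunks */
       dst += n & ~(size_t) 3;
       src += n & ~(size_t) 3;
       n &= 3;
     epilogue:                       /* 4) epilogue copies the tail */
       while (n--)
         *dst++ = *src++;
     }
*/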
13538 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
13539 rtx expected_align_exp, rtx expected_size_exp)
13541 rtx destreg;
13542 rtx srcreg;
13543 rtx label = NULL;
13544 rtx tmp;
13545 rtx jump_around_label = NULL;
13546 HOST_WIDE_INT align = 1;
13547 unsigned HOST_WIDE_INT count = 0;
13548 HOST_WIDE_INT expected_size = -1;
13549 int size_needed = 0, epilogue_size_needed;
13550 int desired_align = 0;
13551 enum stringop_alg alg;
13552 int dynamic_check;
13554 if (GET_CODE (align_exp) == CONST_INT)
13555 align = INTVAL (align_exp);
13556 /* i386 can do misaligned access at reasonably increased cost. */
13557 if (GET_CODE (expected_align_exp) == CONST_INT
13558 && INTVAL (expected_align_exp) > align)
13559 align = INTVAL (expected_align_exp);
13560 if (GET_CODE (count_exp) == CONST_INT)
13561 count = expected_size = INTVAL (count_exp);
13562 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13563 expected_size = INTVAL (expected_size_exp);
13565 /* Step 0: Decide on preferred algorithm, desired alignment and
13566 size of chunks to be copied by main loop. */
13568 alg = decide_alg (count, expected_size, false, &dynamic_check);
13569 desired_align = decide_alignment (align, alg, expected_size);
13571 if (!TARGET_ALIGN_STRINGOPS)
13572 align = desired_align;
13574 if (alg == libcall)
13575 return 0;
13576 gcc_assert (alg != no_stringop);
13577 if (!count)
13578 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13579 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13580 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
13581 switch (alg)
13583 case libcall:
13584 case no_stringop:
13585 gcc_unreachable ();
13586 case loop:
13587 size_needed = GET_MODE_SIZE (Pmode);
13588 break;
13589 case unrolled_loop:
13590 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
13591 break;
13592 case rep_prefix_8_byte:
13593 size_needed = 8;
13594 break;
13595 case rep_prefix_4_byte:
13596 size_needed = 4;
13597 break;
13598 case rep_prefix_1_byte:
13599 case loop_1_byte:
13600 size_needed = 1;
13601 break;
13604 epilogue_size_needed = size_needed;
13606 /* Step 1: Prologue guard. */
13608 /* Alignment code needs count to be in register. */
13609 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13611 enum machine_mode mode = SImode;
13612 if (TARGET_64BIT && (count & ~0xffffffff))
13613 mode = DImode;
13614 count_exp = force_reg (mode, count_exp);
13616 gcc_assert (desired_align >= 1 && align >= 1);
13618 /* Ensure that alignment prologue won't copy past end of block. */
13619 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13620 && !count)
13622 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13624 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
13625 Make sure it is a power of 2. */
13626 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13628 label = gen_label_rtx ();
13629 emit_cmp_and_jump_insns (count_exp,
13630 GEN_INT (epilogue_size_needed),
13631 LTU, 0, GET_MODE (count_exp), 1, label);
13632 if (expected_size == -1 || expected_size < epilogue_size_needed)
13633 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13634 else
13635 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13637 /* Emit code to decide at runtime whether a library call or inline code
13638 should be used. */
13639 if (dynamic_check != -1)
13641 rtx hot_label = gen_label_rtx ();
13642 jump_around_label = gen_label_rtx ();
13643 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13644 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13645 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13646 emit_block_move_via_libcall (dst, src, count_exp, false);
13647 emit_jump (jump_around_label);
13648 emit_label (hot_label);
13651 /* Step 2: Alignment prologue. */
13653 if (desired_align > align)
13655 /* Except for the first move in the epilogue, we no longer know
13656 the constant offset in aliasing info. It does not seem worth
13657 the pain to maintain it for the first move, so throw away
13658 the info early. */
13659 src = change_address (src, BLKmode, srcreg);
13660 dst = change_address (dst, BLKmode, destreg);
13661 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
13662 desired_align);
13664 if (label && size_needed == 1)
13666 emit_label (label);
13667 LABEL_NUSES (label) = 1;
13668 label = NULL;
13671 /* Step 3: Main loop. */
13673 switch (alg)
13675 case libcall:
13676 case no_stringop:
13677 gcc_unreachable ();
13678 case loop_1_byte:
13679 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13680 count_exp, QImode, 1, expected_size);
13681 break;
13682 case loop:
13683 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13684 count_exp, Pmode, 1, expected_size);
13685 break;
13686 case unrolled_loop:
13687 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
13688 registers for 4 temporaries anyway. */
13689 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
13690 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
13691 expected_size);
13692 break;
13693 case rep_prefix_8_byte:
13694 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13695 DImode);
13696 break;
13697 case rep_prefix_4_byte:
13698 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13699 SImode);
13700 break;
13701 case rep_prefix_1_byte:
13702 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
13703 QImode);
13704 break;
13706 /* Properly adjust the offset of the src and dest memory for aliasing. */
13707 if (GET_CODE (count_exp) == CONST_INT)
13709 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
13710 (count / size_needed) * size_needed);
13711 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
13712 (count / size_needed) * size_needed);
13714 else
13716 src = change_address (src, BLKmode, srcreg);
13717 dst = change_address (dst, BLKmode, destreg);
13720 /* Step 4: Epilogue to copy the remaining bytes. */
13722 if (label)
13724 /* When the main loop is done, COUNT_EXP might hold the original count,
13725 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
13726 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
13727 bytes. Compensate if needed. */
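/* Worked example (illustrative only): with SIZE_NEEDED == 4 and
   EPILOGUE_SIZE_NEEDED == 8, a count of 23 leaves 23 & 3 == 3 tail bytes
   after the main loop, so COUNT_EXP is masked down to 3 here; without
   this masking the epilogue would look at 23 & 7 == 7 and copy too much. */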
13729 if (size_needed < epilogue_size_needed)
13731 tmp =
13732 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
13733 GEN_INT (size_needed - 1), count_exp, 1,
13734 OPTAB_DIRECT);
13735 if (tmp != count_exp)
13736 emit_move_insn (count_exp, tmp);
13738 emit_label (label);
13739 LABEL_NUSES (label) = 1;
13742 if (count_exp != const0_rtx && epilogue_size_needed > 1)
13743 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
13744 epilogue_size_needed);
13745 if (jump_around_label)
13746 emit_label (jump_around_label);
13747 return 1;
13750 /* Helper function for memset. For QImode value 0xXY produce
13751 0xXYXYXYXY of the width specified by MODE. This is essentially
13752 a * 0x01010101, but we can do slightly better than
13753 synth_mult by unwinding the sequence by hand on CPUs with
13754 slow multiply. */
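/* Illustrative standalone C (not part of the compiler) for the promotion
   performed below:

     uint32_t
     dup_byte_si (uint8_t b)
     {
       uint32_t v = b;
       v |= v << 8;     /* 0x000000XY -> 0x0000XYXY */
       v |= v << 16;    /* 0x0000XYXY -> 0xXYXYXYXY */
       return v;        /* equals b * 0x01010101 */
     }

   with one extra "v |= v << 32" step (on a 64-bit type) for DImode. */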
13755 static rtx
13756 promote_duplicated_reg (enum machine_mode mode, rtx val)
13758 enum machine_mode valmode = GET_MODE (val);
13759 rtx tmp;
13760 int nops = mode == DImode ? 3 : 2;
13762 gcc_assert (mode == SImode || mode == DImode);
13763 if (val == const0_rtx)
13764 return copy_to_mode_reg (mode, const0_rtx);
13765 if (GET_CODE (val) == CONST_INT)
13767 HOST_WIDE_INT v = INTVAL (val) & 255;
13769 v |= v << 8;
13770 v |= v << 16;
13771 if (mode == DImode)
13772 v |= (v << 16) << 16;
13773 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
13776 if (valmode == VOIDmode)
13777 valmode = QImode;
13778 if (valmode != QImode)
13779 val = gen_lowpart (QImode, val);
13780 if (mode == QImode)
13781 return val;
13782 if (!TARGET_PARTIAL_REG_STALL)
13783 nops--;
13784 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
13785 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
13786 <= (ix86_cost->shift_const + ix86_cost->add) * nops
13787 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
13789 rtx reg = convert_modes (mode, QImode, val, true);
13790 tmp = promote_duplicated_reg (mode, const1_rtx);
13791 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
13792 OPTAB_DIRECT);
13794 else
13796 rtx reg = convert_modes (mode, QImode, val, true);
13798 if (!TARGET_PARTIAL_REG_STALL)
13799 if (mode == SImode)
13800 emit_insn (gen_movsi_insv_1 (reg, reg));
13801 else
13802 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
13803 else
13805 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
13806 NULL, 1, OPTAB_DIRECT);
13807 reg =
13808 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13810 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
13811 NULL, 1, OPTAB_DIRECT);
13812 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13813 if (mode == SImode)
13814 return reg;
13815 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
13816 NULL, 1, OPTAB_DIRECT);
13817 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
13818 return reg;
13822 /* Duplicate value VAL using promote_duplicated_reg into the maximal size
13823 that will be needed by the main loop copying SIZE_NEEDED chunks and by the
13824 prologue raising alignment from ALIGN to DESIRED_ALIGN. */
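/* E.g. on 64-bit targets SIZE_NEEDED == 8 promotes to DImode, while
   SIZE_NEEDED == 4 with DESIRED_ALIGN == ALIGN promotes only to SImode. */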
13825 static rtx
13826 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
13828 rtx promoted_val;
13830 if (TARGET_64BIT
13831 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
13832 promoted_val = promote_duplicated_reg (DImode, val);
13833 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
13834 promoted_val = promote_duplicated_reg (SImode, val);
13835 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
13836 promoted_val = promote_duplicated_reg (HImode, val);
13837 else
13838 promoted_val = val;
13840 return promoted_val;
13843 /* Expand string set operation (memset). Use i386 string operations when
13844 profitable. See the ix86_expand_movmem comment for an explanation of the
13845 individual steps performed. */
13847 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
13848 rtx expected_align_exp, rtx expected_size_exp)
13850 rtx destreg;
13851 rtx label = NULL;
13852 rtx tmp;
13853 rtx jump_around_label = NULL;
13854 HOST_WIDE_INT align = 1;
13855 unsigned HOST_WIDE_INT count = 0;
13856 HOST_WIDE_INT expected_size = -1;
13857 int size_needed = 0, epilogue_size_needed;
13858 int desired_align = 0;
13859 enum stringop_alg alg;
13860 rtx promoted_val = NULL;
13861 bool force_loopy_epilogue = false;
13862 int dynamic_check;
13864 if (GET_CODE (align_exp) == CONST_INT)
13865 align = INTVAL (align_exp);
13866 /* i386 can do misaligned access at reasonably increased cost. */
13867 if (GET_CODE (expected_align_exp) == CONST_INT
13868 && INTVAL (expected_align_exp) > align)
13869 align = INTVAL (expected_align_exp);
13870 if (GET_CODE (count_exp) == CONST_INT)
13871 count = expected_size = INTVAL (count_exp);
13872 if (GET_CODE (expected_size_exp) == CONST_INT && count == 0)
13873 expected_size = INTVAL (expected_size_exp);
13875 /* Step 0: Decide on preferred algorithm, desired alignment and
13876 size of chunks to be copied by main loop. */
13878 alg = decide_alg (count, expected_size, true, &dynamic_check);
13879 desired_align = decide_alignment (align, alg, expected_size);
13881 if (!TARGET_ALIGN_STRINGOPS)
13882 align = desired_align;
13884 if (alg == libcall)
13885 return 0;
13886 gcc_assert (alg != no_stringop);
13887 if (!count)
13888 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
13889 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
13890 switch (alg)
13892 case libcall:
13893 case no_stringop:
13894 gcc_unreachable ();
13895 case loop:
13896 size_needed = GET_MODE_SIZE (Pmode);
13897 break;
13898 case unrolled_loop:
13899 size_needed = GET_MODE_SIZE (Pmode) * 4;
13900 break;
13901 case rep_prefix_8_byte:
13902 size_needed = 8;
13903 break;
13904 case rep_prefix_4_byte:
13905 size_needed = 4;
13906 break;
13907 case rep_prefix_1_byte:
13908 case loop_1_byte:
13909 size_needed = 1;
13910 break;
13912 epilogue_size_needed = size_needed;
13914 /* Step 1: Prologue guard. */
13916 /* Alignment code needs count to be in register. */
13917 if (GET_CODE (count_exp) == CONST_INT && desired_align > align)
13919 enum machine_mode mode = SImode;
13920 if (TARGET_64BIT && (count & ~0xffffffff))
13921 mode = DImode;
13922 count_exp = force_reg (mode, count_exp);
13924 /* Do the cheap promotion to allow better CSE across the
13925 main loop and epilogue (i.e. one load of the big constant in
13926 front of all the code). */
13927 if (GET_CODE (val_exp) == CONST_INT)
13928 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13929 desired_align, align);
13930 /* Ensure that alignment prologue won't copy past end of block. */
13931 if ((size_needed > 1 || (desired_align > 1 && desired_align > align))
13932 && !count)
13934 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
13936 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
13937 Make sure it is a power of 2. */
13938 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
13940 /* To improve performance of small blocks, we jump around the VAL
13941 promoting code. This means that if the promoted VAL is not constant,
13942 we might not use it in the epilogue and have to use the byte
13943 loop variant. */
13944 if (epilogue_size_needed > 2 && !promoted_val)
13945 force_loopy_epilogue = true;
13946 label = gen_label_rtx ();
13947 emit_cmp_and_jump_insns (count_exp,
13948 GEN_INT (epilogue_size_needed),
13949 LTU, 0, GET_MODE (count_exp), 1, label);
13950 if (expected_size == -1 || expected_size <= epilogue_size_needed)
13951 predict_jump (REG_BR_PROB_BASE * 60 / 100);
13952 else
13953 predict_jump (REG_BR_PROB_BASE * 20 / 100);
13955 if (dynamic_check != -1)
13957 rtx hot_label = gen_label_rtx ();
13958 jump_around_label = gen_label_rtx ();
13959 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
13960 LEU, 0, GET_MODE (count_exp), 1, hot_label);
13961 predict_jump (REG_BR_PROB_BASE * 90 / 100);
13962 set_storage_via_libcall (dst, count_exp, val_exp, false);
13963 emit_jump (jump_around_label);
13964 emit_label (hot_label);
13967 /* Step 2: Alignment prologue. */
13969 /* Do the expensive promotion once we branched off the small blocks. */
13970 if (!promoted_val)
13971 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
13972 desired_align, align);
13973 gcc_assert (desired_align >= 1 && align >= 1);
13975 if (desired_align > align)
13977 /* Except for the first move in the epilogue, we no longer know
13978 the constant offset in aliasing info. It does not seem worth
13979 the pain to maintain it for the first move, so throw away
13980 the info early. */
13981 dst = change_address (dst, BLKmode, destreg);
13982 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
13983 desired_align);
13985 if (label && size_needed == 1)
13987 emit_label (label);
13988 LABEL_NUSES (label) = 1;
13989 label = NULL;
13992 /* Step 3: Main loop. */
13994 switch (alg)
13996 case libcall:
13997 case no_stringop:
13998 gcc_unreachable ();
13999 case loop_1_byte:
14000 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14001 count_exp, QImode, 1, expected_size);
14002 break;
14003 case loop:
14004 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14005 count_exp, Pmode, 1, expected_size);
14006 break;
14007 case unrolled_loop:
14008 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
14009 count_exp, Pmode, 4, expected_size);
14010 break;
14011 case rep_prefix_8_byte:
14012 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14013 DImode);
14014 break;
14015 case rep_prefix_4_byte:
14016 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14017 SImode);
14018 break;
14019 case rep_prefix_1_byte:
14020 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
14021 QImode);
14022 break;
14024 /* Properly adjust the offset of the dest memory for aliasing. */
14025 if (GET_CODE (count_exp) == CONST_INT)
14026 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
14027 (count / size_needed) * size_needed);
14028 else
14029 dst = change_address (dst, BLKmode, destreg);
14031 /* Step 4: Epilogue to copy the remaining bytes. */
14033 if (label)
14035 /* When the main loop is done, COUNT_EXP might hold the original count,
14036 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
14037 Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
14038 bytes. Compensate if needed. */
14040 if (size_needed < desired_align - align)
14042 tmp =
14043 expand_simple_binop (GET_MODE (count_exp), AND, count_exp,
14044 GEN_INT (size_needed - 1), count_exp, 1,
14045 OPTAB_DIRECT);
14046 size_needed = desired_align - align + 1;
14047 if (tmp != count_exp)
14048 emit_move_insn (count_exp, tmp);
14050 emit_label (label);
14051 LABEL_NUSES (label) = 1;
14053 if (count_exp != const0_rtx && epilogue_size_needed > 1)
14055 if (force_loopy_epilogue)
14056 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
14057 size_needed);
14058 else
14059 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
14060 size_needed);
14062 if (jump_around_label)
14063 emit_label (jump_around_label);
14064 return 1;
14067 /* Expand strlen. */
14069 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
14071 rtx addr, scratch1, scratch2, scratch3, scratch4;
14073 /* The generic case of the strlen expander is long. Avoid its
14074 expansion unless TARGET_INLINE_ALL_STRINGOPS. */
14076 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14077 && !TARGET_INLINE_ALL_STRINGOPS
14078 && !optimize_size
14079 && (GET_CODE (align) != CONST_INT || INTVAL (align) < 4))
14080 return 0;
14082 addr = force_reg (Pmode, XEXP (src, 0));
14083 scratch1 = gen_reg_rtx (Pmode);
14085 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
14086 && !optimize_size)
14088 /* Well, it seems that some optimizer does not combine a call like
14089 foo(strlen(bar), strlen(bar));
14090 when the move and the subtraction are done here. It does calculate
14091 the length just once when these instructions are done inside of
14092 output_strlen_unroll(). But I think that since &bar[strlen(bar)] is
14093 often used and I use one fewer register for the lifetime of
14094 output_strlen_unroll(), this is better. */
14096 emit_move_insn (out, addr);
14098 ix86_expand_strlensi_unroll_1 (out, src, align);
14100 /* strlensi_unroll_1 returns the address of the zero at the end of
14101 the string, like memchr(), so compute the length by subtracting
14102 the start address. */
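/* Illustration (plain C, not emitted code): this is the usual end-pointer
   idiom, e.g.

     size_t len = (size_t) (strchr (s, '\0') - s);

   for a hypothetical NUL-terminated string s. */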
14103 if (TARGET_64BIT)
14104 emit_insn (gen_subdi3 (out, out, addr));
14105 else
14106 emit_insn (gen_subsi3 (out, out, addr));
14108 else
14110 rtx unspec;
14111 scratch2 = gen_reg_rtx (Pmode);
14112 scratch3 = gen_reg_rtx (Pmode);
14113 scratch4 = force_reg (Pmode, constm1_rtx);
14115 emit_move_insn (scratch3, addr);
14116 eoschar = force_reg (QImode, eoschar);
14118 src = replace_equiv_address_nv (src, scratch3);
14120 /* If .md starts supporting :P, this can be done in .md. */
14121 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
14122 scratch4), UNSPEC_SCAS);
14123 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
14124 if (TARGET_64BIT)
14126 emit_insn (gen_one_cmpldi2 (scratch2, scratch1));
14127 emit_insn (gen_adddi3 (out, scratch2, constm1_rtx));
14129 else
14131 emit_insn (gen_one_cmplsi2 (scratch2, scratch1));
14132 emit_insn (gen_addsi3 (out, scratch2, constm1_rtx));
14135 return 1;
14138 /* Expand the appropriate insns for doing strlen if not just doing
14139 repnz; scasb
14141 out = result, initialized with the start address
14142 align_rtx = alignment of the address.
14143 scratch = scratch register, initialized with the start address when
14144 not aligned, otherwise undefined
14146 This is just the body. It needs the initializations mentioned above and
14147 some address computing at the end. These things are done in i386.md. */
14149 static void
14150 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
14152 int align;
14153 rtx tmp;
14154 rtx align_2_label = NULL_RTX;
14155 rtx align_3_label = NULL_RTX;
14156 rtx align_4_label = gen_label_rtx ();
14157 rtx end_0_label = gen_label_rtx ();
14158 rtx mem;
14159 rtx tmpreg = gen_reg_rtx (SImode);
14160 rtx scratch = gen_reg_rtx (SImode);
14161 rtx cmp;
14163 align = 0;
14164 if (GET_CODE (align_rtx) == CONST_INT)
14165 align = INTVAL (align_rtx);
14167 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
14169 /* Is there a known alignment and is it less than 4? */
14170 if (align < 4)
14172 rtx scratch1 = gen_reg_rtx (Pmode);
14173 emit_move_insn (scratch1, out);
14174 /* Is there a known alignment and is it not 2? */
14175 if (align != 2)
14177 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
14178 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
14180 /* Leave just the two lower bits. */
14181 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
14182 NULL_RTX, 0, OPTAB_WIDEN);
14184 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14185 Pmode, 1, align_4_label);
14186 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
14187 Pmode, 1, align_2_label);
14188 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
14189 Pmode, 1, align_3_label);
14191 else
14193 /* Since the alignment is 2, we have to check 2 or 0 bytes;
14194 check whether it is aligned to a 4-byte boundary. */
14196 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
14197 NULL_RTX, 0, OPTAB_WIDEN);
14199 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
14200 Pmode, 1, align_4_label);
14203 mem = change_address (src, QImode, out);
14205 /* Now compare the bytes. */
14207 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
14208 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
14209 QImode, 1, end_0_label);
14211 /* Increment the address. */
14212 if (TARGET_64BIT)
14213 emit_insn (gen_adddi3 (out, out, const1_rtx));
14214 else
14215 emit_insn (gen_addsi3 (out, out, const1_rtx));
14217 /* Not needed with an alignment of 2 */
14218 if (align != 2)
14220 emit_label (align_2_label);
14222 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14223 end_0_label);
14225 if (TARGET_64BIT)
14226 emit_insn (gen_adddi3 (out, out, const1_rtx));
14227 else
14228 emit_insn (gen_addsi3 (out, out, const1_rtx));
14230 emit_label (align_3_label);
14233 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
14234 end_0_label);
14236 if (TARGET_64BIT)
14237 emit_insn (gen_adddi3 (out, out, const1_rtx));
14238 else
14239 emit_insn (gen_addsi3 (out, out, const1_rtx));
14242 /* Generate a loop to check 4 bytes at a time. It is not a good idea to
14243 align this loop; it only makes programs bigger and does not help
14244 them run faster. */
14245 emit_label (align_4_label);
14247 mem = change_address (src, SImode, out);
14248 emit_move_insn (scratch, mem);
14249 if (TARGET_64BIT)
14250 emit_insn (gen_adddi3 (out, out, GEN_INT (4)));
14251 else
14252 emit_insn (gen_addsi3 (out, out, GEN_INT (4)));
14254 /* This formula yields a nonzero result iff one of the bytes is zero.
14255 This saves three branches inside the loop and many cycles. */
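/* Illustrative standalone C (not part of the compiler) for the bit trick
   emitted below, assuming a 32-bit unsigned int:

     int
     has_zero_byte (unsigned int x)
     {
       return ((x - 0x01010101U) & ~x & 0x80808080U) != 0;
     }

   Subtracting 0x01010101 sets bit 7 of every byte that was zero (via the
   borrow), and masking with ~x rejects bytes whose top bit was already
   set in X itself. */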
14257 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
14258 emit_insn (gen_one_cmplsi2 (scratch, scratch));
14259 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
14260 emit_insn (gen_andsi3 (tmpreg, tmpreg,
14261 gen_int_mode (0x80808080, SImode)));
14262 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
14263 align_4_label);
14265 if (TARGET_CMOVE)
14267 rtx reg = gen_reg_rtx (SImode);
14268 rtx reg2 = gen_reg_rtx (Pmode);
14269 emit_move_insn (reg, tmpreg);
14270 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
14272 /* If zero is not in the first two bytes, move two bytes forward. */
14273 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14274 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14275 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14276 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
14277 gen_rtx_IF_THEN_ELSE (SImode, tmp,
14278 reg,
14279 tmpreg)));
14280 /* Emit the lea manually to avoid clobbering the flags. */
14281 emit_insn (gen_rtx_SET (SImode, reg2,
14282 gen_rtx_PLUS (Pmode, out, const2_rtx)));
14284 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14285 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
14286 emit_insn (gen_rtx_SET (VOIDmode, out,
14287 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
14288 reg2,
14289 out)));
14292 else
14294 rtx end_2_label = gen_label_rtx ();
14295 /* Is zero in the first two bytes? */
14297 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
14298 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
14299 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
14300 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14301 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
14302 pc_rtx);
14303 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14304 JUMP_LABEL (tmp) = end_2_label;
14306 /* Not in the first two. Move two bytes forward. */
14307 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
14308 if (TARGET_64BIT)
14309 emit_insn (gen_adddi3 (out, out, const2_rtx));
14310 else
14311 emit_insn (gen_addsi3 (out, out, const2_rtx));
14313 emit_label (end_2_label);
14317 /* Avoid branch in fixing the byte. */
14318 tmpreg = gen_lowpart (QImode, tmpreg);
14319 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
14320 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, 17), const0_rtx);
14321 if (TARGET_64BIT)
14322 emit_insn (gen_subdi3_carry_rex64 (out, out, GEN_INT (3), cmp));
14323 else
14324 emit_insn (gen_subsi3_carry (out, out, GEN_INT (3), cmp));
14326 emit_label (end_0_label);
14329 void
14330 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
14331 rtx callarg2 ATTRIBUTE_UNUSED,
14332 rtx pop, int sibcall)
14334 rtx use = NULL, call;
14336 if (pop == const0_rtx)
14337 pop = NULL;
14338 gcc_assert (!TARGET_64BIT || !pop);
14340 if (TARGET_MACHO && !TARGET_64BIT)
14342 #if TARGET_MACHO
14343 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
14344 fnaddr = machopic_indirect_call_target (fnaddr);
14345 #endif
14347 else
14349 /* Static functions and indirect calls don't need the pic register. */
14350 if (! TARGET_64BIT && flag_pic
14351 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
14352 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
14353 use_reg (&use, pic_offset_table_rtx);
14356 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
14358 rtx al = gen_rtx_REG (QImode, 0);
14359 emit_move_insn (al, callarg2);
14360 use_reg (&use, al);
14363 if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
14365 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14366 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14368 if (sibcall && TARGET_64BIT
14369 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
14371 rtx addr;
14372 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
14373 fnaddr = gen_rtx_REG (Pmode, R11_REG);
14374 emit_move_insn (fnaddr, addr);
14375 fnaddr = gen_rtx_MEM (QImode, fnaddr);
14378 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
14379 if (retval)
14380 call = gen_rtx_SET (VOIDmode, retval, call);
14381 if (pop)
14383 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
14384 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
14385 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
14388 call = emit_call_insn (call);
14389 if (use)
14390 CALL_INSN_FUNCTION_USAGE (call) = use;
14394 /* Clear stack slot assignments remembered from previous functions.
14395 This is called from INIT_EXPANDERS once before RTL is emitted for each
14396 function. */
14398 static struct machine_function *
14399 ix86_init_machine_status (void)
14401 struct machine_function *f;
14403 f = ggc_alloc_cleared (sizeof (struct machine_function));
14404 f->use_fast_prologue_epilogue_nregs = -1;
14405 f->tls_descriptor_call_expanded_p = 0;
14407 return f;
14410 /* Return a MEM corresponding to a stack slot with mode MODE.
14411 Allocate a new slot if necessary.
14413 The RTL for a function can have several slots available: N is
14414 which slot to use. */
14417 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
14419 struct stack_local_entry *s;
14421 gcc_assert (n < MAX_386_STACK_LOCALS);
14423 for (s = ix86_stack_locals; s; s = s->next)
14424 if (s->mode == mode && s->n == n)
14425 return copy_rtx (s->rtl);
14427 s = (struct stack_local_entry *)
14428 ggc_alloc (sizeof (struct stack_local_entry));
14429 s->n = n;
14430 s->mode = mode;
14431 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
14433 s->next = ix86_stack_locals;
14434 ix86_stack_locals = s;
14435 return s->rtl;
14438 /* Construct the SYMBOL_REF for the tls_get_addr function. */
14440 static GTY(()) rtx ix86_tls_symbol;
14442 ix86_tls_get_addr (void)
14445 if (!ix86_tls_symbol)
14447 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
14448 (TARGET_ANY_GNU_TLS
14449 && !TARGET_64BIT)
14450 ? "___tls_get_addr"
14451 : "__tls_get_addr");
14454 return ix86_tls_symbol;
14457 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
14459 static GTY(()) rtx ix86_tls_module_base_symbol;
14461 ix86_tls_module_base (void)
14464 if (!ix86_tls_module_base_symbol)
14466 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
14467 "_TLS_MODULE_BASE_");
14468 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
14469 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
14472 return ix86_tls_module_base_symbol;
14475 /* Calculate the length of the memory address in the instruction
14476 encoding. Does not include the one-byte modrm, opcode, or prefix. */
14479 memory_address_length (rtx addr)
14481 struct ix86_address parts;
14482 rtx base, index, disp;
14483 int len;
14484 int ok;
14486 if (GET_CODE (addr) == PRE_DEC
14487 || GET_CODE (addr) == POST_INC
14488 || GET_CODE (addr) == PRE_MODIFY
14489 || GET_CODE (addr) == POST_MODIFY)
14490 return 0;
14492 ok = ix86_decompose_address (addr, &parts);
14493 gcc_assert (ok);
14495 if (parts.base && GET_CODE (parts.base) == SUBREG)
14496 parts.base = SUBREG_REG (parts.base);
14497 if (parts.index && GET_CODE (parts.index) == SUBREG)
14498 parts.index = SUBREG_REG (parts.index);
14500 base = parts.base;
14501 index = parts.index;
14502 disp = parts.disp;
14503 len = 0;
14505 /* Rule of thumb:
14506 - esp as the base always wants an index,
14507 - ebp as the base always wants a displacement. */
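/* Some concrete examples (illustrative; 32-bit addressing, counting only
   the bytes beyond the one-byte modrm that this function accounts for):

     (%eax)         -> 0   plain register indirect
     (%esp)         -> 1   needs a SIB byte
     8(%ebp)        -> 1   ebp needs at least a disp8
     symbol         -> 4   absolute address, disp32
     (%eax,%ebx,4)  -> 1   an index forces a SIB byte
     16(%esp,%ecx)  -> 2   SIB byte + disp8 */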
14509 /* Register Indirect. */
14510 if (base && !index && !disp)
14512 /* esp (for its index) and ebp (for its displacement) need
14513 the two-byte modrm form. */
14514 if (addr == stack_pointer_rtx
14515 || addr == arg_pointer_rtx
14516 || addr == frame_pointer_rtx
14517 || addr == hard_frame_pointer_rtx)
14518 len = 1;
14521 /* Direct Addressing. */
14522 else if (disp && !base && !index)
14523 len = 4;
14525 else
14527 /* Find the length of the displacement constant. */
14528 if (disp)
14530 if (base && satisfies_constraint_K (disp))
14531 len = 1;
14532 else
14533 len = 4;
14535 /* ebp always wants a displacement. */
14536 else if (base == hard_frame_pointer_rtx)
14537 len = 1;
14539 /* An index requires the two-byte modrm form.... */
14540 if (index
14541 /* ...like esp, which always wants an index. */
14542 || base == stack_pointer_rtx
14543 || base == arg_pointer_rtx
14544 || base == frame_pointer_rtx)
14545 len += 1;
14548 return len;
14551 /* Compute the default value for the "length_immediate" attribute. When
14552 SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
14554 ix86_attr_length_immediate_default (rtx insn, int shortform)
14556 int len = 0;
14557 int i;
14558 extract_insn_cached (insn);
14559 for (i = recog_data.n_operands - 1; i >= 0; --i)
14560 if (CONSTANT_P (recog_data.operand[i]))
14562 gcc_assert (!len);
14563 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
14564 len = 1;
14565 else
14567 switch (get_attr_mode (insn))
14569 case MODE_QI:
14570 len+=1;
14571 break;
14572 case MODE_HI:
14573 len+=2;
14574 break;
14575 case MODE_SI:
14576 len+=4;
14577 break;
14578 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
14579 case MODE_DI:
14580 len+=4;
14581 break;
14582 default:
14583 fatal_insn ("unknown insn mode", insn);
14587 return len;
14589 /* Compute default value for "length_address" attribute. */
14591 ix86_attr_length_address_default (rtx insn)
14593 int i;
14595 if (get_attr_type (insn) == TYPE_LEA)
14597 rtx set = PATTERN (insn);
14599 if (GET_CODE (set) == PARALLEL)
14600 set = XVECEXP (set, 0, 0);
14602 gcc_assert (GET_CODE (set) == SET);
14604 return memory_address_length (SET_SRC (set));
14607 extract_insn_cached (insn);
14608 for (i = recog_data.n_operands - 1; i >= 0; --i)
14609 if (GET_CODE (recog_data.operand[i]) == MEM)
14611 return memory_address_length (XEXP (recog_data.operand[i], 0));
14612 break;
14614 return 0;
14617 /* Return the maximum number of instructions a cpu can issue. */
14619 static int
14620 ix86_issue_rate (void)
14622 switch (ix86_tune)
14624 case PROCESSOR_PENTIUM:
14625 case PROCESSOR_K6:
14626 return 2;
14628 case PROCESSOR_PENTIUMPRO:
14629 case PROCESSOR_PENTIUM4:
14630 case PROCESSOR_ATHLON:
14631 case PROCESSOR_K8:
14632 case PROCESSOR_NOCONA:
14633 case PROCESSOR_GENERIC32:
14634 case PROCESSOR_GENERIC64:
14635 return 3;
14637 case PROCESSOR_CORE2:
14638 return 4;
14640 default:
14641 return 1;
14645 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
14646 by DEP_INSN and nothing else set by DEP_INSN. */
14648 static int
14649 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14651 rtx set, set2;
14653 /* Simplify the test for uninteresting insns. */
14654 if (insn_type != TYPE_SETCC
14655 && insn_type != TYPE_ICMOV
14656 && insn_type != TYPE_FCMOV
14657 && insn_type != TYPE_IBR)
14658 return 0;
14660 if ((set = single_set (dep_insn)) != 0)
14662 set = SET_DEST (set);
14663 set2 = NULL_RTX;
14665 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
14666 && XVECLEN (PATTERN (dep_insn), 0) == 2
14667 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
14668 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
14670 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
14671 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
14673 else
14674 return 0;
14676 if (GET_CODE (set) != REG || REGNO (set) != FLAGS_REG)
14677 return 0;
14679 /* This test is true if the dependent insn reads the flags but
14680 not any other potentially set register. */
14681 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
14682 return 0;
14684 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
14685 return 0;
14687 return 1;
14690 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
14691 address with operands set by DEP_INSN. */
14693 static int
14694 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
14696 rtx addr;
14698 if (insn_type == TYPE_LEA
14699 && TARGET_PENTIUM)
14701 addr = PATTERN (insn);
14703 if (GET_CODE (addr) == PARALLEL)
14704 addr = XVECEXP (addr, 0, 0);
14706 gcc_assert (GET_CODE (addr) == SET);
14708 addr = SET_SRC (addr);
14710 else
14712 int i;
14713 extract_insn_cached (insn);
14714 for (i = recog_data.n_operands - 1; i >= 0; --i)
14715 if (GET_CODE (recog_data.operand[i]) == MEM)
14717 addr = XEXP (recog_data.operand[i], 0);
14718 goto found;
14720 return 0;
14721 found:;
14724 return modified_in_p (addr, dep_insn);
14727 static int
14728 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
14730 enum attr_type insn_type, dep_insn_type;
14731 enum attr_memory memory;
14732 rtx set, set2;
14733 int dep_insn_code_number;
14735 /* Anti and output dependencies have zero cost on all CPUs. */
14736 if (REG_NOTE_KIND (link) != 0)
14737 return 0;
14739 dep_insn_code_number = recog_memoized (dep_insn);
14741 /* If we can't recognize the insns, we can't really do anything. */
14742 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
14743 return cost;
14745 insn_type = get_attr_type (insn);
14746 dep_insn_type = get_attr_type (dep_insn);
14748 switch (ix86_tune)
14750 case PROCESSOR_PENTIUM:
14751 /* Address Generation Interlock adds a cycle of latency. */
14752 if (ix86_agi_dependent (insn, dep_insn, insn_type))
14753 cost += 1;
14755 /* ??? Compares pair with jump/setcc. */
14756 if (ix86_flags_dependent (insn, dep_insn, insn_type))
14757 cost = 0;
14759 /* Floating point stores require value to be ready one cycle earlier. */
14760 if (insn_type == TYPE_FMOV
14761 && get_attr_memory (insn) == MEMORY_STORE
14762 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14763 cost += 1;
14764 break;
14766 case PROCESSOR_PENTIUMPRO:
14767 memory = get_attr_memory (insn);
14769 /* INT->FP conversion is expensive. */
14770 if (get_attr_fp_int_src (dep_insn))
14771 cost += 5;
14773 /* There is one cycle extra latency between an FP op and a store. */
14774 if (insn_type == TYPE_FMOV
14775 && (set = single_set (dep_insn)) != NULL_RTX
14776 && (set2 = single_set (insn)) != NULL_RTX
14777 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
14778 && GET_CODE (SET_DEST (set2)) == MEM)
14779 cost += 1;
14781 /* Show the ability of the reorder buffer to hide the latency of a load by
14782 executing it in parallel with the previous instruction in case the
14783 previous instruction is not needed to compute the address. */
14784 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14785 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14787 /* Claim moves to take one cycle, as the core can issue one load
14788 at a time and the next load can start a cycle later. */
14789 if (dep_insn_type == TYPE_IMOV
14790 || dep_insn_type == TYPE_FMOV)
14791 cost = 1;
14792 else if (cost > 1)
14793 cost--;
14795 break;
14797 case PROCESSOR_K6:
14798 memory = get_attr_memory (insn);
14800 /* The esp dependency is resolved before the instruction is really
14801 finished. */
14802 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
14803 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
14804 return 1;
14806 /* INT->FP conversion is expensive. */
14807 if (get_attr_fp_int_src (dep_insn))
14808 cost += 5;
14810 /* Show the ability of the reorder buffer to hide the latency of a load by
14811 executing it in parallel with the previous instruction in case the
14812 previous instruction is not needed to compute the address. */
14813 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14814 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14816 /* Claim moves to take one cycle, as the core can issue one load
14817 at a time and the next load can start a cycle later. */
14818 if (dep_insn_type == TYPE_IMOV
14819 || dep_insn_type == TYPE_FMOV)
14820 cost = 1;
14821 else if (cost > 2)
14822 cost -= 2;
14823 else
14824 cost = 1;
14826 break;
14828 case PROCESSOR_ATHLON:
14829 case PROCESSOR_K8:
14830 case PROCESSOR_GENERIC32:
14831 case PROCESSOR_GENERIC64:
14832 memory = get_attr_memory (insn);
14834 /* Show the ability of the reorder buffer to hide the latency of a load by
14835 executing it in parallel with the previous instruction in case the
14836 previous instruction is not needed to compute the address. */
14837 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
14838 && !ix86_agi_dependent (insn, dep_insn, insn_type))
14840 enum attr_unit unit = get_attr_unit (insn);
14841 int loadcost = 3;
14843 /* Because of the difference between the lengths of the integer and
14844 floating unit pipeline preparation stages, the memory operands
14845 for floating point are cheaper.
14847 ??? For Athlon the difference is most probably 2. */
14848 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
14849 loadcost = 3;
14850 else
14851 loadcost = TARGET_ATHLON ? 2 : 0;
14853 if (cost >= loadcost)
14854 cost -= loadcost;
14855 else
14856 cost = 0;
14859 default:
14860 break;
14863 return cost;
14866 /* How many alternative schedules to try. This should be as wide as the
14867 scheduling freedom in the DFA, but no wider. Making this value too
14868 large results in extra work for the scheduler. */
14870 static int
14871 ia32_multipass_dfa_lookahead (void)
14873 if (ix86_tune == PROCESSOR_PENTIUM)
14874 return 2;
14876 if (ix86_tune == PROCESSOR_PENTIUMPRO
14877 || ix86_tune == PROCESSOR_K6)
14878 return 1;
14880 else
14881 return 0;
14885 /* Compute the alignment given to a constant that is being placed in memory.
14886 EXP is the constant and ALIGN is the alignment that the object would
14887 ordinarily have.
14888 The value of this function is used instead of that alignment to align
14889 the object. */
14892 ix86_constant_alignment (tree exp, int align)
14894 if (TREE_CODE (exp) == REAL_CST)
14896 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
14897 return 64;
14898 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
14899 return 128;
14901 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
14902 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
14903 return BITS_PER_WORD;
14905 return align;
14908 /* Compute the alignment for a static variable.
14909 TYPE is the data type, and ALIGN is the alignment that
14910 the object would ordinarily have. The value of this function is used
14911 instead of that alignment to align the object. */
14914 ix86_data_alignment (tree type, int align)
14916 int max_align = optimize_size ? BITS_PER_WORD : 256;
14918 if (AGGREGATE_TYPE_P (type)
14919 && TYPE_SIZE (type)
14920 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14921 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
14922 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
14923 && align < max_align)
14924 align = max_align;
14926 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
14927 to a 16-byte boundary. */
14928 if (TARGET_64BIT)
14930 if (AGGREGATE_TYPE_P (type)
14931 && TYPE_SIZE (type)
14932 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14933 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
14934 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14935 return 128;
14938 if (TREE_CODE (type) == ARRAY_TYPE)
14940 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14941 return 64;
14942 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14943 return 128;
14945 else if (TREE_CODE (type) == COMPLEX_TYPE)
14948 if (TYPE_MODE (type) == DCmode && align < 64)
14949 return 64;
14950 if (TYPE_MODE (type) == XCmode && align < 128)
14951 return 128;
14953 else if ((TREE_CODE (type) == RECORD_TYPE
14954 || TREE_CODE (type) == UNION_TYPE
14955 || TREE_CODE (type) == QUAL_UNION_TYPE)
14956 && TYPE_FIELDS (type))
14958 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
14959 return 64;
14960 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
14961 return 128;
14963 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
14964 || TREE_CODE (type) == INTEGER_TYPE)
14966 if (TYPE_MODE (type) == DFmode && align < 64)
14967 return 64;
14968 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
14969 return 128;
14972 return align;
14975 /* Compute the alignment for a local variable.
14976 TYPE is the data type, and ALIGN is the alignment that
14977 the object would ordinarily have. The value of this macro is used
14978 instead of that alignment to align the object. */
14981 ix86_local_alignment (tree type, int align)
14983 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
14984 to a 16-byte boundary. */
14985 if (TARGET_64BIT)
14987 if (AGGREGATE_TYPE_P (type)
14988 && TYPE_SIZE (type)
14989 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
14990 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
14991 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
14992 return 128;
14994 if (TREE_CODE (type) == ARRAY_TYPE)
14996 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
14997 return 64;
14998 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
14999 return 128;
15001 else if (TREE_CODE (type) == COMPLEX_TYPE)
15003 if (TYPE_MODE (type) == DCmode && align < 64)
15004 return 64;
15005 if (TYPE_MODE (type) == XCmode && align < 128)
15006 return 128;
15008 else if ((TREE_CODE (type) == RECORD_TYPE
15009 || TREE_CODE (type) == UNION_TYPE
15010 || TREE_CODE (type) == QUAL_UNION_TYPE)
15011 && TYPE_FIELDS (type))
15013 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
15014 return 64;
15015 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
15016 return 128;
15018 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
15019 || TREE_CODE (type) == INTEGER_TYPE)
15022 if (TYPE_MODE (type) == DFmode && align < 64)
15023 return 64;
15024 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
15025 return 128;
15027 return align;
15030 /* Emit RTL insns to initialize the variable parts of a trampoline.
15031 FNADDR is an RTX for the address of the function's pure code.
15032 CXT is an RTX for the static chain value for the function. */
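/* For reference, the byte layouts emitted below (immediates little-endian):

   32-bit trampoline, 10 bytes:
     b9 <cxt:4>          movl   $cxt, %ecx
     e9 <disp:4>         jmp    fnaddr          (relative displacement)

   64-bit trampoline, 19 or 23 bytes:
     41 bb <fnaddr:4>    movl   $fnaddr, %r11d  (when fnaddr fits in 32 bits)
       or
     49 bb <fnaddr:8>    movabs $fnaddr, %r11
     49 ba <cxt:8>       movabs $cxt, %r10
     49 ff e3            jmp    *%r11 */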
15033 void
15034 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
15036 if (!TARGET_64BIT)
15038 /* Compute offset from the end of the jmp to the target function. */
15039 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
15040 plus_constant (tramp, 10),
15041 NULL_RTX, 1, OPTAB_DIRECT);
15042 emit_move_insn (gen_rtx_MEM (QImode, tramp),
15043 gen_int_mode (0xb9, QImode));
15044 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
15045 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
15046 gen_int_mode (0xe9, QImode));
15047 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
15049 else
15051 int offset = 0;
15052 /* Try to load the address using the shorter movl instead of movabs.
15053 We may want to support movq for kernel mode, but the kernel does not use
15054 trampolines at the moment. */
15055 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
15057 fnaddr = copy_to_mode_reg (DImode, fnaddr);
15058 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15059 gen_int_mode (0xbb41, HImode));
15060 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
15061 gen_lowpart (SImode, fnaddr));
15062 offset += 6;
15064 else
15066 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15067 gen_int_mode (0xbb49, HImode));
15068 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15069 fnaddr);
15070 offset += 10;
15072 /* Load static chain using movabs to r10. */
15073 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15074 gen_int_mode (0xba49, HImode));
15075 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
15076 cxt);
15077 offset += 10;
15078 /* Jump to r11. */
15079 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
15080 gen_int_mode (0xff49, HImode));
15081 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
15082 gen_int_mode (0xe3, QImode));
15083 offset += 3;
15084 gcc_assert (offset <= TRAMPOLINE_SIZE);
15087 #ifdef ENABLE_EXECUTE_STACK
15088 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
15089 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
15090 #endif
15093 /* Codes for all the SSE/MMX builtins. */
15094 enum ix86_builtins
15096 IX86_BUILTIN_ADDPS,
15097 IX86_BUILTIN_ADDSS,
15098 IX86_BUILTIN_DIVPS,
15099 IX86_BUILTIN_DIVSS,
15100 IX86_BUILTIN_MULPS,
15101 IX86_BUILTIN_MULSS,
15102 IX86_BUILTIN_SUBPS,
15103 IX86_BUILTIN_SUBSS,
15105 IX86_BUILTIN_CMPEQPS,
15106 IX86_BUILTIN_CMPLTPS,
15107 IX86_BUILTIN_CMPLEPS,
15108 IX86_BUILTIN_CMPGTPS,
15109 IX86_BUILTIN_CMPGEPS,
15110 IX86_BUILTIN_CMPNEQPS,
15111 IX86_BUILTIN_CMPNLTPS,
15112 IX86_BUILTIN_CMPNLEPS,
15113 IX86_BUILTIN_CMPNGTPS,
15114 IX86_BUILTIN_CMPNGEPS,
15115 IX86_BUILTIN_CMPORDPS,
15116 IX86_BUILTIN_CMPUNORDPS,
15117 IX86_BUILTIN_CMPEQSS,
15118 IX86_BUILTIN_CMPLTSS,
15119 IX86_BUILTIN_CMPLESS,
15120 IX86_BUILTIN_CMPNEQSS,
15121 IX86_BUILTIN_CMPNLTSS,
15122 IX86_BUILTIN_CMPNLESS,
15123 IX86_BUILTIN_CMPNGTSS,
15124 IX86_BUILTIN_CMPNGESS,
15125 IX86_BUILTIN_CMPORDSS,
15126 IX86_BUILTIN_CMPUNORDSS,
15128 IX86_BUILTIN_COMIEQSS,
15129 IX86_BUILTIN_COMILTSS,
15130 IX86_BUILTIN_COMILESS,
15131 IX86_BUILTIN_COMIGTSS,
15132 IX86_BUILTIN_COMIGESS,
15133 IX86_BUILTIN_COMINEQSS,
15134 IX86_BUILTIN_UCOMIEQSS,
15135 IX86_BUILTIN_UCOMILTSS,
15136 IX86_BUILTIN_UCOMILESS,
15137 IX86_BUILTIN_UCOMIGTSS,
15138 IX86_BUILTIN_UCOMIGESS,
15139 IX86_BUILTIN_UCOMINEQSS,
15141 IX86_BUILTIN_CVTPI2PS,
15142 IX86_BUILTIN_CVTPS2PI,
15143 IX86_BUILTIN_CVTSI2SS,
15144 IX86_BUILTIN_CVTSI642SS,
15145 IX86_BUILTIN_CVTSS2SI,
15146 IX86_BUILTIN_CVTSS2SI64,
15147 IX86_BUILTIN_CVTTPS2PI,
15148 IX86_BUILTIN_CVTTSS2SI,
15149 IX86_BUILTIN_CVTTSS2SI64,
15151 IX86_BUILTIN_MAXPS,
15152 IX86_BUILTIN_MAXSS,
15153 IX86_BUILTIN_MINPS,
15154 IX86_BUILTIN_MINSS,
15156 IX86_BUILTIN_LOADUPS,
15157 IX86_BUILTIN_STOREUPS,
15158 IX86_BUILTIN_MOVSS,
15160 IX86_BUILTIN_MOVHLPS,
15161 IX86_BUILTIN_MOVLHPS,
15162 IX86_BUILTIN_LOADHPS,
15163 IX86_BUILTIN_LOADLPS,
15164 IX86_BUILTIN_STOREHPS,
15165 IX86_BUILTIN_STORELPS,
15167 IX86_BUILTIN_MASKMOVQ,
15168 IX86_BUILTIN_MOVMSKPS,
15169 IX86_BUILTIN_PMOVMSKB,
15171 IX86_BUILTIN_MOVNTPS,
15172 IX86_BUILTIN_MOVNTQ,
15174 IX86_BUILTIN_LOADDQU,
15175 IX86_BUILTIN_STOREDQU,
15177 IX86_BUILTIN_PACKSSWB,
15178 IX86_BUILTIN_PACKSSDW,
15179 IX86_BUILTIN_PACKUSWB,
15181 IX86_BUILTIN_PADDB,
15182 IX86_BUILTIN_PADDW,
15183 IX86_BUILTIN_PADDD,
15184 IX86_BUILTIN_PADDQ,
15185 IX86_BUILTIN_PADDSB,
15186 IX86_BUILTIN_PADDSW,
15187 IX86_BUILTIN_PADDUSB,
15188 IX86_BUILTIN_PADDUSW,
15189 IX86_BUILTIN_PSUBB,
15190 IX86_BUILTIN_PSUBW,
15191 IX86_BUILTIN_PSUBD,
15192 IX86_BUILTIN_PSUBQ,
15193 IX86_BUILTIN_PSUBSB,
15194 IX86_BUILTIN_PSUBSW,
15195 IX86_BUILTIN_PSUBUSB,
15196 IX86_BUILTIN_PSUBUSW,
15198 IX86_BUILTIN_PAND,
15199 IX86_BUILTIN_PANDN,
15200 IX86_BUILTIN_POR,
15201 IX86_BUILTIN_PXOR,
15203 IX86_BUILTIN_PAVGB,
15204 IX86_BUILTIN_PAVGW,
15206 IX86_BUILTIN_PCMPEQB,
15207 IX86_BUILTIN_PCMPEQW,
15208 IX86_BUILTIN_PCMPEQD,
15209 IX86_BUILTIN_PCMPGTB,
15210 IX86_BUILTIN_PCMPGTW,
15211 IX86_BUILTIN_PCMPGTD,
15213 IX86_BUILTIN_PMADDWD,
15215 IX86_BUILTIN_PMAXSW,
15216 IX86_BUILTIN_PMAXUB,
15217 IX86_BUILTIN_PMINSW,
15218 IX86_BUILTIN_PMINUB,
15220 IX86_BUILTIN_PMULHUW,
15221 IX86_BUILTIN_PMULHW,
15222 IX86_BUILTIN_PMULLW,
15224 IX86_BUILTIN_PSADBW,
15225 IX86_BUILTIN_PSHUFW,
15227 IX86_BUILTIN_PSLLW,
15228 IX86_BUILTIN_PSLLD,
15229 IX86_BUILTIN_PSLLQ,
15230 IX86_BUILTIN_PSRAW,
15231 IX86_BUILTIN_PSRAD,
15232 IX86_BUILTIN_PSRLW,
15233 IX86_BUILTIN_PSRLD,
15234 IX86_BUILTIN_PSRLQ,
15235 IX86_BUILTIN_PSLLWI,
15236 IX86_BUILTIN_PSLLDI,
15237 IX86_BUILTIN_PSLLQI,
15238 IX86_BUILTIN_PSRAWI,
15239 IX86_BUILTIN_PSRADI,
15240 IX86_BUILTIN_PSRLWI,
15241 IX86_BUILTIN_PSRLDI,
15242 IX86_BUILTIN_PSRLQI,
15244 IX86_BUILTIN_PUNPCKHBW,
15245 IX86_BUILTIN_PUNPCKHWD,
15246 IX86_BUILTIN_PUNPCKHDQ,
15247 IX86_BUILTIN_PUNPCKLBW,
15248 IX86_BUILTIN_PUNPCKLWD,
15249 IX86_BUILTIN_PUNPCKLDQ,
15251 IX86_BUILTIN_SHUFPS,
15253 IX86_BUILTIN_RCPPS,
15254 IX86_BUILTIN_RCPSS,
15255 IX86_BUILTIN_RSQRTPS,
15256 IX86_BUILTIN_RSQRTSS,
15257 IX86_BUILTIN_SQRTPS,
15258 IX86_BUILTIN_SQRTSS,
15260 IX86_BUILTIN_UNPCKHPS,
15261 IX86_BUILTIN_UNPCKLPS,
15263 IX86_BUILTIN_ANDPS,
15264 IX86_BUILTIN_ANDNPS,
15265 IX86_BUILTIN_ORPS,
15266 IX86_BUILTIN_XORPS,
15268 IX86_BUILTIN_EMMS,
15269 IX86_BUILTIN_LDMXCSR,
15270 IX86_BUILTIN_STMXCSR,
15271 IX86_BUILTIN_SFENCE,
15273 /* 3DNow! Original */
15274 IX86_BUILTIN_FEMMS,
15275 IX86_BUILTIN_PAVGUSB,
15276 IX86_BUILTIN_PF2ID,
15277 IX86_BUILTIN_PFACC,
15278 IX86_BUILTIN_PFADD,
15279 IX86_BUILTIN_PFCMPEQ,
15280 IX86_BUILTIN_PFCMPGE,
15281 IX86_BUILTIN_PFCMPGT,
15282 IX86_BUILTIN_PFMAX,
15283 IX86_BUILTIN_PFMIN,
15284 IX86_BUILTIN_PFMUL,
15285 IX86_BUILTIN_PFRCP,
15286 IX86_BUILTIN_PFRCPIT1,
15287 IX86_BUILTIN_PFRCPIT2,
15288 IX86_BUILTIN_PFRSQIT1,
15289 IX86_BUILTIN_PFRSQRT,
15290 IX86_BUILTIN_PFSUB,
15291 IX86_BUILTIN_PFSUBR,
15292 IX86_BUILTIN_PI2FD,
15293 IX86_BUILTIN_PMULHRW,
15295 /* 3DNow! Athlon Extensions */
15296 IX86_BUILTIN_PF2IW,
15297 IX86_BUILTIN_PFNACC,
15298 IX86_BUILTIN_PFPNACC,
15299 IX86_BUILTIN_PI2FW,
15300 IX86_BUILTIN_PSWAPDSI,
15301 IX86_BUILTIN_PSWAPDSF,
15303 /* SSE2 */
15304 IX86_BUILTIN_ADDPD,
15305 IX86_BUILTIN_ADDSD,
15306 IX86_BUILTIN_DIVPD,
15307 IX86_BUILTIN_DIVSD,
15308 IX86_BUILTIN_MULPD,
15309 IX86_BUILTIN_MULSD,
15310 IX86_BUILTIN_SUBPD,
15311 IX86_BUILTIN_SUBSD,
15313 IX86_BUILTIN_CMPEQPD,
15314 IX86_BUILTIN_CMPLTPD,
15315 IX86_BUILTIN_CMPLEPD,
15316 IX86_BUILTIN_CMPGTPD,
15317 IX86_BUILTIN_CMPGEPD,
15318 IX86_BUILTIN_CMPNEQPD,
15319 IX86_BUILTIN_CMPNLTPD,
15320 IX86_BUILTIN_CMPNLEPD,
15321 IX86_BUILTIN_CMPNGTPD,
15322 IX86_BUILTIN_CMPNGEPD,
15323 IX86_BUILTIN_CMPORDPD,
15324 IX86_BUILTIN_CMPUNORDPD,
15325 IX86_BUILTIN_CMPNEPD,
15326 IX86_BUILTIN_CMPEQSD,
15327 IX86_BUILTIN_CMPLTSD,
15328 IX86_BUILTIN_CMPLESD,
15329 IX86_BUILTIN_CMPNEQSD,
15330 IX86_BUILTIN_CMPNLTSD,
15331 IX86_BUILTIN_CMPNLESD,
15332 IX86_BUILTIN_CMPORDSD,
15333 IX86_BUILTIN_CMPUNORDSD,
15334 IX86_BUILTIN_CMPNESD,
15336 IX86_BUILTIN_COMIEQSD,
15337 IX86_BUILTIN_COMILTSD,
15338 IX86_BUILTIN_COMILESD,
15339 IX86_BUILTIN_COMIGTSD,
15340 IX86_BUILTIN_COMIGESD,
15341 IX86_BUILTIN_COMINEQSD,
15342 IX86_BUILTIN_UCOMIEQSD,
15343 IX86_BUILTIN_UCOMILTSD,
15344 IX86_BUILTIN_UCOMILESD,
15345 IX86_BUILTIN_UCOMIGTSD,
15346 IX86_BUILTIN_UCOMIGESD,
15347 IX86_BUILTIN_UCOMINEQSD,
15349 IX86_BUILTIN_MAXPD,
15350 IX86_BUILTIN_MAXSD,
15351 IX86_BUILTIN_MINPD,
15352 IX86_BUILTIN_MINSD,
15354 IX86_BUILTIN_ANDPD,
15355 IX86_BUILTIN_ANDNPD,
15356 IX86_BUILTIN_ORPD,
15357 IX86_BUILTIN_XORPD,
15359 IX86_BUILTIN_SQRTPD,
15360 IX86_BUILTIN_SQRTSD,
15362 IX86_BUILTIN_UNPCKHPD,
15363 IX86_BUILTIN_UNPCKLPD,
15365 IX86_BUILTIN_SHUFPD,
15367 IX86_BUILTIN_LOADUPD,
15368 IX86_BUILTIN_STOREUPD,
15369 IX86_BUILTIN_MOVSD,
15371 IX86_BUILTIN_LOADHPD,
15372 IX86_BUILTIN_LOADLPD,
15374 IX86_BUILTIN_CVTDQ2PD,
15375 IX86_BUILTIN_CVTDQ2PS,
15377 IX86_BUILTIN_CVTPD2DQ,
15378 IX86_BUILTIN_CVTPD2PI,
15379 IX86_BUILTIN_CVTPD2PS,
15380 IX86_BUILTIN_CVTTPD2DQ,
15381 IX86_BUILTIN_CVTTPD2PI,
15383 IX86_BUILTIN_CVTPI2PD,
15384 IX86_BUILTIN_CVTSI2SD,
15385 IX86_BUILTIN_CVTSI642SD,
15387 IX86_BUILTIN_CVTSD2SI,
15388 IX86_BUILTIN_CVTSD2SI64,
15389 IX86_BUILTIN_CVTSD2SS,
15390 IX86_BUILTIN_CVTSS2SD,
15391 IX86_BUILTIN_CVTTSD2SI,
15392 IX86_BUILTIN_CVTTSD2SI64,
15394 IX86_BUILTIN_CVTPS2DQ,
15395 IX86_BUILTIN_CVTPS2PD,
15396 IX86_BUILTIN_CVTTPS2DQ,
15398 IX86_BUILTIN_MOVNTI,
15399 IX86_BUILTIN_MOVNTPD,
15400 IX86_BUILTIN_MOVNTDQ,
15402 /* SSE2 MMX */
15403 IX86_BUILTIN_MASKMOVDQU,
15404 IX86_BUILTIN_MOVMSKPD,
15405 IX86_BUILTIN_PMOVMSKB128,
15407 IX86_BUILTIN_PACKSSWB128,
15408 IX86_BUILTIN_PACKSSDW128,
15409 IX86_BUILTIN_PACKUSWB128,
15411 IX86_BUILTIN_PADDB128,
15412 IX86_BUILTIN_PADDW128,
15413 IX86_BUILTIN_PADDD128,
15414 IX86_BUILTIN_PADDQ128,
15415 IX86_BUILTIN_PADDSB128,
15416 IX86_BUILTIN_PADDSW128,
15417 IX86_BUILTIN_PADDUSB128,
15418 IX86_BUILTIN_PADDUSW128,
15419 IX86_BUILTIN_PSUBB128,
15420 IX86_BUILTIN_PSUBW128,
15421 IX86_BUILTIN_PSUBD128,
15422 IX86_BUILTIN_PSUBQ128,
15423 IX86_BUILTIN_PSUBSB128,
15424 IX86_BUILTIN_PSUBSW128,
15425 IX86_BUILTIN_PSUBUSB128,
15426 IX86_BUILTIN_PSUBUSW128,
15428 IX86_BUILTIN_PAND128,
15429 IX86_BUILTIN_PANDN128,
15430 IX86_BUILTIN_POR128,
15431 IX86_BUILTIN_PXOR128,
15433 IX86_BUILTIN_PAVGB128,
15434 IX86_BUILTIN_PAVGW128,
15436 IX86_BUILTIN_PCMPEQB128,
15437 IX86_BUILTIN_PCMPEQW128,
15438 IX86_BUILTIN_PCMPEQD128,
15439 IX86_BUILTIN_PCMPGTB128,
15440 IX86_BUILTIN_PCMPGTW128,
15441 IX86_BUILTIN_PCMPGTD128,
15443 IX86_BUILTIN_PMADDWD128,
15445 IX86_BUILTIN_PMAXSW128,
15446 IX86_BUILTIN_PMAXUB128,
15447 IX86_BUILTIN_PMINSW128,
15448 IX86_BUILTIN_PMINUB128,
15450 IX86_BUILTIN_PMULUDQ,
15451 IX86_BUILTIN_PMULUDQ128,
15452 IX86_BUILTIN_PMULHUW128,
15453 IX86_BUILTIN_PMULHW128,
15454 IX86_BUILTIN_PMULLW128,
15456 IX86_BUILTIN_PSADBW128,
15457 IX86_BUILTIN_PSHUFHW,
15458 IX86_BUILTIN_PSHUFLW,
15459 IX86_BUILTIN_PSHUFD,
15461 IX86_BUILTIN_PSLLW128,
15462 IX86_BUILTIN_PSLLD128,
15463 IX86_BUILTIN_PSLLQ128,
15464 IX86_BUILTIN_PSRAW128,
15465 IX86_BUILTIN_PSRAD128,
15466 IX86_BUILTIN_PSRLW128,
15467 IX86_BUILTIN_PSRLD128,
15468 IX86_BUILTIN_PSRLQ128,
15469 IX86_BUILTIN_PSLLDQI128,
15470 IX86_BUILTIN_PSLLWI128,
15471 IX86_BUILTIN_PSLLDI128,
15472 IX86_BUILTIN_PSLLQI128,
15473 IX86_BUILTIN_PSRAWI128,
15474 IX86_BUILTIN_PSRADI128,
15475 IX86_BUILTIN_PSRLDQI128,
15476 IX86_BUILTIN_PSRLWI128,
15477 IX86_BUILTIN_PSRLDI128,
15478 IX86_BUILTIN_PSRLQI128,
15480 IX86_BUILTIN_PUNPCKHBW128,
15481 IX86_BUILTIN_PUNPCKHWD128,
15482 IX86_BUILTIN_PUNPCKHDQ128,
15483 IX86_BUILTIN_PUNPCKHQDQ128,
15484 IX86_BUILTIN_PUNPCKLBW128,
15485 IX86_BUILTIN_PUNPCKLWD128,
15486 IX86_BUILTIN_PUNPCKLDQ128,
15487 IX86_BUILTIN_PUNPCKLQDQ128,
15489 IX86_BUILTIN_CLFLUSH,
15490 IX86_BUILTIN_MFENCE,
15491 IX86_BUILTIN_LFENCE,
15493 /* Prescott New Instructions. */
15494 IX86_BUILTIN_ADDSUBPS,
15495 IX86_BUILTIN_HADDPS,
15496 IX86_BUILTIN_HSUBPS,
15497 IX86_BUILTIN_MOVSHDUP,
15498 IX86_BUILTIN_MOVSLDUP,
15499 IX86_BUILTIN_ADDSUBPD,
15500 IX86_BUILTIN_HADDPD,
15501 IX86_BUILTIN_HSUBPD,
15502 IX86_BUILTIN_LDDQU,
15504 IX86_BUILTIN_MONITOR,
15505 IX86_BUILTIN_MWAIT,
15507 /* SSSE3. */
15508 IX86_BUILTIN_PHADDW,
15509 IX86_BUILTIN_PHADDD,
15510 IX86_BUILTIN_PHADDSW,
15511 IX86_BUILTIN_PHSUBW,
15512 IX86_BUILTIN_PHSUBD,
15513 IX86_BUILTIN_PHSUBSW,
15514 IX86_BUILTIN_PMADDUBSW,
15515 IX86_BUILTIN_PMULHRSW,
15516 IX86_BUILTIN_PSHUFB,
15517 IX86_BUILTIN_PSIGNB,
15518 IX86_BUILTIN_PSIGNW,
15519 IX86_BUILTIN_PSIGND,
15520 IX86_BUILTIN_PALIGNR,
15521 IX86_BUILTIN_PABSB,
15522 IX86_BUILTIN_PABSW,
15523 IX86_BUILTIN_PABSD,
15525 IX86_BUILTIN_PHADDW128,
15526 IX86_BUILTIN_PHADDD128,
15527 IX86_BUILTIN_PHADDSW128,
15528 IX86_BUILTIN_PHSUBW128,
15529 IX86_BUILTIN_PHSUBD128,
15530 IX86_BUILTIN_PHSUBSW128,
15531 IX86_BUILTIN_PMADDUBSW128,
15532 IX86_BUILTIN_PMULHRSW128,
15533 IX86_BUILTIN_PSHUFB128,
15534 IX86_BUILTIN_PSIGNB128,
15535 IX86_BUILTIN_PSIGNW128,
15536 IX86_BUILTIN_PSIGND128,
15537 IX86_BUILTIN_PALIGNR128,
15538 IX86_BUILTIN_PABSB128,
15539 IX86_BUILTIN_PABSW128,
15540 IX86_BUILTIN_PABSD128,
15542 IX86_BUILTIN_VEC_INIT_V2SI,
15543 IX86_BUILTIN_VEC_INIT_V4HI,
15544 IX86_BUILTIN_VEC_INIT_V8QI,
15545 IX86_BUILTIN_VEC_EXT_V2DF,
15546 IX86_BUILTIN_VEC_EXT_V2DI,
15547 IX86_BUILTIN_VEC_EXT_V4SF,
15548 IX86_BUILTIN_VEC_EXT_V4SI,
15549 IX86_BUILTIN_VEC_EXT_V8HI,
15550 IX86_BUILTIN_VEC_EXT_V2SI,
15551 IX86_BUILTIN_VEC_EXT_V4HI,
15552 IX86_BUILTIN_VEC_SET_V8HI,
15553 IX86_BUILTIN_VEC_SET_V4HI,
15555 IX86_BUILTIN_MAX
15558 /* Table for the ix86 builtin decls. */
15559 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
15561 /* Add an ix86 target builtin function with CODE, NAME and TYPE.  Do so
15562    only if target_flags includes one of the bits in MASK.  Stores the
15563    function decl in the ix86_builtins array.
15564    Returns the function decl, or NULL_TREE if the builtin was not added.  */
15566 static inline tree
15567 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
15569 tree decl = NULL_TREE;
15571 if (mask & target_flags
15572 && (!(mask & MASK_64BIT) || TARGET_64BIT))
15574 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
15575 NULL, NULL_TREE);
15576 ix86_builtins[(int) code] = decl;
15579 return decl;
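/* Illustrative usage (this mirrors a registration made in
   ix86_init_mmx_sse_builtins below; it is an example, not extra code):

     def_builtin (MASK_SSE, "__builtin_ia32_movmskps",
		  int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);

   When -msse is not enabled the mask test fails and NULL_TREE is returned,
   so callers must tolerate a null decl.  */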
15582 /* Like def_builtin, but also marks the function decl "const". */
15584 static inline tree
15585 def_builtin_const (int mask, const char *name, tree type,
15586 enum ix86_builtins code)
15588 tree decl = def_builtin (mask, name, type, code);
15589 if (decl)
15590 TREE_READONLY (decl) = 1;
15591 return decl;
15594 /* Bits for builtin_description.flag. */
15596 /* Set when we don't support the comparison natively, and should
15597    swap the comparison operands in order to support it.  */
15598 #define BUILTIN_DESC_SWAP_OPERANDS 1
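/* Example (drawn from the bdesc_2arg table below): there is no native
   "compare greater" SSE pattern, so __builtin_ia32_cmpgtps is described with
   rtx code LT plus BUILTIN_DESC_SWAP_OPERANDS, i.e. "a > b" is expanded
   as "b < a".  */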
15600 struct builtin_description
15602 const unsigned int mask;
15603 const enum insn_code icode;
15604 const char *const name;
15605 const enum ix86_builtins code;
15606 const enum rtx_code comparison;
15607 const unsigned int flag;
15610 static const struct builtin_description bdesc_comi[] =
15612 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
15613 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
15614 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
15615 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
15616 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
15617 { MASK_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
15618 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
15619 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
15620 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
15621 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
15622 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
15623 { MASK_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
15624 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
15625 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
15626 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
15627 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
15628 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
15629 { MASK_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
15630 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
15631 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
15632 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
15633 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
15634 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
15635 { MASK_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
15638 static const struct builtin_description bdesc_2arg[] =
15640 /* SSE */
15641 { MASK_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, 0, 0 },
15642 { MASK_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, 0, 0 },
15643 { MASK_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, 0, 0 },
15644 { MASK_SSE, CODE_FOR_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, 0, 0 },
15645 { MASK_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, 0, 0 },
15646 { MASK_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, 0, 0 },
15647 { MASK_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, 0, 0 },
15648 { MASK_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, 0, 0 },
15650 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, 0 },
15651 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, 0 },
15652 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, 0 },
15653 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT,
15654 BUILTIN_DESC_SWAP_OPERANDS },
15655 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE,
15656 BUILTIN_DESC_SWAP_OPERANDS },
15657 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, 0 },
15658 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, 0 },
15659 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, 0 },
15660 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, 0 },
15661 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE,
15662 BUILTIN_DESC_SWAP_OPERANDS },
15663 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT,
15664 BUILTIN_DESC_SWAP_OPERANDS },
15665 { MASK_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, 0 },
15666 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, 0 },
15667 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, 0 },
15668 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, 0 },
15669 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, 0 },
15670 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, 0 },
15671 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, 0 },
15672 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, 0 },
15673 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE,
15674 BUILTIN_DESC_SWAP_OPERANDS },
15675 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT,
15676 BUILTIN_DESC_SWAP_OPERANDS },
15677 { MASK_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, UNORDERED, 0 },
15679 { MASK_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, 0, 0 },
15680 { MASK_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, 0, 0 },
15681 { MASK_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, 0, 0 },
15682 { MASK_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, 0, 0 },
15684 { MASK_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, 0, 0 },
15685 { MASK_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, 0, 0 },
15686 { MASK_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, 0, 0 },
15687 { MASK_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, 0, 0 },
15689 { MASK_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, 0, 0 },
15690 { MASK_SSE, CODE_FOR_sse_movhlps, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, 0, 0 },
15691 { MASK_SSE, CODE_FOR_sse_movlhps, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, 0, 0 },
15692 { MASK_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, 0, 0 },
15693 { MASK_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, 0, 0 },
15695 /* MMX */
15696 { MASK_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, 0, 0 },
15697 { MASK_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, 0, 0 },
15698 { MASK_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, 0, 0 },
15699 { MASK_SSE2, CODE_FOR_mmx_adddi3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, 0, 0 },
15700 { MASK_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, 0, 0 },
15701 { MASK_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, 0, 0 },
15702 { MASK_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, 0, 0 },
15703 { MASK_SSE2, CODE_FOR_mmx_subdi3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, 0, 0 },
15705 { MASK_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, 0, 0 },
15706 { MASK_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, 0, 0 },
15707 { MASK_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, 0, 0 },
15708 { MASK_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, 0, 0 },
15709 { MASK_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, 0, 0 },
15710 { MASK_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, 0, 0 },
15711 { MASK_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, 0, 0 },
15712 { MASK_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, 0, 0 },
15714 { MASK_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, 0, 0 },
15715 { MASK_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, 0, 0 },
15716 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, 0, 0 },
15718 { MASK_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, 0, 0 },
15719 { MASK_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, 0, 0 },
15720 { MASK_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, 0, 0 },
15721 { MASK_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, 0, 0 },
15723 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, 0, 0 },
15724 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, 0, 0 },
15726 { MASK_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, 0, 0 },
15727 { MASK_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, 0, 0 },
15728 { MASK_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, 0, 0 },
15729 { MASK_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, 0, 0 },
15730 { MASK_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, 0, 0 },
15731 { MASK_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, 0, 0 },
15733 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, 0, 0 },
15734 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, 0, 0 },
15735 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, 0, 0 },
15736 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, 0, 0 },
15738 { MASK_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, 0, 0 },
15739 { MASK_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, 0, 0 },
15740 { MASK_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, 0, 0 },
15741 { MASK_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, 0, 0 },
15742 { MASK_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, 0, 0 },
15743 { MASK_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, 0, 0 },
15745 /* Special: these entries have a null name, are skipped by the generic registration loop, and are defined by hand below with more precise prototypes.  */
15746 { MASK_MMX, CODE_FOR_mmx_packsswb, 0, IX86_BUILTIN_PACKSSWB, 0, 0 },
15747 { MASK_MMX, CODE_FOR_mmx_packssdw, 0, IX86_BUILTIN_PACKSSDW, 0, 0 },
15748 { MASK_MMX, CODE_FOR_mmx_packuswb, 0, IX86_BUILTIN_PACKUSWB, 0, 0 },
15750 { MASK_SSE, CODE_FOR_sse_cvtpi2ps, 0, IX86_BUILTIN_CVTPI2PS, 0, 0 },
15751 { MASK_SSE, CODE_FOR_sse_cvtsi2ss, 0, IX86_BUILTIN_CVTSI2SS, 0, 0 },
15752 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtsi2ssq, 0, IX86_BUILTIN_CVTSI642SS, 0, 0 },
15754 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLW, 0, 0 },
15755 { MASK_MMX, CODE_FOR_mmx_ashlv4hi3, 0, IX86_BUILTIN_PSLLWI, 0, 0 },
15756 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLD, 0, 0 },
15757 { MASK_MMX, CODE_FOR_mmx_ashlv2si3, 0, IX86_BUILTIN_PSLLDI, 0, 0 },
15758 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQ, 0, 0 },
15759 { MASK_MMX, CODE_FOR_mmx_ashldi3, 0, IX86_BUILTIN_PSLLQI, 0, 0 },
15761 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLW, 0, 0 },
15762 { MASK_MMX, CODE_FOR_mmx_lshrv4hi3, 0, IX86_BUILTIN_PSRLWI, 0, 0 },
15763 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLD, 0, 0 },
15764 { MASK_MMX, CODE_FOR_mmx_lshrv2si3, 0, IX86_BUILTIN_PSRLDI, 0, 0 },
15765 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQ, 0, 0 },
15766 { MASK_MMX, CODE_FOR_mmx_lshrdi3, 0, IX86_BUILTIN_PSRLQI, 0, 0 },
15768 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAW, 0, 0 },
15769 { MASK_MMX, CODE_FOR_mmx_ashrv4hi3, 0, IX86_BUILTIN_PSRAWI, 0, 0 },
15770 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRAD, 0, 0 },
15771 { MASK_MMX, CODE_FOR_mmx_ashrv2si3, 0, IX86_BUILTIN_PSRADI, 0, 0 },
15773 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_psadbw, 0, IX86_BUILTIN_PSADBW, 0, 0 },
15774 { MASK_MMX, CODE_FOR_mmx_pmaddwd, 0, IX86_BUILTIN_PMADDWD, 0, 0 },
15776 /* SSE2 */
15777 { MASK_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, 0, 0 },
15778 { MASK_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, 0, 0 },
15779 { MASK_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, 0, 0 },
15780 { MASK_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, 0, 0 },
15781 { MASK_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, 0, 0 },
15782 { MASK_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, 0, 0 },
15783 { MASK_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, 0, 0 },
15784 { MASK_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, 0, 0 },
15786 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, 0 },
15787 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, 0 },
15788 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, 0 },
15789 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT,
15790 BUILTIN_DESC_SWAP_OPERANDS },
15791 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE,
15792 BUILTIN_DESC_SWAP_OPERANDS },
15793 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, 0 },
15794 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, 0 },
15795 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, 0 },
15796 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, 0 },
15797 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE,
15798 BUILTIN_DESC_SWAP_OPERANDS },
15799 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT,
15800 BUILTIN_DESC_SWAP_OPERANDS },
15801 { MASK_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, 0 },
15802 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, 0 },
15803 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, 0 },
15804 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, 0 },
15805 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, 0 },
15806 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, 0 },
15807 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, 0 },
15808 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, 0 },
15809 { MASK_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, 0 },
15811 { MASK_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, 0, 0 },
15812 { MASK_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, 0, 0 },
15813 { MASK_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, 0, 0 },
15814 { MASK_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, 0, 0 },
15816 { MASK_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, 0, 0 },
15817 { MASK_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, 0, 0 },
15818 { MASK_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, 0, 0 },
15819 { MASK_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, 0, 0 },
15821 { MASK_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, 0, 0 },
15822 { MASK_SSE2, CODE_FOR_sse2_unpckhpd, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, 0, 0 },
15823 { MASK_SSE2, CODE_FOR_sse2_unpcklpd, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, 0, 0 },
15825 /* SSE2 MMX */
15826 { MASK_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, 0, 0 },
15827 { MASK_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, 0, 0 },
15828 { MASK_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, 0, 0 },
15829 { MASK_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, 0, 0 },
15830 { MASK_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, 0, 0 },
15831 { MASK_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, 0, 0 },
15832 { MASK_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, 0, 0 },
15833 { MASK_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, 0, 0 },
15835 { MASK_MMX, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, 0, 0 },
15836 { MASK_MMX, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, 0, 0 },
15837 { MASK_MMX, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, 0, 0 },
15838 { MASK_MMX, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, 0, 0 },
15839 { MASK_MMX, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, 0, 0 },
15840 { MASK_MMX, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, 0, 0 },
15841 { MASK_MMX, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, 0, 0 },
15842 { MASK_MMX, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, 0, 0 },
15844 { MASK_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, 0, 0 },
15845 { MASK_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, 0, 0 },
15847 { MASK_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, 0, 0 },
15848 { MASK_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, 0, 0 },
15849 { MASK_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, 0, 0 },
15850 { MASK_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, 0, 0 },
15852 { MASK_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, 0, 0 },
15853 { MASK_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, 0, 0 },
15855 { MASK_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, 0, 0 },
15856 { MASK_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, 0, 0 },
15857 { MASK_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, 0, 0 },
15858 { MASK_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, 0, 0 },
15859 { MASK_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, 0, 0 },
15860 { MASK_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, 0, 0 },
15862 { MASK_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, 0, 0 },
15863 { MASK_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, 0, 0 },
15864 { MASK_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, 0, 0 },
15865 { MASK_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, 0, 0 },
15867 { MASK_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, 0, 0 },
15868 { MASK_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, 0, 0 },
15869 { MASK_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, 0, 0 },
15870 { MASK_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, 0, 0 },
15871 { MASK_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, 0, 0 },
15872 { MASK_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, 0, 0 },
15873 { MASK_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, 0, 0 },
15874 { MASK_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, 0, 0 },
15876 { MASK_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, 0, 0 },
15877 { MASK_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, 0, 0 },
15878 { MASK_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, 0, 0 },
15880 { MASK_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, 0, 0 },
15881 { MASK_SSE2, CODE_FOR_sse2_psadbw, 0, IX86_BUILTIN_PSADBW128, 0, 0 },
15883 { MASK_SSE2, CODE_FOR_sse2_umulsidi3, 0, IX86_BUILTIN_PMULUDQ, 0, 0 },
15884 { MASK_SSE2, CODE_FOR_sse2_umulv2siv2di3, 0, IX86_BUILTIN_PMULUDQ128, 0, 0 },
15886 { MASK_SSE2, CODE_FOR_ashlv8hi3, 0, IX86_BUILTIN_PSLLWI128, 0, 0 },
15887 { MASK_SSE2, CODE_FOR_ashlv4si3, 0, IX86_BUILTIN_PSLLDI128, 0, 0 },
15888 { MASK_SSE2, CODE_FOR_ashlv2di3, 0, IX86_BUILTIN_PSLLQI128, 0, 0 },
15890 { MASK_SSE2, CODE_FOR_lshrv8hi3, 0, IX86_BUILTIN_PSRLWI128, 0, 0 },
15891 { MASK_SSE2, CODE_FOR_lshrv4si3, 0, IX86_BUILTIN_PSRLDI128, 0, 0 },
15892 { MASK_SSE2, CODE_FOR_lshrv2di3, 0, IX86_BUILTIN_PSRLQI128, 0, 0 },
15894 { MASK_SSE2, CODE_FOR_ashrv8hi3, 0, IX86_BUILTIN_PSRAWI128, 0, 0 },
15895 { MASK_SSE2, CODE_FOR_ashrv4si3, 0, IX86_BUILTIN_PSRADI128, 0, 0 },
15897 { MASK_SSE2, CODE_FOR_sse2_pmaddwd, 0, IX86_BUILTIN_PMADDWD128, 0, 0 },
15899 { MASK_SSE2, CODE_FOR_sse2_cvtsi2sd, 0, IX86_BUILTIN_CVTSI2SD, 0, 0 },
15900 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsi2sdq, 0, IX86_BUILTIN_CVTSI642SD, 0, 0 },
15901 { MASK_SSE2, CODE_FOR_sse2_cvtsd2ss, 0, IX86_BUILTIN_CVTSD2SS, 0, 0 },
15902 { MASK_SSE2, CODE_FOR_sse2_cvtss2sd, 0, IX86_BUILTIN_CVTSS2SD, 0, 0 },
15904 /* SSE3 MMX */
15905 { MASK_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, 0, 0 },
15906 { MASK_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, 0, 0 },
15907 { MASK_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, 0, 0 },
15908 { MASK_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, 0, 0 },
15909 { MASK_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, 0, 0 },
15910 { MASK_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, 0, 0 },
15912 /* SSSE3 */
15913 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, 0, 0 },
15914 { MASK_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, 0, 0 },
15915 { MASK_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, 0, 0 },
15916 { MASK_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, 0, 0 },
15917 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, 0, 0 },
15918 { MASK_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, 0, 0 },
15919 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, 0, 0 },
15920 { MASK_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, 0, 0 },
15921 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, 0, 0 },
15922 { MASK_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, 0, 0 },
15923 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, 0, 0 },
15924 { MASK_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, 0, 0 },
15925 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv8hi3, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, 0, 0 },
15926 { MASK_SSSE3, CODE_FOR_ssse3_pmaddubswv4hi3, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, 0, 0 },
15927 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, 0, 0 },
15928 { MASK_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, 0, 0 },
15929 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, 0, 0 },
15930 { MASK_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, 0, 0 },
15931 { MASK_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, 0, 0 },
15932 { MASK_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, 0, 0 },
15933 { MASK_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, 0, 0 },
15934 { MASK_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, 0, 0 },
15935 { MASK_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, 0, 0 },
15936 { MASK_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, 0, 0 }
15939 static const struct builtin_description bdesc_1arg[] =
15941 { MASK_SSE | MASK_3DNOW_A, CODE_FOR_mmx_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB, 0, 0 },
15942 { MASK_SSE, CODE_FOR_sse_movmskps, 0, IX86_BUILTIN_MOVMSKPS, 0, 0 },
15944 { MASK_SSE, CODE_FOR_sqrtv4sf2, 0, IX86_BUILTIN_SQRTPS, 0, 0 },
15945 { MASK_SSE, CODE_FOR_sse_rsqrtv4sf2, 0, IX86_BUILTIN_RSQRTPS, 0, 0 },
15946 { MASK_SSE, CODE_FOR_sse_rcpv4sf2, 0, IX86_BUILTIN_RCPPS, 0, 0 },
15948 { MASK_SSE, CODE_FOR_sse_cvtps2pi, 0, IX86_BUILTIN_CVTPS2PI, 0, 0 },
15949 { MASK_SSE, CODE_FOR_sse_cvtss2si, 0, IX86_BUILTIN_CVTSS2SI, 0, 0 },
15950 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvtss2siq, 0, IX86_BUILTIN_CVTSS2SI64, 0, 0 },
15951 { MASK_SSE, CODE_FOR_sse_cvttps2pi, 0, IX86_BUILTIN_CVTTPS2PI, 0, 0 },
15952 { MASK_SSE, CODE_FOR_sse_cvttss2si, 0, IX86_BUILTIN_CVTTSS2SI, 0, 0 },
15953 { MASK_SSE | MASK_64BIT, CODE_FOR_sse_cvttss2siq, 0, IX86_BUILTIN_CVTTSS2SI64, 0, 0 },
15955 { MASK_SSE2, CODE_FOR_sse2_pmovmskb, 0, IX86_BUILTIN_PMOVMSKB128, 0, 0 },
15956 { MASK_SSE2, CODE_FOR_sse2_movmskpd, 0, IX86_BUILTIN_MOVMSKPD, 0, 0 },
15958 { MASK_SSE2, CODE_FOR_sqrtv2df2, 0, IX86_BUILTIN_SQRTPD, 0, 0 },
15960 { MASK_SSE2, CODE_FOR_sse2_cvtdq2pd, 0, IX86_BUILTIN_CVTDQ2PD, 0, 0 },
15961 { MASK_SSE2, CODE_FOR_sse2_cvtdq2ps, 0, IX86_BUILTIN_CVTDQ2PS, 0, 0 },
15963 { MASK_SSE2, CODE_FOR_sse2_cvtpd2dq, 0, IX86_BUILTIN_CVTPD2DQ, 0, 0 },
15964 { MASK_SSE2, CODE_FOR_sse2_cvtpd2pi, 0, IX86_BUILTIN_CVTPD2PI, 0, 0 },
15965 { MASK_SSE2, CODE_FOR_sse2_cvtpd2ps, 0, IX86_BUILTIN_CVTPD2PS, 0, 0 },
15966 { MASK_SSE2, CODE_FOR_sse2_cvttpd2dq, 0, IX86_BUILTIN_CVTTPD2DQ, 0, 0 },
15967 { MASK_SSE2, CODE_FOR_sse2_cvttpd2pi, 0, IX86_BUILTIN_CVTTPD2PI, 0, 0 },
15969 { MASK_SSE2, CODE_FOR_sse2_cvtpi2pd, 0, IX86_BUILTIN_CVTPI2PD, 0, 0 },
15971 { MASK_SSE2, CODE_FOR_sse2_cvtsd2si, 0, IX86_BUILTIN_CVTSD2SI, 0, 0 },
15972 { MASK_SSE2, CODE_FOR_sse2_cvttsd2si, 0, IX86_BUILTIN_CVTTSD2SI, 0, 0 },
15973 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvtsd2siq, 0, IX86_BUILTIN_CVTSD2SI64, 0, 0 },
15974 { MASK_SSE2 | MASK_64BIT, CODE_FOR_sse2_cvttsd2siq, 0, IX86_BUILTIN_CVTTSD2SI64, 0, 0 },
15976 { MASK_SSE2, CODE_FOR_sse2_cvtps2dq, 0, IX86_BUILTIN_CVTPS2DQ, 0, 0 },
15977 { MASK_SSE2, CODE_FOR_sse2_cvtps2pd, 0, IX86_BUILTIN_CVTPS2PD, 0, 0 },
15978 { MASK_SSE2, CODE_FOR_sse2_cvttps2dq, 0, IX86_BUILTIN_CVTTPS2DQ, 0, 0 },
15980 /* SSE3 */
15981 { MASK_SSE3, CODE_FOR_sse3_movshdup, 0, IX86_BUILTIN_MOVSHDUP, 0, 0 },
15982 { MASK_SSE3, CODE_FOR_sse3_movsldup, 0, IX86_BUILTIN_MOVSLDUP, 0, 0 },
15984 /* SSSE3 */
15985 { MASK_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, 0, 0 },
15986 { MASK_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, 0, 0 },
15987 { MASK_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, 0, 0 },
15988 { MASK_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, 0, 0 },
15989 { MASK_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, 0, 0 },
15990 { MASK_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, 0, 0 },
15993 static void
15994 ix86_init_builtins (void)
15996 if (TARGET_MMX)
15997 ix86_init_mmx_sse_builtins ();
16000 /* Set up all the MMX/SSE builtins.  This is not called if TARGET_MMX
16001    is zero.  Otherwise, if TARGET_SSE is not set, only the MMX builtins
16002    are actually defined (def_builtin checks the mask).  */
16003 static void
16004 ix86_init_mmx_sse_builtins (void)
16006 const struct builtin_description * d;
16007 size_t i;
16009 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
16010 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
16011 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
16012 tree V2DI_type_node
16013 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
16014 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
16015 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
16016 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
16017 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
16018 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
16019 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
16021 tree pchar_type_node = build_pointer_type (char_type_node);
16022 tree pcchar_type_node = build_pointer_type (
16023 build_type_variant (char_type_node, 1, 0));
16024 tree pfloat_type_node = build_pointer_type (float_type_node);
16025 tree pcfloat_type_node = build_pointer_type (
16026 build_type_variant (float_type_node, 1, 0));
16027 tree pv2si_type_node = build_pointer_type (V2SI_type_node);
16028 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
16029 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
16031 /* Comparisons. */
16032 tree int_ftype_v4sf_v4sf
16033 = build_function_type_list (integer_type_node,
16034 V4SF_type_node, V4SF_type_node, NULL_TREE);
16035 tree v4si_ftype_v4sf_v4sf
16036 = build_function_type_list (V4SI_type_node,
16037 V4SF_type_node, V4SF_type_node, NULL_TREE);
16038 /* MMX/SSE/integer conversions. */
16039 tree int_ftype_v4sf
16040 = build_function_type_list (integer_type_node,
16041 V4SF_type_node, NULL_TREE);
16042 tree int64_ftype_v4sf
16043 = build_function_type_list (long_long_integer_type_node,
16044 V4SF_type_node, NULL_TREE);
16045 tree int_ftype_v8qi
16046 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
16047 tree v4sf_ftype_v4sf_int
16048 = build_function_type_list (V4SF_type_node,
16049 V4SF_type_node, integer_type_node, NULL_TREE);
16050 tree v4sf_ftype_v4sf_int64
16051 = build_function_type_list (V4SF_type_node,
16052 V4SF_type_node, long_long_integer_type_node,
16053 NULL_TREE);
16054 tree v4sf_ftype_v4sf_v2si
16055 = build_function_type_list (V4SF_type_node,
16056 V4SF_type_node, V2SI_type_node, NULL_TREE);
16058 /* Miscellaneous. */
16059 tree v8qi_ftype_v4hi_v4hi
16060 = build_function_type_list (V8QI_type_node,
16061 V4HI_type_node, V4HI_type_node, NULL_TREE);
16062 tree v4hi_ftype_v2si_v2si
16063 = build_function_type_list (V4HI_type_node,
16064 V2SI_type_node, V2SI_type_node, NULL_TREE);
16065 tree v4sf_ftype_v4sf_v4sf_int
16066 = build_function_type_list (V4SF_type_node,
16067 V4SF_type_node, V4SF_type_node,
16068 integer_type_node, NULL_TREE);
16069 tree v2si_ftype_v4hi_v4hi
16070 = build_function_type_list (V2SI_type_node,
16071 V4HI_type_node, V4HI_type_node, NULL_TREE);
16072 tree v4hi_ftype_v4hi_int
16073 = build_function_type_list (V4HI_type_node,
16074 V4HI_type_node, integer_type_node, NULL_TREE);
16075 tree v4hi_ftype_v4hi_di
16076 = build_function_type_list (V4HI_type_node,
16077 V4HI_type_node, long_long_unsigned_type_node,
16078 NULL_TREE);
16079 tree v2si_ftype_v2si_di
16080 = build_function_type_list (V2SI_type_node,
16081 V2SI_type_node, long_long_unsigned_type_node,
16082 NULL_TREE);
16083 tree void_ftype_void
16084 = build_function_type (void_type_node, void_list_node);
16085 tree void_ftype_unsigned
16086 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
16087 tree void_ftype_unsigned_unsigned
16088 = build_function_type_list (void_type_node, unsigned_type_node,
16089 unsigned_type_node, NULL_TREE);
16090 tree void_ftype_pcvoid_unsigned_unsigned
16091 = build_function_type_list (void_type_node, const_ptr_type_node,
16092 unsigned_type_node, unsigned_type_node,
16093 NULL_TREE);
16094 tree unsigned_ftype_void
16095 = build_function_type (unsigned_type_node, void_list_node);
16096 tree v2si_ftype_v4sf
16097 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
16098 /* Loads/stores. */
16099 tree void_ftype_v8qi_v8qi_pchar
16100 = build_function_type_list (void_type_node,
16101 V8QI_type_node, V8QI_type_node,
16102 pchar_type_node, NULL_TREE);
16103 tree v4sf_ftype_pcfloat
16104 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
16105 /* @@@ the type is bogus */
16106 tree v4sf_ftype_v4sf_pv2si
16107 = build_function_type_list (V4SF_type_node,
16108 V4SF_type_node, pv2si_type_node, NULL_TREE);
16109 tree void_ftype_pv2si_v4sf
16110 = build_function_type_list (void_type_node,
16111 pv2si_type_node, V4SF_type_node, NULL_TREE);
16112 tree void_ftype_pfloat_v4sf
16113 = build_function_type_list (void_type_node,
16114 pfloat_type_node, V4SF_type_node, NULL_TREE);
16115 tree void_ftype_pdi_di
16116 = build_function_type_list (void_type_node,
16117 pdi_type_node, long_long_unsigned_type_node,
16118 NULL_TREE);
16119 tree void_ftype_pv2di_v2di
16120 = build_function_type_list (void_type_node,
16121 pv2di_type_node, V2DI_type_node, NULL_TREE);
16122 /* Normal vector unops. */
16123 tree v4sf_ftype_v4sf
16124 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16125 tree v16qi_ftype_v16qi
16126 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16127 tree v8hi_ftype_v8hi
16128 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16129 tree v4si_ftype_v4si
16130 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16131 tree v8qi_ftype_v8qi
16132 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
16133 tree v4hi_ftype_v4hi
16134 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
16136 /* Normal vector binops. */
16137 tree v4sf_ftype_v4sf_v4sf
16138 = build_function_type_list (V4SF_type_node,
16139 V4SF_type_node, V4SF_type_node, NULL_TREE);
16140 tree v8qi_ftype_v8qi_v8qi
16141 = build_function_type_list (V8QI_type_node,
16142 V8QI_type_node, V8QI_type_node, NULL_TREE);
16143 tree v4hi_ftype_v4hi_v4hi
16144 = build_function_type_list (V4HI_type_node,
16145 V4HI_type_node, V4HI_type_node, NULL_TREE);
16146 tree v2si_ftype_v2si_v2si
16147 = build_function_type_list (V2SI_type_node,
16148 V2SI_type_node, V2SI_type_node, NULL_TREE);
16149 tree di_ftype_di_di
16150 = build_function_type_list (long_long_unsigned_type_node,
16151 long_long_unsigned_type_node,
16152 long_long_unsigned_type_node, NULL_TREE);
16154 tree di_ftype_di_di_int
16155 = build_function_type_list (long_long_unsigned_type_node,
16156 long_long_unsigned_type_node,
16157 long_long_unsigned_type_node,
16158 integer_type_node, NULL_TREE);
16160 tree v2si_ftype_v2sf
16161 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
16162 tree v2sf_ftype_v2si
16163 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
16164 tree v2si_ftype_v2si
16165 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
16166 tree v2sf_ftype_v2sf
16167 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
16168 tree v2sf_ftype_v2sf_v2sf
16169 = build_function_type_list (V2SF_type_node,
16170 V2SF_type_node, V2SF_type_node, NULL_TREE);
16171 tree v2si_ftype_v2sf_v2sf
16172 = build_function_type_list (V2SI_type_node,
16173 V2SF_type_node, V2SF_type_node, NULL_TREE);
16174 tree pint_type_node = build_pointer_type (integer_type_node);
16175 tree pdouble_type_node = build_pointer_type (double_type_node);
16176 tree pcdouble_type_node = build_pointer_type (
16177 build_type_variant (double_type_node, 1, 0));
16178 tree int_ftype_v2df_v2df
16179 = build_function_type_list (integer_type_node,
16180 V2DF_type_node, V2DF_type_node, NULL_TREE);
16182 tree void_ftype_pcvoid
16183 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
16184 tree v4sf_ftype_v4si
16185 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
16186 tree v4si_ftype_v4sf
16187 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
16188 tree v2df_ftype_v4si
16189 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
16190 tree v4si_ftype_v2df
16191 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
16192 tree v2si_ftype_v2df
16193 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
16194 tree v4sf_ftype_v2df
16195 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
16196 tree v2df_ftype_v2si
16197 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
16198 tree v2df_ftype_v4sf
16199 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
16200 tree int_ftype_v2df
16201 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
16202 tree int64_ftype_v2df
16203 = build_function_type_list (long_long_integer_type_node,
16204 V2DF_type_node, NULL_TREE);
16205 tree v2df_ftype_v2df_int
16206 = build_function_type_list (V2DF_type_node,
16207 V2DF_type_node, integer_type_node, NULL_TREE);
16208 tree v2df_ftype_v2df_int64
16209 = build_function_type_list (V2DF_type_node,
16210 V2DF_type_node, long_long_integer_type_node,
16211 NULL_TREE);
16212 tree v4sf_ftype_v4sf_v2df
16213 = build_function_type_list (V4SF_type_node,
16214 V4SF_type_node, V2DF_type_node, NULL_TREE);
16215 tree v2df_ftype_v2df_v4sf
16216 = build_function_type_list (V2DF_type_node,
16217 V2DF_type_node, V4SF_type_node, NULL_TREE);
16218 tree v2df_ftype_v2df_v2df_int
16219 = build_function_type_list (V2DF_type_node,
16220 V2DF_type_node, V2DF_type_node,
16221 integer_type_node,
16222 NULL_TREE);
16223 tree v2df_ftype_v2df_pcdouble
16224 = build_function_type_list (V2DF_type_node,
16225 V2DF_type_node, pcdouble_type_node, NULL_TREE);
16226 tree void_ftype_pdouble_v2df
16227 = build_function_type_list (void_type_node,
16228 pdouble_type_node, V2DF_type_node, NULL_TREE);
16229 tree void_ftype_pint_int
16230 = build_function_type_list (void_type_node,
16231 pint_type_node, integer_type_node, NULL_TREE);
16232 tree void_ftype_v16qi_v16qi_pchar
16233 = build_function_type_list (void_type_node,
16234 V16QI_type_node, V16QI_type_node,
16235 pchar_type_node, NULL_TREE);
16236 tree v2df_ftype_pcdouble
16237 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
16238 tree v2df_ftype_v2df_v2df
16239 = build_function_type_list (V2DF_type_node,
16240 V2DF_type_node, V2DF_type_node, NULL_TREE);
16241 tree v16qi_ftype_v16qi_v16qi
16242 = build_function_type_list (V16QI_type_node,
16243 V16QI_type_node, V16QI_type_node, NULL_TREE);
16244 tree v8hi_ftype_v8hi_v8hi
16245 = build_function_type_list (V8HI_type_node,
16246 V8HI_type_node, V8HI_type_node, NULL_TREE);
16247 tree v4si_ftype_v4si_v4si
16248 = build_function_type_list (V4SI_type_node,
16249 V4SI_type_node, V4SI_type_node, NULL_TREE);
16250 tree v2di_ftype_v2di_v2di
16251 = build_function_type_list (V2DI_type_node,
16252 V2DI_type_node, V2DI_type_node, NULL_TREE);
16253 tree v2di_ftype_v2df_v2df
16254 = build_function_type_list (V2DI_type_node,
16255 V2DF_type_node, V2DF_type_node, NULL_TREE);
16256 tree v2df_ftype_v2df
16257 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16258 tree v2di_ftype_v2di_int
16259 = build_function_type_list (V2DI_type_node,
16260 V2DI_type_node, integer_type_node, NULL_TREE);
16261 tree v2di_ftype_v2di_v2di_int
16262 = build_function_type_list (V2DI_type_node, V2DI_type_node,
16263 V2DI_type_node, integer_type_node, NULL_TREE);
16264 tree v4si_ftype_v4si_int
16265 = build_function_type_list (V4SI_type_node,
16266 V4SI_type_node, integer_type_node, NULL_TREE);
16267 tree v8hi_ftype_v8hi_int
16268 = build_function_type_list (V8HI_type_node,
16269 V8HI_type_node, integer_type_node, NULL_TREE);
16270 tree v8hi_ftype_v8hi_v2di
16271 = build_function_type_list (V8HI_type_node,
16272 V8HI_type_node, V2DI_type_node, NULL_TREE);
16273 tree v4si_ftype_v4si_v2di
16274 = build_function_type_list (V4SI_type_node,
16275 V4SI_type_node, V2DI_type_node, NULL_TREE);
16276 tree v4si_ftype_v8hi_v8hi
16277 = build_function_type_list (V4SI_type_node,
16278 V8HI_type_node, V8HI_type_node, NULL_TREE);
16279 tree di_ftype_v8qi_v8qi
16280 = build_function_type_list (long_long_unsigned_type_node,
16281 V8QI_type_node, V8QI_type_node, NULL_TREE);
16282 tree di_ftype_v2si_v2si
16283 = build_function_type_list (long_long_unsigned_type_node,
16284 V2SI_type_node, V2SI_type_node, NULL_TREE);
16285 tree v2di_ftype_v16qi_v16qi
16286 = build_function_type_list (V2DI_type_node,
16287 V16QI_type_node, V16QI_type_node, NULL_TREE);
16288 tree v2di_ftype_v4si_v4si
16289 = build_function_type_list (V2DI_type_node,
16290 V4SI_type_node, V4SI_type_node, NULL_TREE);
16291 tree int_ftype_v16qi
16292 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
16293 tree v16qi_ftype_pcchar
16294 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
16295 tree void_ftype_pchar_v16qi
16296 = build_function_type_list (void_type_node,
16297 pchar_type_node, V16QI_type_node, NULL_TREE);
16299 tree float80_type;
16300 tree float128_type;
16301 tree ftype;
16303 /* The __float80 type. */
16304 if (TYPE_MODE (long_double_type_node) == XFmode)
16305 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
16306 "__float80");
16307 else
16309 /* The __float80 type. */
16310 float80_type = make_node (REAL_TYPE);
16311 TYPE_PRECISION (float80_type) = 80;
16312 layout_type (float80_type);
16313 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
16316 if (TARGET_64BIT)
16318 float128_type = make_node (REAL_TYPE);
16319 TYPE_PRECISION (float128_type) = 128;
16320 layout_type (float128_type);
16321 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
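/* Explanatory note (not part of the original source): registering these
   types makes the names visible to the front ends, so user code may then
   declare e.g. "__float80 x;" and, when compiling for 64-bit,
   "__float128 y;".  */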
16324 /* Add all builtins that are more or less simple operations on two
16325 operands. */
16326 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16328 /* Use one of the operands; the target can have a different mode for
16329 mask-generating compares. */
16330 enum machine_mode mode;
16331 tree type;
16333 if (d->name == 0)
16334 continue;
16335 mode = insn_data[d->icode].operand[1].mode;
16337 switch (mode)
16339 case V16QImode:
16340 type = v16qi_ftype_v16qi_v16qi;
16341 break;
16342 case V8HImode:
16343 type = v8hi_ftype_v8hi_v8hi;
16344 break;
16345 case V4SImode:
16346 type = v4si_ftype_v4si_v4si;
16347 break;
16348 case V2DImode:
16349 type = v2di_ftype_v2di_v2di;
16350 break;
16351 case V2DFmode:
16352 type = v2df_ftype_v2df_v2df;
16353 break;
16354 case V4SFmode:
16355 type = v4sf_ftype_v4sf_v4sf;
16356 break;
16357 case V8QImode:
16358 type = v8qi_ftype_v8qi_v8qi;
16359 break;
16360 case V4HImode:
16361 type = v4hi_ftype_v4hi_v4hi;
16362 break;
16363 case V2SImode:
16364 type = v2si_ftype_v2si_v2si;
16365 break;
16366 case DImode:
16367 type = di_ftype_di_di;
16368 break;
16370 default:
16371 gcc_unreachable ();
16374 /* Override for comparisons. */
16375 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
16376 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3)
16377 type = v4si_ftype_v4sf_v4sf;
16379 if (d->icode == CODE_FOR_sse2_maskcmpv2df3
16380 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
16381 type = v2di_ftype_v2df_v2df;
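/* (Explanatory note, not in the original source: the mask-generating compare
   patterns yield an all-ones/all-zeros mask per element, which is why the
   comparison builtins use integer vector return types.)  */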
16383 def_builtin (d->mask, d->name, type, d->code);
16386 /* Add all builtins that are more or less simple operations on 1 operand. */
16387 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16389 enum machine_mode mode;
16390 tree type;
16392 if (d->name == 0)
16393 continue;
16394 mode = insn_data[d->icode].operand[1].mode;
16396 switch (mode)
16398 case V16QImode:
16399 type = v16qi_ftype_v16qi;
16400 break;
16401 case V8HImode:
16402 type = v8hi_ftype_v8hi;
16403 break;
16404 case V4SImode:
16405 type = v4si_ftype_v4si;
16406 break;
16407 case V2DFmode:
16408 type = v2df_ftype_v2df;
16409 break;
16410 case V4SFmode:
16411 type = v4sf_ftype_v4sf;
16412 break;
16413 case V8QImode:
16414 type = v8qi_ftype_v8qi;
16415 break;
16416 case V4HImode:
16417 type = v4hi_ftype_v4hi;
16418 break;
16419 case V2SImode:
16420 type = v2si_ftype_v2si;
16421 break;
16423 default:
16424 gcc_unreachable ();
16427 def_builtin (d->mask, d->name, type, d->code);
16430 /* Add the remaining MMX insns with somewhat more complicated types. */
16431 def_builtin (MASK_MMX, "__builtin_ia32_emms", void_ftype_void, IX86_BUILTIN_EMMS);
16432 def_builtin (MASK_MMX, "__builtin_ia32_psllw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSLLW);
16433 def_builtin (MASK_MMX, "__builtin_ia32_pslld", v2si_ftype_v2si_di, IX86_BUILTIN_PSLLD);
16434 def_builtin (MASK_MMX, "__builtin_ia32_psllq", di_ftype_di_di, IX86_BUILTIN_PSLLQ);
16436 def_builtin (MASK_MMX, "__builtin_ia32_psrlw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRLW);
16437 def_builtin (MASK_MMX, "__builtin_ia32_psrld", v2si_ftype_v2si_di, IX86_BUILTIN_PSRLD);
16438 def_builtin (MASK_MMX, "__builtin_ia32_psrlq", di_ftype_di_di, IX86_BUILTIN_PSRLQ);
16440 def_builtin (MASK_MMX, "__builtin_ia32_psraw", v4hi_ftype_v4hi_di, IX86_BUILTIN_PSRAW);
16441 def_builtin (MASK_MMX, "__builtin_ia32_psrad", v2si_ftype_v2si_di, IX86_BUILTIN_PSRAD);
16443 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pshufw", v4hi_ftype_v4hi_int, IX86_BUILTIN_PSHUFW);
16444 def_builtin (MASK_MMX, "__builtin_ia32_pmaddwd", v2si_ftype_v4hi_v4hi, IX86_BUILTIN_PMADDWD);
16446 /* comi/ucomi insns. */
16447 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
16448 if (d->mask == MASK_SSE2)
16449 def_builtin (d->mask, d->name, int_ftype_v2df_v2df, d->code);
16450 else
16451 def_builtin (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
16453 def_builtin (MASK_MMX, "__builtin_ia32_packsswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKSSWB);
16454 def_builtin (MASK_MMX, "__builtin_ia32_packssdw", v4hi_ftype_v2si_v2si, IX86_BUILTIN_PACKSSDW);
16455 def_builtin (MASK_MMX, "__builtin_ia32_packuswb", v8qi_ftype_v4hi_v4hi, IX86_BUILTIN_PACKUSWB);
16457 def_builtin (MASK_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
16458 def_builtin (MASK_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
16459 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtpi2ps", v4sf_ftype_v4sf_v2si, IX86_BUILTIN_CVTPI2PS);
16460 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTPS2PI);
16461 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtsi2ss", v4sf_ftype_v4sf_int, IX86_BUILTIN_CVTSI2SS);
16462 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtsi642ss", v4sf_ftype_v4sf_int64, IX86_BUILTIN_CVTSI642SS);
16463 def_builtin_const (MASK_SSE, "__builtin_ia32_cvtss2si", int_ftype_v4sf, IX86_BUILTIN_CVTSS2SI);
16464 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvtss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTSS2SI64);
16465 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttps2pi", v2si_ftype_v4sf, IX86_BUILTIN_CVTTPS2PI);
16466 def_builtin_const (MASK_SSE, "__builtin_ia32_cvttss2si", int_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI);
16467 def_builtin_const (MASK_SSE | MASK_64BIT, "__builtin_ia32_cvttss2si64", int64_ftype_v4sf, IX86_BUILTIN_CVTTSS2SI64);
16469 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
16471 def_builtin (MASK_SSE, "__builtin_ia32_loadups", v4sf_ftype_pcfloat, IX86_BUILTIN_LOADUPS);
16472 def_builtin (MASK_SSE, "__builtin_ia32_storeups", void_ftype_pfloat_v4sf, IX86_BUILTIN_STOREUPS);
16474 def_builtin (MASK_SSE, "__builtin_ia32_loadhps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADHPS);
16475 def_builtin (MASK_SSE, "__builtin_ia32_loadlps", v4sf_ftype_v4sf_pv2si, IX86_BUILTIN_LOADLPS);
16476 def_builtin (MASK_SSE, "__builtin_ia32_storehps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STOREHPS);
16477 def_builtin (MASK_SSE, "__builtin_ia32_storelps", void_ftype_pv2si_v4sf, IX86_BUILTIN_STORELPS);
16479 def_builtin (MASK_SSE, "__builtin_ia32_movmskps", int_ftype_v4sf, IX86_BUILTIN_MOVMSKPS);
16480 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_pmovmskb", int_ftype_v8qi, IX86_BUILTIN_PMOVMSKB);
16481 def_builtin (MASK_SSE, "__builtin_ia32_movntps", void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTPS);
16482 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_movntq", void_ftype_pdi_di, IX86_BUILTIN_MOVNTQ);
16484 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_sfence", void_ftype_void, IX86_BUILTIN_SFENCE);
16486 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_psadbw", di_ftype_v8qi_v8qi, IX86_BUILTIN_PSADBW);
16488 def_builtin (MASK_SSE, "__builtin_ia32_rcpps", v4sf_ftype_v4sf, IX86_BUILTIN_RCPPS);
16489 def_builtin (MASK_SSE, "__builtin_ia32_rcpss", v4sf_ftype_v4sf, IX86_BUILTIN_RCPSS);
16490 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTPS);
16491 def_builtin (MASK_SSE, "__builtin_ia32_rsqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_RSQRTSS);
16492 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtps", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTPS);
16493 def_builtin_const (MASK_SSE, "__builtin_ia32_sqrtss", v4sf_ftype_v4sf, IX86_BUILTIN_SQRTSS);
16495 def_builtin (MASK_SSE, "__builtin_ia32_shufps", v4sf_ftype_v4sf_v4sf_int, IX86_BUILTIN_SHUFPS);
16497 /* Original 3DNow! */
16498 def_builtin (MASK_3DNOW, "__builtin_ia32_femms", void_ftype_void, IX86_BUILTIN_FEMMS);
16499 def_builtin (MASK_3DNOW, "__builtin_ia32_pavgusb", v8qi_ftype_v8qi_v8qi, IX86_BUILTIN_PAVGUSB);
16500 def_builtin (MASK_3DNOW, "__builtin_ia32_pf2id", v2si_ftype_v2sf, IX86_BUILTIN_PF2ID);
16501 def_builtin (MASK_3DNOW, "__builtin_ia32_pfacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFACC);
16502 def_builtin (MASK_3DNOW, "__builtin_ia32_pfadd", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFADD);
16503 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpeq", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPEQ);
16504 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpge", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGE);
16505 def_builtin (MASK_3DNOW, "__builtin_ia32_pfcmpgt", v2si_ftype_v2sf_v2sf, IX86_BUILTIN_PFCMPGT);
16506 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmax", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMAX);
16507 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmin", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMIN);
16508 def_builtin (MASK_3DNOW, "__builtin_ia32_pfmul", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFMUL);
16509 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcp", v2sf_ftype_v2sf, IX86_BUILTIN_PFRCP);
16510 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT1);
16511 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrcpit2", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRCPIT2);
16512 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqrt", v2sf_ftype_v2sf, IX86_BUILTIN_PFRSQRT);
16513 def_builtin (MASK_3DNOW, "__builtin_ia32_pfrsqit1", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFRSQIT1);
16514 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsub", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUB);
16515 def_builtin (MASK_3DNOW, "__builtin_ia32_pfsubr", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFSUBR);
16516 def_builtin (MASK_3DNOW, "__builtin_ia32_pi2fd", v2sf_ftype_v2si, IX86_BUILTIN_PI2FD);
16517 def_builtin (MASK_3DNOW, "__builtin_ia32_pmulhrw", v4hi_ftype_v4hi_v4hi, IX86_BUILTIN_PMULHRW);
16519 /* 3DNow! extension as used in the Athlon CPU. */
16520 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pf2iw", v2si_ftype_v2sf, IX86_BUILTIN_PF2IW);
16521 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFNACC);
16522 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pfpnacc", v2sf_ftype_v2sf_v2sf, IX86_BUILTIN_PFPNACC);
16523 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pi2fw", v2sf_ftype_v2si, IX86_BUILTIN_PI2FW);
16524 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsf", v2sf_ftype_v2sf, IX86_BUILTIN_PSWAPDSF);
16525 def_builtin (MASK_3DNOW_A, "__builtin_ia32_pswapdsi", v2si_ftype_v2si, IX86_BUILTIN_PSWAPDSI);
16527 /* SSE2 */
16528 def_builtin (MASK_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
16530 def_builtin (MASK_SSE2, "__builtin_ia32_loadupd", v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);
16531 def_builtin (MASK_SSE2, "__builtin_ia32_storeupd", void_ftype_pdouble_v2df, IX86_BUILTIN_STOREUPD);
16533 def_builtin (MASK_SSE2, "__builtin_ia32_loadhpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADHPD);
16534 def_builtin (MASK_SSE2, "__builtin_ia32_loadlpd", v2df_ftype_v2df_pcdouble, IX86_BUILTIN_LOADLPD);
16536 def_builtin (MASK_SSE2, "__builtin_ia32_movmskpd", int_ftype_v2df, IX86_BUILTIN_MOVMSKPD);
16537 def_builtin (MASK_SSE2, "__builtin_ia32_pmovmskb128", int_ftype_v16qi, IX86_BUILTIN_PMOVMSKB128);
16538 def_builtin (MASK_SSE2, "__builtin_ia32_movnti", void_ftype_pint_int, IX86_BUILTIN_MOVNTI);
16539 def_builtin (MASK_SSE2, "__builtin_ia32_movntpd", void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTPD);
16540 def_builtin (MASK_SSE2, "__builtin_ia32_movntdq", void_ftype_pv2di_v2di, IX86_BUILTIN_MOVNTDQ);
16542 def_builtin (MASK_SSE2, "__builtin_ia32_pshufd", v4si_ftype_v4si_int, IX86_BUILTIN_PSHUFD);
16543 def_builtin (MASK_SSE2, "__builtin_ia32_pshuflw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFLW);
16544 def_builtin (MASK_SSE2, "__builtin_ia32_pshufhw", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSHUFHW);
16545 def_builtin (MASK_SSE2, "__builtin_ia32_psadbw128", v2di_ftype_v16qi_v16qi, IX86_BUILTIN_PSADBW128);
16547 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtpd", v2df_ftype_v2df, IX86_BUILTIN_SQRTPD);
16548 def_builtin_const (MASK_SSE2, "__builtin_ia32_sqrtsd", v2df_ftype_v2df, IX86_BUILTIN_SQRTSD);
16550 def_builtin (MASK_SSE2, "__builtin_ia32_shufpd", v2df_ftype_v2df_v2df_int, IX86_BUILTIN_SHUFPD);
16552 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2pd", v2df_ftype_v4si, IX86_BUILTIN_CVTDQ2PD);
16553 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtdq2ps", v4sf_ftype_v4si, IX86_BUILTIN_CVTDQ2PS);
16555 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTPD2DQ);
16556 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTPD2PI);
16557 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpd2ps", v4sf_ftype_v2df, IX86_BUILTIN_CVTPD2PS);
16558 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2dq", v4si_ftype_v2df, IX86_BUILTIN_CVTTPD2DQ);
16559 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttpd2pi", v2si_ftype_v2df, IX86_BUILTIN_CVTTPD2PI);
16561 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtpi2pd", v2df_ftype_v2si, IX86_BUILTIN_CVTPI2PD);
16563 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2si", int_ftype_v2df, IX86_BUILTIN_CVTSD2SI);
16564 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttsd2si", int_ftype_v2df, IX86_BUILTIN_CVTTSD2SI);
16565 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTSD2SI64);
16566 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvttsd2si64", int64_ftype_v2df, IX86_BUILTIN_CVTTSD2SI64);
16568 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTPS2DQ);
16569 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtps2pd", v2df_ftype_v4sf, IX86_BUILTIN_CVTPS2PD);
16570 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvttps2dq", v4si_ftype_v4sf, IX86_BUILTIN_CVTTPS2DQ);
16572 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsi2sd", v2df_ftype_v2df_int, IX86_BUILTIN_CVTSI2SD);
16573 def_builtin_const (MASK_SSE2 | MASK_64BIT, "__builtin_ia32_cvtsi642sd", v2df_ftype_v2df_int64, IX86_BUILTIN_CVTSI642SD);
16574 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtsd2ss", v4sf_ftype_v4sf_v2df, IX86_BUILTIN_CVTSD2SS);
16575 def_builtin_const (MASK_SSE2, "__builtin_ia32_cvtss2sd", v2df_ftype_v2df_v4sf, IX86_BUILTIN_CVTSS2SD);
16577 def_builtin (MASK_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
16578 def_builtin (MASK_SSE2, "__builtin_ia32_lfence", void_ftype_void, IX86_BUILTIN_LFENCE);
16579 def_builtin (MASK_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
16581 def_builtin (MASK_SSE2, "__builtin_ia32_loaddqu", v16qi_ftype_pcchar, IX86_BUILTIN_LOADDQU);
16582 def_builtin (MASK_SSE2, "__builtin_ia32_storedqu", void_ftype_pchar_v16qi, IX86_BUILTIN_STOREDQU);
16584 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq", di_ftype_v2si_v2si, IX86_BUILTIN_PMULUDQ);
16585 def_builtin (MASK_SSE2, "__builtin_ia32_pmuludq128", v2di_ftype_v4si_v4si, IX86_BUILTIN_PMULUDQ128);
16587 def_builtin (MASK_SSE2, "__builtin_ia32_psllw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSLLW128);
16588 def_builtin (MASK_SSE2, "__builtin_ia32_pslld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSLLD128);
16589 def_builtin (MASK_SSE2, "__builtin_ia32_psllq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSLLQ128);
16591 def_builtin (MASK_SSE2, "__builtin_ia32_psrlw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRLW128);
16592 def_builtin (MASK_SSE2, "__builtin_ia32_psrld128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRLD128);
16593 def_builtin (MASK_SSE2, "__builtin_ia32_psrlq128", v2di_ftype_v2di_v2di, IX86_BUILTIN_PSRLQ128);
16595 def_builtin (MASK_SSE2, "__builtin_ia32_psraw128", v8hi_ftype_v8hi_v2di, IX86_BUILTIN_PSRAW128);
16596 def_builtin (MASK_SSE2, "__builtin_ia32_psrad128", v4si_ftype_v4si_v2di, IX86_BUILTIN_PSRAD128);
16598 def_builtin (MASK_SSE2, "__builtin_ia32_pslldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLDQI128);
16599 def_builtin (MASK_SSE2, "__builtin_ia32_psllwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSLLWI128);
16600 def_builtin (MASK_SSE2, "__builtin_ia32_pslldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSLLDI128);
16601 def_builtin (MASK_SSE2, "__builtin_ia32_psllqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSLLQI128);
16603 def_builtin (MASK_SSE2, "__builtin_ia32_psrldqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLDQI128);
16604 def_builtin (MASK_SSE2, "__builtin_ia32_psrlwi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRLWI128);
16605 def_builtin (MASK_SSE2, "__builtin_ia32_psrldi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRLDI128);
16606 def_builtin (MASK_SSE2, "__builtin_ia32_psrlqi128", v2di_ftype_v2di_int, IX86_BUILTIN_PSRLQI128);
16608 def_builtin (MASK_SSE2, "__builtin_ia32_psrawi128", v8hi_ftype_v8hi_int, IX86_BUILTIN_PSRAWI128);
16609 def_builtin (MASK_SSE2, "__builtin_ia32_psradi128", v4si_ftype_v4si_int, IX86_BUILTIN_PSRADI128);
16611 def_builtin (MASK_SSE2, "__builtin_ia32_pmaddwd128", v4si_ftype_v8hi_v8hi, IX86_BUILTIN_PMADDWD128);
16613 /* Prescott New Instructions. */
16614 def_builtin (MASK_SSE3, "__builtin_ia32_monitor",
16615 void_ftype_pcvoid_unsigned_unsigned,
16616 IX86_BUILTIN_MONITOR);
16617 def_builtin (MASK_SSE3, "__builtin_ia32_mwait",
16618 void_ftype_unsigned_unsigned,
16619 IX86_BUILTIN_MWAIT);
16620 def_builtin (MASK_SSE3, "__builtin_ia32_movshdup",
16621 v4sf_ftype_v4sf,
16622 IX86_BUILTIN_MOVSHDUP);
16623 def_builtin (MASK_SSE3, "__builtin_ia32_movsldup",
16624 v4sf_ftype_v4sf,
16625 IX86_BUILTIN_MOVSLDUP);
16626 def_builtin (MASK_SSE3, "__builtin_ia32_lddqu",
16627 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
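/* Hedged sketch of how the SSE3 builtins above are typically exposed to
   users; the wrappers live in pmmintrin.h, not in this file, and the exact
   signatures shown here are an assumption:

     _mm_monitor (p, ext, hints)  ->  __builtin_ia32_monitor (p, ext, hints)
     _mm_mwait (ext, hints)       ->  __builtin_ia32_mwait (ext, hints)
     _mm_lddqu_si128 (p)          ->  __builtin_ia32_lddqu ((char const *) p)  */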
16629 /* SSSE3. */
16630 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
16631 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
16632 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
16633 IX86_BUILTIN_PALIGNR);
16635 /* Access to the vec_init patterns. */
16636 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
16637 integer_type_node, NULL_TREE);
16638 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
16639 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
16641 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
16642 short_integer_type_node,
16643 short_integer_type_node,
16644 short_integer_type_node, NULL_TREE);
16645 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v4hi",
16646 ftype, IX86_BUILTIN_VEC_INIT_V4HI);
16648 ftype = build_function_type_list (V8QI_type_node, char_type_node,
16649 char_type_node, char_type_node,
16650 char_type_node, char_type_node,
16651 char_type_node, char_type_node,
16652 char_type_node, NULL_TREE);
16653 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v8qi",
16654 ftype, IX86_BUILTIN_VEC_INIT_V8QI);
16656 /* Access to the vec_extract patterns. */
16657 ftype = build_function_type_list (double_type_node, V2DF_type_node,
16658 integer_type_node, NULL_TREE);
16659 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2df",
16660 ftype, IX86_BUILTIN_VEC_EXT_V2DF);
16662 ftype = build_function_type_list (long_long_integer_type_node,
16663 V2DI_type_node, integer_type_node,
16664 NULL_TREE);
16665 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v2di",
16666 ftype, IX86_BUILTIN_VEC_EXT_V2DI);
16668 ftype = build_function_type_list (float_type_node, V4SF_type_node,
16669 integer_type_node, NULL_TREE);
16670 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4sf",
16671 ftype, IX86_BUILTIN_VEC_EXT_V4SF);
16673 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
16674 integer_type_node, NULL_TREE);
16675 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v4si",
16676 ftype, IX86_BUILTIN_VEC_EXT_V4SI);
16678 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
16679 integer_type_node, NULL_TREE);
16680 def_builtin (MASK_SSE, "__builtin_ia32_vec_ext_v8hi",
16681 ftype, IX86_BUILTIN_VEC_EXT_V8HI);
16683 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
16684 integer_type_node, NULL_TREE);
16685 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_ext_v4hi",
16686 ftype, IX86_BUILTIN_VEC_EXT_V4HI);
16688 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
16689 integer_type_node, NULL_TREE);
16690 def_builtin (MASK_MMX, "__builtin_ia32_vec_ext_v2si",
16691 ftype, IX86_BUILTIN_VEC_EXT_V2SI);
16693 /* Access to the vec_set patterns. */
16694 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
16695 intHI_type_node,
16696 integer_type_node, NULL_TREE);
16697 def_builtin (MASK_SSE, "__builtin_ia32_vec_set_v8hi",
16698 ftype, IX86_BUILTIN_VEC_SET_V8HI);
16700 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
16701 intHI_type_node,
16702 integer_type_node, NULL_TREE);
16703 def_builtin (MASK_SSE | MASK_3DNOW_A, "__builtin_ia32_vec_set_v4hi",
16704 ftype, IX86_BUILTIN_VEC_SET_V4HI);
16707 /* Errors in the source file can cause expand_expr to return const0_rtx
16708 where we expect a vector. To avoid crashing, use one of the vector
16709 clear instructions. */
16710 static rtx
16711 safe_vector_operand (rtx x, enum machine_mode mode)
16713 if (x == const0_rtx)
16714 x = CONST0_RTX (mode);
16715 return x;
16718 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
16720 static rtx
16721 ix86_expand_binop_builtin (enum insn_code icode, tree arglist, rtx target)
16723 rtx pat, xops[3];
16724 tree arg0 = TREE_VALUE (arglist);
16725 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16726 rtx op0 = expand_normal (arg0);
16727 rtx op1 = expand_normal (arg1);
16728 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16729 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16730 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
16732 if (VECTOR_MODE_P (mode0))
16733 op0 = safe_vector_operand (op0, mode0);
16734 if (VECTOR_MODE_P (mode1))
16735 op1 = safe_vector_operand (op1, mode1);
16737 if (optimize || !target
16738 || GET_MODE (target) != tmode
16739 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16740 target = gen_reg_rtx (tmode);
16742 if (GET_MODE (op1) == SImode && mode1 == TImode)
16744 rtx x = gen_reg_rtx (V4SImode);
16745 emit_insn (gen_sse2_loadd (x, op1));
16746 op1 = gen_lowpart (TImode, x);
16749 /* The insn must want input operands in the same modes as the
16750 result. */
16751 gcc_assert ((GET_MODE (op0) == mode0 || GET_MODE (op0) == VOIDmode)
16752 && (GET_MODE (op1) == mode1 || GET_MODE (op1) == VOIDmode));
16754 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16755 op0 = copy_to_mode_reg (mode0, op0);
16756 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16757 op1 = copy_to_mode_reg (mode1, op1);
16759 /* ??? Using ix86_fixup_binary_operands is problematic when
16760 we've got mismatched modes. Fake it. */
16762 xops[0] = target;
16763 xops[1] = op0;
16764 xops[2] = op1;
16766 if (tmode == mode0 && tmode == mode1)
16768 target = ix86_fixup_binary_operands (UNKNOWN, tmode, xops);
16769 op0 = xops[1];
16770 op1 = xops[2];
16772 else if (optimize || !ix86_binary_operator_ok (UNKNOWN, tmode, xops))
16774 op0 = force_reg (mode0, op0);
16775 op1 = force_reg (mode1, op1);
16776 target = gen_reg_rtx (tmode);
16779 pat = GEN_FCN (icode) (target, op0, op1);
16780 if (! pat)
16781 return 0;
16782 emit_insn (pat);
16783 return target;
16786 /* Subroutine of ix86_expand_builtin to take care of stores. */
16788 static rtx
16789 ix86_expand_store_builtin (enum insn_code icode, tree arglist)
16791 rtx pat;
16792 tree arg0 = TREE_VALUE (arglist);
16793 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16794 rtx op0 = expand_normal (arg0);
16795 rtx op1 = expand_normal (arg1);
16796 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
16797 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
16799 if (VECTOR_MODE_P (mode1))
16800 op1 = safe_vector_operand (op1, mode1);
16802 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16803 op1 = copy_to_mode_reg (mode1, op1);
16805 pat = GEN_FCN (icode) (op0, op1);
16806 if (pat)
16807 emit_insn (pat);
16808 return 0;
16811 /* Subroutine of ix86_expand_builtin to take care of unop insns. */
16813 static rtx
16814 ix86_expand_unop_builtin (enum insn_code icode, tree arglist,
16815 rtx target, int do_load)
16817 rtx pat;
16818 tree arg0 = TREE_VALUE (arglist);
16819 rtx op0 = expand_normal (arg0);
16820 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16821 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16823 if (optimize || !target
16824 || GET_MODE (target) != tmode
16825 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16826 target = gen_reg_rtx (tmode);
16827 if (do_load)
16828 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
16829 else
16831 if (VECTOR_MODE_P (mode0))
16832 op0 = safe_vector_operand (op0, mode0);
16834 if ((optimize && !register_operand (op0, mode0))
16835 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16836 op0 = copy_to_mode_reg (mode0, op0);
16839 pat = GEN_FCN (icode) (target, op0);
16840 if (! pat)
16841 return 0;
16842 emit_insn (pat);
16843 return target;
16846 /* Subroutine of ix86_expand_builtin to take care of three special unop insns:
16847 sqrtss, rsqrtss, rcpss. */
16849 static rtx
16850 ix86_expand_unop1_builtin (enum insn_code icode, tree arglist, rtx target)
16852 rtx pat;
16853 tree arg0 = TREE_VALUE (arglist);
16854 rtx op1, op0 = expand_normal (arg0);
16855 enum machine_mode tmode = insn_data[icode].operand[0].mode;
16856 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
16858 if (optimize || !target
16859 || GET_MODE (target) != tmode
16860 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16861 target = gen_reg_rtx (tmode);
16863 if (VECTOR_MODE_P (mode0))
16864 op0 = safe_vector_operand (op0, mode0);
16866 if ((optimize && !register_operand (op0, mode0))
16867 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16868 op0 = copy_to_mode_reg (mode0, op0);
16870 op1 = op0;
16871 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
16872 op1 = copy_to_mode_reg (mode0, op1);
16874 pat = GEN_FCN (icode) (target, op0, op1);
16875 if (! pat)
16876 return 0;
16877 emit_insn (pat);
16878 return target;
16881 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
16883 static rtx
16884 ix86_expand_sse_compare (const struct builtin_description *d, tree arglist,
16885 rtx target)
16887 rtx pat;
16888 tree arg0 = TREE_VALUE (arglist);
16889 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16890 rtx op0 = expand_normal (arg0);
16891 rtx op1 = expand_normal (arg1);
16892 rtx op2;
16893 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
16894 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
16895 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
16896 enum rtx_code comparison = d->comparison;
16898 if (VECTOR_MODE_P (mode0))
16899 op0 = safe_vector_operand (op0, mode0);
16900 if (VECTOR_MODE_P (mode1))
16901 op1 = safe_vector_operand (op1, mode1);
16903 /* Swap operands if we have a comparison that isn't available in
16904 hardware. */
16905 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16907 rtx tmp = gen_reg_rtx (mode1);
16908 emit_move_insn (tmp, op1);
16909 op1 = op0;
16910 op0 = tmp;
16913 if (optimize || !target
16914 || GET_MODE (target) != tmode
16915 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
16916 target = gen_reg_rtx (tmode);
16918 if ((optimize && !register_operand (op0, mode0))
16919 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
16920 op0 = copy_to_mode_reg (mode0, op0);
16921 if ((optimize && !register_operand (op1, mode1))
16922 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
16923 op1 = copy_to_mode_reg (mode1, op1);
16925 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16926 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
16927 if (! pat)
16928 return 0;
16929 emit_insn (pat);
16930 return target;
16933 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
16935 static rtx
16936 ix86_expand_sse_comi (const struct builtin_description *d, tree arglist,
16937 rtx target)
16939 rtx pat;
16940 tree arg0 = TREE_VALUE (arglist);
16941 tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
16942 rtx op0 = expand_normal (arg0);
16943 rtx op1 = expand_normal (arg1);
16944 rtx op2;
16945 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
16946 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
16947 enum rtx_code comparison = d->comparison;
16949 if (VECTOR_MODE_P (mode0))
16950 op0 = safe_vector_operand (op0, mode0);
16951 if (VECTOR_MODE_P (mode1))
16952 op1 = safe_vector_operand (op1, mode1);
16954 /* Swap operands if we have a comparison that isn't available in
16955 hardware. */
16956 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
16958 rtx tmp = op1;
16959 op1 = op0;
16960 op0 = tmp;
16963 target = gen_reg_rtx (SImode);
16964 emit_move_insn (target, const0_rtx);
16965 target = gen_rtx_SUBREG (QImode, target, 0);
16967 if ((optimize && !register_operand (op0, mode0))
16968 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
16969 op0 = copy_to_mode_reg (mode0, op0);
16970 if ((optimize && !register_operand (op1, mode1))
16971 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
16972 op1 = copy_to_mode_reg (mode1, op1);
16974 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
16975 pat = GEN_FCN (d->icode) (op0, op1);
16976 if (! pat)
16977 return 0;
16978 emit_insn (pat);
16979 emit_insn (gen_rtx_SET (VOIDmode,
16980 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
16981 gen_rtx_fmt_ee (comparison, QImode,
16982 SET_DEST (pat),
16983 const0_rtx)));
16985 return SUBREG_REG (target);
16988 /* Return the integer constant in ARG. Constrain it to be in the range
16989 of the subparts of VEC_TYPE; issue an error if not. */
16991 static int
16992 get_element_number (tree vec_type, tree arg)
16994 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
16996 if (!host_integerp (arg, 1)
16997 || (elt = tree_low_cst (arg, 1), elt > max))
16999 error ("selector must be an integer constant in the range 0..%wi", max);
17000 return 0;
17003 return elt;
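/* Worked example: for a V4SF argument TYPE_VECTOR_SUBPARTS is 4, so a call
   like __builtin_ia32_vec_ext_v4sf (v, 5) is diagnosed with
   "selector must be an integer constant in the range 0..3", and 0 is
   returned so that expansion can continue.  (Illustrative only.)  */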
17006 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17007 ix86_expand_vector_init. We DO have language-level syntax for this, in
17008 the form of (type){ init-list }. Except that since we can't place emms
17009 instructions from inside the compiler, we can't allow the use of MMX
17010 registers unless the user explicitly asks for it. So we do *not* define
17011 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
17012 we have builtins invoked by mmintrin.h that give us license to emit
17013 these sorts of instructions. */
17015 static rtx
17016 ix86_expand_vec_init_builtin (tree type, tree arglist, rtx target)
17018 enum machine_mode tmode = TYPE_MODE (type);
17019 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
17020 int i, n_elt = GET_MODE_NUNITS (tmode);
17021 rtvec v = rtvec_alloc (n_elt);
17023 gcc_assert (VECTOR_MODE_P (tmode));
17025 for (i = 0; i < n_elt; ++i, arglist = TREE_CHAIN (arglist))
17027 rtx x = expand_normal (TREE_VALUE (arglist));
17028 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
17031 gcc_assert (arglist == NULL);
17033 if (!target || !register_operand (target, tmode))
17034 target = gen_reg_rtx (tmode);
17036 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
17037 return target;
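/* Sketch of the mmintrin.h wrappers mentioned in the comment above; the
   real definitions live in mmintrin.h, and the exact name and argument
   order here are assumptions, not taken from this file:

     __m64
     _mm_set_pi32 (int __i1, int __i0)
     {
       return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
     }

   Routing MMX vector construction through such builtins is what grants the
   compiler "license" to emit MMX register code, since mmx.md defines no
   vec_init patterns of its own.  */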
17040 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17041 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
17042 had a language-level syntax for referencing vector elements. */
17044 static rtx
17045 ix86_expand_vec_ext_builtin (tree arglist, rtx target)
17047 enum machine_mode tmode, mode0;
17048 tree arg0, arg1;
17049 int elt;
17050 rtx op0;
17052 arg0 = TREE_VALUE (arglist);
17053 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17055 op0 = expand_normal (arg0);
17056 elt = get_element_number (TREE_TYPE (arg0), arg1);
17058 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17059 mode0 = TYPE_MODE (TREE_TYPE (arg0));
17060 gcc_assert (VECTOR_MODE_P (mode0));
17062 op0 = force_reg (mode0, op0);
17064 if (optimize || !target || !register_operand (target, tmode))
17065 target = gen_reg_rtx (tmode);
17067 ix86_expand_vector_extract (true, target, op0, elt);
17069 return target;
17072 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
17073 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
17074 a language-level syntax for referencing vector elements. */
17076 static rtx
17077 ix86_expand_vec_set_builtin (tree arglist)
17079 enum machine_mode tmode, mode1;
17080 tree arg0, arg1, arg2;
17081 int elt;
17082 rtx op0, op1;
17084 arg0 = TREE_VALUE (arglist);
17085 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17086 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17088 tmode = TYPE_MODE (TREE_TYPE (arg0));
17089 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
17090 gcc_assert (VECTOR_MODE_P (tmode));
17092 op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
17093 op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
17094 elt = get_element_number (TREE_TYPE (arg0), arg2);
17096 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
17097 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
17099 op0 = force_reg (tmode, op0);
17100 op1 = force_reg (mode1, op1);
17102 ix86_expand_vector_set (true, op0, op1, elt);
17104 return op0;
17107 /* Expand an expression EXP that calls a built-in function,
17108 with result going to TARGET if that's convenient
17109 (and in mode MODE if that's convenient).
17110 SUBTARGET may be used as the target for computing one of EXP's operands.
17111 IGNORE is nonzero if the value is to be ignored. */
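/* Illustrative entry-point example (user code, not taken from this file):

     __v4sf z = __builtin_ia32_addps (x, y);

   Such a call arrives here as EXP; the switch below intercepts builtins
   that need special expansion, and the trailing bdesc_2arg / bdesc_1arg /
   bdesc_comi loops dispatch the ordinary one- and two-operand cases.  */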
17113 static rtx
17114 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
17115 enum machine_mode mode ATTRIBUTE_UNUSED,
17116 int ignore ATTRIBUTE_UNUSED)
17118 const struct builtin_description *d;
17119 size_t i;
17120 enum insn_code icode;
17121 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
17122 tree arglist = TREE_OPERAND (exp, 1);
17123 tree arg0, arg1, arg2;
17124 rtx op0, op1, op2, pat;
17125 enum machine_mode tmode, mode0, mode1, mode2, mode3;
17126 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
17128 switch (fcode)
17130 case IX86_BUILTIN_EMMS:
17131 emit_insn (gen_mmx_emms ());
17132 return 0;
17134 case IX86_BUILTIN_SFENCE:
17135 emit_insn (gen_sse_sfence ());
17136 return 0;
17138 case IX86_BUILTIN_MASKMOVQ:
17139 case IX86_BUILTIN_MASKMOVDQU:
17140 icode = (fcode == IX86_BUILTIN_MASKMOVQ
17141 ? CODE_FOR_mmx_maskmovq
17142 : CODE_FOR_sse2_maskmovdqu);
17143 /* Note the arg order is different from the operand order. */
17144 arg1 = TREE_VALUE (arglist);
17145 arg2 = TREE_VALUE (TREE_CHAIN (arglist));
17146 arg0 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17147 op0 = expand_normal (arg0);
17148 op1 = expand_normal (arg1);
17149 op2 = expand_normal (arg2);
17150 mode0 = insn_data[icode].operand[0].mode;
17151 mode1 = insn_data[icode].operand[1].mode;
17152 mode2 = insn_data[icode].operand[2].mode;
17154 op0 = force_reg (Pmode, op0);
17155 op0 = gen_rtx_MEM (mode1, op0);
17157 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
17158 op0 = copy_to_mode_reg (mode0, op0);
17159 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
17160 op1 = copy_to_mode_reg (mode1, op1);
17161 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
17162 op2 = copy_to_mode_reg (mode2, op2);
17163 pat = GEN_FCN (icode) (op0, op1, op2);
17164 if (! pat)
17165 return 0;
17166 emit_insn (pat);
17167 return 0;
17169 case IX86_BUILTIN_SQRTSS:
17170 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmsqrtv4sf2, arglist, target);
17171 case IX86_BUILTIN_RSQRTSS:
17172 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrsqrtv4sf2, arglist, target);
17173 case IX86_BUILTIN_RCPSS:
17174 return ix86_expand_unop1_builtin (CODE_FOR_sse_vmrcpv4sf2, arglist, target);
17176 case IX86_BUILTIN_LOADUPS:
17177 return ix86_expand_unop_builtin (CODE_FOR_sse_movups, arglist, target, 1);
17179 case IX86_BUILTIN_STOREUPS:
17180 return ix86_expand_store_builtin (CODE_FOR_sse_movups, arglist);
17182 case IX86_BUILTIN_LOADHPS:
17183 case IX86_BUILTIN_LOADLPS:
17184 case IX86_BUILTIN_LOADHPD:
17185 case IX86_BUILTIN_LOADLPD:
17186 icode = (fcode == IX86_BUILTIN_LOADHPS ? CODE_FOR_sse_loadhps
17187 : fcode == IX86_BUILTIN_LOADLPS ? CODE_FOR_sse_loadlps
17188 : fcode == IX86_BUILTIN_LOADHPD ? CODE_FOR_sse2_loadhpd
17189 : CODE_FOR_sse2_loadlpd);
17190 arg0 = TREE_VALUE (arglist);
17191 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17192 op0 = expand_normal (arg0);
17193 op1 = expand_normal (arg1);
17194 tmode = insn_data[icode].operand[0].mode;
17195 mode0 = insn_data[icode].operand[1].mode;
17196 mode1 = insn_data[icode].operand[2].mode;
17198 op0 = force_reg (mode0, op0);
17199 op1 = gen_rtx_MEM (mode1, copy_to_mode_reg (Pmode, op1));
17200 if (optimize || target == 0
17201 || GET_MODE (target) != tmode
17202 || !register_operand (target, tmode))
17203 target = gen_reg_rtx (tmode);
17204 pat = GEN_FCN (icode) (target, op0, op1);
17205 if (! pat)
17206 return 0;
17207 emit_insn (pat);
17208 return target;
17210 case IX86_BUILTIN_STOREHPS:
17211 case IX86_BUILTIN_STORELPS:
17212 icode = (fcode == IX86_BUILTIN_STOREHPS ? CODE_FOR_sse_storehps
17213 : CODE_FOR_sse_storelps);
17214 arg0 = TREE_VALUE (arglist);
17215 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17216 op0 = expand_normal (arg0);
17217 op1 = expand_normal (arg1);
17218 mode0 = insn_data[icode].operand[0].mode;
17219 mode1 = insn_data[icode].operand[1].mode;
17221 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
17222 op1 = force_reg (mode1, op1);
17224 pat = GEN_FCN (icode) (op0, op1);
17225 if (! pat)
17226 return 0;
17227 emit_insn (pat);
17228 return const0_rtx;
17230 case IX86_BUILTIN_MOVNTPS:
17231 return ix86_expand_store_builtin (CODE_FOR_sse_movntv4sf, arglist);
17232 case IX86_BUILTIN_MOVNTQ:
17233 return ix86_expand_store_builtin (CODE_FOR_sse_movntdi, arglist);
17235 case IX86_BUILTIN_LDMXCSR:
17236 op0 = expand_normal (TREE_VALUE (arglist));
17237 target = assign_386_stack_local (SImode, SLOT_TEMP);
17238 emit_move_insn (target, op0);
17239 emit_insn (gen_sse_ldmxcsr (target));
17240 return 0;
17242 case IX86_BUILTIN_STMXCSR:
17243 target = assign_386_stack_local (SImode, SLOT_TEMP);
17244 emit_insn (gen_sse_stmxcsr (target));
17245 return copy_to_mode_reg (SImode, target);
17247 case IX86_BUILTIN_SHUFPS:
17248 case IX86_BUILTIN_SHUFPD:
17249 icode = (fcode == IX86_BUILTIN_SHUFPS
17250 ? CODE_FOR_sse_shufps
17251 : CODE_FOR_sse2_shufpd);
17252 arg0 = TREE_VALUE (arglist);
17253 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17254 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17255 op0 = expand_normal (arg0);
17256 op1 = expand_normal (arg1);
17257 op2 = expand_normal (arg2);
17258 tmode = insn_data[icode].operand[0].mode;
17259 mode0 = insn_data[icode].operand[1].mode;
17260 mode1 = insn_data[icode].operand[2].mode;
17261 mode2 = insn_data[icode].operand[3].mode;
17263 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
17264 op0 = copy_to_mode_reg (mode0, op0);
17265 if ((optimize && !register_operand (op1, mode1))
17266 || !(*insn_data[icode].operand[2].predicate) (op1, mode1))
17267 op1 = copy_to_mode_reg (mode1, op1);
17268 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
17270 /* @@@ better error message */
17271 error ("mask must be an immediate");
17272 return gen_reg_rtx (tmode);
17274 if (optimize || target == 0
17275 || GET_MODE (target) != tmode
17276 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17277 target = gen_reg_rtx (tmode);
17278 pat = GEN_FCN (icode) (target, op0, op1, op2);
17279 if (! pat)
17280 return 0;
17281 emit_insn (pat);
17282 return target;
17284 case IX86_BUILTIN_PSHUFW:
17285 case IX86_BUILTIN_PSHUFD:
17286 case IX86_BUILTIN_PSHUFHW:
17287 case IX86_BUILTIN_PSHUFLW:
17288 icode = ( fcode == IX86_BUILTIN_PSHUFHW ? CODE_FOR_sse2_pshufhw
17289 : fcode == IX86_BUILTIN_PSHUFLW ? CODE_FOR_sse2_pshuflw
17290 : fcode == IX86_BUILTIN_PSHUFD ? CODE_FOR_sse2_pshufd
17291 : CODE_FOR_mmx_pshufw);
17292 arg0 = TREE_VALUE (arglist);
17293 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17294 op0 = expand_normal (arg0);
17295 op1 = expand_normal (arg1);
17296 tmode = insn_data[icode].operand[0].mode;
17297 mode1 = insn_data[icode].operand[1].mode;
17298 mode2 = insn_data[icode].operand[2].mode;
17300 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17301 op0 = copy_to_mode_reg (mode1, op0);
17302 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17304 /* @@@ better error message */
17305 error ("mask must be an immediate");
17306 return const0_rtx;
17308 if (target == 0
17309 || GET_MODE (target) != tmode
17310 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17311 target = gen_reg_rtx (tmode);
17312 pat = GEN_FCN (icode) (target, op0, op1);
17313 if (! pat)
17314 return 0;
17315 emit_insn (pat);
17316 return target;
17318 case IX86_BUILTIN_PSLLDQI128:
17319 case IX86_BUILTIN_PSRLDQI128:
17320 icode = ( fcode == IX86_BUILTIN_PSLLDQI128 ? CODE_FOR_sse2_ashlti3
17321 : CODE_FOR_sse2_lshrti3);
17322 arg0 = TREE_VALUE (arglist);
17323 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17324 op0 = expand_normal (arg0);
17325 op1 = expand_normal (arg1);
17326 tmode = insn_data[icode].operand[0].mode;
17327 mode1 = insn_data[icode].operand[1].mode;
17328 mode2 = insn_data[icode].operand[2].mode;
17330 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17332 op0 = copy_to_reg (op0);
17333 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17335 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17337 error ("shift must be an immediate");
17338 return const0_rtx;
17340 target = gen_reg_rtx (V2DImode);
17341 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, V2DImode, 0), op0, op1);
17342 if (! pat)
17343 return 0;
17344 emit_insn (pat);
17345 return target;
17347 case IX86_BUILTIN_FEMMS:
17348 emit_insn (gen_mmx_femms ());
17349 return NULL_RTX;
17351 case IX86_BUILTIN_PAVGUSB:
17352 return ix86_expand_binop_builtin (CODE_FOR_mmx_uavgv8qi3, arglist, target);
17354 case IX86_BUILTIN_PF2ID:
17355 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2id, arglist, target, 0);
17357 case IX86_BUILTIN_PFACC:
17358 return ix86_expand_binop_builtin (CODE_FOR_mmx_haddv2sf3, arglist, target);
17360 case IX86_BUILTIN_PFADD:
17361 return ix86_expand_binop_builtin (CODE_FOR_mmx_addv2sf3, arglist, target);
17363 case IX86_BUILTIN_PFCMPEQ:
17364 return ix86_expand_binop_builtin (CODE_FOR_mmx_eqv2sf3, arglist, target);
17366 case IX86_BUILTIN_PFCMPGE:
17367 return ix86_expand_binop_builtin (CODE_FOR_mmx_gev2sf3, arglist, target);
17369 case IX86_BUILTIN_PFCMPGT:
17370 return ix86_expand_binop_builtin (CODE_FOR_mmx_gtv2sf3, arglist, target);
17372 case IX86_BUILTIN_PFMAX:
17373 return ix86_expand_binop_builtin (CODE_FOR_mmx_smaxv2sf3, arglist, target);
17375 case IX86_BUILTIN_PFMIN:
17376 return ix86_expand_binop_builtin (CODE_FOR_mmx_sminv2sf3, arglist, target);
17378 case IX86_BUILTIN_PFMUL:
17379 return ix86_expand_binop_builtin (CODE_FOR_mmx_mulv2sf3, arglist, target);
17381 case IX86_BUILTIN_PFRCP:
17382 return ix86_expand_unop_builtin (CODE_FOR_mmx_rcpv2sf2, arglist, target, 0);
17384 case IX86_BUILTIN_PFRCPIT1:
17385 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit1v2sf3, arglist, target);
17387 case IX86_BUILTIN_PFRCPIT2:
17388 return ix86_expand_binop_builtin (CODE_FOR_mmx_rcpit2v2sf3, arglist, target);
17390 case IX86_BUILTIN_PFRSQIT1:
17391 return ix86_expand_binop_builtin (CODE_FOR_mmx_rsqit1v2sf3, arglist, target);
17393 case IX86_BUILTIN_PFRSQRT:
17394 return ix86_expand_unop_builtin (CODE_FOR_mmx_rsqrtv2sf2, arglist, target, 0);
17396 case IX86_BUILTIN_PFSUB:
17397 return ix86_expand_binop_builtin (CODE_FOR_mmx_subv2sf3, arglist, target);
17399 case IX86_BUILTIN_PFSUBR:
17400 return ix86_expand_binop_builtin (CODE_FOR_mmx_subrv2sf3, arglist, target);
17402 case IX86_BUILTIN_PI2FD:
17403 return ix86_expand_unop_builtin (CODE_FOR_mmx_floatv2si2, arglist, target, 0);
17405 case IX86_BUILTIN_PMULHRW:
17406 return ix86_expand_binop_builtin (CODE_FOR_mmx_pmulhrwv4hi3, arglist, target);
17408 case IX86_BUILTIN_PF2IW:
17409 return ix86_expand_unop_builtin (CODE_FOR_mmx_pf2iw, arglist, target, 0);
17411 case IX86_BUILTIN_PFNACC:
17412 return ix86_expand_binop_builtin (CODE_FOR_mmx_hsubv2sf3, arglist, target);
17414 case IX86_BUILTIN_PFPNACC:
17415 return ix86_expand_binop_builtin (CODE_FOR_mmx_addsubv2sf3, arglist, target);
17417 case IX86_BUILTIN_PI2FW:
17418 return ix86_expand_unop_builtin (CODE_FOR_mmx_pi2fw, arglist, target, 0);
17420 case IX86_BUILTIN_PSWAPDSI:
17421 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2si2, arglist, target, 0);
17423 case IX86_BUILTIN_PSWAPDSF:
17424 return ix86_expand_unop_builtin (CODE_FOR_mmx_pswapdv2sf2, arglist, target, 0);
17426 case IX86_BUILTIN_SQRTSD:
17427 return ix86_expand_unop1_builtin (CODE_FOR_sse2_vmsqrtv2df2, arglist, target);
17428 case IX86_BUILTIN_LOADUPD:
17429 return ix86_expand_unop_builtin (CODE_FOR_sse2_movupd, arglist, target, 1);
17430 case IX86_BUILTIN_STOREUPD:
17431 return ix86_expand_store_builtin (CODE_FOR_sse2_movupd, arglist);
17433 case IX86_BUILTIN_MFENCE:
17434 emit_insn (gen_sse2_mfence ());
17435 return 0;
17436 case IX86_BUILTIN_LFENCE:
17437 emit_insn (gen_sse2_lfence ());
17438 return 0;
17440 case IX86_BUILTIN_CLFLUSH:
17441 arg0 = TREE_VALUE (arglist);
17442 op0 = expand_normal (arg0);
17443 icode = CODE_FOR_sse2_clflush;
17444 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
17445 op0 = copy_to_mode_reg (Pmode, op0);
17447 emit_insn (gen_sse2_clflush (op0));
17448 return 0;
17450 case IX86_BUILTIN_MOVNTPD:
17451 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2df, arglist);
17452 case IX86_BUILTIN_MOVNTDQ:
17453 return ix86_expand_store_builtin (CODE_FOR_sse2_movntv2di, arglist);
17454 case IX86_BUILTIN_MOVNTI:
17455 return ix86_expand_store_builtin (CODE_FOR_sse2_movntsi, arglist);
17457 case IX86_BUILTIN_LOADDQU:
17458 return ix86_expand_unop_builtin (CODE_FOR_sse2_movdqu, arglist, target, 1);
17459 case IX86_BUILTIN_STOREDQU:
17460 return ix86_expand_store_builtin (CODE_FOR_sse2_movdqu, arglist);
17462 case IX86_BUILTIN_MONITOR:
17463 arg0 = TREE_VALUE (arglist);
17464 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17465 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17466 op0 = expand_normal (arg0);
17467 op1 = expand_normal (arg1);
17468 op2 = expand_normal (arg2);
17469 if (!REG_P (op0))
17470 op0 = copy_to_mode_reg (Pmode, op0);
17471 if (!REG_P (op1))
17472 op1 = copy_to_mode_reg (SImode, op1);
17473 if (!REG_P (op2))
17474 op2 = copy_to_mode_reg (SImode, op2);
17475 if (!TARGET_64BIT)
17476 emit_insn (gen_sse3_monitor (op0, op1, op2));
17477 else
17478 emit_insn (gen_sse3_monitor64 (op0, op1, op2));
17479 return 0;
17481 case IX86_BUILTIN_MWAIT:
17482 arg0 = TREE_VALUE (arglist);
17483 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17484 op0 = expand_normal (arg0);
17485 op1 = expand_normal (arg1);
17486 if (!REG_P (op0))
17487 op0 = copy_to_mode_reg (SImode, op0);
17488 if (!REG_P (op1))
17489 op1 = copy_to_mode_reg (SImode, op1);
17490 emit_insn (gen_sse3_mwait (op0, op1));
17491 return 0;
17493 case IX86_BUILTIN_LDDQU:
17494 return ix86_expand_unop_builtin (CODE_FOR_sse3_lddqu, arglist,
17495 target, 1);
17497 case IX86_BUILTIN_PALIGNR:
17498 case IX86_BUILTIN_PALIGNR128:
17499 if (fcode == IX86_BUILTIN_PALIGNR)
17501 icode = CODE_FOR_ssse3_palignrdi;
17502 mode = DImode;
17504 else
17506 icode = CODE_FOR_ssse3_palignrti;
17507 mode = V2DImode;
17509 arg0 = TREE_VALUE (arglist);
17510 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17511 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17512 op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
17513 op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
17514 op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
17515 tmode = insn_data[icode].operand[0].mode;
17516 mode1 = insn_data[icode].operand[1].mode;
17517 mode2 = insn_data[icode].operand[2].mode;
17518 mode3 = insn_data[icode].operand[3].mode;
17520 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17522 op0 = copy_to_reg (op0);
17523 op0 = simplify_gen_subreg (mode1, op0, GET_MODE (op0), 0);
17525 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17527 op1 = copy_to_reg (op1);
17528 op1 = simplify_gen_subreg (mode2, op1, GET_MODE (op1), 0);
17530 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17532 error ("shift must be an immediate");
17533 return const0_rtx;
17535 target = gen_reg_rtx (mode);
17536 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
17537 op0, op1, op2);
17538 if (! pat)
17539 return 0;
17540 emit_insn (pat);
17541 return target;
17543 case IX86_BUILTIN_VEC_INIT_V2SI:
17544 case IX86_BUILTIN_VEC_INIT_V4HI:
17545 case IX86_BUILTIN_VEC_INIT_V8QI:
17546 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
17548 case IX86_BUILTIN_VEC_EXT_V2DF:
17549 case IX86_BUILTIN_VEC_EXT_V2DI:
17550 case IX86_BUILTIN_VEC_EXT_V4SF:
17551 case IX86_BUILTIN_VEC_EXT_V4SI:
17552 case IX86_BUILTIN_VEC_EXT_V8HI:
17553 case IX86_BUILTIN_VEC_EXT_V2SI:
17554 case IX86_BUILTIN_VEC_EXT_V4HI:
17555 return ix86_expand_vec_ext_builtin (arglist, target);
17557 case IX86_BUILTIN_VEC_SET_V8HI:
17558 case IX86_BUILTIN_VEC_SET_V4HI:
17559 return ix86_expand_vec_set_builtin (arglist);
17561 default:
17562 break;
17565 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17566 if (d->code == fcode)
17568 /* Compares are treated specially. */
17569 if (d->icode == CODE_FOR_sse_maskcmpv4sf3
17570 || d->icode == CODE_FOR_sse_vmmaskcmpv4sf3
17571 || d->icode == CODE_FOR_sse2_maskcmpv2df3
17572 || d->icode == CODE_FOR_sse2_vmmaskcmpv2df3)
17573 return ix86_expand_sse_compare (d, arglist, target);
17575 return ix86_expand_binop_builtin (d->icode, arglist, target);
17578 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17579 if (d->code == fcode)
17580 return ix86_expand_unop_builtin (d->icode, arglist, target, 0);
17582 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
17583 if (d->code == fcode)
17584 return ix86_expand_sse_comi (d, arglist, target);
17586 gcc_unreachable ();
17589 /* Returns a function decl for a vectorized version of the builtin function
17590 with builtin function code FN and the result vector type TYPE, or NULL_TREE
17591 if it is not available. */
17593 static tree
17594 ix86_builtin_vectorized_function (enum built_in_function fn, tree type)
17596 enum machine_mode el_mode;
17597 int n;
17599 if (TREE_CODE (type) != VECTOR_TYPE)
17600 return NULL_TREE;
17602 el_mode = TYPE_MODE (TREE_TYPE (type));
17603 n = TYPE_VECTOR_SUBPARTS (type);
17605 switch (fn)
17607 case BUILT_IN_SQRT:
17608 if (el_mode == DFmode && n == 2)
17609 return ix86_builtins[IX86_BUILTIN_SQRTPD];
17610 return NULL_TREE;
17612 case BUILT_IN_SQRTF:
17613 if (el_mode == SFmode && n == 4)
17614 return ix86_builtins[IX86_BUILTIN_SQRTPS];
17615 return NULL_TREE;
17617 default:
17621 return NULL_TREE;
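/* Sketch of the intended effect (illustrative, assuming SSE2 and the
   vectorizer are enabled): a scalar loop such as

     for (i = 0; i < n; i++)
       out[i] = sqrt (in[i]);

   over doubles can have its BUILT_IN_SQRT calls replaced by the V2DF
   builtin returned above (IX86_BUILTIN_SQRTPD), two elements at a time.  */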
17624 /* Store OPERAND to the memory after reload is completed. This means
17625 that we can't easily use assign_stack_local. */
17626 rtx
17627 ix86_force_to_memory (enum machine_mode mode, rtx operand)
17629 rtx result;
17631 gcc_assert (reload_completed);
17632 if (TARGET_RED_ZONE)
17634 result = gen_rtx_MEM (mode,
17635 gen_rtx_PLUS (Pmode,
17636 stack_pointer_rtx,
17637 GEN_INT (-RED_ZONE_SIZE)));
17638 emit_move_insn (result, operand);
17640 else if (!TARGET_RED_ZONE && TARGET_64BIT)
17642 switch (mode)
17644 case HImode:
17645 case SImode:
17646 operand = gen_lowpart (DImode, operand);
17647 /* FALLTHRU */
17648 case DImode:
17649 emit_insn (
17650 gen_rtx_SET (VOIDmode,
17651 gen_rtx_MEM (DImode,
17652 gen_rtx_PRE_DEC (DImode,
17653 stack_pointer_rtx)),
17654 operand));
17655 break;
17656 default:
17657 gcc_unreachable ();
17659 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17661 else
17663 switch (mode)
17665 case DImode:
17667 rtx operands[2];
17668 split_di (&operand, 1, operands, operands + 1);
17669 emit_insn (
17670 gen_rtx_SET (VOIDmode,
17671 gen_rtx_MEM (SImode,
17672 gen_rtx_PRE_DEC (Pmode,
17673 stack_pointer_rtx)),
17674 operands[1]));
17675 emit_insn (
17676 gen_rtx_SET (VOIDmode,
17677 gen_rtx_MEM (SImode,
17678 gen_rtx_PRE_DEC (Pmode,
17679 stack_pointer_rtx)),
17680 operands[0]));
17682 break;
17683 case HImode:
17684 /* Store HImodes as SImodes. */
17685 operand = gen_lowpart (SImode, operand);
17686 /* FALLTHRU */
17687 case SImode:
17688 emit_insn (
17689 gen_rtx_SET (VOIDmode,
17690 gen_rtx_MEM (GET_MODE (operand),
17691 gen_rtx_PRE_DEC (SImode,
17692 stack_pointer_rtx)),
17693 operand));
17694 break;
17695 default:
17696 gcc_unreachable ();
17698 result = gen_rtx_MEM (mode, stack_pointer_rtx);
17700 return result;
17703 /* Free operand from the memory. */
17704 void
17705 ix86_free_from_memory (enum machine_mode mode)
17707 if (!TARGET_RED_ZONE)
17709 int size;
17711 if (mode == DImode || TARGET_64BIT)
17712 size = 8;
17713 else
17714 size = 4;
17715 /* Use LEA to deallocate stack space. In peephole2 it will be converted
17716 to a pop or add instruction if registers are available. */
17717 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
17718 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
17719 GEN_INT (size))));
17723 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
17724 QImode must go into class Q_REGS.
17725 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
17726 movdf to do mem-to-mem moves through integer regs. */
17727 enum reg_class
17728 ix86_preferred_reload_class (rtx x, enum reg_class class)
17730 enum machine_mode mode = GET_MODE (x);
17732 /* We're only allowed to return a subclass of CLASS. Many of the
17733 following checks fail for NO_REGS, so eliminate that early. */
17734 if (class == NO_REGS)
17735 return NO_REGS;
17737 /* All classes can load zeros. */
17738 if (x == CONST0_RTX (mode))
17739 return class;
17741 /* Force constants into memory if we are loading a (nonzero) constant into
17742 an MMX or SSE register. This is because there are no MMX/SSE instructions
17743 to load from a constant. */
17744 if (CONSTANT_P (x)
17745 && (MAYBE_MMX_CLASS_P (class) || MAYBE_SSE_CLASS_P (class)))
17746 return NO_REGS;
17748 /* Prefer SSE regs only, if we can use them for math. */
17749 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
17750 return SSE_CLASS_P (class) ? class : NO_REGS;
17752 /* Floating-point constants need more complex checks. */
17753 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
17755 /* General regs can load everything. */
17756 if (reg_class_subset_p (class, GENERAL_REGS))
17757 return class;
17759 /* Floats can load 0 and 1 plus some others. Note that we eliminated
17760 zero above. We only want to wind up preferring 80387 registers if
17761 we plan on doing computation with them. */
17762 if (TARGET_80387
17763 && standard_80387_constant_p (x))
17765 /* Limit class to non-sse. */
17766 if (class == FLOAT_SSE_REGS)
17767 return FLOAT_REGS;
17768 if (class == FP_TOP_SSE_REGS)
17769 return FP_TOP_REG;
17770 if (class == FP_SECOND_SSE_REGS)
17771 return FP_SECOND_REG;
17772 if (class == FLOAT_INT_REGS || class == FLOAT_REGS)
17773 return class;
17776 return NO_REGS;
17779 /* Generally when we see PLUS here, it's the function invariant
17780 (plus soft-fp const_int), which can only be computed into general
17781 regs. */
17782 if (GET_CODE (x) == PLUS)
17783 return reg_class_subset_p (class, GENERAL_REGS) ? class : NO_REGS;
17785 /* QImode constants are easy to load, but non-constant QImode data
17786 must go into Q_REGS. */
17787 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
17789 if (reg_class_subset_p (class, Q_REGS))
17790 return class;
17791 if (reg_class_subset_p (Q_REGS, class))
17792 return Q_REGS;
17793 return NO_REGS;
17796 return class;
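/* Worked example (illustrative): reloading a nonzero (const_double:SF)
   into SSE_REGS hits the CONSTANT_P check above and yields NO_REGS, so the
   constant ends up in the constant pool and is loaded from memory, since
   there is no SSE instruction that loads an immediate.  */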
17799 /* Discourage putting floating-point values in SSE registers unless
17800 SSE math is being used, and likewise for the 387 registers. */
17801 enum reg_class
17802 ix86_preferred_output_reload_class (rtx x, enum reg_class class)
17804 enum machine_mode mode = GET_MODE (x);
17806 /* Restrict the output reload class to the register bank that we are doing
17807 math on. If we would like not to return a subset of CLASS, reject this
17808 alternative: if reload cannot do this, it will still use its choice. */
17809 mode = GET_MODE (x);
17810 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
17811 return MAYBE_SSE_CLASS_P (class) ? SSE_REGS : NO_REGS;
17813 if (TARGET_80387 && SCALAR_FLOAT_MODE_P (mode))
17815 if (class == FP_TOP_SSE_REGS)
17816 return FP_TOP_REG;
17817 else if (class == FP_SECOND_SSE_REGS)
17818 return FP_SECOND_REG;
17819 else
17820 return FLOAT_CLASS_P (class) ? class : NO_REGS;
17823 return class;
17826 /* If we are copying between general and FP registers, we need a memory
17827 location. The same is true for SSE and MMX registers.
17829 The macro can't work reliably when one of the CLASSES is a class containing
17830 registers from multiple units (SSE, MMX, integer). We avoid this by never
17831 combining those units in single alternative in the machine description.
17832 Ensure that this constraint holds to avoid unexpected surprises.
17834 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
17835 enforce these sanity checks. */
17837 int
17838 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
17839 enum machine_mode mode, int strict)
17841 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
17842 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
17843 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
17844 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
17845 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
17846 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
17848 gcc_assert (!strict);
17849 return true;
17852 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
17853 return true;
17855 /* ??? This is a lie. We do have moves between mmx/general, and between
17856 mmx/sse2. But by saying we need secondary memory we discourage the
17857 register allocator from using the mmx registers unless needed. */
17858 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
17859 return true;
17861 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17863 /* SSE1 doesn't have any direct moves from other classes. */
17864 if (!TARGET_SSE2)
17865 return true;
17867 /* If the target says that inter-unit moves are more expensive
17868 than moving through memory, then don't generate them. */
17869 if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
17870 return true;
17872 /* Between SSE and general, we have moves no larger than word size. */
17873 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
17874 return true;
17876 /* ??? For the cost of one register reformat penalty, we could use
17877 the same instructions to move SFmode and DFmode data, but the
17878 relevant move patterns don't support those alternatives. */
17879 if (mode == SFmode || mode == DFmode)
17880 return true;
17883 return false;
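/* Worked example (illustrative): on ia32 with SSE2,

     ix86_secondary_memory_needed (SSE_REGS, GENERAL_REGS, SFmode, 0)

   returns true via the final SFmode/DFmode check, so such copies are
   routed through a stack slot instead of a direct inter-unit move.  */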
17886 /* Return true if the registers in CLASS cannot represent the change from
17887 modes FROM to TO. */
17889 bool
17890 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
17891 enum reg_class class)
17893 if (from == to)
17894 return false;
17896 /* x87 registers can't do subreg at all, as all values are reformatted
17897 to extended precision. */
17898 if (MAYBE_FLOAT_CLASS_P (class))
17899 return true;
17901 if (MAYBE_SSE_CLASS_P (class) || MAYBE_MMX_CLASS_P (class))
17903 /* Vector registers do not support QI or HImode loads. If we don't
17904 disallow a change to these modes, reload will assume it's ok to
17905 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
17906 the vec_dupv4hi pattern. */
17907 if (GET_MODE_SIZE (from) < 4)
17908 return true;
17910 /* Vector registers do not support subreg with nonzero offsets, which
17911 are otherwise valid for integer registers. Since we can't see
17912 whether we have a nonzero offset from here, prohibit all
17913 nonparadoxical subregs changing size. */
17914 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
17915 return true;
17918 return false;
17921 /* Return the cost of moving data from a register in class CLASS1 to
17922 one in class CLASS2.
17924 It is not required that the cost always equal 2 when FROM is the same as TO;
17925 on some machines it is expensive to move between registers if they are not
17926 general registers. */
17929 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
17930 enum reg_class class2)
17932 /* In case we require secondary memory, compute cost of the store followed
17933 by load. In order to avoid bad register allocation choices, we need
17934 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
17936 if (ix86_secondary_memory_needed (class1, class2, mode, 0))
17938 int cost = 1;
17940 cost += MAX (MEMORY_MOVE_COST (mode, class1, 0),
17941 MEMORY_MOVE_COST (mode, class1, 1));
17942 cost += MAX (MEMORY_MOVE_COST (mode, class2, 0),
17943 MEMORY_MOVE_COST (mode, class2, 1));
17945 /* In case of copying from a general purpose register we may emit multiple
17946 stores followed by a single load, causing a memory size mismatch stall.
17947 Count this as an arbitrarily high cost of 20. */
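/* For example, on !TARGET_64BIT a DImode value occupies two GENERAL_REGS
   but only one SSE register, so such a copy is two 32-bit stores followed
   by one 64-bit load and trips the size mismatch stall described above.  */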
17948 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
17949 cost += 20;
17951 /* In the case of FP/MMX moves, the registers actually overlap, and we
17952 have to switch modes in order to treat them differently. */
17953 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
17954 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
17955 cost += 20;
17957 return cost;
17960 /* Moves between SSE/MMX and integer unit are expensive. */
17961 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
17962 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
17963 return ix86_cost->mmxsse_to_integer;
17964 if (MAYBE_FLOAT_CLASS_P (class1))
17965 return ix86_cost->fp_move;
17966 if (MAYBE_SSE_CLASS_P (class1))
17967 return ix86_cost->sse_move;
17968 if (MAYBE_MMX_CLASS_P (class1))
17969 return ix86_cost->mmx_move;
17970 return 2;
17973 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
17975 bool
17976 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
17978 /* Flags, and only flags, can hold CCmode values. */
17979 if (CC_REGNO_P (regno))
17980 return GET_MODE_CLASS (mode) == MODE_CC;
17981 if (GET_MODE_CLASS (mode) == MODE_CC
17982 || GET_MODE_CLASS (mode) == MODE_RANDOM
17983 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
17984 return 0;
17985 if (FP_REGNO_P (regno))
17986 return VALID_FP_MODE_P (mode);
17987 if (SSE_REGNO_P (regno))
17989 /* We implement the move patterns for all vector modes into and
17990 out of SSE registers, even when no operation instructions
17991 are available. */
17992 return (VALID_SSE_REG_MODE (mode)
17993 || VALID_SSE2_REG_MODE (mode)
17994 || VALID_MMX_REG_MODE (mode)
17995 || VALID_MMX_REG_MODE_3DNOW (mode));
17997 if (MMX_REGNO_P (regno))
17999 /* We implement the move patterns for 3DNOW modes even in MMX mode,
18000 so if the register is available at all, then we can move data of
18001 the given mode into or out of it. */
18002 return (VALID_MMX_REG_MODE (mode)
18003 || VALID_MMX_REG_MODE_3DNOW (mode));
18006 if (mode == QImode)
18008 /* Take care for QImode values - they can be in non-QI regs,
18009 but then they do cause partial register stalls. */
18010 if (regno < 4 || TARGET_64BIT)
18011 return 1;
18012 if (!TARGET_PARTIAL_REG_STALL)
18013 return 1;
18014 return reload_in_progress || reload_completed;
18016 /* We handle both integer and floats in the general purpose registers. */
18017 else if (VALID_INT_MODE_P (mode))
18018 return 1;
18019 else if (VALID_FP_MODE_P (mode))
18020 return 1;
18021 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
18022 on to use that value in smaller contexts, this can easily force a
18023 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
18024 supporting DImode, allow it. */
18025 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
18026 return 1;
18028 return 0;
18031 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
18032 tieable integer mode. */
18034 static bool
18035 ix86_tieable_integer_mode_p (enum machine_mode mode)
18037 switch (mode)
18039 case HImode:
18040 case SImode:
18041 return true;
18043 case QImode:
18044 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
18046 case DImode:
18047 return TARGET_64BIT;
18049 default:
18050 return false;
18054 /* Return true if MODE1 is accessible in a register that can hold MODE2
18055 without copying. That is, all register classes that can hold MODE2
18056 can also hold MODE1. */
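/* For example, HImode and SImode always tie with each other; QImode joins
   them unless partial register stalls are a concern on a 32-bit target;
   XFmode ties only with the smaller SFmode and DFmode, and DFmode in turn
   ties with SFmode.  */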
18058 bool
18059 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
18061 if (mode1 == mode2)
18062 return true;
18064 if (ix86_tieable_integer_mode_p (mode1)
18065 && ix86_tieable_integer_mode_p (mode2))
18066 return true;
18068 /* MODE2 being XFmode implies fp stack or general regs, which means we
18069 can tie any smaller floating point modes to it. Note that we do not
18070 tie this with TFmode. */
18071 if (mode2 == XFmode)
18072 return mode1 == SFmode || mode1 == DFmode;
18074 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
18075 that we can tie it with SFmode. */
18076 if (mode2 == DFmode)
18077 return mode1 == SFmode;
18079 /* If MODE2 is only appropriate for an SSE register, then tie with
18080 any other mode acceptable to SSE registers. */
18081 if (GET_MODE_SIZE (mode2) >= 8
18082 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
18083 return ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1);
18085 /* If MODE2 is appropriate for an MMX (or SSE) register, then tie
18086 with any other mode acceptable to MMX registers. */
18087 if (GET_MODE_SIZE (mode2) == 8
18088 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
18089 return ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1);
18091 return false;
18094 /* Return the cost of moving data of mode M between a
18095 register and memory. A value of 2 is the default; this cost is
18096 relative to those in `REGISTER_MOVE_COST'.
18098 If moving between registers and memory is more expensive than
18099 between two registers, you should define this macro to express the
18100 relative cost.
18102 Also model the increased cost of moving QImode registers in
18103 non-Q_REGS classes.
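/* A QImode store from a register outside Q_REGS is charged int_store[0] + 4
   below, presumably because in 32-bit mode only %eax, %ebx, %ecx and %edx
   have byte subregisters and the value must be copied into one of them
   first.  */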
18106 ix86_memory_move_cost (enum machine_mode mode, enum reg_class class, int in)
18108 if (FLOAT_CLASS_P (class))
18110 int index;
18111 switch (mode)
18113 case SFmode:
18114 index = 0;
18115 break;
18116 case DFmode:
18117 index = 1;
18118 break;
18119 case XFmode:
18120 index = 2;
18121 break;
18122 default:
18123 return 100;
18125 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
18127 if (SSE_CLASS_P (class))
18129 int index;
18130 switch (GET_MODE_SIZE (mode))
18132 case 4:
18133 index = 0;
18134 break;
18135 case 8:
18136 index = 1;
18137 break;
18138 case 16:
18139 index = 2;
18140 break;
18141 default:
18142 return 100;
18144 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
18146 if (MMX_CLASS_P (class))
18148 int index;
18149 switch (GET_MODE_SIZE (mode))
18151 case 4:
18152 index = 0;
18153 break;
18154 case 8:
18155 index = 1;
18156 break;
18157 default:
18158 return 100;
18160 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
18162 switch (GET_MODE_SIZE (mode))
18164 case 1:
18165 if (in)
18166 return (Q_CLASS_P (class) ? ix86_cost->int_load[0]
18167 : ix86_cost->movzbl_load);
18168 else
18169 return (Q_CLASS_P (class) ? ix86_cost->int_store[0]
18170 : ix86_cost->int_store[0] + 4);
18171 break;
18172 case 2:
18173 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
18174 default:
18175 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
18176 if (mode == TFmode)
18177 mode = XFmode;
18178 return ((in ? ix86_cost->int_load[2] : ix86_cost->int_store[2])
18179 * (((int) GET_MODE_SIZE (mode)
18180 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
18184 /* Compute a (partial) cost for rtx X. Return true if the complete
18185 cost has been computed, and false if subexpressions should be
18186 scanned. In either case, *TOTAL contains the cost result. */
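/* As an example of the cases handled below, an address-like expression such
   as (plus (mult (reg) (const_int 4)) (reg)) is costed as a single lea
   rather than as a separate shift and add.  */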
18188 static bool
18189 ix86_rtx_costs (rtx x, int code, int outer_code, int *total)
18191 enum machine_mode mode = GET_MODE (x);
18193 switch (code)
18195 case CONST_INT:
18196 case CONST:
18197 case LABEL_REF:
18198 case SYMBOL_REF:
18199 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
18200 *total = 3;
18201 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
18202 *total = 2;
18203 else if (flag_pic && SYMBOLIC_CONST (x)
18204 && (!TARGET_64BIT
18205 || (GET_CODE (x) != LABEL_REF
18206 && (GET_CODE (x) != SYMBOL_REF
18207 || !SYMBOL_REF_LOCAL_P (x)))))
18208 *total = 1;
18209 else
18210 *total = 0;
18211 return true;
18213 case CONST_DOUBLE:
18214 if (mode == VOIDmode)
18215 *total = 0;
18216 else
18217 switch (standard_80387_constant_p (x))
18219 case 1: /* 0.0 */
18220 *total = 1;
18221 break;
18222 default: /* Other constants */
18223 *total = 2;
18224 break;
18225 case 0:
18226 case -1:
18227 /* Start with (MEM (SYMBOL_REF)), since that's where
18228 it'll probably end up. Add a penalty for size. */
18229 *total = (COSTS_N_INSNS (1)
18230 + (flag_pic != 0 && !TARGET_64BIT)
18231 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
18232 break;
18234 return true;
18236 case ZERO_EXTEND:
18237 /* The zero extension is often completely free on x86_64, so make
18238 it as cheap as possible. */
18239 if (TARGET_64BIT && mode == DImode
18240 && GET_MODE (XEXP (x, 0)) == SImode)
18241 *total = 1;
18242 else if (TARGET_ZERO_EXTEND_WITH_AND)
18243 *total = ix86_cost->add;
18244 else
18245 *total = ix86_cost->movzx;
18246 return false;
18248 case SIGN_EXTEND:
18249 *total = ix86_cost->movsx;
18250 return false;
18252 case ASHIFT:
18253 if (GET_CODE (XEXP (x, 1)) == CONST_INT
18254 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
18256 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18257 if (value == 1)
18259 *total = ix86_cost->add;
18260 return false;
18262 if ((value == 2 || value == 3)
18263 && ix86_cost->lea <= ix86_cost->shift_const)
18265 *total = ix86_cost->lea;
18266 return false;
18269 /* FALLTHRU */
18271 case ROTATE:
18272 case ASHIFTRT:
18273 case LSHIFTRT:
18274 case ROTATERT:
18275 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
18277 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18279 if (INTVAL (XEXP (x, 1)) > 32)
18280 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
18281 else
18282 *total = ix86_cost->shift_const * 2;
18284 else
18286 if (GET_CODE (XEXP (x, 1)) == AND)
18287 *total = ix86_cost->shift_var * 2;
18288 else
18289 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
18292 else
18294 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18295 *total = ix86_cost->shift_const;
18296 else
18297 *total = ix86_cost->shift_var;
18299 return false;
18301 case MULT:
18302 if (FLOAT_MODE_P (mode))
18304 *total = ix86_cost->fmul;
18305 return false;
18307 else
18309 rtx op0 = XEXP (x, 0);
18310 rtx op1 = XEXP (x, 1);
18311 int nbits;
18312 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
18314 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
18315 for (nbits = 0; value != 0; value &= value - 1)
18316 nbits++;
18318 else
18319 /* This is arbitrary. */
18320 nbits = 7;
18322 /* Compute costs correctly for widening multiplication. */
18323 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
18324 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
18325 == GET_MODE_SIZE (mode))
18327 int is_mulwiden = 0;
18328 enum machine_mode inner_mode = GET_MODE (op0);
18330 if (GET_CODE (op0) == GET_CODE (op1))
18331 is_mulwiden = 1, op1 = XEXP (op1, 0);
18332 else if (GET_CODE (op1) == CONST_INT)
18334 if (GET_CODE (op0) == SIGN_EXTEND)
18335 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
18336 == INTVAL (op1);
18337 else
18338 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
18341 if (is_mulwiden)
18342 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
18345 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
18346 + nbits * ix86_cost->mult_bit
18347 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
18349 return true;
18352 case DIV:
18353 case UDIV:
18354 case MOD:
18355 case UMOD:
18356 if (FLOAT_MODE_P (mode))
18357 *total = ix86_cost->fdiv;
18358 else
18359 *total = ix86_cost->divide[MODE_INDEX (mode)];
18360 return false;
18362 case PLUS:
18363 if (FLOAT_MODE_P (mode))
18364 *total = ix86_cost->fadd;
18365 else if (GET_MODE_CLASS (mode) == MODE_INT
18366 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
18368 if (GET_CODE (XEXP (x, 0)) == PLUS
18369 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
18370 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
18371 && CONSTANT_P (XEXP (x, 1)))
18373 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
18374 if (val == 2 || val == 4 || val == 8)
18376 *total = ix86_cost->lea;
18377 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18378 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
18379 outer_code);
18380 *total += rtx_cost (XEXP (x, 1), outer_code);
18381 return true;
18384 else if (GET_CODE (XEXP (x, 0)) == MULT
18385 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
18387 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
18388 if (val == 2 || val == 4 || val == 8)
18390 *total = ix86_cost->lea;
18391 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18392 *total += rtx_cost (XEXP (x, 1), outer_code);
18393 return true;
18396 else if (GET_CODE (XEXP (x, 0)) == PLUS)
18398 *total = ix86_cost->lea;
18399 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
18400 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
18401 *total += rtx_cost (XEXP (x, 1), outer_code);
18402 return true;
18405 /* FALLTHRU */
18407 case MINUS:
18408 if (FLOAT_MODE_P (mode))
18410 *total = ix86_cost->fadd;
18411 return false;
18413 /* FALLTHRU */
18415 case AND:
18416 case IOR:
18417 case XOR:
18418 if (!TARGET_64BIT && mode == DImode)
18420 *total = (ix86_cost->add * 2
18421 + (rtx_cost (XEXP (x, 0), outer_code)
18422 << (GET_MODE (XEXP (x, 0)) != DImode))
18423 + (rtx_cost (XEXP (x, 1), outer_code)
18424 << (GET_MODE (XEXP (x, 1)) != DImode)));
18425 return true;
18427 /* FALLTHRU */
18429 case NEG:
18430 if (FLOAT_MODE_P (mode))
18432 *total = ix86_cost->fchs;
18433 return false;
18435 /* FALLTHRU */
18437 case NOT:
18438 if (!TARGET_64BIT && mode == DImode)
18439 *total = ix86_cost->add * 2;
18440 else
18441 *total = ix86_cost->add;
18442 return false;
18444 case COMPARE:
18445 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
18446 && XEXP (XEXP (x, 0), 1) == const1_rtx
18447 && GET_CODE (XEXP (XEXP (x, 0), 2)) == CONST_INT
18448 && XEXP (x, 1) == const0_rtx)
18450 /* This kind of construct is implemented using test[bwl].
18451 Treat it as if we had an AND. */
18452 *total = (ix86_cost->add
18453 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
18454 + rtx_cost (const1_rtx, outer_code));
18455 return true;
18457 return false;
18459 case FLOAT_EXTEND:
18460 if (!TARGET_SSE_MATH
18461 || mode == XFmode
18462 || (mode == DFmode && !TARGET_SSE2))
18463 *total = 0;
18464 return false;
18466 case ABS:
18467 if (FLOAT_MODE_P (mode))
18468 *total = ix86_cost->fabs;
18469 return false;
18471 case SQRT:
18472 if (FLOAT_MODE_P (mode))
18473 *total = ix86_cost->fsqrt;
18474 return false;
18476 case UNSPEC:
18477 if (XINT (x, 1) == UNSPEC_TP)
18478 *total = 0;
18479 return false;
18481 default:
18482 return false;
18486 #if TARGET_MACHO
18488 static int current_machopic_label_num;
18490 /* Given a symbol name and its associated stub, write out the
18491 definition of the stub. */
18493 void
18494 machopic_output_stub (FILE *file, const char *symb, const char *stub)
18496 unsigned int length;
18497 char *binder_name, *symbol_name, lazy_ptr_name[32];
18498 int label = ++current_machopic_label_num;
18500 /* For 64-bit we shouldn't get here. */
18501 gcc_assert (!TARGET_64BIT);
18503 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
18504 symb = (*targetm.strip_name_encoding) (symb);
18506 length = strlen (stub);
18507 binder_name = alloca (length + 32);
18508 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
18510 length = strlen (symb);
18511 symbol_name = alloca (length + 32);
18512 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
18514 sprintf (lazy_ptr_name, "L%d$lz", label);
18516 if (MACHOPIC_PURE)
18517 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
18518 else
18519 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
18521 fprintf (file, "%s:\n", stub);
18522 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18524 if (MACHOPIC_PURE)
18526 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
18527 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
18528 fprintf (file, "\tjmp\t*%%edx\n");
18530 else
18531 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
18533 fprintf (file, "%s:\n", binder_name);
18535 if (MACHOPIC_PURE)
18537 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
18538 fprintf (file, "\tpushl\t%%eax\n");
18540 else
18541 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
18543 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
18545 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
18546 fprintf (file, "%s:\n", lazy_ptr_name);
18547 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
18548 fprintf (file, "\t.long %s\n", binder_name);
18551 void
18552 darwin_x86_file_end (void)
18554 darwin_file_end ();
18555 ix86_file_end ();
18557 #endif /* TARGET_MACHO */
18559 /* Order the registers for register allocator. */
18561 void
18562 x86_order_regs_for_local_alloc (void)
18564 int pos = 0;
18565 int i;
18567 /* First allocate the local general purpose registers. */
18568 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18569 if (GENERAL_REGNO_P (i) && call_used_regs[i])
18570 reg_alloc_order [pos++] = i;
18572 /* Global general purpose registers. */
18573 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
18574 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
18575 reg_alloc_order [pos++] = i;
18577 /* x87 registers come first in case we are doing FP math
18578 using them. */
18579 if (!TARGET_SSE_MATH)
18580 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18581 reg_alloc_order [pos++] = i;
18583 /* SSE registers. */
18584 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
18585 reg_alloc_order [pos++] = i;
18586 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
18587 reg_alloc_order [pos++] = i;
18589 /* x87 registers. */
18590 if (TARGET_SSE_MATH)
18591 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
18592 reg_alloc_order [pos++] = i;
18594 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
18595 reg_alloc_order [pos++] = i;
18597 /* Initialize the rest of the array, as we do not allocate some registers
18598 at all. */
18599 while (pos < FIRST_PSEUDO_REGISTER)
18600 reg_alloc_order [pos++] = 0;
18603 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
18604 struct attribute_spec.handler. */
18605 static tree
18606 ix86_handle_struct_attribute (tree *node, tree name,
18607 tree args ATTRIBUTE_UNUSED,
18608 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
18610 tree *type = NULL;
18611 if (DECL_P (*node))
18613 if (TREE_CODE (*node) == TYPE_DECL)
18614 type = &TREE_TYPE (*node);
18616 else
18617 type = node;
18619 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
18620 || TREE_CODE (*type) == UNION_TYPE)))
18622 warning (OPT_Wattributes, "%qs attribute ignored",
18623 IDENTIFIER_POINTER (name));
18624 *no_add_attrs = true;
18627 else if ((is_attribute_p ("ms_struct", name)
18628 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
18629 || ((is_attribute_p ("gcc_struct", name)
18630 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
18632 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
18633 IDENTIFIER_POINTER (name));
18634 *no_add_attrs = true;
18637 return NULL_TREE;
18640 static bool
18641 ix86_ms_bitfield_layout_p (tree record_type)
18643 return (TARGET_MS_BITFIELD_LAYOUT &&
18644 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
18645 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
18648 /* Returns an expression indicating where the this parameter is
18649 located on entry to the FUNCTION. */
18651 static rtx
18652 x86_this_parameter (tree function)
18654 tree type = TREE_TYPE (function);
18656 if (TARGET_64BIT)
18658 int n = aggregate_value_p (TREE_TYPE (type), type) != 0;
18659 return gen_rtx_REG (DImode, x86_64_int_parameter_registers[n]);
18662 if (ix86_function_regparm (type, function) > 0)
18664 tree parm;
18666 parm = TYPE_ARG_TYPES (type);
18667 /* Figure out whether or not the function has a variable number of
18668 arguments. */
18669 for (; parm; parm = TREE_CHAIN (parm))
18670 if (TREE_VALUE (parm) == void_type_node)
18671 break;
18672 /* If not, the this parameter is in the first argument. */
18673 if (parm)
18675 int regno = 0;
18676 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
18677 regno = 2;
18678 return gen_rtx_REG (SImode, regno);
18682 if (aggregate_value_p (TREE_TYPE (type), type))
18683 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 8));
18684 else
18685 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, 4));
18688 /* Determine whether x86_output_mi_thunk can succeed. */
18690 static bool
18691 x86_can_output_mi_thunk (tree thunk ATTRIBUTE_UNUSED,
18692 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
18693 HOST_WIDE_INT vcall_offset, tree function)
18695 /* 64-bit can handle anything. */
18696 if (TARGET_64BIT)
18697 return true;
18699 /* For 32-bit, everything's fine if we have one free register. */
18700 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
18701 return true;
18703 /* Need a free register for vcall_offset. */
18704 if (vcall_offset)
18705 return false;
18707 /* Need a free register for GOT references. */
18708 if (flag_pic && !(*targetm.binds_local_p) (function))
18709 return false;
18711 /* Otherwise ok. */
18712 return true;
18715 /* Output the assembler code for a thunk function. THUNK_DECL is the
18716 declaration for the thunk function itself, FUNCTION is the decl for
18717 the target function. DELTA is an immediate constant offset to be
18718 added to THIS. If VCALL_OFFSET is nonzero, the word at
18719 *(*this + vcall_offset) should be added to THIS. */
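/* Roughly, for a 32-bit target with THIS passed on the stack, DELTA == 8 and
   no VCALL_OFFSET, the emitted thunk is just "addl $8, 4(%esp)" followed by
   a direct jump to FUNCTION (in the non-PIC case).  */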
18721 static void
18722 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
18723 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
18724 HOST_WIDE_INT vcall_offset, tree function)
18726 rtx xops[3];
18727 rtx this = x86_this_parameter (function);
18728 rtx this_reg, tmp;
18730 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
18731 pull it in now and let DELTA benefit. */
18732 if (REG_P (this))
18733 this_reg = this;
18734 else if (vcall_offset)
18736 /* Put the this parameter into %eax. */
18737 xops[0] = this;
18738 xops[1] = this_reg = gen_rtx_REG (Pmode, 0);
18739 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18741 else
18742 this_reg = NULL_RTX;
18744 /* Adjust the this parameter by a fixed constant. */
18745 if (delta)
18747 xops[0] = GEN_INT (delta);
18748 xops[1] = this_reg ? this_reg : this;
18749 if (TARGET_64BIT)
18751 if (!x86_64_general_operand (xops[0], DImode))
18753 tmp = gen_rtx_REG (DImode, R10_REG);
18754 xops[1] = tmp;
18755 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
18756 xops[0] = tmp;
18757 xops[1] = this;
18759 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18761 else
18762 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18765 /* Adjust the this parameter by a value stored in the vtable. */
18766 if (vcall_offset)
18768 if (TARGET_64BIT)
18769 tmp = gen_rtx_REG (DImode, R10_REG);
18770 else
18772 int tmp_regno = 2 /* ECX */;
18773 if (lookup_attribute ("fastcall",
18774 TYPE_ATTRIBUTES (TREE_TYPE (function))))
18775 tmp_regno = 0 /* EAX */;
18776 tmp = gen_rtx_REG (SImode, tmp_regno);
18779 xops[0] = gen_rtx_MEM (Pmode, this_reg);
18780 xops[1] = tmp;
18781 if (TARGET_64BIT)
18782 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18783 else
18784 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18786 /* Adjust the this parameter. */
18787 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
18788 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
18790 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
18791 xops[0] = GEN_INT (vcall_offset);
18792 xops[1] = tmp2;
18793 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
18794 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
18796 xops[1] = this_reg;
18797 if (TARGET_64BIT)
18798 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
18799 else
18800 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
18803 /* If necessary, drop THIS back to its stack slot. */
18804 if (this_reg && this_reg != this)
18806 xops[0] = this_reg;
18807 xops[1] = this;
18808 output_asm_insn ("mov{l}\t{%0, %1|%1, %0}", xops);
18811 xops[0] = XEXP (DECL_RTL (function), 0);
18812 if (TARGET_64BIT)
18814 if (!flag_pic || (*targetm.binds_local_p) (function))
18815 output_asm_insn ("jmp\t%P0", xops);
18816 else
18818 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
18819 tmp = gen_rtx_CONST (Pmode, tmp);
18820 tmp = gen_rtx_MEM (QImode, tmp);
18821 xops[0] = tmp;
18822 output_asm_insn ("jmp\t%A0", xops);
18825 else
18827 if (!flag_pic || (*targetm.binds_local_p) (function))
18828 output_asm_insn ("jmp\t%P0", xops);
18829 else
18830 #if TARGET_MACHO
18831 if (TARGET_MACHO)
18833 rtx sym_ref = XEXP (DECL_RTL (function), 0);
18834 tmp = (gen_rtx_SYMBOL_REF
18835 (Pmode,
18836 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
18837 tmp = gen_rtx_MEM (QImode, tmp);
18838 xops[0] = tmp;
18839 output_asm_insn ("jmp\t%0", xops);
18841 else
18842 #endif /* TARGET_MACHO */
18844 tmp = gen_rtx_REG (SImode, 2 /* ECX */);
18845 output_set_got (tmp, NULL_RTX);
18847 xops[1] = tmp;
18848 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
18849 output_asm_insn ("jmp\t{*}%1", xops);
18854 static void
18855 x86_file_start (void)
18857 default_file_start ();
18858 #if TARGET_MACHO
18859 darwin_file_start ();
18860 #endif
18861 if (X86_FILE_START_VERSION_DIRECTIVE)
18862 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
18863 if (X86_FILE_START_FLTUSED)
18864 fputs ("\t.global\t__fltused\n", asm_out_file);
18865 if (ix86_asm_dialect == ASM_INTEL)
18866 fputs ("\t.intel_syntax\n", asm_out_file);
18870 x86_field_alignment (tree field, int computed)
18872 enum machine_mode mode;
18873 tree type = TREE_TYPE (field);
18875 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
18876 return computed;
18877 mode = TYPE_MODE (TREE_CODE (type) == ARRAY_TYPE
18878 ? get_inner_array_type (type) : type);
18879 if (mode == DFmode || mode == DCmode
18880 || GET_MODE_CLASS (mode) == MODE_INT
18881 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
18882 return MIN (32, computed);
18883 return computed;
18886 /* Output assembler code to FILE to increment profiler label # LABELNO
18887 for profiling a function entry. */
18888 void
18889 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
18891 if (TARGET_64BIT)
18892 if (flag_pic)
18894 #ifndef NO_PROFILE_COUNTERS
18895 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
18896 #endif
18897 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
18899 else
18901 #ifndef NO_PROFILE_COUNTERS
18902 fprintf (file, "\tmovq\t$%sP%d,%%r11\n", LPREFIX, labelno);
18903 #endif
18904 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18906 else if (flag_pic)
18908 #ifndef NO_PROFILE_COUNTERS
18909 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
18910 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
18911 #endif
18912 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
18914 else
18916 #ifndef NO_PROFILE_COUNTERS
18917 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
18918 PROFILE_COUNT_REGISTER);
18919 #endif
18920 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
18924 /* We don't have exact information about the insn sizes, but we may assume
18925 quite safely that we are informed about all 1 byte insns and memory
18926 address sizes. This is enough to eliminate unnecessary padding in
18927 99% of cases. */
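/* The estimate below is deliberately a lower bound: a direct (symbolic,
   non-sibling) call counts as 5 bytes, an insn already known to be one byte
   counts as 1, other non-jump insns count as 1 byte plus their address
   length (at least 4 when a symbol is mentioned), and jumps count as 2.  */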
18929 static int
18930 min_insn_size (rtx insn)
18932 int l = 0;
18934 if (!INSN_P (insn) || !active_insn_p (insn))
18935 return 0;
18937 /* Discard alignments we've emitted, and jump instructions. */
18938 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
18939 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
18940 return 0;
18941 if (GET_CODE (insn) == JUMP_INSN
18942 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
18943 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
18944 return 0;
18946 /* Important case - calls are always 5 bytes.
18947 It is common to have many calls in a row. */
18948 if (GET_CODE (insn) == CALL_INSN
18949 && symbolic_reference_mentioned_p (PATTERN (insn))
18950 && !SIBLING_CALL_P (insn))
18951 return 5;
18952 if (get_attr_length (insn) <= 1)
18953 return 1;
18955 /* For normal instructions we may rely on the sizes of addresses
18956 and the presence of a symbol to require 4 bytes of encoding.
18957 This is not the case for jumps, where references are PC relative. */
18958 if (GET_CODE (insn) != JUMP_INSN)
18960 l = get_attr_length_address (insn);
18961 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
18962 l = 4;
18964 if (l)
18965 return 1+l;
18966 else
18967 return 2;
18970 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
18971 16 byte window. */
18973 static void
18974 ix86_avoid_jump_misspredicts (void)
18976 rtx insn, start = get_insns ();
18977 int nbytes = 0, njumps = 0;
18978 int isjump = 0;
18980 /* Look for all minimal intervals of instructions containing 4 jumps.
18981 The intervals are bounded by START and INSN. NBYTES is the total
18982 size of instructions in the interval including INSN and not including
18983 START. When NBYTES is smaller than 16 bytes, it is possible
18984 that the ends of START and INSN land in the same 16 byte page.
18986 The smallest offset in the page at which INSN can start is when START
18987 ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
18988 We add a p2align to the 16 byte window with maxskip 17 - NBYTES + sizeof (INSN).
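/* For example, if the window already holds three jumps and NBYTES is 12
   (including a final 2 byte jump), the loop below emits an align of
   15 - 12 + 2 = 5 bytes in front of that jump.  */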
18990 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
18993 nbytes += min_insn_size (insn);
18994 if (dump_file)
18995 fprintf(dump_file, "Insn %i estimated to %i bytes\n",
18996 INSN_UID (insn), min_insn_size (insn));
18997 if ((GET_CODE (insn) == JUMP_INSN
18998 && GET_CODE (PATTERN (insn)) != ADDR_VEC
18999 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
19000 || GET_CODE (insn) == CALL_INSN)
19001 njumps++;
19002 else
19003 continue;
19005 while (njumps > 3)
19007 start = NEXT_INSN (start);
19008 if ((GET_CODE (start) == JUMP_INSN
19009 && GET_CODE (PATTERN (start)) != ADDR_VEC
19010 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
19011 || GET_CODE (start) == CALL_INSN)
19012 njumps--, isjump = 1;
19013 else
19014 isjump = 0;
19015 nbytes -= min_insn_size (start);
19017 gcc_assert (njumps >= 0);
19018 if (dump_file)
19019 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
19020 INSN_UID (start), INSN_UID (insn), nbytes);
19022 if (njumps == 3 && isjump && nbytes < 16)
19024 int padsize = 15 - nbytes + min_insn_size (insn);
19026 if (dump_file)
19027 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
19028 INSN_UID (insn), padsize);
19029 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
19034 /* AMD Athlon works faster
19035 when RET is not the destination of a conditional jump and is not directly
19036 preceded by another jump instruction. We avoid the penalty by inserting a
19037 NOP just before the RET instruction in such cases. */
19038 static void
19039 ix86_pad_returns (void)
19041 edge e;
19042 edge_iterator ei;
19044 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
19046 basic_block bb = e->src;
19047 rtx ret = BB_END (bb);
19048 rtx prev;
19049 bool replace = false;
19051 if (GET_CODE (ret) != JUMP_INSN || GET_CODE (PATTERN (ret)) != RETURN
19052 || !maybe_hot_bb_p (bb))
19053 continue;
19054 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
19055 if (active_insn_p (prev) || GET_CODE (prev) == CODE_LABEL)
19056 break;
19057 if (prev && GET_CODE (prev) == CODE_LABEL)
19059 edge e;
19060 edge_iterator ei;
19062 FOR_EACH_EDGE (e, ei, bb->preds)
19063 if (EDGE_FREQUENCY (e) && e->src->index >= 0
19064 && !(e->flags & EDGE_FALLTHRU))
19065 replace = true;
19067 if (!replace)
19069 prev = prev_active_insn (ret);
19070 if (prev
19071 && ((GET_CODE (prev) == JUMP_INSN && any_condjump_p (prev))
19072 || GET_CODE (prev) == CALL_INSN))
19073 replace = true;
19074 /* Empty functions get a branch mispredict even when the jump destination
19075 is not visible to us. */
19076 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
19077 replace = true;
19079 if (replace)
19081 emit_insn_before (gen_return_internal_long (), ret);
19082 delete_insn (ret);
19087 /* Implement machine specific optimizations. We implement padding of returns
19088 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
19089 static void
19090 ix86_reorg (void)
19092 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
19093 ix86_pad_returns ();
19094 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
19095 ix86_avoid_jump_misspredicts ();
19098 /* Return nonzero when a QImode register that must be represented via a REX
19099 prefix is used. */
19100 bool
19101 x86_extended_QIreg_mentioned_p (rtx insn)
19103 int i;
19104 extract_insn_cached (insn);
19105 for (i = 0; i < recog_data.n_operands; i++)
19106 if (REG_P (recog_data.operand[i])
19107 && REGNO (recog_data.operand[i]) >= 4)
19108 return true;
19109 return false;
19112 /* Return nonzero when P points to a register encoded via a REX prefix.
19113 Called via for_each_rtx. */
19114 static int
19115 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
19117 unsigned int regno;
19118 if (!REG_P (*p))
19119 return 0;
19120 regno = REGNO (*p);
19121 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
19124 /* Return true when INSN mentions a register that must be encoded using a
19125 REX prefix. */
19126 bool
19127 x86_extended_reg_mentioned_p (rtx insn)
19129 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
19132 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
19133 optabs would emit if we didn't have TFmode patterns. */
19135 void
19136 x86_emit_floatuns (rtx operands[2])
19138 rtx neglab, donelab, i0, i1, f0, in, out;
19139 enum machine_mode mode, inmode;
19141 inmode = GET_MODE (operands[1]);
19142 gcc_assert (inmode == SImode || inmode == DImode);
19144 out = operands[0];
19145 in = force_reg (inmode, operands[1]);
19146 mode = GET_MODE (out);
19147 neglab = gen_label_rtx ();
19148 donelab = gen_label_rtx ();
19149 i1 = gen_reg_rtx (Pmode);
19150 f0 = gen_reg_rtx (mode);
19152 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, Pmode, 0, neglab);
19154 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
19155 emit_jump_insn (gen_jump (donelab));
19156 emit_barrier ();
19158 emit_label (neglab);
19160 i0 = expand_simple_binop (Pmode, LSHIFTRT, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19161 i1 = expand_simple_binop (Pmode, AND, in, const1_rtx, NULL, 1, OPTAB_DIRECT);
19162 i0 = expand_simple_binop (Pmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
19163 expand_float (f0, i0, 0);
19164 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
19166 emit_label (donelab);
19169 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19170 with all elements equal to VAR. Return true if successful. */
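/* The "widen" path below replicates VAL within the next wider scalar mode,
   e.g. a QImode value V becomes (V << 8) | V in HImode, and then recurses
   with the corresponding wider vector mode.  */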
19172 static bool
19173 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
19174 rtx target, rtx val)
19176 enum machine_mode smode, wsmode, wvmode;
19177 rtx x;
19179 switch (mode)
19181 case V2SImode:
19182 case V2SFmode:
19183 if (!mmx_ok)
19184 return false;
19185 /* FALLTHRU */
19187 case V2DFmode:
19188 case V2DImode:
19189 case V4SFmode:
19190 case V4SImode:
19191 val = force_reg (GET_MODE_INNER (mode), val);
19192 x = gen_rtx_VEC_DUPLICATE (mode, val);
19193 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19194 return true;
19196 case V4HImode:
19197 if (!mmx_ok)
19198 return false;
19199 if (TARGET_SSE || TARGET_3DNOW_A)
19201 val = gen_lowpart (SImode, val);
19202 x = gen_rtx_TRUNCATE (HImode, val);
19203 x = gen_rtx_VEC_DUPLICATE (mode, x);
19204 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19205 return true;
19207 else
19209 smode = HImode;
19210 wsmode = SImode;
19211 wvmode = V2SImode;
19212 goto widen;
19215 case V8QImode:
19216 if (!mmx_ok)
19217 return false;
19218 smode = QImode;
19219 wsmode = HImode;
19220 wvmode = V4HImode;
19221 goto widen;
19222 case V8HImode:
19223 if (TARGET_SSE2)
19225 rtx tmp1, tmp2;
19226 /* Extend HImode to SImode using a paradoxical SUBREG. */
19227 tmp1 = gen_reg_rtx (SImode);
19228 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19229 /* Insert the SImode value as low element of V4SImode vector. */
19230 tmp2 = gen_reg_rtx (V4SImode);
19231 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19232 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19233 CONST0_RTX (V4SImode),
19234 const1_rtx);
19235 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19236 /* Cast the V4SImode vector back to a V8HImode vector. */
19237 tmp1 = gen_reg_rtx (V8HImode);
19238 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
19239 /* Duplicate the low short through the whole low SImode word. */
19240 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
19241 /* Cast the V8HImode vector back to a V4SImode vector. */
19242 tmp2 = gen_reg_rtx (V4SImode);
19243 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19244 /* Replicate the low element of the V4SImode vector. */
19245 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19246 /* Cast the V4SImode vector back to V8HImode, and store in target. */
19247 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
19248 return true;
19250 smode = HImode;
19251 wsmode = SImode;
19252 wvmode = V4SImode;
19253 goto widen;
19254 case V16QImode:
19255 if (TARGET_SSE2)
19257 rtx tmp1, tmp2;
19258 /* Extend QImode to SImode using a paradoxical SUBREG. */
19259 tmp1 = gen_reg_rtx (SImode);
19260 emit_move_insn (tmp1, gen_lowpart (SImode, val));
19261 /* Insert the SImode value as low element of V4SImode vector. */
19262 tmp2 = gen_reg_rtx (V4SImode);
19263 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
19264 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
19265 CONST0_RTX (V4SImode),
19266 const1_rtx);
19267 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
19268 /* Cast the V4SImode vector back to a V16QImode vector. */
19269 tmp1 = gen_reg_rtx (V16QImode);
19270 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
19271 /* Duplicate the low byte through the whole low SImode word. */
19272 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19273 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
19274 /* Cast the V16QImode vector back to a V4SImode vector. */
19275 tmp2 = gen_reg_rtx (V4SImode);
19276 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
19277 /* Replicate the low element of the V4SImode vector. */
19278 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
19279 /* Cast the V4SImode vector back to V16QImode, and store in target. */
19280 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
19281 return true;
19283 smode = QImode;
19284 wsmode = HImode;
19285 wvmode = V8HImode;
19286 goto widen;
19287 widen:
19288 /* Replicate the value once into the next wider mode and recurse. */
19289 val = convert_modes (wsmode, smode, val, true);
19290 x = expand_simple_binop (wsmode, ASHIFT, val,
19291 GEN_INT (GET_MODE_BITSIZE (smode)),
19292 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19293 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
19295 x = gen_reg_rtx (wvmode);
19296 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
19297 gcc_unreachable ();
19298 emit_move_insn (target, gen_lowpart (mode, x));
19299 return true;
19301 default:
19302 return false;
19306 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19307 whose ONE_VAR element is VAR, and other elements are zero. Return true
19308 if successful. */
19310 static bool
19311 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
19312 rtx target, rtx var, int one_var)
19314 enum machine_mode vsimode;
19315 rtx new_target;
19316 rtx x, tmp;
19318 switch (mode)
19320 case V2SFmode:
19321 case V2SImode:
19322 if (!mmx_ok)
19323 return false;
19324 /* FALLTHRU */
19326 case V2DFmode:
19327 case V2DImode:
19328 if (one_var != 0)
19329 return false;
19330 var = force_reg (GET_MODE_INNER (mode), var);
19331 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
19332 emit_insn (gen_rtx_SET (VOIDmode, target, x));
19333 return true;
19335 case V4SFmode:
19336 case V4SImode:
19337 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
19338 new_target = gen_reg_rtx (mode);
19339 else
19340 new_target = target;
19341 var = force_reg (GET_MODE_INNER (mode), var);
19342 x = gen_rtx_VEC_DUPLICATE (mode, var);
19343 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
19344 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
19345 if (one_var != 0)
19347 /* We need to shuffle the value to the correct position, so
19348 create a new pseudo to store the intermediate result. */
19350 /* With SSE2, we can use the integer shuffle insns. */
19351 if (mode != V4SFmode && TARGET_SSE2)
19353 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
19354 GEN_INT (1),
19355 GEN_INT (one_var == 1 ? 0 : 1),
19356 GEN_INT (one_var == 2 ? 0 : 1),
19357 GEN_INT (one_var == 3 ? 0 : 1)));
19358 if (target != new_target)
19359 emit_move_insn (target, new_target);
19360 return true;
19363 /* Otherwise convert the intermediate result to V4SFmode and
19364 use the SSE1 shuffle instructions. */
19365 if (mode != V4SFmode)
19367 tmp = gen_reg_rtx (V4SFmode);
19368 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
19370 else
19371 tmp = new_target;
19373 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
19374 GEN_INT (1),
19375 GEN_INT (one_var == 1 ? 0 : 1),
19376 GEN_INT (one_var == 2 ? 0+4 : 1+4),
19377 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
19379 if (mode != V4SFmode)
19380 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
19381 else if (tmp != target)
19382 emit_move_insn (target, tmp);
19384 else if (target != new_target)
19385 emit_move_insn (target, new_target);
19386 return true;
19388 case V8HImode:
19389 case V16QImode:
19390 vsimode = V4SImode;
19391 goto widen;
19392 case V4HImode:
19393 case V8QImode:
19394 if (!mmx_ok)
19395 return false;
19396 vsimode = V2SImode;
19397 goto widen;
19398 widen:
19399 if (one_var != 0)
19400 return false;
19402 /* Zero extend the variable element to SImode and recurse. */
19403 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
19405 x = gen_reg_rtx (vsimode);
19406 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
19407 var, one_var))
19408 gcc_unreachable ();
19410 emit_move_insn (target, gen_lowpart (mode, x));
19411 return true;
19413 default:
19414 return false;
19418 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
19419 consisting of the values in VALS. It is known that all elements
19420 except ONE_VAR are constants. Return true if successful. */
19422 static bool
19423 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
19424 rtx target, rtx vals, int one_var)
19426 rtx var = XVECEXP (vals, 0, one_var);
19427 enum machine_mode wmode;
19428 rtx const_vec, x;
19430 const_vec = copy_rtx (vals);
19431 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
19432 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
19434 switch (mode)
19436 case V2DFmode:
19437 case V2DImode:
19438 case V2SFmode:
19439 case V2SImode:
19440 /* For the two element vectors, it's just as easy to use
19441 the general case. */
19442 return false;
19444 case V4SFmode:
19445 case V4SImode:
19446 case V8HImode:
19447 case V4HImode:
19448 break;
19450 case V16QImode:
19451 wmode = V8HImode;
19452 goto widen;
19453 case V8QImode:
19454 wmode = V4HImode;
19455 goto widen;
19456 widen:
19457 /* There's no way to set one QImode entry easily. Combine
19458 the variable value with its adjacent constant value, and
19459 promote to an HImode set. */
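/* For example, to set element 5 of a V16QImode vector, the variable byte is
   paired with constant element 4: the variable goes into the high byte of an
   HImode value (5 is odd), the constant into the low byte, and the result is
   stored as element 2 of the V8HImode view of the vector.  */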
19460 x = XVECEXP (vals, 0, one_var ^ 1);
19461 if (one_var & 1)
19463 var = convert_modes (HImode, QImode, var, true);
19464 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
19465 NULL_RTX, 1, OPTAB_LIB_WIDEN);
19466 x = GEN_INT (INTVAL (x) & 0xff);
19468 else
19470 var = convert_modes (HImode, QImode, var, true);
19471 x = gen_int_mode (INTVAL (x) << 8, HImode);
19473 if (x != const0_rtx)
19474 var = expand_simple_binop (HImode, IOR, var, x, var,
19475 1, OPTAB_LIB_WIDEN);
19477 x = gen_reg_rtx (wmode);
19478 emit_move_insn (x, gen_lowpart (wmode, const_vec));
19479 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
19481 emit_move_insn (target, gen_lowpart (mode, x));
19482 return true;
19484 default:
19485 return false;
19488 emit_move_insn (target, const_vec);
19489 ix86_expand_vector_set (mmx_ok, target, var, one_var);
19490 return true;
19493 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
19494 all values variable, and none identical. */
19496 static void
19497 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
19498 rtx target, rtx vals)
19500 enum machine_mode half_mode = GET_MODE_INNER (mode);
19501 rtx op0 = NULL, op1 = NULL;
19502 bool use_vec_concat = false;
19504 switch (mode)
19506 case V2SFmode:
19507 case V2SImode:
19508 if (!mmx_ok && !TARGET_SSE)
19509 break;
19510 /* FALLTHRU */
19512 case V2DFmode:
19513 case V2DImode:
19514 /* For the two element vectors, we always implement VEC_CONCAT. */
19515 op0 = XVECEXP (vals, 0, 0);
19516 op1 = XVECEXP (vals, 0, 1);
19517 use_vec_concat = true;
19518 break;
19520 case V4SFmode:
19521 half_mode = V2SFmode;
19522 goto half;
19523 case V4SImode:
19524 half_mode = V2SImode;
19525 goto half;
19526 half:
19528 rtvec v;
19530 /* For V4SF and V4SI, we implement a concat of two V2 vectors.
19531 Recurse to load the two halves. */
19533 op0 = gen_reg_rtx (half_mode);
19534 v = gen_rtvec (2, XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1));
19535 ix86_expand_vector_init (false, op0, gen_rtx_PARALLEL (half_mode, v));
19537 op1 = gen_reg_rtx (half_mode);
19538 v = gen_rtvec (2, XVECEXP (vals, 0, 2), XVECEXP (vals, 0, 3));
19539 ix86_expand_vector_init (false, op1, gen_rtx_PARALLEL (half_mode, v));
19541 use_vec_concat = true;
19543 break;
19545 case V8HImode:
19546 case V16QImode:
19547 case V4HImode:
19548 case V8QImode:
19549 break;
19551 default:
19552 gcc_unreachable ();
19555 if (use_vec_concat)
19557 if (!register_operand (op0, half_mode))
19558 op0 = force_reg (half_mode, op0);
19559 if (!register_operand (op1, half_mode))
19560 op1 = force_reg (half_mode, op1);
19562 emit_insn (gen_rtx_SET (VOIDmode, target,
19563 gen_rtx_VEC_CONCAT (mode, op0, op1)));
19565 else
19567 int i, j, n_elts, n_words, n_elt_per_word;
19568 enum machine_mode inner_mode;
19569 rtx words[4], shift;
19571 inner_mode = GET_MODE_INNER (mode);
19572 n_elts = GET_MODE_NUNITS (mode);
19573 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
19574 n_elt_per_word = n_elts / n_words;
19575 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
19577 for (i = 0; i < n_words; ++i)
19579 rtx word = NULL_RTX;
19581 for (j = 0; j < n_elt_per_word; ++j)
19583 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
19584 elt = convert_modes (word_mode, inner_mode, elt, true);
19586 if (j == 0)
19587 word = elt;
19588 else
19590 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
19591 word, 1, OPTAB_LIB_WIDEN);
19592 word = expand_simple_binop (word_mode, IOR, word, elt,
19593 word, 1, OPTAB_LIB_WIDEN);
19597 words[i] = word;
19600 if (n_words == 1)
19601 emit_move_insn (target, gen_lowpart (mode, words[0]));
19602 else if (n_words == 2)
19604 rtx tmp = gen_reg_rtx (mode);
19605 emit_insn (gen_rtx_CLOBBER (VOIDmode, tmp));
19606 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
19607 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
19608 emit_move_insn (target, tmp);
19610 else if (n_words == 4)
19612 rtx tmp = gen_reg_rtx (V4SImode);
19613 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
19614 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
19615 emit_move_insn (target, gen_lowpart (mode, tmp));
19617 else
19618 gcc_unreachable ();
19622 /* Initialize vector TARGET via VALS. Suppress the use of MMX
19623 instructions unless MMX_OK is true. */
19625 void
19626 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
19628 enum machine_mode mode = GET_MODE (target);
19629 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19630 int n_elts = GET_MODE_NUNITS (mode);
19631 int n_var = 0, one_var = -1;
19632 bool all_same = true, all_const_zero = true;
19633 int i;
19634 rtx x;
19636 for (i = 0; i < n_elts; ++i)
19638 x = XVECEXP (vals, 0, i);
19639 if (!CONSTANT_P (x))
19640 n_var++, one_var = i;
19641 else if (x != CONST0_RTX (inner_mode))
19642 all_const_zero = false;
19643 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
19644 all_same = false;
19647 /* Constants are best loaded from the constant pool. */
19648 if (n_var == 0)
19650 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
19651 return;
19654 /* If all values are identical, broadcast the value. */
19655 if (all_same
19656 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
19657 XVECEXP (vals, 0, 0)))
19658 return;
19660 /* Values where only one field is non-constant are best loaded from
19661 the pool and overwritten via move later. */
19662 if (n_var == 1)
19664 if (all_const_zero
19665 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
19666 XVECEXP (vals, 0, one_var),
19667 one_var))
19668 return;
19670 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
19671 return;
19674 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
19677 void
19678 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
19680 enum machine_mode mode = GET_MODE (target);
19681 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19682 bool use_vec_merge = false;
19683 rtx tmp;
19685 switch (mode)
19687 case V2SFmode:
19688 case V2SImode:
19689 if (mmx_ok)
19691 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
19692 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
19693 if (elt == 0)
19694 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
19695 else
19696 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
19697 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19698 return;
19700 break;
19702 case V2DFmode:
19703 case V2DImode:
19705 rtx op0, op1;
19707 /* For the two element vectors, we implement a VEC_CONCAT with
19708 the extraction of the other element. */
19710 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
19711 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
19713 if (elt == 0)
19714 op0 = val, op1 = tmp;
19715 else
19716 op0 = tmp, op1 = val;
19718 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
19719 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19721 return;
19723 case V4SFmode:
19724 switch (elt)
19726 case 0:
19727 use_vec_merge = true;
19728 break;
19730 case 1:
19731 /* tmp = target = A B C D */
19732 tmp = copy_to_reg (target);
19733 /* target = A A B B */
19734 emit_insn (gen_sse_unpcklps (target, target, target));
19735 /* target = X A B B */
19736 ix86_expand_vector_set (false, target, val, 0);
19737 /* target = A X C D */
19738 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19739 GEN_INT (1), GEN_INT (0),
19740 GEN_INT (2+4), GEN_INT (3+4)));
19741 return;
19743 case 2:
19744 /* tmp = target = A B C D */
19745 tmp = copy_to_reg (target);
19746 /* tmp = X B C D */
19747 ix86_expand_vector_set (false, tmp, val, 0);
19748 /* target = A B X D */
19749 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19750 GEN_INT (0), GEN_INT (1),
19751 GEN_INT (0+4), GEN_INT (3+4)));
19752 return;
19754 case 3:
19755 /* tmp = target = A B C D */
19756 tmp = copy_to_reg (target);
19757 /* tmp = X B C D */
19758 ix86_expand_vector_set (false, tmp, val, 0);
19759 /* target = A B C X */
19760 emit_insn (gen_sse_shufps_1 (target, target, tmp,
19761 GEN_INT (0), GEN_INT (1),
19762 GEN_INT (2+4), GEN_INT (0+4)));
19763 return;
19765 default:
19766 gcc_unreachable ();
19768 break;
19770 case V4SImode:
19771 /* Element 0 handled by vec_merge below. */
19772 if (elt == 0)
19774 use_vec_merge = true;
19775 break;
19778 if (TARGET_SSE2)
19780 /* With SSE2, use integer shuffles to swap element 0 and ELT,
19781 store into element 0, then shuffle them back. */
19783 rtx order[4];
19785 order[0] = GEN_INT (elt);
19786 order[1] = const1_rtx;
19787 order[2] = const2_rtx;
19788 order[3] = GEN_INT (3);
19789 order[elt] = const0_rtx;
19791 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19792 order[1], order[2], order[3]));
19794 ix86_expand_vector_set (false, target, val, 0);
19796 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
19797 order[1], order[2], order[3]));
19799 else
19801 /* For SSE1, we have to reuse the V4SF code. */
19802 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
19803 gen_lowpart (SFmode, val), elt);
19805 return;
19807 case V8HImode:
19808 use_vec_merge = TARGET_SSE2;
19809 break;
19810 case V4HImode:
19811 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19812 break;
19814 case V16QImode:
19815 case V8QImode:
19816 default:
19817 break;
19820 if (use_vec_merge)
19822 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
19823 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
19824 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19826 else
19828 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19830 emit_move_insn (mem, target);
19832 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19833 emit_move_insn (tmp, val);
19835 emit_move_insn (target, mem);
19839 void
19840 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
19842 enum machine_mode mode = GET_MODE (vec);
19843 enum machine_mode inner_mode = GET_MODE_INNER (mode);
19844 bool use_vec_extr = false;
19845 rtx tmp;
19847 switch (mode)
19849 case V2SImode:
19850 case V2SFmode:
19851 if (!mmx_ok)
19852 break;
19853 /* FALLTHRU */
19855 case V2DFmode:
19856 case V2DImode:
19857 use_vec_extr = true;
19858 break;
19860 case V4SFmode:
19861 switch (elt)
19863 case 0:
19864 tmp = vec;
19865 break;
19867 case 1:
19868 case 3:
19869 tmp = gen_reg_rtx (mode);
19870 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
19871 GEN_INT (elt), GEN_INT (elt),
19872 GEN_INT (elt+4), GEN_INT (elt+4)));
19873 break;
19875 case 2:
19876 tmp = gen_reg_rtx (mode);
19877 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
19878 break;
19880 default:
19881 gcc_unreachable ();
19883 vec = tmp;
19884 use_vec_extr = true;
19885 elt = 0;
19886 break;
19888 case V4SImode:
19889 if (TARGET_SSE2)
19891 switch (elt)
19893 case 0:
19894 tmp = vec;
19895 break;
19897 case 1:
19898 case 3:
19899 tmp = gen_reg_rtx (mode);
19900 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
19901 GEN_INT (elt), GEN_INT (elt),
19902 GEN_INT (elt), GEN_INT (elt)));
19903 break;
19905 case 2:
19906 tmp = gen_reg_rtx (mode);
19907 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
19908 break;
19910 default:
19911 gcc_unreachable ();
19913 vec = tmp;
19914 use_vec_extr = true;
19915 elt = 0;
19917 else
19919 /* For SSE1, we have to reuse the V4SF code. */
19920 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
19921 gen_lowpart (V4SFmode, vec), elt);
19922 return;
19924 break;
19926 case V8HImode:
19927 use_vec_extr = TARGET_SSE2;
19928 break;
19929 case V4HImode:
19930 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
19931 break;
19933 case V16QImode:
19934 case V8QImode:
19935 /* ??? Could extract the appropriate HImode element and shift. */
19936 default:
19937 break;
19940 if (use_vec_extr)
19942 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
19943 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
19945 /* Let the rtl optimizers know about the zero extension performed. */
19946 if (inner_mode == HImode)
19948 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
19949 target = gen_lowpart (SImode, target);
19952 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
19954 else
19956 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
19958 emit_move_insn (mem, vec);
19960 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
19961 emit_move_insn (target, tmp);
19965 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
19966 pattern to reduce; DEST is the destination; IN is the input vector. */
19968 void
19969 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
19971 rtx tmp1, tmp2, tmp3;
19973 tmp1 = gen_reg_rtx (V4SFmode);
19974 tmp2 = gen_reg_rtx (V4SFmode);
19975 tmp3 = gen_reg_rtx (V4SFmode);
19977 emit_insn (gen_sse_movhlps (tmp1, in, in));
19978 emit_insn (fn (tmp2, tmp1, in));
19980 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
19981 GEN_INT (1), GEN_INT (1),
19982 GEN_INT (1+4), GEN_INT (1+4)));
19983 emit_insn (fn (dest, tmp2, tmp3));
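/* Illustrative sketch, assuming FN is the V4SFmode add pattern and
   IN = {a, b, c, d}:

     tmp1 = {c, d, c, d}                    movhlps
     tmp2 = tmp1 + in = {a+c, b+d, ...}
     tmp3 = {b+d, b+d, b+d, b+d}            shufps, selector 1
     dest = tmp2 + tmp3                     element 0 = (a+c) + (b+d)

   so element 0 of DEST ends up holding the reduction of all four input
   elements; the remaining elements are don't-cares.  */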
19986 /* Target hook for scalar_mode_supported_p. */
19987 static bool
19988 ix86_scalar_mode_supported_p (enum machine_mode mode)
19990 if (DECIMAL_FLOAT_MODE_P (mode))
19991 return true;
19992 else
19993 return default_scalar_mode_supported_p (mode);
19996 /* Implements target hook vector_mode_supported_p. */
19997 static bool
19998 ix86_vector_mode_supported_p (enum machine_mode mode)
20000 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
20001 return true;
20002 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
20003 return true;
20004 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
20005 return true;
20006 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
20007 return true;
20008 return false;
20011 /* Worker function for TARGET_MD_ASM_CLOBBERS.
20013 We do this in the new i386 backend to maintain source compatibility
20014 with the old cc0-based compiler. */
20016 static tree
20017 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
20018 tree inputs ATTRIBUTE_UNUSED,
20019 tree clobbers)
20021 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
20022 clobbers);
20023 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
20024 clobbers);
20025 return clobbers;
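/* Illustrative example, not from the original source: because of the two
   clobbers added above, a user-level statement such as

     asm ("bsfl %1, %0" : "=r" (idx) : "rm" (mask));

   is treated as if the condition codes ("flags") and the x87 status word
   ("fpsr") had been listed explicitly in its clobber list, matching what
   old code written for the cc0-based i386 back end assumed.  */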
20028 /* Return true if this goes in large data/bss. */
20030 static bool
20031 ix86_in_large_data_p (tree exp)
20033 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
20034 return false;
20036 /* Functions are never large data. */
20037 if (TREE_CODE (exp) == FUNCTION_DECL)
20038 return false;
20040 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
20042 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
20043 if (strcmp (section, ".ldata") == 0
20044 || strcmp (section, ".lbss") == 0)
20045 return true;
20046 return false;
20048 else
20050 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
20052 /* If this is an incomplete type with size 0, then we can't put it
20053 in data because it might be too big when completed. */
20054 if (!size || size > ix86_section_threshold)
20055 return true;
20058 return false;
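/* Illustrative example, assuming -mcmodel=medium and the default
   -mlarge-data-threshold: a definition such as

     static char big_table[1 << 20];

   is considered large data because its size exceeds the threshold, as is
   any variable explicitly placed in ".ldata" or ".lbss" via
   __attribute__ ((section (...))); small objects and all functions stay
   in the ordinary sections.  Large-data symbols get SYMBOL_FLAG_FAR_ADDR
   set below and are addressed with full 64-bit addresses.  */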
20060 static void
20061 ix86_encode_section_info (tree decl, rtx rtl, int first)
20063 default_encode_section_info (decl, rtl, first);
20065 if (TREE_CODE (decl) == VAR_DECL
20066 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
20067 && ix86_in_large_data_p (decl))
20068 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
20071 /* Worker function for REVERSE_CONDITION. */
20073 enum rtx_code
20074 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
20076 return (mode != CCFPmode && mode != CCFPUmode
20077 ? reverse_condition (code)
20078 : reverse_condition_maybe_unordered (code));
20081 /* Output code to perform an x87 FP register move, from OPERANDS[1]
20082 to OPERANDS[0]. */
20084 const char *
20085 output_387_reg_move (rtx insn, rtx *operands)
20087 if (REG_P (operands[1])
20088 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
20090 if (REGNO (operands[0]) == FIRST_STACK_REG)
20091 return output_387_ffreep (operands, 0);
20092 return "fstp\t%y0";
20094 if (STACK_TOP_P (operands[0]))
20095 return "fld%z1\t%y1";
20096 return "fst\t%y0";
20099 /* Output code to perform a conditional jump to LABEL if the C2 flag in
20100 the FP status register is set. */
20102 void
20103 ix86_emit_fp_unordered_jump (rtx label)
20105 rtx reg = gen_reg_rtx (HImode);
20106 rtx temp;
20108 emit_insn (gen_x86_fnstsw_1 (reg));
20110 if (TARGET_USE_SAHF)
20112 emit_insn (gen_x86_sahf_1 (reg));
20114 temp = gen_rtx_REG (CCmode, FLAGS_REG);
20115 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
20117 else
20119 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
20121 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
20122 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
20125 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
20126 gen_rtx_LABEL_REF (VOIDmode, label),
20127 pc_rtx);
20128 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
20129 emit_jump_insn (temp);
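/* Illustrative sketch of the two sequences emitted above, assuming the
   fnstsw result lands in %ax:

     TARGET_USE_SAHF:            otherwise:
       fnstsw  %ax                 fnstsw  %ax
       sahf                        testb   $0x04, %ah
       jp      label               jne     label

   C2 is bit 10 of the FP status word, i.e. bit 2 of %ah after fnstsw;
   sahf copies it into the parity flag, hence the jp.  */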
20132 /* Output code to perform a log1p XFmode calculation. */
20134 void ix86_emit_i387_log1p (rtx op0, rtx op1)
20136 rtx label1 = gen_label_rtx ();
20137 rtx label2 = gen_label_rtx ();
20139 rtx tmp = gen_reg_rtx (XFmode);
20140 rtx tmp2 = gen_reg_rtx (XFmode);
20142 emit_insn (gen_absxf2 (tmp, op1));
20143 emit_insn (gen_cmpxf (tmp,
20144 CONST_DOUBLE_FROM_REAL_VALUE (
20145 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
20146 XFmode)));
20147 emit_jump_insn (gen_bge (label1));
20149 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20150 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
20151 emit_jump (label2);
20153 emit_label (label1);
20154 emit_move_insn (tmp, CONST1_RTX (XFmode));
20155 emit_insn (gen_addxf3 (tmp, op1, tmp));
20156 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
20157 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
20159 emit_label (label2);
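/* Illustrative C equivalent of the sequence above (fyl2xp1 computes
   st(1) * log2 (st(0) + 1.0) but is only specified for
   |st(0)| < 1 - sqrt(2)/2 ~= 0.2928932, hence the range check):

     if (fabs (x) < 0.29289321881345247)
       result = M_LN2 * log2l (x + 1.0L);    use fyl2xp1, accurate near 0
     else
       result = M_LN2 * log2l (1.0L + x);    add 1 first, then use fyl2x  */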
20162 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
20164 static void
20165 i386_solaris_elf_named_section (const char *name, unsigned int flags,
20166 tree decl)
20168 /* With Binutils 2.15, the "@unwind" marker must be specified on
20169 every occurrence of the ".eh_frame" section, not just the first
20170 one. */
20171 if (TARGET_64BIT
20172 && strcmp (name, ".eh_frame") == 0)
20174 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
20175 flags & SECTION_WRITE ? "aw" : "a");
20176 return;
20178 default_elf_asm_named_section (name, flags, decl);
20181 /* Return the mangling of TYPE if it is an extended fundamental type. */
20183 static const char *
20184 ix86_mangle_fundamental_type (tree type)
20186 switch (TYPE_MODE (type))
20188 case TFmode:
20189 /* __float128 is "g". */
20190 return "g";
20191 case XFmode:
20192 /* "long double" or __float80 is "e". */
20193 return "e";
20194 default:
20195 return NULL;
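/* Illustrative example of the Itanium C++ ABI mangling implied above:
   a declaration

     void f (__float128, long double);

   mangles to "_Z1fge" on this target: "g" for the TFmode __float128
   parameter and "e" for the XFmode long double.  */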
20199 /* For 32-bit code we can save PIC register setup by using
20200 __stack_chk_fail_local hidden function instead of calling
20201 __stack_chk_fail directly. 64-bit code doesn't need to set up any PIC
20202 register, so it is better to call __stack_chk_fail directly. */
20204 static tree
20205 ix86_stack_protect_fail (void)
20207 return TARGET_64BIT
20208 ? default_external_stack_protect_fail ()
20209 : default_hidden_stack_protect_fail ();
20212 /* Select a format to encode pointers in exception handling data. CODE
20213 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
20214 true if the symbol may be affected by dynamic relocations.
20216 ??? All x86 object file formats are capable of representing this.
20217 After all, the relocation needed is the same as for the call insn.
20218 Whether or not a particular assembler allows us to enter such, I
20219 guess we'll have to see. */
20220 int
20221 asm_preferred_eh_data_format (int code, int global)
20223 if (flag_pic)
20225 int type = DW_EH_PE_sdata8;
20226 if (!TARGET_64BIT
20227 || ix86_cmodel == CM_SMALL_PIC
20228 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
20229 type = DW_EH_PE_sdata4;
20230 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
20232 if (ix86_cmodel == CM_SMALL
20233 || (ix86_cmodel == CM_MEDIUM && code))
20234 return DW_EH_PE_udata4;
20235 return DW_EH_PE_absptr;
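/* Illustrative examples, assuming a 64-bit target:

     -fpic, -mcmodel=small, global symbol:
       DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4
     -fpic, -mcmodel=medium, local data reference:
       DW_EH_PE_pcrel | DW_EH_PE_sdata8
     no PIC, -mcmodel=small:
       DW_EH_PE_udata4
     no PIC, -mcmodel=medium, data (CODE == 0):
       DW_EH_PE_absptr  */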
20238 /* Expand copysign from SIGN to the positive value ABS_VALUE
20239 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
20240 the sign-bit. */
20241 static void
20242 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
20244 enum machine_mode mode = GET_MODE (sign);
20245 rtx sgn = gen_reg_rtx (mode);
20246 if (mask == NULL_RTX)
20248 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
20249 if (!VECTOR_MODE_P (mode))
20251 /* We need to generate a scalar mode mask in this case. */
20252 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20253 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20254 mask = gen_reg_rtx (mode);
20255 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20258 else
20259 mask = gen_rtx_NOT (mode, mask);
20260 emit_insn (gen_rtx_SET (VOIDmode, sgn,
20261 gen_rtx_AND (mode, mask, sign)));
20262 emit_insn (gen_rtx_SET (VOIDmode, result,
20263 gen_rtx_IOR (mode, abs_value, sgn)));
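/* Illustrative sketch of the bit manipulation above, assuming DFmode
   scalars (the vector case is analogous):

     sgn    = sign & 0x8000000000000000;    isolate the sign bit
     result = abs_value | sgn;              transplant it onto ABS_VALUE

   ABS_VALUE is assumed to already have its sign bit clear, so the IOR
   implements copysign (abs_value, sign).  */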
20266 /* Expand fabs (OP0) and return a new rtx that holds the result. The
20267 mask for masking out the sign-bit is stored in *SMASK, if that is
20268 non-null. */
20269 static rtx
20270 ix86_expand_sse_fabs (rtx op0, rtx *smask)
20272 enum machine_mode mode = GET_MODE (op0);
20273 rtx xa, mask;
20275 xa = gen_reg_rtx (mode);
20276 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
20277 if (!VECTOR_MODE_P (mode))
20279 /* We need to generate a scalar mode mask in this case. */
20280 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
20281 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
20282 mask = gen_reg_rtx (mode);
20283 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
20285 emit_insn (gen_rtx_SET (VOIDmode, xa,
20286 gen_rtx_AND (mode, op0, mask)));
20288 if (smask)
20289 *smask = mask;
20291 return xa;
20294 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
20295 swapping the operands if SWAP_OPERANDS is true. The expanded
20296 code is a forward jump to a newly created label in case the
20297 comparison is true. The generated label rtx is returned. */
20298 static rtx
20299 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
20300 bool swap_operands)
20302 rtx label, tmp;
20304 if (swap_operands)
20306 tmp = op0;
20307 op0 = op1;
20308 op1 = tmp;
20311 label = gen_label_rtx ();
20312 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
20313 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20314 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
20315 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
20316 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
20317 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
20318 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
20319 JUMP_LABEL (tmp) = label;
20321 return label;
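/* Usage note, illustrative: the rounding expanders below typically call

     label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

   which jumps to the returned label when TWO52 is unordered with, or
   less than or equal to, xa, i.e. when xa is a NaN or already >= 2**52;
   in both cases the input value is left unchanged.  */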
20324 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
20325 using comparison code CODE. Operands are swapped for the comparison if
20326 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
20327 static rtx
20328 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
20329 bool swap_operands)
20331 enum machine_mode mode = GET_MODE (op0);
20332 rtx mask = gen_reg_rtx (mode);
20334 if (swap_operands)
20336 rtx tmp = op0;
20337 op0 = op1;
20338 op1 = tmp;
20341 if (mode == DFmode)
20342 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
20343 gen_rtx_fmt_ee (code, mode, op0, op1)));
20344 else
20345 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
20346 gen_rtx_fmt_ee (code, mode, op0, op1)));
20348 return mask;
20351 /* Generate and return an rtx of mode MODE holding 2**n, where n is the number
20352 of explicitly stored mantissa bits of MODE, which must be DFmode (52) or SFmode (23). */
20353 static rtx
20354 ix86_gen_TWO52 (enum machine_mode mode)
20356 REAL_VALUE_TYPE TWO52r;
20357 rtx TWO52;
20359 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
20360 TWO52 = const_double_from_real_value (TWO52r, mode);
20361 TWO52 = force_reg (mode, TWO52);
20363 return TWO52;
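/* Why adding and then subtracting 2**52 (2**23 for SFmode) rounds to an
   integer, illustrative and assuming round-to-nearest: for
   0 <= x < 2**52 the spacing of representable doubles at magnitude
   2**52 is exactly 1.0, so x + 2**52 is rounded to an integer by the
   addition itself, and subtracting 2**52 recovers that integer exactly.
   E.g. 3.7 + 2**52 = 4503599627370500.0, minus 2**52 gives 4.0.  */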
20366 /* Expand SSE sequence for computing lround from OP1 storing
20367 into OP0. */
20368 void
20369 ix86_expand_lround (rtx op0, rtx op1)
20371 /* C code for the stuff we're doing below:
20372 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
20373 return (long)tmp;
20375 enum machine_mode mode = GET_MODE (op1);
20376 const struct real_format *fmt;
20377 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20378 rtx adj;
20380 /* load nextafter (0.5, 0.0) */
20381 fmt = REAL_MODE_FORMAT (mode);
20382 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20383 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20385 /* adj = copysign (0.5, op1) */
20386 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
20387 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
20389 /* adj = op1 + adj */
20390 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
20392 /* op0 = (imode)adj */
20393 expand_fix (op0, adj, 0);
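/* Why nextafter (0.5, 0.0) rather than 0.5, illustrative and assuming
   DFmode: for x = 0.49999999999999994, the largest double below 0.5,
   x + 0.5 rounds up to exactly 1.0, so truncation would give
   lround (x) == 1 instead of 0.  Adding the predecessor of 0.5 keeps the
   sum below 1.0, while exact halfway cases such as 2.5 still end up
   rounded away from zero as lround requires.  */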
20396 /* Expand SSE2 sequence for computing lfloor or lceil from OP1 storing
20397 into OP0. */
20398 void
20399 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
20401 /* C code for the stuff we're doing below (for do_floor):
20402 xi = (long)op1;
20403 xi -= (double)xi > op1 ? 1 : 0;
20404 return xi;
20406 enum machine_mode fmode = GET_MODE (op1);
20407 enum machine_mode imode = GET_MODE (op0);
20408 rtx ireg, freg, label, tmp;
20410 /* reg = (long)op1 */
20411 ireg = gen_reg_rtx (imode);
20412 expand_fix (ireg, op1, 0);
20414 /* freg = (double)reg */
20415 freg = gen_reg_rtx (fmode);
20416 expand_float (freg, ireg, 0);
20418 /* ireg = (freg > op1) ? ireg - 1 : ireg */
20419 label = ix86_expand_sse_compare_and_jump (UNLE,
20420 freg, op1, !do_floor);
20421 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
20422 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
20423 emit_move_insn (ireg, tmp);
20425 emit_label (label);
20426 LABEL_NUSES (label) = 1;
20428 emit_move_insn (op0, ireg);
20431 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
20432 result in OPERAND0. */
20433 void
20434 ix86_expand_rint (rtx operand0, rtx operand1)
20436 /* C code for the stuff we're doing below:
20437 xa = fabs (operand1);
20438 if (!isless (xa, 2**52))
20439 return operand1;
20440 xa = xa + 2**52 - 2**52;
20441 return copysign (xa, operand1);
20443 enum machine_mode mode = GET_MODE (operand0);
20444 rtx res, xa, label, TWO52, mask;
20446 res = gen_reg_rtx (mode);
20447 emit_move_insn (res, operand1);
20449 /* xa = abs (operand1) */
20450 xa = ix86_expand_sse_fabs (res, &mask);
20452 /* if (!isless (xa, TWO52)) goto label; */
20453 TWO52 = ix86_gen_TWO52 (mode);
20454 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20456 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20457 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20459 ix86_sse_copysign_to_positive (res, xa, res, mask);
20461 emit_label (label);
20462 LABEL_NUSES (label) = 1;
20464 emit_move_insn (operand0, res);
20467 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20468 into OPERAND0. */
20469 void
20470 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
20472 /* C code for the stuff we expand below.
20473 double xa = fabs (x), x2;
20474 if (!isless (xa, TWO52))
20475 return x;
20476 xa = xa + TWO52 - TWO52;
20477 x2 = copysign (xa, x);
20478 Compensate. Floor:
20479 if (x2 > x)
20480 x2 -= 1;
20481 Compensate. Ceil:
20482 if (x2 < x)
20483 x2 -= -1;
20484 return x2;
20486 enum machine_mode mode = GET_MODE (operand0);
20487 rtx xa, TWO52, tmp, label, one, res, mask;
20489 TWO52 = ix86_gen_TWO52 (mode);
20491 /* Temporary for holding the result, initialized to the input
20492 operand to ease control flow. */
20493 res = gen_reg_rtx (mode);
20494 emit_move_insn (res, operand1);
20496 /* xa = abs (operand1) */
20497 xa = ix86_expand_sse_fabs (res, &mask);
20499 /* if (!isless (xa, TWO52)) goto label; */
20500 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20502 /* xa = xa + TWO52 - TWO52; */
20503 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20504 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
20506 /* xa = copysign (xa, operand1) */
20507 ix86_sse_copysign_to_positive (xa, xa, res, mask);
20509 /* generate 1.0 or -1.0 */
20510 one = force_reg (mode,
20511 const_double_from_real_value (do_floor
20512 ? dconst1 : dconstm1, mode));
20514 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20515 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20516 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20517 gen_rtx_AND (mode, one, tmp)));
20518 /* We always need to subtract here to preserve signed zero. */
20519 tmp = expand_simple_binop (mode, MINUS,
20520 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20521 emit_move_insn (res, tmp);
20523 emit_label (label);
20524 LABEL_NUSES (label) = 1;
20526 emit_move_insn (operand0, res);
20529 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
20530 into OPERAND0. */
20531 void
20532 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
20534 /* C code for the stuff we expand below.
20535 double xa = fabs (x), x2;
20536 if (!isless (xa, TWO52))
20537 return x;
20538 x2 = (double)(long)x;
20539 Compensate. Floor:
20540 if (x2 > x)
20541 x2 -= 1;
20542 Compensate. Ceil:
20543 if (x2 < x)
20544 x2 += 1;
20545 if (HONOR_SIGNED_ZEROS (mode))
20546 return copysign (x2, x);
20547 return x2;
20549 enum machine_mode mode = GET_MODE (operand0);
20550 rtx xa, xi, TWO52, tmp, label, one, res, mask;
20552 TWO52 = ix86_gen_TWO52 (mode);
20554 /* Temporary for holding the result, initialized to the input
20555 operand to ease control flow. */
20556 res = gen_reg_rtx (mode);
20557 emit_move_insn (res, operand1);
20559 /* xa = abs (operand1) */
20560 xa = ix86_expand_sse_fabs (res, &mask);
20562 /* if (!isless (xa, TWO52)) goto label; */
20563 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20565 /* xa = (double)(long)x */
20566 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20567 expand_fix (xi, res, 0);
20568 expand_float (xa, xi, 0);
20570 /* generate 1.0 */
20571 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20573 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
20574 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
20575 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20576 gen_rtx_AND (mode, one, tmp)));
20577 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
20578 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20579 emit_move_insn (res, tmp);
20581 if (HONOR_SIGNED_ZEROS (mode))
20582 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20584 emit_label (label);
20585 LABEL_NUSES (label) = 1;
20587 emit_move_insn (operand0, res);
20590 /* Expand SSE sequence for computing round from OPERAND1 storing
20591 into OPERAND0. Sequence that works without relying on DImode truncation
20592 via cvttsd2siq that is only available on 64bit targets. */
20593 void
20594 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
20596 /* C code for the stuff we expand below.
20597 double xa = fabs (x), xa2, x2;
20598 if (!isless (xa, TWO52))
20599 return x;
20600 Using the absolute value and copying back sign makes
20601 -0.0 -> -0.0 correct.
20602 xa2 = xa + TWO52 - TWO52;
20603 Compensate.
20604 dxa = xa2 - xa;
20605 if (dxa <= -0.5)
20606 xa2 += 1;
20607 else if (dxa > 0.5)
20608 xa2 -= 1;
20609 x2 = copysign (xa2, x);
20610 return x2;
20612 enum machine_mode mode = GET_MODE (operand0);
20613 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
20615 TWO52 = ix86_gen_TWO52 (mode);
20617 /* Temporary for holding the result, initialized to the input
20618 operand to ease control flow. */
20619 res = gen_reg_rtx (mode);
20620 emit_move_insn (res, operand1);
20622 /* xa = abs (operand1) */
20623 xa = ix86_expand_sse_fabs (res, &mask);
20625 /* if (!isless (xa, TWO52)) goto label; */
20626 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20628 /* xa2 = xa + TWO52 - TWO52; */
20629 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20630 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
20632 /* dxa = xa2 - xa; */
20633 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
20635 /* generate 0.5, 1.0 and -0.5 */
20636 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
20637 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
20638 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
20639 0, OPTAB_DIRECT);
20641 /* Compensate. */
20642 tmp = gen_reg_rtx (mode);
20643 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
20644 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
20645 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20646 gen_rtx_AND (mode, one, tmp)));
20647 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20648 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
20649 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
20650 emit_insn (gen_rtx_SET (VOIDmode, tmp,
20651 gen_rtx_AND (mode, one, tmp)));
20652 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
20654 /* res = copysign (xa2, operand1) */
20655 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
20657 emit_label (label);
20658 LABEL_NUSES (label) = 1;
20660 emit_move_insn (operand0, res);
20663 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20664 into OPERAND0. */
20665 void
20666 ix86_expand_trunc (rtx operand0, rtx operand1)
20668 /* C code for SSE variant we expand below.
20669 double xa = fabs (x), x2;
20670 if (!isless (xa, TWO52))
20671 return x;
20672 x2 = (double)(long)x;
20673 if (HONOR_SIGNED_ZEROS (mode))
20674 return copysign (x2, x);
20675 return x2;
20677 enum machine_mode mode = GET_MODE (operand0);
20678 rtx xa, xi, TWO52, label, res, mask;
20680 TWO52 = ix86_gen_TWO52 (mode);
20682 /* Temporary for holding the result, initialized to the input
20683 operand to ease control flow. */
20684 res = gen_reg_rtx (mode);
20685 emit_move_insn (res, operand1);
20687 /* xa = abs (operand1) */
20688 xa = ix86_expand_sse_fabs (res, &mask);
20690 /* if (!isless (xa, TWO52)) goto label; */
20691 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20693 /* x = (double)(long)x */
20694 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20695 expand_fix (xi, res, 0);
20696 expand_float (res, xi, 0);
20698 if (HONOR_SIGNED_ZEROS (mode))
20699 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
20701 emit_label (label);
20702 LABEL_NUSES (label) = 1;
20704 emit_move_insn (operand0, res);
20707 /* Expand SSE sequence for computing trunc from OPERAND1 storing
20708 into OPERAND0, without relying on DImode truncation via cvttsd2siq, which is only available on 64-bit targets. */
20709 void
20710 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
20712 enum machine_mode mode = GET_MODE (operand0);
20713 rtx xa, mask, TWO52, label, one, res, smask, tmp;
20715 /* C code for SSE variant we expand below.
20716 double xa = fabs (x), x2;
20717 if (!isless (xa, TWO52))
20718 return x;
20719 xa2 = xa + TWO52 - TWO52;
20720 Compensate:
20721 if (xa2 > xa)
20722 xa2 -= 1.0;
20723 x2 = copysign (xa2, x);
20724 return x2;
20727 TWO52 = ix86_gen_TWO52 (mode);
20729 /* Temporary for holding the result, initialized to the input
20730 operand to ease control flow. */
20731 res = gen_reg_rtx (mode);
20732 emit_move_insn (res, operand1);
20734 /* xa = abs (operand1) */
20735 xa = ix86_expand_sse_fabs (res, &smask);
20737 /* if (!isless (xa, TWO52)) goto label; */
20738 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20740 /* res = xa + TWO52 - TWO52; */
20741 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
20742 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
20743 emit_move_insn (res, tmp);
20745 /* generate 1.0 */
20746 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
20748 /* Compensate: res = xa2 - (res > xa ? 1 : 0) */
20749 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
20750 emit_insn (gen_rtx_SET (VOIDmode, mask,
20751 gen_rtx_AND (mode, mask, one)));
20752 tmp = expand_simple_binop (mode, MINUS,
20753 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
20754 emit_move_insn (res, tmp);
20756 /* res = copysign (res, operand1) */
20757 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
20759 emit_label (label);
20760 LABEL_NUSES (label) = 1;
20762 emit_move_insn (operand0, res);
20765 /* Expand SSE sequence for computing round from OPERAND1 storing
20766 into OPERAND0. */
20767 void
20768 ix86_expand_round (rtx operand0, rtx operand1)
20770 /* C code for the stuff we're doing below:
20771 double xa = fabs (x);
20772 if (!isless (xa, TWO52))
20773 return x;
20774 xa = (double)(long)(xa + nextafter (0.5, 0.0));
20775 return copysign (xa, x);
20777 enum machine_mode mode = GET_MODE (operand0);
20778 rtx res, TWO52, xa, label, xi, half, mask;
20779 const struct real_format *fmt;
20780 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
20782 /* Temporary for holding the result, initialized to the input
20783 operand to ease control flow. */
20784 res = gen_reg_rtx (mode);
20785 emit_move_insn (res, operand1);
20787 TWO52 = ix86_gen_TWO52 (mode);
20788 xa = ix86_expand_sse_fabs (res, &mask);
20789 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
20791 /* load nextafter (0.5, 0.0) */
20792 fmt = REAL_MODE_FORMAT (mode);
20793 real_2expN (&half_minus_pred_half, -(fmt->p) - 1);
20794 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
20796 /* xa = xa + 0.5 */
20797 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
20798 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
20800 /* xa = (double)(int64_t)xa */
20801 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
20802 expand_fix (xi, xa, 0);
20803 expand_float (xa, xi, 0);
20805 /* res = copysign (xa, operand1) */
20806 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
20808 emit_label (label);
20809 LABEL_NUSES (label) = 1;
20811 emit_move_insn (operand0, res);
20814 #include "gt-i386.h"