/* Subroutines used for code generation on IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-codes.h"
#include "insn-attr.h"
#include "flags.h"
#include "c-common.h"
#include "except.h"
#include "function.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "basic-block.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "cgraph.h"
#include "gimple.h"
#include "dwarf2.h"
#include "df.h"
#include "tm-constrs.h"
#include "params.h"

static int x86_builtin_vectorization_cost (bool);
static rtx legitimize_dllimport_symbol (rtx, bool);

#ifndef CHECK_STACK_LIMIT
#define CHECK_STACK_LIMIT (-1)
#endif

/* Return index of given mode in mult and division cost tables.  */
#define MODE_INDEX(mode)				\
  ((mode) == QImode ? 0					\
   : (mode) == HImode ? 1				\
   : (mode) == SImode ? 2				\
   : (mode) == DImode ? 3				\
   : 4)
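
/* For example, the SImode row of a per-mode cost table is selected as
   cost->mult_init[MODE_INDEX (SImode)], i.e. index 2, and modes wider
   than DImode fall into the catch-all "other" slot at index 4.
   (mult_init is the field name the multiply table is assumed to have in
   struct processor_costs; see i386.h.)  */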

/* Processor costs (relative to an add) */
/* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes.  */
#define COSTS_N_BYTES(N) ((N) * 2)
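
/* Under those assumptions COSTS_N_BYTES (2) == 4 == COSTS_N_INSNS (1),
   i.e. a typical two-byte addition weighs the same on the size scale used
   for -Os as one add does on the speed scale.  */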

#define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
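
/* Each stringop_algs initializer below reads as {alg_for_unknown_size,
   {{max_size, alg}, ...}}: ALG is used for blocks of at most MAX_SIZE
   bytes, and a max of -1 terminates the list, covering all larger sizes.
   Every cost table carries a pair of such descriptors for memcpy and a
   pair for memset; judging from generic32/generic64 below, the first
   element of each pair is used for 32-bit and the second for 64-bit
   code, so DUMMY_STRINGOP_ALGS fills the slot a given CPU never uses.  */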

static const
struct processor_costs size_cost = {	/* costs for tuning for size */
  COSTS_N_BYTES (2),			/* cost of an add instruction */
  COSTS_N_BYTES (3),			/* cost of a lea instruction */
  COSTS_N_BYTES (2),			/* variable shift costs */
  COSTS_N_BYTES (3),			/* constant shift costs */
  {COSTS_N_BYTES (3),			/* cost of starting multiply for QI */
   COSTS_N_BYTES (3),			/*				 HI */
   COSTS_N_BYTES (3),			/*				 SI */
   COSTS_N_BYTES (3),			/*				 DI */
   COSTS_N_BYTES (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_BYTES (3),			/* cost of a divide/mod for QI */
   COSTS_N_BYTES (3),			/*			    HI */
   COSTS_N_BYTES (3),			/*			    SI */
   COSTS_N_BYTES (3),			/*			    DI */
   COSTS_N_BYTES (5)},			/*			 other */
  COSTS_N_BYTES (3),			/* cost of movsx */
  COSTS_N_BYTES (3),			/* cost of movzx */
  0,					/* "large" insn */
  2,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {2, 2, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 2},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {2, 2, 2},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  3,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {3, 3},				/* cost of storing MMX registers
					   in SImode and DImode */
  3,					/* cost of moving SSE register */
  {3, 3, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {3, 3, 3},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_BYTES (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_BYTES (2),			/* cost of FMUL instruction.  */
  COSTS_N_BYTES (2),			/* cost of FDIV instruction.  */
  COSTS_N_BYTES (2),			/* cost of FABS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FCHS instruction.  */
  COSTS_N_BYTES (2),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  1,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  1,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

/* Processor costs (relative to an add) */
static const
struct processor_costs i386_cost = {	/* 386 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (6),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (6),			/*				 HI */
   COSTS_N_INSNS (6),			/*				 SI */
   COSTS_N_INSNS (6),			/*				 DI */
   COSTS_N_INSNS (6)},			/*			      other */
  COSTS_N_INSNS (1),			/* cost of multiply per each bit set */
  {COSTS_N_INSNS (23),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/*			    HI */
   COSTS_N_INSNS (23),			/*			    SI */
   COSTS_N_INSNS (23),			/*			    DI */
   COSTS_N_INSNS (23)},			/*			 other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  0,					/* size of l1 cache */
  0,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (23),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (27),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (88),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (22),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (24),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (122),			/* cost of FSQRT instruction.  */
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs i486_cost = {	/* 486 specific costs */
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (3),			/* variable shift costs */
  COSTS_N_INSNS (2),			/* constant shift costs */
  {COSTS_N_INSNS (12),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (12),			/*				 HI */
   COSTS_N_INSNS (12),			/*				 SI */
   COSTS_N_INSNS (12),			/*				 DI */
   COSTS_N_INSNS (12)},			/*			      other */
  1,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (40),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (40),			/*			    HI */
   COSTS_N_INSNS (40),			/*			    SI */
   COSTS_N_INSNS (40),			/*			    DI */
   COSTS_N_INSNS (40)},			/*			 other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  15,					/* "large" insn */
  3,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {8, 8, 8},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {8, 8, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  4,					/* size of l1 cache.  486 has 8kB cache
					   shared for code and data, so 4kB is
					   not really precise.  */
  4,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (16),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (73),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (83),			/* cost of FSQRT instruction.  */
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (11),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (11),			/*				 HI */
   COSTS_N_INSNS (11),			/*				 SI */
   COSTS_N_INSNS (11),			/*				 DI */
   COSTS_N_INSNS (11)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (25),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (25),			/*			    HI */
   COSTS_N_INSNS (25),			/*			    SI */
   COSTS_N_INSNS (25),			/*			    DI */
   COSTS_N_INSNS (25)},			/*			 other */
  COSTS_N_INSNS (3),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  6,					/* cost for loading QImode using movzbl */
  {2, 4, 2},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 4, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  8,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 8, 16},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 8, 16},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  8,					/* size of l2 cache */
  0,					/* size of prefetch block */
  0,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (3),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (39),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (70),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentiumpro_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (4),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (4),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (4)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (17),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (17),			/*			    HI */
   COSTS_N_INSNS (17),			/*			    SI */
   COSTS_N_INSNS (17),			/*			    DI */
   COSTS_N_INSNS (17)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 2, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache */
  32,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes
     (we ensure the alignment).  For small blocks the inline loop is still a
     noticeable win, for bigger blocks either rep movsl or rep movsb is the
     way to go.  Rep movsb has apparently more expensive startup time in CPU,
     but after 4K the difference is down in the noise.  */
  {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{rep_prefix_4_byte, {{1024, unrolled_loop},
			{8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs geode_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (2),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (7),			/*				 SI */
   COSTS_N_INSNS (7),			/*				 DI */
   COSTS_N_INSNS (7)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (15),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (23),			/*			    HI */
   COSTS_N_INSNS (39),			/*			    SI */
   COSTS_N_INSNS (39),			/*			    DI */
   COSTS_N_INSNS (39)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  1,					/* cost for loading QImode using movzbl */
  {1, 1, 1},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {1, 1, 1},				/* cost of storing integer registers */
  1,					/* cost of reg,reg fld/fst */
  {1, 1, 1},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 6, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */

  1,					/* cost of moving MMX register */
  {1, 1},				/* cost of loading MMX registers
					   in SImode and DImode */
  {1, 1},				/* cost of storing MMX registers
					   in SImode and DImode */
  1,					/* cost of moving SSE register */
  {1, 1, 1},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {1, 1, 1},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  1,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  128,					/* size of l2 cache.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (11),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (47),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (54),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k6_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (3),			/*				 DI */
   COSTS_N_INSNS (3)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (18),			/*			    HI */
   COSTS_N_INSNS (18),			/*			    SI */
   COSTS_N_INSNS (18),			/*			    DI */
   COSTS_N_INSNS (18)},			/*			 other */
  COSTS_N_INSNS (2),			/* cost of movsx */
  COSTS_N_INSNS (2),			/* cost of movzx */
  8,					/* "large" insn */
  4,					/* MOVE_RATIO */
  3,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {2, 2, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  6,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  32,					/* size of l2 cache.  Some models
					   have integrated l2 cache, but
					   optimizing for k6 is not important
					   enough to worry about that.  */
  32,					/* size of prefetch block */
  1,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (2),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (2),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (56),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (56),			/* cost of FSQRT instruction.  */
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs athlon_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (5),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (5),			/*				 HI */
   COSTS_N_INSNS (5),			/*				 SI */
   COSTS_N_INSNS (5),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {4, 4},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  5,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (24),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* For some reason, Athlon deals better with REP prefix (relative to loops)
     compared to K8.  Alignment becomes important after 8 bytes for memcpy and
     128 bytes for memset.  */
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs k8_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 3, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */
  /* K8 has optimized REP instructions for medium-sized blocks, but for very
     small blocks it is better to use a loop.  For large blocks, libcall can
     do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar_load_cost.  */
  2,					/* scalar_store_cost.  */
  5,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  3,					/* vec_unalign_load_cost.  */
  3,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  2,					/* cond_not_taken_branch_cost.  */
};

struct processor_costs amdfam10_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (2),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (5)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (19),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (35),			/*			    HI */
   COSTS_N_INSNS (51),			/*			    SI */
   COSTS_N_INSNS (83),			/*			    DI */
   COSTS_N_INSNS (83)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  9,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {3, 4, 3},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {3, 4, 3},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {4, 4, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {3, 3},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {4, 4, 3},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 5},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  3,					/* MMX or SSE register to integer */
  					/* On K8:
					     MOVD reg64, xmmreg  Double  FSTORE 4
					     MOVD reg32, xmmreg  Double  FSTORE 4
					   On AMDFAM10:
					     MOVD reg64, xmmreg  Double  FADD 3
							      1/1  1/1
					     MOVD reg32, xmmreg  Double  FADD 3
							      1/1  1/1  */
  64,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  /* New AMD processors never drop prefetches; if they cannot be performed
     immediately, they are queued.  We set the number of simultaneous
     prefetches to a large constant to reflect this (it probably is not a
     good idea not to limit the number of prefetches at all, as their
     execution also takes some time).  */
  100,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (4),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (4),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (19),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (35),			/* cost of FSQRT instruction.  */

  /* AMDFAM10 has optimized REP instructions for medium-sized blocks, but for
     very small blocks it is better to use a loop.  For large blocks, libcall
     can do nontemporal accesses and beat inline code considerably.  */
  {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {24, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  4,					/* scalar_stmt_cost.  */
  2,					/* scalar_load_cost.  */
  2,					/* scalar_store_cost.  */
  6,					/* vec_stmt_cost.  */
  0,					/* vec_to_scalar_cost.  */
  2,					/* scalar_to_vec_cost.  */
  2,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  2,					/* vec_store_cost.  */
  2,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs pentium4_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (3),			/* cost of a lea instruction */
  COSTS_N_INSNS (4),			/* variable shift costs */
  COSTS_N_INSNS (4),			/* constant shift costs */
  {COSTS_N_INSNS (15),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (15),			/*				 HI */
   COSTS_N_INSNS (15),			/*				 SI */
   COSTS_N_INSNS (15),			/*				 DI */
   COSTS_N_INSNS (15)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (56),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (56),			/*			    HI */
   COSTS_N_INSNS (56),			/*			    SI */
   COSTS_N_INSNS (56),			/*			    DI */
   COSTS_N_INSNS (56)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  6,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {4, 5, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {2, 3, 2},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {2, 2, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 6},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {2, 2},				/* cost of loading MMX registers
					   in SImode and DImode */
  {2, 2},				/* cost of storing MMX registers
					   in SImode and DImode */
  12,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {2, 2, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  10,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  2,					/* Branch cost */
  COSTS_N_INSNS (5),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (7),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (43),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (2),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (2),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (43),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs nocona_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1),			/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (10),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (10),			/*				 HI */
   COSTS_N_INSNS (10),			/*				 SI */
   COSTS_N_INSNS (10),			/*				 DI */
   COSTS_N_INSNS (10)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (66),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (66),			/*			    HI */
   COSTS_N_INSNS (66),			/*			    SI */
   COSTS_N_INSNS (66),			/*			    DI */
   COSTS_N_INSNS (66)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  16,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  3,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  6,					/* cost of moving MMX register */
  {12, 12},				/* cost of loading MMX registers
					   in SImode and DImode */
  {12, 12},				/* cost of storing MMX registers
					   in SImode and DImode */
  6,					/* cost of moving SSE register */
  {12, 12, 12},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {12, 12, 12},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  8,					/* MMX or SSE register to integer */
  8,					/* size of l1 cache.  */
  1024,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  1,					/* Branch cost */
  COSTS_N_INSNS (6),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (40),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (3),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (3),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (44),			/* cost of FSQRT instruction.  */
  {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
	      {100000, unrolled_loop}, {-1, libcall}}}},
  {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
	      {-1, libcall}}},
   {libcall, {{24, loop}, {64, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

static const
struct processor_costs core2_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (3),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (3),			/*				 DI */
   COSTS_N_INSNS (3)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (22),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (22),			/*			    HI */
   COSTS_N_INSNS (22),			/*			    SI */
   COSTS_N_INSNS (22),			/*			    DI */
   COSTS_N_INSNS (22)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  16,					/* MOVE_RATIO */
  2,					/* cost for loading QImode using movzbl */
  {6, 6, 6},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  2,					/* cost of reg,reg fld/fst */
  {6, 6, 6},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {4, 4, 4},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {6, 6},				/* cost of loading MMX registers
					   in SImode and DImode */
  {4, 4},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {6, 6, 6},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {4, 4, 4},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  2,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  2048,					/* size of l2 cache.  */
  128,					/* size of prefetch block */
  8,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (3),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (5),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (32),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (1),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (1),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (58),			/* cost of FSQRT instruction.  */
  {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
   {libcall, {{32, loop}, {64, rep_prefix_4_byte},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {{libcall, {{8, loop}, {15, unrolled_loop},
	      {2048, rep_prefix_4_byte}, {-1, libcall}}},
   {libcall, {{24, loop}, {32, unrolled_loop},
	      {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

/* Generic64 should produce code tuned for Nocona and K8.  */
static const
struct processor_costs generic64_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  /* On all chips taken into consideration lea is 2 cycles and more.  With
     this cost however our current implementation of synth_mult results in
     the use of unnecessary temporary registers causing regression on several
     SPECfp benchmarks.  */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (2)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  512,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
     value is increased to the perhaps more appropriate value of 5.  */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  {DUMMY_STRINGOP_ALGS,
   {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

/* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona
   and K8.  */
static const
struct processor_costs generic32_cost = {
  COSTS_N_INSNS (1),			/* cost of an add instruction */
  COSTS_N_INSNS (1) + 1,		/* cost of a lea instruction */
  COSTS_N_INSNS (1),			/* variable shift costs */
  COSTS_N_INSNS (1),			/* constant shift costs */
  {COSTS_N_INSNS (3),			/* cost of starting multiply for QI */
   COSTS_N_INSNS (4),			/*				 HI */
   COSTS_N_INSNS (3),			/*				 SI */
   COSTS_N_INSNS (4),			/*				 DI */
   COSTS_N_INSNS (2)},			/*			      other */
  0,					/* cost of multiply per each bit set */
  {COSTS_N_INSNS (18),			/* cost of a divide/mod for QI */
   COSTS_N_INSNS (26),			/*			    HI */
   COSTS_N_INSNS (42),			/*			    SI */
   COSTS_N_INSNS (74),			/*			    DI */
   COSTS_N_INSNS (74)},			/*			 other */
  COSTS_N_INSNS (1),			/* cost of movsx */
  COSTS_N_INSNS (1),			/* cost of movzx */
  8,					/* "large" insn */
  17,					/* MOVE_RATIO */
  4,					/* cost for loading QImode using movzbl */
  {4, 4, 4},				/* cost of loading integer registers
					   in QImode, HImode and SImode.
					   Relative to reg-reg move (2).  */
  {4, 4, 4},				/* cost of storing integer registers */
  4,					/* cost of reg,reg fld/fst */
  {12, 12, 12},				/* cost of loading fp registers
					   in SFmode, DFmode and XFmode */
  {6, 6, 8},				/* cost of storing fp registers
					   in SFmode, DFmode and XFmode */
  2,					/* cost of moving MMX register */
  {8, 8},				/* cost of loading MMX registers
					   in SImode and DImode */
  {8, 8},				/* cost of storing MMX registers
					   in SImode and DImode */
  2,					/* cost of moving SSE register */
  {8, 8, 8},				/* cost of loading SSE registers
					   in SImode, DImode and TImode */
  {8, 8, 8},				/* cost of storing SSE registers
					   in SImode, DImode and TImode */
  5,					/* MMX or SSE register to integer */
  32,					/* size of l1 cache.  */
  256,					/* size of l2 cache.  */
  64,					/* size of prefetch block */
  6,					/* number of parallel prefetches */
  3,					/* Branch cost */
  COSTS_N_INSNS (8),			/* cost of FADD and FSUB insns.  */
  COSTS_N_INSNS (8),			/* cost of FMUL instruction.  */
  COSTS_N_INSNS (20),			/* cost of FDIV instruction.  */
  COSTS_N_INSNS (8),			/* cost of FABS instruction.  */
  COSTS_N_INSNS (8),			/* cost of FCHS instruction.  */
  COSTS_N_INSNS (40),			/* cost of FSQRT instruction.  */
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
   DUMMY_STRINGOP_ALGS},
  1,					/* scalar_stmt_cost.  */
  1,					/* scalar_load_cost.  */
  1,					/* scalar_store_cost.  */
  1,					/* vec_stmt_cost.  */
  1,					/* vec_to_scalar_cost.  */
  1,					/* scalar_to_vec_cost.  */
  1,					/* vec_align_load_cost.  */
  2,					/* vec_unalign_load_cost.  */
  1,					/* vec_store_cost.  */
  3,					/* cond_taken_branch_cost.  */
  1,					/* cond_not_taken_branch_cost.  */
};

const struct processor_costs *ix86_cost = &pentium_cost;
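
/* Note that this is only a placeholder default; option processing is
   expected to repoint ix86_cost at the cost table matching the selected
   -mtune CPU (e.g. &k8_cost), so &pentium_cost matters only until the
   options have been handled.  */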

/* Processor feature/optimization bitmasks.  */
#define m_386 (1<<PROCESSOR_I386)
#define m_486 (1<<PROCESSOR_I486)
#define m_PENT (1<<PROCESSOR_PENTIUM)
#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
#define m_NOCONA (1<<PROCESSOR_NOCONA)
#define m_CORE2 (1<<PROCESSOR_CORE2)

#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
#define m_K6_GEODE (m_K6 | m_GEODE)
#define m_K8 (1<<PROCESSOR_K8)
#define m_ATHLON (1<<PROCESSOR_ATHLON)
#define m_ATHLON_K8 (m_K8 | m_ATHLON)
#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
#define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)

#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)

/* Generic instruction choice should be a common subset of supported CPUs
   (PPro/PENT4/NOCONA/CORE2/Athlon/K8).  */
#define m_GENERIC (m_GENERIC32 | m_GENERIC64)

/* Feature tests against the various tunings.  */
unsigned char ix86_tune_features[X86_TUNE_LAST];

/* Feature tests against the various tunings used to create ix86_tune_features
   based on the processor mask.  */
static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
  /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
     negatively, so enabling for Generic64 seems like a good code size
     tradeoff.  We can't enable it for 32bit generic because it does not
     work well with PPro base chips.  */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,

  /* X86_TUNE_PUSH_MEMORY */
  m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ZERO_EXTEND_WITH_AND */
  m_486 | m_PENT,

  /* X86_TUNE_USE_BIT_TEST */
  m_386,

  /* X86_TUNE_UNROLL_STRLEN */
  m_486 | m_PENT | m_PPRO | m_AMD_MULTIPLE | m_K6 | m_CORE2 | m_GENERIC,

  /* X86_TUNE_DEEP_BRANCH_PREDICTION */
  m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,

  /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
     on simulation result.  But after P4 was made, no performance benefit
     was observed with branch hints.  It also increases the code size.
     As a result, icc never generates branch hints.  */
  0,

  /* X86_TUNE_DOUBLE_WITH_ADD */
  ~m_386,

  /* X86_TUNE_USE_SAHF */
  m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
  | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
     partial dependencies.  */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA
  | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,

  /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
     register stalls on Generic32 compilation setting as well.  However
     in the current implementation the partial register stalls are not
     eliminated very well - they can be introduced via subregs synthesized
     by combine and can happen in caller/callee saving sequences.  Because
     this option pays back little on PPro based chips and is in conflict
     with partial reg dependencies used by Athlon/P4 based chips, it is
     better to leave it off for generic32 for now.  */
  m_PPRO,

  /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
  m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_HIMODE_FIOP */
  m_386 | m_486 | m_K6_GEODE,

  /* X86_TUNE_USE_SIMODE_FIOP */
  ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_MOV0 */
  m_K6,

  /* X86_TUNE_USE_CLTD */
  ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC),

  /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx.  */
  m_PENT4,

  /* X86_TUNE_SPLIT_LONG_MOVES */
  m_PPRO,

  /* X86_TUNE_READ_MODIFY_WRITE */
  ~m_PENT,

  /* X86_TUNE_READ_MODIFY */
  ~(m_PENT | m_PPRO),

  /* X86_TUNE_PROMOTE_QIMODE */
  m_K6_GEODE | m_PENT | m_386 | m_486 | m_AMD_MULTIPLE | m_CORE2
  | m_GENERIC /* | m_PENT4 ? */,

  /* X86_TUNE_FAST_PREFIX */
  ~(m_PENT | m_486 | m_386),

  /* X86_TUNE_SINGLE_STRINGOP */
  m_386 | m_PENT4 | m_NOCONA,

  /* X86_TUNE_QIMODE_MATH */
  ~0,

  /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
     register stalls.  Just like X86_TUNE_PARTIAL_REG_STALL this option
     might be considered for Generic32 if our scheme for avoiding partial
     stalls were more effective.  */
  ~m_PPRO,

  /* X86_TUNE_PROMOTE_QI_REGS */
  0,

  /* X86_TUNE_PROMOTE_HI_REGS */
  m_PPRO,

  /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop.  */
  m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_ADD_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_K6_GEODE | m_386
  | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_4 */
  m_AMD_MULTIPLE | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SUB_ESP_8 */
  m_AMD_MULTIPLE | m_PPRO | m_386 | m_486
  | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
     for DFmode copies */
  ~(m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
    | m_GENERIC | m_GEODE),

  /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
     conflict here in between PPro/Pentium4 based chips that treat 128bit
     SSE registers as single units versus K8 based chips that divide SSE
     registers into two 64bit halves.  This knob promotes all store
     destinations to be 128bit to allow register renaming on 128bit SSE
     units, but usually results in one extra microop on 64bit SSE units.
     Experimental results show that disabling this option on P4 brings over
     20% SPECfp regression, while enabling it on K8 brings roughly 2.4%
     regression that can be partly masked by careful scheduling of moves.  */
  m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_AMDFAM10,

  /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
  m_AMDFAM10,

  /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
     are resolved on SSE register parts instead of whole registers, so we may
     maintain just lower part of scalar values in proper format leaving the
     upper part undefined.  */
  m_ATHLON_K8,

  /* X86_TUNE_SSE_TYPELESS_STORES */
  m_AMD_MULTIPLE,

  /* X86_TUNE_SSE_LOAD0_BY_PXOR */
  m_PPRO | m_PENT4 | m_NOCONA,

  /* X86_TUNE_MEMORY_MISMATCH_STALL */
  m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_PROLOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EPILOGUE_USING_MOVE */
  m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHIFT1 */
  ~m_486,

  /* X86_TUNE_USE_FFREEP */
  m_AMD_MULTIPLE,

  /* X86_TUNE_INTER_UNIT_MOVES */
  ~(m_AMD_MULTIPLE | m_GENERIC),

  /* X86_TUNE_INTER_UNIT_CONVERSIONS */
  ~(m_AMDFAM10),

  /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
     than 4 branch instructions in the 16 byte window.  */
  m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SCHEDULE */
  m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_BT */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_USE_INCDEC */
  ~(m_PENT4 | m_NOCONA | m_GENERIC),

  /* X86_TUNE_PAD_RETURNS */
  m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,

  /* X86_TUNE_EXT_80387_CONSTANTS */
  m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC,

  /* X86_TUNE_SHORTEN_X87_SSE */
  ~m_K8,

  /* X86_TUNE_AVOID_VECTOR_DECODE */
  m_K8 | m_GENERIC64,

  /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have the same latency for
     HImode and SImode multiply, but 386 and 486 do HImode multiply faster.  */
  ~(m_386 | m_486),

  /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
     vector path on AMD machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
     machines.  */
  m_K8 | m_GENERIC64 | m_AMDFAM10,

  /* X86_TUNE_MOVE_M1_VIA_OR: On Pentiums, it is faster to load -1 via OR
     than a MOV.  */
  m_PENT,

  /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
     but one byte longer.  */
  m_PENT,

  /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with memory
     operand that cannot be represented using a modRM byte.  The XOR
     replacement is long decoded, so this split helps here as well.  */
  m_K6,

  /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
     from integer to FP.  */
  m_AMDFAM10,

  /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
     with a subsequent conditional jump instruction into a single
     compare-and-branch uop.  */
  m_CORE2,
};
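
/* A sketch of how the table above is consumed: option handling builds a
   one-bit mask for the CPU selected by -mtune and tests each row against
   it, roughly

     unsigned int ix86_tune_mask = 1u << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; i++)
       ix86_tune_features[i]
	 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);

   so ix86_tune_features[X86_TUNE_SCHEDULE], for instance, ends up nonzero
   exactly when the tuned-for CPU appears in the X86_TUNE_SCHEDULE row.
   (Illustrative only; the actual loop lives in the option-handling code.)  */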

/* Feature tests against the various architecture variations.  */
unsigned char ix86_arch_features[X86_ARCH_LAST];

/* Feature tests against the various architecture variations, used to create
   ix86_arch_features based on the processor mask.  */
static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
  /* X86_ARCH_CMOVE: Conditional move was added for PentiumPro.  */
  ~(m_386 | m_486 | m_PENT | m_K6),

  /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486.  */
  ~m_386,

  /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for Pentium.  */
  ~(m_386 | m_486),

  /* X86_ARCH_XADD: Exchange and add was added for 80486.  */
  ~m_386,

  /* X86_ARCH_BSWAP: Byteswap was added for 80486.  */
  ~m_386,
};
1471 static const unsigned int x86_accumulate_outgoing_args
1472 = m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1474 static const unsigned int x86_arch_always_fancy_math_387
1475 = m_PENT | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1476 | m_NOCONA | m_CORE2 | m_GENERIC;
1478 static enum stringop_alg stringop_alg = no_stringop;
1480 /* If the average insn count for a single function invocation is
1481 lower than this constant, emit fast (but longer) prologue and
1482 epilogue code. */
1483 #define FAST_PROLOGUE_INSN_COUNT 20
1485 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1486 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1487 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1488 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1490 /* Array of the smallest class containing reg number REGNO, indexed by
1491 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1493 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1495 /* ax, dx, cx, bx */
1496 AREG, DREG, CREG, BREG,
1497 /* si, di, bp, sp */
1498 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1499 /* FP registers */
1500 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1501 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1502 /* arg pointer */
1503 NON_Q_REGS,
1504 /* flags, fpsr, fpcr, frame */
1505 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1506 /* SSE registers */
1507 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1508 SSE_REGS, SSE_REGS,
1509 /* MMX registers */
1510 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1511 MMX_REGS, MMX_REGS,
1512 /* REX registers */
1513 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1514 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1515 /* SSE REX registers */
1516 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1517 SSE_REGS, SSE_REGS,
1520 /* The "default" register map used in 32-bit mode. */
1522 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1524 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1525 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1526 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1527 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1528 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1529 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1530 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1533 static int const x86_64_int_parameter_registers[6] =
1535 5 /*RDI*/, 4 /*RSI*/, 1 /*RDX*/, 2 /*RCX*/,
1536 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1539 static int const x86_64_ms_abi_int_parameter_registers[4] =
1541 2 /*RCX*/, 1 /*RDX*/,
1542 FIRST_REX_INT_REG /*R8 */, FIRST_REX_INT_REG + 1 /*R9 */
1545 static int const x86_64_int_return_registers[4] =
1547 0 /*RAX*/, 1 /*RDX*/, 5 /*RDI*/, 4 /*RSI*/
1550 /* The "default" register map used in 64-bit mode. */
1551 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1553 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1554 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1555 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1556 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1557 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1558 8,9,10,11,12,13,14,15, /* extended integer registers */
1559 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1562 /* Define the register numbers to be used in Dwarf debugging information.
1563 The SVR4 reference port C compiler uses the following register numbers
1564 in its Dwarf output code:
1565 0 for %eax (gcc regno = 0)
1566 1 for %ecx (gcc regno = 2)
1567 2 for %edx (gcc regno = 1)
1568 3 for %ebx (gcc regno = 3)
1569 4 for %esp (gcc regno = 7)
1570 5 for %ebp (gcc regno = 6)
1571 6 for %esi (gcc regno = 4)
1572 7 for %edi (gcc regno = 5)
1573 The following three DWARF register numbers are never generated by
1574 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1575 believes these numbers have these meanings.
1576 8 for %eip (no gcc equivalent)
1577 9 for %eflags (gcc regno = 17)
1578 10 for %trapno (no gcc equivalent)
1579 It is not at all clear how we should number the FP stack registers
1580 for the x86 architecture. If the version of SDB on x86/svr4 were
1581 a bit less brain dead with respect to floating-point then we would
1582 have a precedent to follow with respect to DWARF register numbers
1583 for x86 FP registers, but the SDB on x86/svr4 is so completely
1584 broken with respect to FP registers that it is hardly worth thinking
1585 of it as something to strive for compatibility with.
1586 The version of x86/svr4 SDB I have at the moment does (partially)
1587 seem to believe that DWARF register number 11 is associated with
1588 the x86 register %st(0), but that's about all. Higher DWARF
1589 register numbers don't seem to be associated with anything in
1590 particular, and even for DWARF regno 11, SDB only seems to under-
1591 stand that it should say that a variable lives in %st(0) (when
1592 asked via an `=' command) if we said it was in DWARF regno 11,
1593 but SDB still prints garbage when asked for the value of the
1594 variable in question (via a `/' command).
1595 (Also note that the labels SDB prints for various FP stack regs
1596 when doing an `x' command are all wrong.)
1597 Note that these problems generally don't affect the native SVR4
1598 C compiler because it doesn't allow the use of -O with -g and
1599 because when it is *not* optimizing, it allocates a memory
1600 location for each floating-point variable, and the memory
1601 location is what gets described in the DWARF AT_location
1602 attribute for the variable in question.
1603 Regardless of the severe mental illness of the x86/svr4 SDB, we
1604 do something sensible here and we use the following DWARF
1605 register numbers. Note that these are all stack-top-relative
1606 numbers.
1607 11 for %st(0) (gcc regno = 8)
1608 12 for %st(1) (gcc regno = 9)
1609 13 for %st(2) (gcc regno = 10)
1610 14 for %st(3) (gcc regno = 11)
1611 15 for %st(4) (gcc regno = 12)
1612 16 for %st(5) (gcc regno = 13)
1613 17 for %st(6) (gcc regno = 14)
1614 18 for %st(7) (gcc regno = 15)
1616 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1618 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1619 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1620 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1621 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1622 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1623 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1624 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1627 /* Test and compare insns in i386.md store the information needed to
1628 generate branch and scc insns here. */
1630 rtx ix86_compare_op0 = NULL_RTX;
1631 rtx ix86_compare_op1 = NULL_RTX;
1632 rtx ix86_compare_emitted = NULL_RTX;
1634 /* Size of the register save area. */
1635 #define X86_64_VARARGS_SIZE (X86_64_REGPARM_MAX * UNITS_PER_WORD + X86_64_SSE_REGPARM_MAX * 16)
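/* A worked example (a sketch, assuming the usual SysV values
   X86_64_REGPARM_MAX == 6, X86_64_SSE_REGPARM_MAX == 8 and
   UNITS_PER_WORD == 8): the save area is 6 * 8 + 8 * 16 == 176 bytes,
   i.e. room for the six integer argument registers followed by the
   eight 16-byte SSE argument registers.  */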
1637 /* Define the structure for the machine field in struct function. */
1639 struct stack_local_entry GTY(())
1641 unsigned short mode;
1642 unsigned short n;
1643 rtx rtl;
1644 struct stack_local_entry *next;
1647 /* Structure describing stack frame layout.
1648 Stack grows downward:
1650 [arguments]
1651 <- ARG_POINTER
1652 saved pc
1654 saved frame pointer if frame_pointer_needed
1655 <- HARD_FRAME_POINTER
1656 [saved regs]
1658 [padding1] \
1660 [va_arg registers] (
1661 > to_allocate <- FRAME_POINTER
1662 [frame] (
1664 [padding2] /
1666 struct ix86_frame
1668 int nregs;
1669 int padding1;
1670 int va_arg_size;
1671 HOST_WIDE_INT frame;
1672 int padding2;
1673 int outgoing_arguments_size;
1674 int red_zone_size;
1676 HOST_WIDE_INT to_allocate;
1677 /* The offsets relative to ARG_POINTER. */
1678 HOST_WIDE_INT frame_pointer_offset;
1679 HOST_WIDE_INT hard_frame_pointer_offset;
1680 HOST_WIDE_INT stack_pointer_offset;
1682 /* When save_regs_using_mov is set, emit prologue using
1683 move instead of push instructions. */
1684 bool save_regs_using_mov;
1687 /* Code model option. */
1688 enum cmodel ix86_cmodel;
1689 /* Asm dialect. */
1690 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1691 /* TLS dialects. */
1692 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1694 /* Which unit we are generating floating point math for. */
1695 enum fpmath_unit ix86_fpmath;
1697 /* Which cpu are we scheduling for. */
1698 enum processor_type ix86_tune;
1700 /* Which instruction set architecture to use. */
1701 enum processor_type ix86_arch;
1703 /* True if the SSE prefetch instruction is not a NOP. */
1704 int x86_prefetch_sse;
1706 /* ix86_regparm_string as a number */
1707 static int ix86_regparm;
1709 /* -mstackrealign option */
1710 extern int ix86_force_align_arg_pointer;
1711 static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1713 static rtx (*ix86_gen_leave) (void);
1714 static rtx (*ix86_gen_pop1) (rtx);
1715 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1716 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1717 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx);
1718 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1719 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1721 /* Preferred alignment for stack boundary in bits. */
1722 unsigned int ix86_preferred_stack_boundary;
1724 /* Values 1-5: see jump.c */
1725 int ix86_branch_cost;
1727 /* Calling-ABI-specific va_list type nodes. */
1728 static GTY(()) tree sysv_va_list_type_node;
1729 static GTY(()) tree ms_va_list_type_node;
1731 /* Variables which are this size or smaller are put in the data/bss
1732 or ldata/lbss sections. */
1734 int ix86_section_threshold = 65536;
1736 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1737 char internal_label_prefix[16];
1738 int internal_label_prefix_len;
1740 /* Fence to use after loop using movnt. */
1741 tree x86_mfence;
1743 /* Register class used for passing a given 64-bit part of the argument.
1744 These represent classes as documented by the psABI, with the exception
1745 of the SSESF and SSEDF classes, which are basically the SSE class; GCC
1746 just uses an SFmode or DFmode move instead of DImode to avoid reformatting penalties.
1748 Similarly we play games with INTEGERSI_CLASS to use cheaper SImode moves
1749 whenever possible (the upper half then contains padding). */
1750 enum x86_64_reg_class
1752 X86_64_NO_CLASS,
1753 X86_64_INTEGER_CLASS,
1754 X86_64_INTEGERSI_CLASS,
1755 X86_64_SSE_CLASS,
1756 X86_64_SSESF_CLASS,
1757 X86_64_SSEDF_CLASS,
1758 X86_64_SSEUP_CLASS,
1759 X86_64_X87_CLASS,
1760 X86_64_X87UP_CLASS,
1761 X86_64_COMPLEX_X87_CLASS,
1762 X86_64_MEMORY_CLASS
1764 static const char * const x86_64_reg_class_name[] =
1766 "no", "integer", "integerSI", "sse", "sseSF", "sseDF",
1767 "sseup", "x87", "x87up", "cplx87", "no"
1770 #define MAX_CLASSES 4
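/* A sketch of the classification in practice (assuming the standard
   psABI rules): the aggregate

     struct example { double d; int i; };

   occupies two eightbytes.  The first is classified X86_64_SSEDF_CLASS,
   so D travels in an SSE register via a DFmode move; the second is
   X86_64_INTEGERSI_CLASS, so I travels in the low half of an integer
   register via an SImode move.  MAX_CLASSES bounds the number of such
   eightbyte classes a single argument may need.  */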
1772 /* Table of constants used by fldpi, fldln2, etc. */
1773 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1774 static bool ext_80387_constants_init = 0;
1777 static struct machine_function * ix86_init_machine_status (void);
1778 static rtx ix86_function_value (const_tree, const_tree, bool);
1779 static int ix86_function_regparm (const_tree, const_tree);
1780 static void ix86_compute_frame_layout (struct ix86_frame *);
1781 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1782 rtx, rtx, int);
1784 enum ix86_function_specific_strings
1786 IX86_FUNCTION_SPECIFIC_ARCH,
1787 IX86_FUNCTION_SPECIFIC_TUNE,
1788 IX86_FUNCTION_SPECIFIC_FPMATH,
1789 IX86_FUNCTION_SPECIFIC_MAX
1792 static char *ix86_target_string (int, int, const char *, const char *,
1793 const char *, bool);
1794 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1795 static void ix86_function_specific_save (struct cl_target_option *);
1796 static void ix86_function_specific_restore (struct cl_target_option *);
1797 static void ix86_function_specific_print (FILE *, int,
1798 struct cl_target_option *);
1799 static bool ix86_valid_option_attribute_p (tree, tree, tree, int);
1800 static bool ix86_valid_option_attribute_inner_p (tree, char *[]);
1801 static bool ix86_can_inline_p (tree, tree);
1802 static void ix86_set_current_function (tree);
1805 /* The svr4 ABI for the i386 says that records and unions are returned
1806 in memory. */
1807 #ifndef DEFAULT_PCC_STRUCT_RETURN
1808 #define DEFAULT_PCC_STRUCT_RETURN 1
1809 #endif
1811 /* Whether -mtune= or -march= were specified */
1812 static int ix86_tune_defaulted;
1813 static int ix86_arch_specified;
1815 /* Bit flags that specify the ISA we are compiling for. */
1816 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1818 /* A mask of ix86_isa_flags that includes bit X if X
1819 was set or cleared on the command line. */
1820 static int ix86_isa_flags_explicit;
1822 /* Define a set of ISAs which are available when a given ISA is
1823 enabled. MMX and SSE ISAs are handled separately. */
1825 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1826 #define OPTION_MASK_ISA_3DNOW_SET \
1827 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1829 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1830 #define OPTION_MASK_ISA_SSE2_SET \
1831 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1832 #define OPTION_MASK_ISA_SSE3_SET \
1833 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1834 #define OPTION_MASK_ISA_SSSE3_SET \
1835 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1836 #define OPTION_MASK_ISA_SSE4_1_SET \
1837 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1838 #define OPTION_MASK_ISA_SSE4_2_SET \
1839 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1841 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1842 as -msse4.2. */
1843 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1845 #define OPTION_MASK_ISA_SSE4A_SET \
1846 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1847 #define OPTION_MASK_ISA_SSE5_SET \
1848 (OPTION_MASK_ISA_SSE5 | OPTION_MASK_ISA_SSE4A_SET)
1850 /* AES and PCLMUL need SSE2 because they use xmm registers */
1851 #define OPTION_MASK_ISA_AES_SET \
1852 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1853 #define OPTION_MASK_ISA_PCLMUL_SET \
1854 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1856 #define OPTION_MASK_ISA_ABM_SET \
1857 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1858 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1859 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1860 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
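/* For instance, following the _SET chain above, -msse4.1 turns into

     OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3
     | OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE

   so enabling an ISA also enables every ISA it depends on.  */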
1862 /* Define a set of ISAs which aren't available when a given ISA is
1863 disabled. MMX and SSE ISAs are handled separately. */
1865 #define OPTION_MASK_ISA_MMX_UNSET \
1866 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1867 #define OPTION_MASK_ISA_3DNOW_UNSET \
1868 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1869 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1871 #define OPTION_MASK_ISA_SSE_UNSET \
1872 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1873 #define OPTION_MASK_ISA_SSE2_UNSET \
1874 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1875 #define OPTION_MASK_ISA_SSE3_UNSET \
1876 (OPTION_MASK_ISA_SSE3 \
1877 | OPTION_MASK_ISA_SSSE3_UNSET \
1878 | OPTION_MASK_ISA_SSE4A_UNSET )
1879 #define OPTION_MASK_ISA_SSSE3_UNSET \
1880 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1881 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1882 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1883 #define OPTION_MASK_ISA_SSE4_2_UNSET OPTION_MASK_ISA_SSE4_2
1885 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
1886 as -mno-sse4.1. */
1887 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
1889 #define OPTION_MASK_ISA_SSE4A_UNSET \
1890 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5_UNSET)
1891 #define OPTION_MASK_ISA_SSE5_UNSET OPTION_MASK_ISA_SSE5
1892 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
1893 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
1894 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
1895 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
1896 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
1897 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
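/* Conversely, following the _UNSET chain, OPTION_MASK_ISA_SSE3_UNSET is

     OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1
     | OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE5

   so -mno-sse3 also disables everything built on top of SSE3.  */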
1899 /* Vectorization library interface and handlers. */
1900 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
1901 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
1902 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
1904 /* Processor target table, indexed by processor number */
1905 struct ptt
1907 const struct processor_costs *cost; /* Processor costs */
1908 const int align_loop; /* Default alignments. */
1909 const int align_loop_max_skip;
1910 const int align_jump;
1911 const int align_jump_max_skip;
1912 const int align_func;
1915 static const struct ptt processor_target_table[PROCESSOR_max] =
1917 {&i386_cost, 4, 3, 4, 3, 4},
1918 {&i486_cost, 16, 15, 16, 15, 16},
1919 {&pentium_cost, 16, 7, 16, 7, 16},
1920 {&pentiumpro_cost, 16, 15, 16, 10, 16},
1921 {&geode_cost, 0, 0, 0, 0, 0},
1922 {&k6_cost, 32, 7, 32, 7, 32},
1923 {&athlon_cost, 16, 7, 16, 7, 16},
1924 {&pentium4_cost, 0, 0, 0, 0, 0},
1925 {&k8_cost, 16, 7, 16, 7, 16},
1926 {&nocona_cost, 0, 0, 0, 0, 0},
1927 {&core2_cost, 16, 10, 16, 10, 16},
1928 {&generic32_cost, 16, 7, 16, 7, 16},
1929 {&generic64_cost, 16, 10, 16, 10, 16},
1930 {&amdfam10_cost, 32, 24, 32, 7, 32}
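/* Reading one row of the table above: the k8 entry
     {&k8_cost, 16, 7, 16, 7, 16}
   uses the k8 cost model and asks for 16-byte alignment of loops and
   jumps (skipping at most 7 bytes of padding for each) and 16-byte
   alignment of function entries.  */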
1933 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
1935 "generic",
1936 "i386",
1937 "i486",
1938 "pentium",
1939 "pentium-mmx",
1940 "pentiumpro",
1941 "pentium2",
1942 "pentium3",
1943 "pentium4",
1944 "pentium-m",
1945 "prescott",
1946 "nocona",
1947 "core2",
1948 "geode",
1949 "k6",
1950 "k6-2",
1951 "k6-3",
1952 "athlon",
1953 "athlon-4",
1954 "k8",
1955 "amdfam10"
1958 /* Implement TARGET_HANDLE_OPTION. */
1960 static bool
1961 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
1963 switch (code)
1965 case OPT_mmmx:
1966 if (value)
1968 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
1969 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
1971 else
1973 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
1974 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
1976 return true;
1978 case OPT_m3dnow:
1979 if (value)
1981 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
1982 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
1984 else
1986 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
1987 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
1989 return true;
1991 case OPT_m3dnowa:
1992 return false;
1994 case OPT_msse:
1995 if (value)
1997 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
1998 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2000 else
2002 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2003 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2005 return true;
2007 case OPT_msse2:
2008 if (value)
2010 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2011 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2013 else
2015 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2016 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2018 return true;
2020 case OPT_msse3:
2021 if (value)
2023 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2024 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2026 else
2028 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2029 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2031 return true;
2033 case OPT_mssse3:
2034 if (value)
2036 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2037 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2039 else
2041 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2042 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2044 return true;
2046 case OPT_msse4_1:
2047 if (value)
2049 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2050 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2052 else
2054 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2055 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2057 return true;
2059 case OPT_msse4_2:
2060 if (value)
2062 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2063 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2065 else
2067 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2068 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2070 return true;
2072 case OPT_msse4:
2073 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2074 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2075 return true;
2077 case OPT_mno_sse4:
2078 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2079 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2080 return true;
2082 case OPT_msse4a:
2083 if (value)
2085 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2086 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2088 else
2090 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2091 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2093 return true;
2095 case OPT_msse5:
2096 if (value)
2098 ix86_isa_flags |= OPTION_MASK_ISA_SSE5_SET;
2099 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_SET;
2101 else
2103 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE5_UNSET;
2104 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE5_UNSET;
2106 return true;
2108 case OPT_mabm:
2109 if (value)
2111 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2112 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2114 else
2116 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2117 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2119 return true;
2121 case OPT_mpopcnt:
2122 if (value)
2124 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2125 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2127 else
2129 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2130 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2132 return true;
2134 case OPT_msahf:
2135 if (value)
2137 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2138 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2140 else
2142 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2143 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2145 return true;
2147 case OPT_mcx16:
2148 if (value)
2150 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2151 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2153 else
2155 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2156 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2158 return true;
2160 case OPT_maes:
2161 if (value)
2163 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2164 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2166 else
2168 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2169 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2171 return true;
2173 case OPT_mpclmul:
2174 if (value)
2176 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2177 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2179 else
2181 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2182 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2184 return true;
2186 default:
2187 return true;
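/* To illustrate how the _SET/_UNSET masks compose across the switch above
   (a sketch, not an exhaustive description): with "-msse2 -mno-sse" the
   first option ORs in OPTION_MASK_ISA_SSE2_SET (SSE2 and SSE), then the
   second clears OPTION_MASK_ISA_SSE_UNSET (SSE together with everything
   layered above it), so ix86_isa_flags ends up with no SSE bits while
   ix86_isa_flags_explicit records that the user decided all of them.  */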
2191 /* Return a string that documents the current -m options. The caller is
2192 responsible for freeing the string. */
2194 static char *
2195 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2196 const char *fpmath, bool add_nl_p)
2198 struct ix86_target_opts
2200 const char *option; /* option string */
2201 int mask; /* isa mask options */
2204 /* This table is ordered so that options like -msse5 or -msse4.2 that imply
2205 preceding options will match those first. */
2206 static struct ix86_target_opts isa_opts[] =
2208 { "-m64", OPTION_MASK_ISA_64BIT },
2209 { "-msse5", OPTION_MASK_ISA_SSE5 },
2210 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2211 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2212 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2213 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2214 { "-msse3", OPTION_MASK_ISA_SSE3 },
2215 { "-msse2", OPTION_MASK_ISA_SSE2 },
2216 { "-msse", OPTION_MASK_ISA_SSE },
2217 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2218 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2219 { "-mmmx", OPTION_MASK_ISA_MMX },
2220 { "-mabm", OPTION_MASK_ISA_ABM },
2221 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2222 { "-maes", OPTION_MASK_ISA_AES },
2223 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2226 /* Flag options. */
2227 static struct ix86_target_opts flag_opts[] =
2229 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2230 { "-m80387", MASK_80387 },
2231 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2232 { "-malign-double", MASK_ALIGN_DOUBLE },
2233 { "-mcld", MASK_CLD },
2234 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2235 { "-mieee-fp", MASK_IEEE_FP },
2236 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2237 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2238 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2239 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2240 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2241 { "-mno-fused-madd", MASK_NO_FUSED_MADD },
2242 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2243 { "-mno-red-zone", MASK_NO_RED_ZONE },
2244 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2245 { "-mrecip", MASK_RECIP },
2246 { "-mrtd", MASK_RTD },
2247 { "-msseregparm", MASK_SSEREGPARM },
2248 { "-mstack-arg-probe", MASK_STACK_PROBE },
2249 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2252 const char *opts[ (sizeof (isa_opts) / sizeof (isa_opts[0])
2253 + sizeof (flag_opts) / sizeof (flag_opts[0])
2254 + 6)][2];
2256 char isa_other[40];
2257 char target_other[40];
2258 unsigned num = 0;
2259 unsigned i, j;
2260 char *ret;
2261 char *ptr;
2262 size_t len;
2263 size_t line_len;
2264 size_t sep_len;
2266 memset (opts, '\0', sizeof (opts));
2268 /* Add -march= option. */
2269 if (arch)
2271 opts[num][0] = "-march=";
2272 opts[num++][1] = arch;
2275 /* Add -mtune= option. */
2276 if (tune)
2278 opts[num][0] = "-mtune=";
2279 opts[num++][1] = tune;
2282 /* Pick out the options in isa options. */
2283 for (i = 0; i < sizeof (isa_opts) / sizeof (isa_opts[0]); i++)
2285 if ((isa & isa_opts[i].mask) != 0)
2287 opts[num++][0] = isa_opts[i].option;
2288 isa &= ~ isa_opts[i].mask;
2292 if (isa && add_nl_p)
2294 opts[num++][0] = isa_other;
2295 sprintf (isa_other, "(other isa: 0x%x)", isa);
2298 /* Add flag options. */
2299 for (i = 0; i < sizeof (flag_opts) / sizeof (flag_opts[0]); i++)
2301 if ((flags & flag_opts[i].mask) != 0)
2303 opts[num++][0] = flag_opts[i].option;
2304 flags &= ~ flag_opts[i].mask;
2308 if (flags && add_nl_p)
2310 opts[num++][0] = target_other;
2311 sprintf (target_other, "(other flags: 0x%x)", flags);
2314 /* Add -mfpmath= option. */
2315 if (fpmath)
2317 opts[num][0] = "-mfpmath=";
2318 opts[num++][1] = fpmath;
2321 /* Any options? */
2322 if (num == 0)
2323 return NULL;
2325 gcc_assert (num < sizeof (opts) / sizeof (opts[0]));
2327 /* Size the string. */
2328 len = 0;
2329 sep_len = (add_nl_p) ? 3 : 1;
2330 for (i = 0; i < num; i++)
2332 len += sep_len;
2333 for (j = 0; j < 2; j++)
2334 if (opts[i][j])
2335 len += strlen (opts[i][j]);
2338 /* Build the string. */
2339 ret = ptr = (char *) xmalloc (len);
2340 line_len = 0;
2342 for (i = 0; i < num; i++)
2344 size_t len2[2];
2346 for (j = 0; j < 2; j++)
2347 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2349 if (i != 0)
2351 *ptr++ = ' ';
2352 line_len++;
2354 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2356 *ptr++ = '\\';
2357 *ptr++ = '\n';
2358 line_len = 0;
2362 for (j = 0; j < 2; j++)
2363 if (opts[i][j])
2365 memcpy (ptr, opts[i][j], len2[j]);
2366 ptr += len2[j];
2367 line_len += len2[j];
2371 *ptr = '\0';
2372 gcc_assert (ret + len >= ptr);
2374 return ret;
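/* As an illustration only (the exact output depends on the configured
   defaults), a 64-bit compilation with -march=core2 might produce a
   string along the lines of:

     -march=core2 -mtune=generic -m64 -mssse3 -msse3 -msse2 -msse -mmmx
     -mfpmath=sse

   with any unrecognized bits summarized by the "(other ...)" entries.  */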
2377 /* Function that is callable from the debugger to print the current
2378 options. */
2379 void
2380 ix86_debug_options (void)
2382 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2383 ix86_arch_string, ix86_tune_string,
2384 ix86_fpmath_string, true);
2386 if (opts)
2388 fprintf (stderr, "%s\n\n", opts);
2389 free (opts);
2391 else
2392 fprintf (stderr, "<no options>\n\n");
2394 return;
2397 /* Sometimes certain combinations of command options do not make
2398 sense on a particular target machine. You can define a macro
2399 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2400 defined, is executed once just after all the command options have
2401 been parsed.
2403 Don't use this macro to turn on various extra optimizations for
2404 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2406 void
2407 override_options (bool main_args_p)
2409 int i;
2410 unsigned int ix86_arch_mask, ix86_tune_mask;
2411 const char *prefix;
2412 const char *suffix;
2413 const char *sw;
2415 /* Comes from final.c -- no real reason to change it. */
2416 #define MAX_CODE_ALIGN 16
2418 enum pta_flags
2420 PTA_SSE = 1 << 0,
2421 PTA_SSE2 = 1 << 1,
2422 PTA_SSE3 = 1 << 2,
2423 PTA_MMX = 1 << 3,
2424 PTA_PREFETCH_SSE = 1 << 4,
2425 PTA_3DNOW = 1 << 5,
2426 PTA_3DNOW_A = 1 << 6,
2427 PTA_64BIT = 1 << 7,
2428 PTA_SSSE3 = 1 << 8,
2429 PTA_CX16 = 1 << 9,
2430 PTA_POPCNT = 1 << 10,
2431 PTA_ABM = 1 << 11,
2432 PTA_SSE4A = 1 << 12,
2433 PTA_NO_SAHF = 1 << 13,
2434 PTA_SSE4_1 = 1 << 14,
2435 PTA_SSE4_2 = 1 << 15,
2436 PTA_SSE5 = 1 << 16,
2437 PTA_AES = 1 << 17,
2438 PTA_PCLMUL = 1 << 18
2441 static struct pta
2443 const char *const name; /* processor name or nickname. */
2444 const enum processor_type processor;
2445 const unsigned /*enum pta_flags*/ flags;
2447 const processor_alias_table[] =
2449 {"i386", PROCESSOR_I386, 0},
2450 {"i486", PROCESSOR_I486, 0},
2451 {"i586", PROCESSOR_PENTIUM, 0},
2452 {"pentium", PROCESSOR_PENTIUM, 0},
2453 {"pentium-mmx", PROCESSOR_PENTIUM, PTA_MMX},
2454 {"winchip-c6", PROCESSOR_I486, PTA_MMX},
2455 {"winchip2", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
2456 {"c3", PROCESSOR_I486, PTA_MMX | PTA_3DNOW},
2457 {"c3-2", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2458 {"i686", PROCESSOR_PENTIUMPRO, 0},
2459 {"pentiumpro", PROCESSOR_PENTIUMPRO, 0},
2460 {"pentium2", PROCESSOR_PENTIUMPRO, PTA_MMX},
2461 {"pentium3", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2462 {"pentium3m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE},
2463 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_SSE2},
2464 {"pentium4", PROCESSOR_PENTIUM4, PTA_MMX |PTA_SSE | PTA_SSE2},
2465 {"pentium4m", PROCESSOR_PENTIUM4, PTA_MMX | PTA_SSE | PTA_SSE2},
2466 {"prescott", PROCESSOR_NOCONA, PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2467 {"nocona", PROCESSOR_NOCONA, (PTA_64BIT
2468 | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2469 | PTA_CX16 | PTA_NO_SAHF)},
2470 {"core2", PROCESSOR_CORE2, (PTA_64BIT
2471 | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2472 | PTA_SSSE3
2473 | PTA_CX16)},
2474 {"geode", PROCESSOR_GEODE, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2475 |PTA_PREFETCH_SSE)},
2476 {"k6", PROCESSOR_K6, PTA_MMX},
2477 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
2478 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
2479 {"athlon", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2480 | PTA_PREFETCH_SSE)},
2481 {"athlon-tbird", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2482 | PTA_PREFETCH_SSE)},
2483 {"athlon-4", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2484 | PTA_SSE)},
2485 {"athlon-xp", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2486 | PTA_SSE)},
2487 {"athlon-mp", PROCESSOR_ATHLON, (PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2488 | PTA_SSE)},
2489 {"x86-64", PROCESSOR_K8, (PTA_64BIT
2490 | PTA_MMX | PTA_SSE | PTA_SSE2
2491 | PTA_NO_SAHF)},
2492 {"k8", PROCESSOR_K8, (PTA_64BIT
2493 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2494 | PTA_SSE | PTA_SSE2
2495 | PTA_NO_SAHF)},
2496 {"k8-sse3", PROCESSOR_K8, (PTA_64BIT
2497 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2498 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2499 | PTA_NO_SAHF)},
2500 {"opteron", PROCESSOR_K8, (PTA_64BIT
2501 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2502 | PTA_SSE | PTA_SSE2
2503 | PTA_NO_SAHF)},
2504 {"opteron-sse3", PROCESSOR_K8, (PTA_64BIT
2505 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2506 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2507 | PTA_NO_SAHF)},
2508 {"athlon64", PROCESSOR_K8, (PTA_64BIT
2509 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2510 | PTA_SSE | PTA_SSE2
2511 | PTA_NO_SAHF)},
2512 {"athlon64-sse3", PROCESSOR_K8, (PTA_64BIT
2513 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2514 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2515 | PTA_NO_SAHF)},
2516 {"athlon-fx", PROCESSOR_K8, (PTA_64BIT
2517 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2518 | PTA_SSE | PTA_SSE2
2519 | PTA_NO_SAHF)},
2520 {"amdfam10", PROCESSOR_AMDFAM10, (PTA_64BIT
2521 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2522 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2523 | PTA_SSE4A
2524 | PTA_CX16 | PTA_ABM)},
2525 {"barcelona", PROCESSOR_AMDFAM10, (PTA_64BIT
2526 | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A
2527 | PTA_SSE | PTA_SSE2 | PTA_SSE3
2528 | PTA_SSE4A
2529 | PTA_CX16 | PTA_ABM)},
2530 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
2531 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
2534 int const pta_size = ARRAY_SIZE (processor_alias_table);
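/* As an example of how this table is consumed below: -march=core2
   matches the "core2" row, so ix86_arch becomes PROCESSOR_CORE2 and,
   unless the user explicitly said otherwise on the command line, MMX,
   SSE, SSE2, SSE3, SSSE3 and CX16 are switched on in ix86_isa_flags.  */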
2536 /* Set up prefix/suffix so the error messages refer to either the command
2537 line argument, or the attribute(option). */
2538 if (main_args_p)
2540 prefix = "-m";
2541 suffix = "";
2542 sw = "switch";
2544 else
2546 prefix = "option(\"";
2547 suffix = "\")";
2548 sw = "attribute";
2551 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2552 SUBTARGET_OVERRIDE_OPTIONS;
2553 #endif
2555 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2556 SUBSUBTARGET_OVERRIDE_OPTIONS;
2557 #endif
2559 /* -fPIC is the default for x86_64. */
2560 if (TARGET_MACHO && TARGET_64BIT)
2561 flag_pic = 2;
2563 /* Set the default values for switches whose default depends on TARGET_64BIT
2564 in case they weren't overwritten by command line options. */
2565 if (TARGET_64BIT)
2567 /* Mach-O doesn't support omitting the frame pointer for now. */
2568 if (flag_omit_frame_pointer == 2)
2569 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2570 if (flag_asynchronous_unwind_tables == 2)
2571 flag_asynchronous_unwind_tables = 1;
2572 if (flag_pcc_struct_return == 2)
2573 flag_pcc_struct_return = 0;
2575 else
2577 if (flag_omit_frame_pointer == 2)
2578 flag_omit_frame_pointer = 0;
2579 if (flag_asynchronous_unwind_tables == 2)
2580 flag_asynchronous_unwind_tables = 0;
2581 if (flag_pcc_struct_return == 2)
2582 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2585 /* Need to check -mtune=generic first. */
2586 if (ix86_tune_string)
2588 if (!strcmp (ix86_tune_string, "generic")
2589 || !strcmp (ix86_tune_string, "i686")
2590 /* As special support for cross compilers we read -mtune=native
2591 as -mtune=generic. With native compilers we won't see the
2592 -mtune=native, as it was changed by the driver. */
2593 || !strcmp (ix86_tune_string, "native"))
2595 if (TARGET_64BIT)
2596 ix86_tune_string = "generic64";
2597 else
2598 ix86_tune_string = "generic32";
2600 /* If this call is for setting the option attribute, allow the
2601 generic32/generic64 that was previously set. */
2602 else if (!main_args_p
2603 && (!strcmp (ix86_tune_string, "generic32")
2604 || !strcmp (ix86_tune_string, "generic64")))
2606 else if (!strncmp (ix86_tune_string, "generic", 7))
2607 error ("bad value (%s) for %stune=%s %s",
2608 ix86_tune_string, prefix, suffix, sw);
2610 else
2612 if (ix86_arch_string)
2613 ix86_tune_string = ix86_arch_string;
2614 if (!ix86_tune_string)
2616 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2617 ix86_tune_defaulted = 1;
2620 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2621 need to use a sensible tune option. */
2622 if (!strcmp (ix86_tune_string, "generic")
2623 || !strcmp (ix86_tune_string, "x86-64")
2624 || !strcmp (ix86_tune_string, "i686"))
2626 if (TARGET_64BIT)
2627 ix86_tune_string = "generic64";
2628 else
2629 ix86_tune_string = "generic32";
2632 if (ix86_stringop_string)
2634 if (!strcmp (ix86_stringop_string, "rep_byte"))
2635 stringop_alg = rep_prefix_1_byte;
2636 else if (!strcmp (ix86_stringop_string, "libcall"))
2637 stringop_alg = libcall;
2638 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2639 stringop_alg = rep_prefix_4_byte;
2640 else if (!strcmp (ix86_stringop_string, "rep_8byte"))
2641 stringop_alg = rep_prefix_8_byte;
2642 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2643 stringop_alg = loop_1_byte;
2644 else if (!strcmp (ix86_stringop_string, "loop"))
2645 stringop_alg = loop;
2646 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2647 stringop_alg = unrolled_loop;
2648 else
2649 error ("bad value (%s) for %sstringop-strategy=%s %s",
2650 ix86_stringop_string, prefix, suffix, sw);
2652 if (!strcmp (ix86_tune_string, "x86-64"))
2653 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2654 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2655 prefix, suffix, prefix, suffix, prefix, suffix);
2657 if (!ix86_arch_string)
2658 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2659 else
2660 ix86_arch_specified = 1;
2662 if (!strcmp (ix86_arch_string, "generic"))
2663 error ("generic CPU can be used only for %stune=%s %s",
2664 prefix, suffix, sw);
2665 if (!strncmp (ix86_arch_string, "generic", 7))
2666 error ("bad value (%s) for %sarch=%s %s",
2667 ix86_arch_string, prefix, suffix, sw);
2669 if (ix86_cmodel_string != 0)
2671 if (!strcmp (ix86_cmodel_string, "small"))
2672 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2673 else if (!strcmp (ix86_cmodel_string, "medium"))
2674 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2675 else if (!strcmp (ix86_cmodel_string, "large"))
2676 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2677 else if (flag_pic)
2678 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2679 else if (!strcmp (ix86_cmodel_string, "32"))
2680 ix86_cmodel = CM_32;
2681 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2682 ix86_cmodel = CM_KERNEL;
2683 else
2684 error ("bad value (%s) for %scmodel=%s %s",
2685 ix86_cmodel_string, prefix, suffix, sw);
2687 else
2689 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2690 use of rip-relative addressing. This eliminates fixups that
2691 would otherwise be needed if this object is to be placed in a
2692 DLL, and is essentially just as efficient as direct addressing. */
2693 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2694 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2695 else if (TARGET_64BIT)
2696 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2697 else
2698 ix86_cmodel = CM_32;
2700 if (ix86_asm_string != 0)
2702 if (! TARGET_MACHO
2703 && !strcmp (ix86_asm_string, "intel"))
2704 ix86_asm_dialect = ASM_INTEL;
2705 else if (!strcmp (ix86_asm_string, "att"))
2706 ix86_asm_dialect = ASM_ATT;
2707 else
2708 error ("bad value (%s) for %sasm=%s %s",
2709 ix86_asm_string, prefix, suffix, sw);
2711 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2712 error ("code model %qs not supported in the %s bit mode",
2713 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2714 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2715 sorry ("%i-bit mode not compiled in",
2716 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2718 for (i = 0; i < pta_size; i++)
2719 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2721 ix86_arch = processor_alias_table[i].processor;
2722 /* Default cpu tuning to the architecture. */
2723 ix86_tune = ix86_arch;
2725 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2726 error ("CPU you selected does not support x86-64 "
2727 "instruction set");
2729 if (processor_alias_table[i].flags & PTA_MMX
2730 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2731 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2732 if (processor_alias_table[i].flags & PTA_3DNOW
2733 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2734 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2735 if (processor_alias_table[i].flags & PTA_3DNOW_A
2736 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2737 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2738 if (processor_alias_table[i].flags & PTA_SSE
2739 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2740 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2741 if (processor_alias_table[i].flags & PTA_SSE2
2742 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2743 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2744 if (processor_alias_table[i].flags & PTA_SSE3
2745 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2746 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2747 if (processor_alias_table[i].flags & PTA_SSSE3
2748 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2749 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2750 if (processor_alias_table[i].flags & PTA_SSE4_1
2751 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2752 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2753 if (processor_alias_table[i].flags & PTA_SSE4_2
2754 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2755 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2756 if (processor_alias_table[i].flags & PTA_SSE4A
2757 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2758 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2759 if (processor_alias_table[i].flags & PTA_SSE5
2760 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE5))
2761 ix86_isa_flags |= OPTION_MASK_ISA_SSE5;
2762 if (processor_alias_table[i].flags & PTA_ABM
2763 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
2764 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
2765 if (processor_alias_table[i].flags & PTA_CX16
2766 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
2767 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
2768 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
2769 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
2770 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
2771 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
2772 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
2773 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
2774 if (processor_alias_table[i].flags & PTA_AES
2775 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
2776 ix86_isa_flags |= OPTION_MASK_ISA_AES;
2777 if (processor_alias_table[i].flags & PTA_PCLMUL
2778 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
2779 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
2780 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2781 x86_prefetch_sse = true;
2783 break;
2786 if (i == pta_size)
2787 error ("bad value (%s) for %sarch=%s %s",
2788 ix86_arch_string, prefix, suffix, sw);
2790 ix86_arch_mask = 1u << ix86_arch;
2791 for (i = 0; i < X86_ARCH_LAST; ++i)
2792 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
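/* For example, with -march=i686 (PROCESSOR_PENTIUMPRO) the mask has only
   the m_PPRO bit set; the X86_ARCH_CMOVE entry ~(m_386 | m_486 | m_PENT
   | m_K6) includes m_PPRO, so ix86_arch_features[X86_ARCH_CMOVE] becomes
   1 and conditional moves are treated as available.  */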
2794 for (i = 0; i < pta_size; i++)
2795 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2797 ix86_tune = processor_alias_table[i].processor;
2798 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2800 if (ix86_tune_defaulted)
2802 ix86_tune_string = "x86-64";
2803 for (i = 0; i < pta_size; i++)
2804 if (! strcmp (ix86_tune_string,
2805 processor_alias_table[i].name))
2806 break;
2807 ix86_tune = processor_alias_table[i].processor;
2809 else
2810 error ("CPU you selected does not support x86-64 "
2811 "instruction set");
2813 /* Intel CPUs have always interpreted SSE prefetch instructions as
2814 NOPs; so, we can enable SSE prefetch instructions even when
2815 -mtune (rather than -march) points us to a processor that has them.
2816 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2817 higher processors. */
2818 if (TARGET_CMOVE
2819 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
2820 x86_prefetch_sse = true;
2821 break;
2823 if (i == pta_size)
2824 error ("bad value (%s) for %stune=%s %s",
2825 ix86_tune_string, prefix, suffix, sw);
2827 ix86_tune_mask = 1u << ix86_tune;
2828 for (i = 0; i < X86_TUNE_LAST; ++i)
2829 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
2831 if (optimize_size)
2832 ix86_cost = &size_cost;
2833 else
2834 ix86_cost = processor_target_table[ix86_tune].cost;
2836 /* Arrange to set up i386_stack_locals for all functions. */
2837 init_machine_status = ix86_init_machine_status;
2839 /* Validate -mregparm= value. */
2840 if (ix86_regparm_string)
2842 if (TARGET_64BIT)
2843 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
2844 i = atoi (ix86_regparm_string);
2845 if (i < 0 || i > REGPARM_MAX)
2846 error ("%sregparm=%d%s is not between 0 and %d",
2847 prefix, i, suffix, REGPARM_MAX);
2848 else
2849 ix86_regparm = i;
2851 if (TARGET_64BIT)
2852 ix86_regparm = REGPARM_MAX;
2854 /* If the user has provided any of the -malign-* options,
2855 warn and use that value only if -falign-* is not set.
2856 Remove this code in GCC 3.2 or later. */
2857 if (ix86_align_loops_string)
2859 warning (0, "%salign-loops%s is obsolete, use -falign-loops",
2860 prefix, suffix);
2861 if (align_loops == 0)
2863 i = atoi (ix86_align_loops_string);
2864 if (i < 0 || i > MAX_CODE_ALIGN)
2865 error ("%salign-loops=%d%s is not between 0 and %d",
2866 prefix, i, suffix, MAX_CODE_ALIGN);
2867 else
2868 align_loops = 1 << i;
2872 if (ix86_align_jumps_string)
2874 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps",
2875 prefix, suffix);
2876 if (align_jumps == 0)
2878 i = atoi (ix86_align_jumps_string);
2879 if (i < 0 || i > MAX_CODE_ALIGN)
2880 error ("%salign-jumps=%d%s is not between 0 and %d",
2881 prefix, i, suffix, MAX_CODE_ALIGN);
2882 else
2883 align_jumps = 1 << i;
2887 if (ix86_align_funcs_string)
2889 warning (0, "%salign-functions%s is obsolete, use -falign-functions",
2890 prefix, suffix);
2891 if (align_functions == 0)
2893 i = atoi (ix86_align_funcs_string);
2894 if (i < 0 || i > MAX_CODE_ALIGN)
2895 error ("%salign-functions=%d%s is not between 0 and %d",
2896 prefix, i, suffix, MAX_CODE_ALIGN);
2897 else
2898 align_functions = 1 << i;
2902 /* Default align_* from the processor table. */
2903 if (align_loops == 0)
2905 align_loops = processor_target_table[ix86_tune].align_loop;
2906 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
2908 if (align_jumps == 0)
2910 align_jumps = processor_target_table[ix86_tune].align_jump;
2911 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
2913 if (align_functions == 0)
2915 align_functions = processor_target_table[ix86_tune].align_func;
2918 /* Validate -mbranch-cost= value, or provide default. */
2919 ix86_branch_cost = ix86_cost->branch_cost;
2920 if (ix86_branch_cost_string)
2922 i = atoi (ix86_branch_cost_string);
2923 if (i < 0 || i > 5)
2924 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
2925 else
2926 ix86_branch_cost = i;
2928 if (ix86_section_threshold_string)
2930 i = atoi (ix86_section_threshold_string);
2931 if (i < 0)
2932 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
2933 else
2934 ix86_section_threshold = i;
2937 if (ix86_tls_dialect_string)
2939 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
2940 ix86_tls_dialect = TLS_DIALECT_GNU;
2941 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
2942 ix86_tls_dialect = TLS_DIALECT_GNU2;
2943 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
2944 ix86_tls_dialect = TLS_DIALECT_SUN;
2945 else
2946 error ("bad value (%s) for %stls-dialect=%s %s",
2947 ix86_tls_dialect_string, prefix, suffix, sw);
2950 if (ix87_precision_string)
2952 i = atoi (ix87_precision_string);
2953 if (i != 32 && i != 64 && i != 80)
2954 error ("pc%d is not a valid precision setting (32, 64 or 80)", i);
2957 if (TARGET_64BIT)
2959 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
2961 /* Enable by default the SSE and MMX builtins. Do allow the user to
2962 explicitly disable any of these. In particular, disabling SSE and
2963 MMX for kernel code is extremely useful. */
2964 if (!ix86_arch_specified)
2965 ix86_isa_flags
2966 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
2967 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
2969 if (TARGET_RTD)
2970 warning (0, "%srtd%s is ignored in 64-bit mode", prefix, suffix);
2972 else
2974 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
2976 if (!ix86_arch_specified)
2977 ix86_isa_flags
2978 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
2980 /* The i386 ABI does not specify a red zone. It still makes sense to use
2981 one when the programmer takes care to keep the stack from being destroyed. */
2982 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
2983 target_flags |= MASK_NO_RED_ZONE;
2986 /* Keep nonleaf frame pointers. */
2987 if (flag_omit_frame_pointer)
2988 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
2989 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
2990 flag_omit_frame_pointer = 1;
2992 /* If we're doing fast math, we don't care about comparison order
2993 wrt NaNs. This lets us use a shorter comparison sequence. */
2994 if (flag_finite_math_only)
2995 target_flags &= ~MASK_IEEE_FP;
2997 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
2998 since the insns won't need emulation. */
2999 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3000 target_flags &= ~MASK_NO_FANCY_MATH_387;
3002 /* Likewise, if the target doesn't have a 387, or we've specified
3003 software floating point, don't use 387 inline intrinsics. */
3004 if (!TARGET_80387)
3005 target_flags |= MASK_NO_FANCY_MATH_387;
3007 /* Turn on MMX builtins for -msse. */
3008 if (TARGET_SSE)
3010 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3011 x86_prefetch_sse = true;
3014 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3015 if (TARGET_SSE4_2 || TARGET_ABM)
3016 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3018 /* Validate -mpreferred-stack-boundary= value, or provide default.
3019 The default of 128 bits is for Pentium III's SSE __m128. We can't
3020 change it because of optimize_size. Otherwise, we can't mix object
3021 files compiled with -Os and -On. */
3022 ix86_preferred_stack_boundary = 128;
3023 if (ix86_preferred_stack_boundary_string)
3025 i = atoi (ix86_preferred_stack_boundary_string);
3026 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3027 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3028 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3029 else
3030 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
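/* Worked example: -mpreferred-stack-boundary=4 yields
   (1 << 4) * BITS_PER_UNIT == 16 * 8 == 128 bits, i.e. the stack stays
   16-byte aligned.  */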
3033 /* Accept -msseregparm only if at least SSE support is enabled. */
3034 if (TARGET_SSEREGPARM
3035 && ! TARGET_SSE)
3036 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3038 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3039 if (ix86_fpmath_string != 0)
3041 if (! strcmp (ix86_fpmath_string, "387"))
3042 ix86_fpmath = FPMATH_387;
3043 else if (! strcmp (ix86_fpmath_string, "sse"))
3045 if (!TARGET_SSE)
3047 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3048 ix86_fpmath = FPMATH_387;
3050 else
3051 ix86_fpmath = FPMATH_SSE;
3053 else if (! strcmp (ix86_fpmath_string, "387,sse")
3054 || ! strcmp (ix86_fpmath_string, "387+sse")
3055 || ! strcmp (ix86_fpmath_string, "sse,387")
3056 || ! strcmp (ix86_fpmath_string, "sse+387")
3057 || ! strcmp (ix86_fpmath_string, "both"))
3059 if (!TARGET_SSE)
3061 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3062 ix86_fpmath = FPMATH_387;
3064 else if (!TARGET_80387)
3066 warning (0, "387 instruction set disabled, using SSE arithmetics");
3067 ix86_fpmath = FPMATH_SSE;
3069 else
3070 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3072 else
3073 error ("bad value (%s) for %sfpmath=%s %s",
3074 ix86_fpmath_string, prefix, suffix, sw);
3077 /* If the i387 is disabled, then do not return values in it. */
3078 if (!TARGET_80387)
3079 target_flags &= ~MASK_FLOAT_RETURNS;
3081 /* Use external vectorized library in vectorizing intrinsics. */
3082 if (ix86_veclibabi_string)
3084 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3085 ix86_veclib_handler = ix86_veclibabi_svml;
3086 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3087 ix86_veclib_handler = ix86_veclibabi_acml;
3088 else
3089 error ("unknown vectorization library ABI type (%s) for "
3090 "%sveclibabi=%s %s", ix86_veclibabi_string,
3091 prefix, suffix, sw);
3094 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3095 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3096 && !optimize_size)
3097 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3099 /* ??? Unwind info is not correct around the CFG unless either a frame
3100 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3101 unwind info generation to be aware of the CFG and propagating states
3102 around edges. */
3103 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3104 || flag_exceptions || flag_non_call_exceptions)
3105 && flag_omit_frame_pointer
3106 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3108 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3109 warning (0, "unwind tables currently require either a frame pointer "
3110 "or %saccumulate-outgoing-args%s for correctness",
3111 prefix, suffix);
3112 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3115 /* If stack probes are required, the space used for large function
3116 arguments on the stack must also be probed, so enable
3117 -maccumulate-outgoing-args so this happens in the prologue. */
3118 if (TARGET_STACK_PROBE
3119 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3121 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3122 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3123 "for correctness", prefix, suffix);
3124 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3127 /* For sane SSE instruction set generation we need the fcomi instruction.
3128 It is safe to enable all CMOVE instructions. */
3129 if (TARGET_SSE)
3130 TARGET_CMOVE = 1;
3132 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3134 char *p;
3135 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3136 p = strchr (internal_label_prefix, 'X');
3137 internal_label_prefix_len = p - internal_label_prefix;
3138 *p = '\0';
3141 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3142 set_param_value ("simultaneous-prefetches",
3143 ix86_cost->simultaneous_prefetches);
3144 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3145 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3146 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3147 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3148 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3149 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3151 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3152 can be optimized to ap = __builtin_next_arg (0). */
3153 if (!TARGET_64BIT)
3154 targetm.expand_builtin_va_start = NULL;
3156 if (TARGET_64BIT)
3158 ix86_gen_leave = gen_leave_rex64;
3159 ix86_gen_pop1 = gen_popdi1;
3160 ix86_gen_add3 = gen_adddi3;
3161 ix86_gen_sub3 = gen_subdi3;
3162 ix86_gen_sub3_carry = gen_subdi3_carry_rex64;
3163 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3164 ix86_gen_monitor = gen_sse3_monitor64;
3166 else
3168 ix86_gen_leave = gen_leave;
3169 ix86_gen_pop1 = gen_popsi1;
3170 ix86_gen_add3 = gen_addsi3;
3171 ix86_gen_sub3 = gen_subsi3;
3172 ix86_gen_sub3_carry = gen_subsi3_carry;
3173 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3174 ix86_gen_monitor = gen_sse3_monitor;
3177 #ifdef USE_IX86_CLD
3178 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3179 if (!TARGET_64BIT)
3180 target_flags |= MASK_CLD & ~target_flags_explicit;
3181 #endif
3183 /* Save the initial options in case the user uses function-specific options. */
3184 if (main_args_p)
3185 target_option_default_node = target_option_current_node
3186 = build_target_option_node ();
3189 /* Save the current options */
3191 static void
3192 ix86_function_specific_save (struct cl_target_option *ptr)
3194 gcc_assert (IN_RANGE (ix86_arch, 0, 255));
3195 gcc_assert (IN_RANGE (ix86_tune, 0, 255));
3196 gcc_assert (IN_RANGE (ix86_fpmath, 0, 255));
3197 gcc_assert (IN_RANGE (ix86_branch_cost, 0, 255));
3199 ptr->arch = ix86_arch;
3200 ptr->tune = ix86_tune;
3201 ptr->fpmath = ix86_fpmath;
3202 ptr->branch_cost = ix86_branch_cost;
3203 ptr->tune_defaulted = ix86_tune_defaulted;
3204 ptr->arch_specified = ix86_arch_specified;
3205 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3206 ptr->target_flags_explicit = target_flags_explicit;
3209 /* Restore the current options */
3211 static void
3212 ix86_function_specific_restore (struct cl_target_option *ptr)
3214 enum processor_type old_tune = ix86_tune;
3215 enum processor_type old_arch = ix86_arch;
3216 unsigned int ix86_arch_mask, ix86_tune_mask;
3217 int i;
3219 ix86_arch = ptr->arch;
3220 ix86_tune = ptr->tune;
3221 ix86_fpmath = ptr->fpmath;
3222 ix86_branch_cost = ptr->branch_cost;
3223 ix86_tune_defaulted = ptr->tune_defaulted;
3224 ix86_arch_specified = ptr->arch_specified;
3225 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3226 target_flags_explicit = ptr->target_flags_explicit;
3228 /* Recreate the arch feature tests if the arch changed */
3229 if (old_arch != ix86_arch)
3231 ix86_arch_mask = 1u << ix86_arch;
3232 for (i = 0; i < X86_ARCH_LAST; ++i)
3233 ix86_arch_features[i]
3234 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3237 /* Recreate the tune optimization tests */
3238 if (old_tune != ix86_tune)
3240 ix86_tune_mask = 1u << ix86_tune;
3241 for (i = 0; i < X86_TUNE_LAST; ++i)
3242 ix86_tune_features[i]
3243 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3247 /* Print the current options */
3249 static void
3250 ix86_function_specific_print (FILE *file, int indent,
3251 struct cl_target_option *ptr)
3253 char *target_string
3254 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3255 NULL, NULL, NULL, false);
3257 fprintf (file, "%*sarch = %d (%s)\n",
3258 indent, "",
3259 ptr->arch,
3260 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3261 ? cpu_names[ptr->arch]
3262 : "<unknown>"));
3264 fprintf (file, "%*stune = %d (%s)\n",
3265 indent, "",
3266 ptr->tune,
3267 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3268 ? cpu_names[ptr->tune]
3269 : "<unknown>"));
3271 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3272 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3273 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3274 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3276 if (target_string)
3278 fprintf (file, "%*s%s\n", indent, "", target_string);
3279 free (target_string);
3284 /* Inner function to process the attribute((option(...))): take an argument
3285 and set the current options from it. If we have a list, recursively go
3286 over the list. */
3288 static bool
3289 ix86_valid_option_attribute_inner_p (tree args, char *p_strings[])
3291 char *next_optstr;
3292 bool ret = true;
3294 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3295 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3296 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3297 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3299 enum ix86_opt_type
3301 ix86_opt_unknown,
3302 ix86_opt_yes,
3303 ix86_opt_no,
3304 ix86_opt_str,
3305 ix86_opt_isa
3308 static const struct
3310 const char *string;
3311 size_t len;
3312 enum ix86_opt_type type;
3313 int opt;
3314 int mask;
3315 } attrs[] = {
3316 /* isa options */
3317 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3318 IX86_ATTR_ISA ("abm", OPT_mabm),
3319 IX86_ATTR_ISA ("aes", OPT_maes),
3320 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3321 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3322 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3323 IX86_ATTR_ISA ("sse", OPT_msse),
3324 IX86_ATTR_ISA ("sse2", OPT_msse2),
3325 IX86_ATTR_ISA ("sse3", OPT_msse3),
3326 IX86_ATTR_ISA ("sse4", OPT_msse4),
3327 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3328 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3329 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3330 IX86_ATTR_ISA ("sse5", OPT_msse5),
3331 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3333 /* string options */
3334 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3335 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3336 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3338 /* flag options */
3339 IX86_ATTR_YES ("cld",
3340 OPT_mcld,
3341 MASK_CLD),
3343 IX86_ATTR_NO ("fancy-math-387",
3344 OPT_mfancy_math_387,
3345 MASK_NO_FANCY_MATH_387),
3347 IX86_ATTR_NO ("fused-madd",
3348 OPT_mfused_madd,
3349 MASK_NO_FUSED_MADD),
3351 IX86_ATTR_YES ("ieee-fp",
3352 OPT_mieee_fp,
3353 MASK_IEEE_FP),
3355 IX86_ATTR_YES ("inline-all-stringops",
3356 OPT_minline_all_stringops,
3357 MASK_INLINE_ALL_STRINGOPS),
3359 IX86_ATTR_YES ("inline-stringops-dynamically",
3360 OPT_minline_stringops_dynamically,
3361 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3363 IX86_ATTR_NO ("align-stringops",
3364 OPT_mno_align_stringops,
3365 MASK_NO_ALIGN_STRINGOPS),
3367 IX86_ATTR_YES ("recip",
3368 OPT_mrecip,
3369 MASK_RECIP),
3373 /* If this is a list, recurse to get the options. */
3374 if (TREE_CODE (args) == TREE_LIST)
3376 bool ret = true;
3378 for (; args; args = TREE_CHAIN (args))
3379 if (TREE_VALUE (args)
3380 && !ix86_valid_option_attribute_inner_p (TREE_VALUE (args), p_strings))
3381 ret = false;
3383 return ret;
3386 else if (TREE_CODE (args) != STRING_CST)
3387 gcc_unreachable ();
3389 /* Handle multiple arguments separated by commas. */
3390 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3392 while (next_optstr && *next_optstr != '\0')
3394 char *p = next_optstr;
3395 char *orig_p = p;
3396 char *comma = strchr (next_optstr, ',');
3397 const char *opt_string;
3398 size_t len, opt_len;
3399 int opt;
3400 bool opt_set_p;
3401 char ch;
3402 unsigned i;
3403 enum ix86_opt_type type = ix86_opt_unknown;
3404 int mask = 0;
3406 if (comma)
3408 *comma = '\0';
3409 len = comma - next_optstr;
3410 next_optstr = comma + 1;
3412 else
3414 len = strlen (p);
3415 next_optstr = NULL;
3418 /* Recognize no-xxx. */
3419 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3421 opt_set_p = false;
3422 p += 3;
3423 len -= 3;
3425 else
3426 opt_set_p = true;
3428 /* Find the option. */
3429 ch = *p;
3430 opt = N_OPTS;
3431 for (i = 0; i < sizeof (attrs) / sizeof (attrs[0]); i++)
3433 type = attrs[i].type;
3434 opt_len = attrs[i].len;
3435 if (ch == attrs[i].string[0]
3436 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3437 && memcmp (p, attrs[i].string, opt_len) == 0)
3439 opt = attrs[i].opt;
3440 mask = attrs[i].mask;
3441 opt_string = attrs[i].string;
3442 break;
3446 /* Process the option. */
3447 if (opt == N_OPTS)
3449 error ("attribute(option(\"%s\")) is unknown", orig_p);
3450 ret = false;
3453 else if (type == ix86_opt_isa)
3454 ix86_handle_option (opt, p, opt_set_p);
3456 else if (type == ix86_opt_yes || type == ix86_opt_no)
3458 if (type == ix86_opt_no)
3459 opt_set_p = !opt_set_p;
3461 if (opt_set_p)
3462 target_flags |= mask;
3463 else
3464 target_flags &= ~mask;
3467 else if (type == ix86_opt_str)
3469 if (p_strings[opt])
3471 error ("option(\"%s\") was already specified", opt_string);
3472 ret = false;
3474 else
3475 p_strings[opt] = xstrdup (p + opt_len);
3478 else
3479 gcc_unreachable ();
3482 return ret;
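/* Editor's note (illustrative usage, assuming this GCC's option attribute):
   the strings parsed above mirror the -m command-line options, with a
   "no-" prefix negating a flag and "=" options carrying a value, e.g.:

       int hot_loop (int *p, int n)
         __attribute__((option ("sse4.2,arch=core2")));
       void plain (void)
         __attribute__((option ("no-fused-madd,fpmath=sse")));

   An unrecognized name reaches the error () call above and the attribute
   is rejected.  */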
3485 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3487 tree
3488 ix86_valid_option_attribute_tree (tree args)
3490 const char *orig_arch_string = ix86_arch_string;
3491 const char *orig_tune_string = ix86_tune_string;
3492 const char *orig_fpmath_string = ix86_fpmath_string;
3493 int orig_tune_defaulted = ix86_tune_defaulted;
3494 int orig_arch_specified = ix86_arch_specified;
3495 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3496 tree t = NULL_TREE;
3497 int i;
3498 struct cl_target_option *def
3499 = TREE_TARGET_OPTION (target_option_default_node);
3501 /* Process each of the options on the chain. */
3502 if (! ix86_valid_option_attribute_inner_p (args, option_strings))
3503 return NULL_TREE;
3505 /* If the changed options are different from the default, rerun override_options,
3506 and then save the options away. The string options are attribute options,
3507 and will be undone when we copy the save structure. */
3508 if (ix86_isa_flags != def->ix86_isa_flags
3509 || target_flags != def->target_flags
3510 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3511 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3512 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3514 /* If we are using the default tune= or arch=, undo the string assigned,
3515 and use the default. */
3516 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3517 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3518 else if (!orig_arch_specified)
3519 ix86_arch_string = NULL;
3521 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3522 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3523 else if (orig_tune_defaulted)
3524 ix86_tune_string = NULL;
3526 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3527 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3528 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3529 else if (!TARGET_64BIT && TARGET_SSE)
3530 ix86_fpmath_string = "sse,387";
3532 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3533 override_options (false);
3535 /* Save the current options unless we are validating options for
3536 #pragma. */
3537 t = build_target_option_node ();
3539 ix86_arch_string = orig_arch_string;
3540 ix86_tune_string = orig_tune_string;
3541 ix86_fpmath_string = orig_fpmath_string;
3543 /* Free up memory allocated to hold the strings */
3544 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3545 if (option_strings[i])
3546 free (option_strings[i]);
3549 return t;
3552 /* Hook to validate attribute((option("string"))). */
3554 static bool
3555 ix86_valid_option_attribute_p (tree fndecl,
3556 tree ARG_UNUSED (name),
3557 tree args,
3558 int ARG_UNUSED (flags))
3560 struct cl_target_option cur_opts;
3561 bool ret = true;
3562 tree new_opts;
3564 cl_target_option_save (&cur_opts);
3565 new_opts = ix86_valid_option_attribute_tree (args);
3566 if (!new_opts)
3567 ret = false;
3569 else if (fndecl)
3570 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_opts;
3572 cl_target_option_restore (&cur_opts);
3573 return ret;
3577 /* Hook to determine if one function can safely inline another. */
3579 static bool
3580 ix86_can_inline_p (tree caller, tree callee)
3582 bool ret = false;
3583 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3584 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3586 /* If callee has no option attributes, then it is ok to inline. */
3587 if (!callee_tree)
3588 ret = true;
3590 /* If caller has no option attributes, but callee does then it is not ok to
3591 inline. */
3592 else if (!caller_tree)
3593 ret = false;
3595 else
3597 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3598 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3600 /* Callee's isa options should be a subset of the caller's, i.e. an SSE5
3601 function can inline an SSE2 function, but an SSE2 function can't inline
3602 an SSE5 function. */
3603 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3604 != callee_opts->ix86_isa_flags)
3605 ret = false;
3607 /* See if we have the same non-isa options. */
3608 else if (caller_opts->target_flags != callee_opts->target_flags)
3609 ret = false;
3611 /* See if arch, tune, etc. are the same. */
3612 else if (caller_opts->arch != callee_opts->arch)
3613 ret = false;
3615 else if (caller_opts->tune != callee_opts->tune)
3616 ret = false;
3618 else if (caller_opts->fpmath != callee_opts->fpmath)
3619 ret = false;
3621 else if (caller_opts->branch_cost != callee_opts->branch_cost)
3622 ret = false;
3624 else
3625 ret = true;
3628 return ret;
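/* Editor's note (illustrative sketch): given

       static int helper (int) __attribute__((option ("sse2")));
       int caller (int) __attribute__((option ("sse4.2")));

   the caller's ISA flags are a superset of the callee's, so the subset
   test above permits inlining helper into caller; with the attributes
   swapped the call must stay out of line.  */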
3632 /* Remember the last target of ix86_set_current_function. */
3633 static GTY(()) tree ix86_previous_fndecl;
3635 /* Establish appropriate back-end context for processing the function
3636 FNDECL. The argument might be NULL to indicate processing at top
3637 level, outside of any function scope. */
3638 static void
3639 ix86_set_current_function (tree fndecl)
3641 /* Only change the context if the function changes. This hook is called
3642 several times in the course of compiling a function, and we don't want to
3643 slow things down too much or call target_reinit when it isn't safe. */
3644 if (fndecl && fndecl != ix86_previous_fndecl)
3646 tree old_tree = (ix86_previous_fndecl
3647 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
3648 : NULL_TREE);
3650 tree new_tree = (fndecl
3651 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
3652 : NULL_TREE);
3654 ix86_previous_fndecl = fndecl;
3655 if (old_tree == new_tree)
3658 else if (new_tree)
3660 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
3661 target_reinit ();
3664 else if (old_tree)
3666 struct cl_target_option *def
3667 = TREE_TARGET_OPTION (target_option_current_node);
3669 cl_target_option_restore (def);
3670 target_reinit ();
3676 /* Return true if this goes in large data/bss. */
3678 static bool
3679 ix86_in_large_data_p (tree exp)
3681 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
3682 return false;
3684 /* Functions are never large data. */
3685 if (TREE_CODE (exp) == FUNCTION_DECL)
3686 return false;
3688 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
3690 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
3691 if (strcmp (section, ".ldata") == 0
3692 || strcmp (section, ".lbss") == 0)
3693 return true;
3694 return false;
3696 else
3698 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
3700 /* If this is an incomplete type with size 0, then we can't put it
3701 in data because it might be too big when completed. */
3702 if (!size || size > ix86_section_threshold)
3703 return true;
3706 return false;
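/* Editor's note (illustrative sketch): with -mcmodel=medium and the
   default -mlarge-data-threshold, a definition such as

       static double table[1 << 20];

   exceeds ix86_section_threshold and is treated as large data, ending up
   in .ldata/.lbss, while small objects stay in the default sections.  */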
3709 /* Switch to the appropriate section for output of DECL.
3710 DECL is either a `VAR_DECL' node or a constant of some sort.
3711 RELOC indicates whether forming the initial value of DECL requires
3712 link-time relocations. */
3714 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
3715 ATTRIBUTE_UNUSED;
3717 static section *
3718 x86_64_elf_select_section (tree decl, int reloc,
3719 unsigned HOST_WIDE_INT align)
3721 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3722 && ix86_in_large_data_p (decl))
3724 const char *sname = NULL;
3725 unsigned int flags = SECTION_WRITE;
3726 switch (categorize_decl_for_section (decl, reloc))
3728 case SECCAT_DATA:
3729 sname = ".ldata";
3730 break;
3731 case SECCAT_DATA_REL:
3732 sname = ".ldata.rel";
3733 break;
3734 case SECCAT_DATA_REL_LOCAL:
3735 sname = ".ldata.rel.local";
3736 break;
3737 case SECCAT_DATA_REL_RO:
3738 sname = ".ldata.rel.ro";
3739 break;
3740 case SECCAT_DATA_REL_RO_LOCAL:
3741 sname = ".ldata.rel.ro.local";
3742 break;
3743 case SECCAT_BSS:
3744 sname = ".lbss";
3745 flags |= SECTION_BSS;
3746 break;
3747 case SECCAT_RODATA:
3748 case SECCAT_RODATA_MERGE_STR:
3749 case SECCAT_RODATA_MERGE_STR_INIT:
3750 case SECCAT_RODATA_MERGE_CONST:
3751 sname = ".lrodata";
3752 flags = 0;
3753 break;
3754 case SECCAT_SRODATA:
3755 case SECCAT_SDATA:
3756 case SECCAT_SBSS:
3757 gcc_unreachable ();
3758 case SECCAT_TEXT:
3759 case SECCAT_TDATA:
3760 case SECCAT_TBSS:
3761 /* We don't split these for the medium model. Place them into
3762 default sections and hope for the best. */
3763 break;
3764 case SECCAT_EMUTLS_VAR:
3765 case SECCAT_EMUTLS_TMPL:
3766 gcc_unreachable ();
3768 if (sname)
3770 /* We might get called with string constants, but get_named_section
3771 doesn't like them as they are not DECLs. Also, we need to set
3772 flags in that case. */
3773 if (!DECL_P (decl))
3774 return get_section (sname, flags, NULL);
3775 return get_named_section (decl, sname, reloc);
3778 return default_elf_select_section (decl, reloc, align);
3781 /* Build up a unique section name, expressed as a
3782 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
3783 RELOC indicates whether the initial value of EXP requires
3784 link-time relocations. */
3786 static void ATTRIBUTE_UNUSED
3787 x86_64_elf_unique_section (tree decl, int reloc)
3789 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3790 && ix86_in_large_data_p (decl))
3792 const char *prefix = NULL;
3793 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
3794 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
3796 switch (categorize_decl_for_section (decl, reloc))
3798 case SECCAT_DATA:
3799 case SECCAT_DATA_REL:
3800 case SECCAT_DATA_REL_LOCAL:
3801 case SECCAT_DATA_REL_RO:
3802 case SECCAT_DATA_REL_RO_LOCAL:
3803 prefix = one_only ? ".ld" : ".ldata";
3804 break;
3805 case SECCAT_BSS:
3806 prefix = one_only ? ".lb" : ".lbss";
3807 break;
3808 case SECCAT_RODATA:
3809 case SECCAT_RODATA_MERGE_STR:
3810 case SECCAT_RODATA_MERGE_STR_INIT:
3811 case SECCAT_RODATA_MERGE_CONST:
3812 prefix = one_only ? ".lr" : ".lrodata";
3813 break;
3814 case SECCAT_SRODATA:
3815 case SECCAT_SDATA:
3816 case SECCAT_SBSS:
3817 gcc_unreachable ();
3818 case SECCAT_TEXT:
3819 case SECCAT_TDATA:
3820 case SECCAT_TBSS:
3821 /* We don't split these for the medium model. Place them into
3822 default sections and hope for the best. */
3823 break;
3824 case SECCAT_EMUTLS_VAR:
3825 prefix = targetm.emutls.var_section;
3826 break;
3827 case SECCAT_EMUTLS_TMPL:
3828 prefix = targetm.emutls.tmpl_section;
3829 break;
3831 if (prefix)
3833 const char *name, *linkonce;
3834 char *string;
3836 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3837 name = targetm.strip_name_encoding (name);
3839 /* If we're using one_only, then there needs to be a .gnu.linkonce
3840 prefix to the section name. */
3841 linkonce = one_only ? ".gnu.linkonce" : "";
3843 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
3845 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
3846 return;
3849 default_unique_section (decl, reloc);
3852 #ifdef COMMON_ASM_OP
3853 /* This says how to output assembler code to declare an
3854 uninitialized external linkage data object.
3856 For the medium model on x86-64 we need to use the .largecomm
3857 directive for large objects. */
3858 void
3859 x86_elf_aligned_common (FILE *file,
3860 const char *name, unsigned HOST_WIDE_INT size,
3861 int align)
3863 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3864 && size > (unsigned int)ix86_section_threshold)
3865 fprintf (file, ".largecomm\t");
3866 else
3867 fprintf (file, "%s", COMMON_ASM_OP);
3868 assemble_name (file, name);
3869 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
3870 size, align / BITS_PER_UNIT);
3872 #endif
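/* Editor's note (illustrative output sketch): for a large common symbol
   under -mcmodel=medium the function above emits something like

       .largecomm  big_buf,1048576,32

   while a small object falls back to COMMON_ASM_OP, typically

       .comm       small_buf,64,8  */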
3874 /* Utility function for targets to use in implementing
3875 ASM_OUTPUT_ALIGNED_BSS. */
3877 void
3878 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
3879 const char *name, unsigned HOST_WIDE_INT size,
3880 int align)
3882 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
3883 && size > (unsigned int)ix86_section_threshold)
3884 switch_to_section (get_named_section (decl, ".lbss", 0));
3885 else
3886 switch_to_section (bss_section);
3887 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
3888 #ifdef ASM_DECLARE_OBJECT_NAME
3889 last_assemble_variable_decl = decl;
3890 ASM_DECLARE_OBJECT_NAME (file, name, decl);
3891 #else
3892 /* The standard thing is just to output a label for the object. */
3893 ASM_OUTPUT_LABEL (file, name);
3894 #endif /* ASM_DECLARE_OBJECT_NAME */
3895 ASM_OUTPUT_SKIP (file, size ? size : 1);
3898 void
3899 optimization_options (int level, int size ATTRIBUTE_UNUSED)
3901 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
3902 make the problem with not enough registers even worse. */
3903 #ifdef INSN_SCHEDULING
3904 if (level > 1)
3905 flag_schedule_insns = 0;
3906 #endif
3908 /* When the scheduling description is not available, disable the scheduler
3909 pass so it won't slow down compilation and make x87 code slower. */
3910 if (!TARGET_SCHEDULE)
3911 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3913 if (TARGET_MACHO)
3914 /* The Darwin libraries never set errno, so we might as well
3915 avoid calling them when that's the only reason we would. */
3916 flag_errno_math = 0;
3918 /* The default values of these switches depend on TARGET_64BIT,
3919 which is not known at this moment. Mark these values with 2 and
3920 let the user override them. In case there is no command line option
3921 specifying them, we will set the defaults in override_options. */
3922 if (optimize >= 1)
3923 flag_omit_frame_pointer = 2;
3924 flag_pcc_struct_return = 2;
3925 flag_asynchronous_unwind_tables = 2;
3926 flag_vect_cost_model = 1;
3927 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
3928 SUBTARGET_OPTIMIZATION_OPTIONS;
3929 #endif
3932 /* Decide whether we can make a sibling call to a function. DECL is the
3933 declaration of the function being targeted by the call and EXP is the
3934 CALL_EXPR representing the call. */
3936 static bool
3937 ix86_function_ok_for_sibcall (tree decl, tree exp)
3939 tree func;
3940 rtx a, b;
3942 /* If we are generating position-independent code, we cannot sibcall
3943 optimize any indirect call, or a direct call to a global function,
3944 as the PLT requires %ebx be live. */
3945 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
3946 return false;
3948 if (decl)
3949 func = decl;
3950 else
3952 func = TREE_TYPE (CALL_EXPR_FN (exp));
3953 if (POINTER_TYPE_P (func))
3954 func = TREE_TYPE (func);
3957 /* Check that the return value locations are the same. For example,
3958 if we are returning floats on the 80387 register stack, we cannot
3959 make a sibcall from a function that doesn't return a float to a
3960 function that does or, conversely, from a function that does return
3961 a float to a function that doesn't; the necessary stack adjustment
3962 would not be executed. This is also the place we notice
3963 differences in the return value ABI. Note that it is ok for one
3964 of the functions to have void return type as long as the return
3965 value of the other is passed in a register. */
3966 a = ix86_function_value (TREE_TYPE (exp), func, false);
3967 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
3968 cfun->decl, false);
3969 if (STACK_REG_P (a) || STACK_REG_P (b))
3971 if (!rtx_equal_p (a, b))
3972 return false;
3974 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
3976 else if (!rtx_equal_p (a, b))
3977 return false;
3979 /* If this call is indirect, we'll need to be able to use a call-clobbered
3980 register for the address of the target function. Make sure that all
3981 such registers are not used for passing parameters. */
3982 if (!decl && !TARGET_64BIT)
3984 tree type;
3986 /* We're looking at the CALL_EXPR, we need the type of the function. */
3987 type = CALL_EXPR_FN (exp); /* pointer expression */
3988 type = TREE_TYPE (type); /* pointer type */
3989 type = TREE_TYPE (type); /* function type */
3991 if (ix86_function_regparm (type, NULL) >= 3)
3993 /* ??? Need to count the actual number of registers to be used,
3994 not the possible number of registers. Fix later. */
3995 return false;
3999 /* Dllimport'd functions are also called indirectly. */
4000 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
4001 && decl && DECL_DLLIMPORT_P (decl)
4002 && ix86_function_regparm (TREE_TYPE (decl), NULL) >= 3)
4003 return false;
4005 /* If we force-aligned the stack, then sibcalling would unalign the
4006 stack, which may break the called function. */
4007 if (cfun->machine->force_align_arg_pointer)
4008 return false;
4010 /* Otherwise okay. That also includes certain types of indirect calls. */
4011 return true;
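/* Editor's note (illustrative sketch): in 32-bit PIC code,

       extern int g (int);
       int f (int x) { return g (x); }

   cannot be turned into a sibcall (a direct jmp to g) unless g binds
   locally, because a call through the PLT requires %ebx to hold the GOT
   pointer, as checked at the top of the function above.  */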
4014 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4015 calling convention attributes;
4016 arguments as in struct attribute_spec.handler. */
4018 static tree
4019 ix86_handle_cconv_attribute (tree *node, tree name,
4020 tree args,
4021 int flags ATTRIBUTE_UNUSED,
4022 bool *no_add_attrs)
4024 if (TREE_CODE (*node) != FUNCTION_TYPE
4025 && TREE_CODE (*node) != METHOD_TYPE
4026 && TREE_CODE (*node) != FIELD_DECL
4027 && TREE_CODE (*node) != TYPE_DECL)
4029 warning (OPT_Wattributes, "%qs attribute only applies to functions",
4030 IDENTIFIER_POINTER (name));
4031 *no_add_attrs = true;
4032 return NULL_TREE;
4035 /* Can combine regparm with all attributes but fastcall. */
4036 if (is_attribute_p ("regparm", name))
4038 tree cst;
4040 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4042 error ("fastcall and regparm attributes are not compatible");
4045 cst = TREE_VALUE (args);
4046 if (TREE_CODE (cst) != INTEGER_CST)
4048 warning (OPT_Wattributes,
4049 "%qs attribute requires an integer constant argument",
4050 IDENTIFIER_POINTER (name));
4051 *no_add_attrs = true;
4053 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4055 warning (OPT_Wattributes, "argument to %qs attribute larger than %d",
4056 IDENTIFIER_POINTER (name), REGPARM_MAX);
4057 *no_add_attrs = true;
4060 if (!TARGET_64BIT
4061 && lookup_attribute (ix86_force_align_arg_pointer_string,
4062 TYPE_ATTRIBUTES (*node))
4063 && compare_tree_int (cst, REGPARM_MAX-1))
4065 error ("%s functions limited to %d register parameters",
4066 ix86_force_align_arg_pointer_string, REGPARM_MAX-1);
4069 return NULL_TREE;
4072 if (TARGET_64BIT)
4074 /* Do not warn when emulating the MS ABI. */
4075 if (TREE_CODE (*node) != FUNCTION_TYPE || ix86_function_type_abi (*node)!=MS_ABI)
4076 warning (OPT_Wattributes, "%qs attribute ignored",
4077 IDENTIFIER_POINTER (name));
4078 *no_add_attrs = true;
4079 return NULL_TREE;
4082 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4083 if (is_attribute_p ("fastcall", name))
4085 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4087 error ("fastcall and cdecl attributes are not compatible");
4089 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4091 error ("fastcall and stdcall attributes are not compatible");
4093 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4095 error ("fastcall and regparm attributes are not compatible");
4099 /* Can combine stdcall with fastcall (redundant), regparm and
4100 sseregparm. */
4101 else if (is_attribute_p ("stdcall", name))
4103 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4105 error ("stdcall and cdecl attributes are not compatible");
4107 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4109 error ("stdcall and fastcall attributes are not compatible");
4113 /* Can combine cdecl with regparm and sseregparm. */
4114 else if (is_attribute_p ("cdecl", name))
4116 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4118 error ("stdcall and cdecl attributes are not compatible");
4120 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4122 error ("fastcall and cdecl attributes are not compatible");
4126 /* Can combine sseregparm with all attributes. */
4128 return NULL_TREE;
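/* Editor's note (illustrative declarations of the conventions this
   handler validates):

       int __attribute__((stdcall))      s (int a, int b);
       int __attribute__((fastcall))     f (int a, int b);
       int __attribute__((regparm (2)))  r (int a, int b);

   Combining fastcall with regparm on one declaration triggers the
   incompatibility errors above.  */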
4131 /* Return 0 if the attributes for two types are incompatible, 1 if they
4132 are compatible, and 2 if they are nearly compatible (which causes a
4133 warning to be generated). */
4135 static int
4136 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4138 /* Check for mismatch of non-default calling convention. */
4139 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4141 if (TREE_CODE (type1) != FUNCTION_TYPE
4142 && TREE_CODE (type1) != METHOD_TYPE)
4143 return 1;
4145 /* Check for mismatched fastcall/regparm types. */
4146 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4147 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4148 || (ix86_function_regparm (type1, NULL)
4149 != ix86_function_regparm (type2, NULL)))
4150 return 0;
4152 /* Check for mismatched sseregparm types. */
4153 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4154 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4155 return 0;
4157 /* Check for mismatched return types (cdecl vs stdcall). */
4158 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4159 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4160 return 0;
4162 return 1;
4165 /* Return the regparm value for a function with the indicated TYPE and DECL.
4166 DECL may be NULL when calling the function indirectly
4167 or when considering a libcall. */
4169 static int
4170 ix86_function_regparm (const_tree type, const_tree decl)
4172 tree attr;
4173 int regparm = ix86_regparm;
4175 static bool error_issued;
4177 if (TARGET_64BIT)
4179 if (ix86_function_type_abi (type) == DEFAULT_ABI)
4180 return regparm;
4181 return DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
4184 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4185 if (attr)
4187 regparm
4188 = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4190 if (decl && TREE_CODE (decl) == FUNCTION_DECL)
4192 /* We can't use regparm(3) for nested functions because
4193 these pass the static chain pointer in the %ecx register. */
4194 if (!error_issued && regparm == 3
4195 && decl_function_context (decl)
4196 && !DECL_NO_STATIC_CHAIN (decl))
4198 error ("nested functions are limited to 2 register parameters");
4199 error_issued = true;
4200 return 0;
4204 return regparm;
4207 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4208 return 2;
4210 /* Use register calling convention for local functions when possible. */
4211 if (decl && TREE_CODE (decl) == FUNCTION_DECL
4212 && !profile_flag)
4214 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4215 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4216 if (i && i->local)
4218 int local_regparm, globals = 0, regno;
4219 struct function *f;
4221 /* Make sure no regparm register is taken by a
4222 fixed register variable. */
4223 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4224 if (fixed_regs[local_regparm])
4225 break;
4227 /* We can't use regparm(3) for nested functions as these use
4228 the static chain pointer in the third argument. */
4229 if (local_regparm == 3
4230 && (decl_function_context (decl)
4231 || ix86_force_align_arg_pointer)
4232 && !DECL_NO_STATIC_CHAIN (decl))
4233 local_regparm = 2;
4235 /* If the function realigns its stack pointer, the prologue will
4236 clobber %ecx. If we've already generated code for the callee,
4237 the callee DECL_STRUCT_FUNCTION is gone, so we fall back to
4238 scanning the attributes for the self-realigning property. */
4239 f = DECL_STRUCT_FUNCTION (decl);
4240 if (local_regparm == 3
4241 && (f ? !!f->machine->force_align_arg_pointer
4242 : !!lookup_attribute (ix86_force_align_arg_pointer_string,
4243 TYPE_ATTRIBUTES (TREE_TYPE (decl)))))
4244 local_regparm = 2;
4246 /* Each fixed register usage increases register pressure,
4247 so fewer registers should be used for argument passing.
4248 This functionality can be overridden by an explicit
4249 regparm value. */
4250 for (regno = 0; regno <= DI_REG; regno++)
4251 if (fixed_regs[regno])
4252 globals++;
4254 local_regparm
4255 = globals < local_regparm ? local_regparm - globals : 0;
4257 if (local_regparm > regparm)
4258 regparm = local_regparm;
4262 return regparm;
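/* Editor's note (illustrative sketch): an explicit

       int __attribute__((regparm (3))) dot (int a, int b, int c);

   passes its three arguments in %eax, %edx and %ecx.  The nested-function
   check above rejects regparm (3) when %ecx is needed for the static
   chain.  */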
4265 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4266 DFmode (2) arguments in SSE registers for a function with the
4268 indicated TYPE and DECL. DECL may be NULL when calling the function
4269 indirectly or when considering a libcall. Otherwise return 0. */
4270 static int
4271 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4273 gcc_assert (!TARGET_64BIT);
4275 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4276 by the sseregparm attribute. */
4277 if (TARGET_SSEREGPARM
4278 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4280 if (!TARGET_SSE)
4282 if (warn)
4284 if (decl)
4285 error ("Calling %qD with attribute sseregparm without "
4286 "SSE/SSE2 enabled", decl);
4287 else
4288 error ("Calling %qT with attribute sseregparm without "
4289 "SSE/SSE2 enabled", type);
4291 return 0;
4294 return 2;
4297 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4298 (and DFmode for SSE2) arguments in SSE registers. */
4299 if (decl && TARGET_SSE_MATH && !profile_flag)
4301 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4302 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4303 if (i && i->local)
4304 return TARGET_SSE2 ? 2 : 1;
4307 return 0;
4310 /* Return true if EAX is live at the start of the function. Used by
4311 ix86_expand_prologue to determine if we need special help before
4312 calling allocate_stack_worker. */
4314 static bool
4315 ix86_eax_live_at_start_p (void)
4317 /* Cheat. Don't bother working forward from ix86_function_regparm
4318 to the function type to whether an actual argument is located in
4319 eax. Instead just look at cfg info, which is still close enough
4320 to correct at this point. This gives false positives for broken
4321 functions that might use uninitialized data that happens to be
4322 allocated in eax, but who cares? */
4323 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4326 /* Value is the number of bytes of arguments automatically
4327 popped when returning from a subroutine call.
4328 FUNDECL is the declaration node of the function (as a tree),
4329 FUNTYPE is the data type of the function (as a tree),
4330 or for a library call it is an identifier node for the subroutine name.
4331 SIZE is the number of bytes of arguments passed on the stack.
4333 On the 80386, the RTD insn may be used to pop them if the number
4334 of args is fixed, but if the number is variable then the caller
4335 must pop them all. RTD can't be used for library calls now
4336 because the library is compiled with the Unix compiler.
4337 Use of RTD is a selectable option, since it is incompatible with
4338 standard Unix calling sequences. If the option is not selected,
4339 the caller must always pop the args.
4341 The attribute stdcall is equivalent to RTD on a per module basis. */
4343 int
4344 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4346 int rtd;
4348 /* None of the 64-bit ABIs pop arguments. */
4349 if (TARGET_64BIT)
4350 return 0;
4352 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4354 /* Cdecl functions override -mrtd, and never pop the stack. */
4355 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4357 /* Stdcall and fastcall functions will pop the stack if not
4358 variable args. */
4359 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4360 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4361 rtd = 1;
4363 if (rtd && ! stdarg_p (funtype))
4364 return size;
4367 /* Lose any fake structure return argument if it is passed on the stack. */
4368 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4369 && !KEEP_AGGREGATE_RETURN_POINTER)
4371 int nregs = ix86_function_regparm (funtype, fundecl);
4372 if (nregs == 0)
4373 return GET_MODE_SIZE (Pmode);
4376 return 0;
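/* Editor's note (illustrative sketch): for

       int __attribute__((stdcall)) cb (int a, int b);

   this function reports SIZE (8 here), so the callee returns with
   "ret $8" and pops its own stack arguments; a cdecl or vararg function
   reports 0 and leaves the pop to the caller.  */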
4379 /* Argument support functions. */
4381 /* Return true when a register may be used to pass function parameters. */
4382 bool
4383 ix86_function_arg_regno_p (int regno)
4385 int i;
4386 const int *parm_regs;
4388 if (!TARGET_64BIT)
4390 if (TARGET_MACHO)
4391 return (regno < REGPARM_MAX
4392 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4393 else
4394 return (regno < REGPARM_MAX
4395 || (TARGET_MMX && MMX_REGNO_P (regno)
4396 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4397 || (TARGET_SSE && SSE_REGNO_P (regno)
4398 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4401 if (TARGET_MACHO)
4403 if (SSE_REGNO_P (regno) && TARGET_SSE)
4404 return true;
4406 else
4408 if (TARGET_SSE && SSE_REGNO_P (regno)
4409 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4410 return true;
4413 /* TODO: The function should depend on the current function's ABI, but
4414 builtins.c would need updating then. Therefore we use the
4415 default ABI. */
4417 /* RAX is used as hidden argument to va_arg functions. */
4418 if (DEFAULT_ABI == SYSV_ABI && regno == AX_REG)
4419 return true;
4421 if (DEFAULT_ABI == MS_ABI)
4422 parm_regs = x86_64_ms_abi_int_parameter_registers;
4423 else
4424 parm_regs = x86_64_int_parameter_registers;
4425 for (i = 0; i < (DEFAULT_ABI == MS_ABI ? X64_REGPARM_MAX
4426 : X86_64_REGPARM_MAX); i++)
4427 if (regno == parm_regs[i])
4428 return true;
4429 return false;
4432 /* Return true if we do not know how to pass TYPE solely in registers. */
4434 static bool
4435 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4437 if (must_pass_in_stack_var_size_or_pad (mode, type))
4438 return true;
4440 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4441 The layout_type routine is crafty and tries to trick us into passing
4442 currently unsupported vector types on the stack by using TImode. */
4443 return (!TARGET_64BIT && mode == TImode
4444 && type && TREE_CODE (type) != VECTOR_TYPE);
4447 /* Return the size, in bytes, of the area reserved for arguments passed
4448 in registers for the function represented by FNDECL, depending on the
4449 ABI used. */
4450 int
4451 ix86_reg_parm_stack_space (const_tree fndecl)
4453 int call_abi = 0;
4454 /* For libcalls it is possible that there is no fndecl at hand.
4455 In that case, assume the default ABI of the target. */
4456 if (!fndecl)
4457 call_abi = DEFAULT_ABI;
4458 else
4459 call_abi = ix86_function_abi (fndecl);
4460 if (call_abi == 1) /* MS_ABI */
4461 return 32;
4462 return 0;
4465 /* Return SYSV_ABI or MS_ABI depending on FNTYPE, specifying the
4466 call ABI used. */
4467 int
4468 ix86_function_type_abi (const_tree fntype)
4470 if (TARGET_64BIT && fntype != NULL)
4472 int abi;
4473 if (DEFAULT_ABI == SYSV_ABI)
4474 abi = lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)) ? MS_ABI : SYSV_ABI;
4475 else
4476 abi = lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)) ? SYSV_ABI : MS_ABI;
4478 return abi;
4480 return DEFAULT_ABI;
4483 int
4484 ix86_function_abi (const_tree fndecl)
4486 if (! fndecl)
4487 return DEFAULT_ABI;
4488 return ix86_function_type_abi (TREE_TYPE (fndecl));
4491 /* Return SYSV_ABI or MS_ABI depending on cfun, specifying the
4492 call ABI used. */
4493 int
4494 ix86_cfun_abi (void)
4496 if (! cfun || ! TARGET_64BIT)
4497 return DEFAULT_ABI;
4498 return cfun->machine->call_abi;
4501 /* regclass.c */
4502 extern void init_regs (void);
4504 /* Implementation of the call ABI switching target hook. The call
4505 register sets specific to FNDECL are set up. See also
4506 CONDITIONAL_REGISTER_USAGE for more details.
4507 To prevent redundant calls to the costly function init_regs (), it
4508 checks not to reset register usage for the default ABI. */
4509 void
4510 ix86_call_abi_override (const_tree fndecl)
4512 if (fndecl == NULL_TREE)
4513 cfun->machine->call_abi = DEFAULT_ABI;
4514 else
4515 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4516 if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
4518 if (call_used_regs[4 /*RSI*/] != 0 || call_used_regs[5 /*RDI*/] != 0)
4520 call_used_regs[4 /*RSI*/] = 0;
4521 call_used_regs[5 /*RDI*/] = 0;
4522 init_regs ();
4525 else if (TARGET_64BIT)
4527 if (call_used_regs[4 /*RSI*/] != 1 || call_used_regs[5 /*RDI*/] != 1)
4529 call_used_regs[4 /*RSI*/] = 1;
4530 call_used_regs[5 /*RDI*/] = 1;
4531 init_regs ();
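/* Editor's note (illustrative sketch): on a 64-bit SYSV target,

       int __attribute__((ms_abi)) w32_entry (int a, int b);

   switches that one function to the Microsoft convention; the hook above
   then marks %rsi and %rdi as call-saved and reruns init_regs ().  */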
4536 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4537 for a call to a function whose data type is FNTYPE.
4538 For a library call, FNTYPE is 0. */
4540 void
4541 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4542 tree fntype, /* tree ptr for function decl */
4543 rtx libname, /* SYMBOL_REF of library name or 0 */
4544 tree fndecl)
4546 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4547 memset (cum, 0, sizeof (*cum));
4549 cum->call_abi = ix86_function_type_abi (fntype);
4550 /* Set up the number of registers to use for passing arguments. */
4551 cum->nregs = ix86_regparm;
4552 if (TARGET_64BIT)
4554 if (cum->call_abi != DEFAULT_ABI)
4555 cum->nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX
4556 : X64_REGPARM_MAX;
4558 if (TARGET_SSE)
4560 cum->sse_nregs = SSE_REGPARM_MAX;
4561 if (TARGET_64BIT)
4563 if (cum->call_abi != DEFAULT_ABI)
4564 cum->sse_nregs = DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
4565 : X64_SSE_REGPARM_MAX;
4568 if (TARGET_MMX)
4569 cum->mmx_nregs = MMX_REGPARM_MAX;
4570 cum->warn_sse = true;
4571 cum->warn_mmx = true;
4573 /* Because types might mismatch between caller and callee, we need to
4574 use the actual type of the function for local calls.
4575 FIXME: cgraph_analyze can be told to actually record whether a function
4576 uses va_start, so for local functions maybe_vaarg can be made more
4577 aggressive, helping K&R code.
4578 FIXME: once the typesystem is fixed, we won't need this code anymore. */
4579 if (i && i->local)
4580 fntype = TREE_TYPE (fndecl);
4581 cum->maybe_vaarg = (fntype
4582 ? (!prototype_p (fntype) || stdarg_p (fntype))
4583 : !libname);
4585 if (!TARGET_64BIT)
4587 /* If there are variable arguments, then we won't pass anything
4588 in registers in 32-bit mode. */
4589 if (stdarg_p (fntype))
4591 cum->nregs = 0;
4592 cum->sse_nregs = 0;
4593 cum->mmx_nregs = 0;
4594 cum->warn_sse = 0;
4595 cum->warn_mmx = 0;
4596 return;
4599 /* Use ecx and edx registers if the function has the fastcall attribute,
4600 else look for regparm information. */
4601 if (fntype)
4603 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4605 cum->nregs = 2;
4606 cum->fastcall = 1;
4608 else
4609 cum->nregs = ix86_function_regparm (fntype, fndecl);
4612 /* Set up the number of SSE registers used for passing SFmode
4613 and DFmode arguments. Warn for mismatching ABI. */
4614 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4618 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4619 But in the case of vector types, it is some vector mode.
4621 When we have only some of our vector isa extensions enabled, then there
4622 are some modes for which vector_mode_supported_p is false. For these
4623 modes, the generic vector support in gcc will choose some non-vector mode
4624 in order to implement the type. By computing the natural mode, we'll
4625 select the proper ABI location for the operand and not depend on whatever
4626 the middle-end decides to do with these vector types. */
4628 static enum machine_mode
4629 type_natural_mode (const_tree type)
4631 enum machine_mode mode = TYPE_MODE (type);
4633 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4635 HOST_WIDE_INT size = int_size_in_bytes (type);
4636 if ((size == 8 || size == 16)
4637 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
4638 && TYPE_VECTOR_SUBPARTS (type) > 1)
4640 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
4642 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4643 mode = MIN_MODE_VECTOR_FLOAT;
4644 else
4645 mode = MIN_MODE_VECTOR_INT;
4647 /* Get the mode which has this inner mode and number of units. */
4648 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
4649 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
4650 && GET_MODE_INNER (mode) == innermode)
4651 return mode;
4653 gcc_unreachable ();
4657 return mode;
4660 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
4661 this may not agree with the mode that the type system has chosen for the
4662 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
4663 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
4665 static rtx
4666 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
4667 unsigned int regno)
4669 rtx tmp;
4671 if (orig_mode != BLKmode)
4672 tmp = gen_rtx_REG (orig_mode, regno);
4673 else
4675 tmp = gen_rtx_REG (mode, regno);
4676 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
4677 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
4680 return tmp;
4683 /* x86-64 register passing implementation. See the x86-64 ABI for details.
4684 The goal of this code is to classify each 8-byte chunk of an incoming
4685 argument by register class and assign registers accordingly. */
4687 /* Return the union class of CLASS1 and CLASS2.
4688 See the x86-64 PS ABI for details. */
4690 static enum x86_64_reg_class
4691 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
4693 /* Rule #1: If both classes are equal, this is the resulting class. */
4694 if (class1 == class2)
4695 return class1;
4697 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
4698 the other class. */
4699 if (class1 == X86_64_NO_CLASS)
4700 return class2;
4701 if (class2 == X86_64_NO_CLASS)
4702 return class1;
4704 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
4705 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
4706 return X86_64_MEMORY_CLASS;
4708 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
4709 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
4710 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
4711 return X86_64_INTEGERSI_CLASS;
4712 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
4713 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
4714 return X86_64_INTEGER_CLASS;
4716 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
4717 MEMORY is used. */
4718 if (class1 == X86_64_X87_CLASS
4719 || class1 == X86_64_X87UP_CLASS
4720 || class1 == X86_64_COMPLEX_X87_CLASS
4721 || class2 == X86_64_X87_CLASS
4722 || class2 == X86_64_X87UP_CLASS
4723 || class2 == X86_64_COMPLEX_X87_CLASS)
4724 return X86_64_MEMORY_CLASS;
4726 /* Rule #6: Otherwise class SSE is used. */
4727 return X86_64_SSE_CLASS;
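/* Editor's note (illustrative merges, following the numbered rules above):

       merge_classes (X86_64_NO_CLASS, X86_64_SSE_CLASS)      -> SSE
       merge_classes (X86_64_INTEGER_CLASS, X86_64_SSE_CLASS) -> INTEGER
       merge_classes (X86_64_X87_CLASS, X86_64_SSE_CLASS)     -> MEMORY

   So an 8-byte chunk mixing integer and SSE data travels in a
   general-purpose register, and anything touching x87 goes to memory.  */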
4730 /* Classify the argument of type TYPE and mode MODE.
4731 CLASSES will be filled by the register class used to pass each word
4732 of the operand. The number of words is returned. In case the parameter
4733 should be passed in memory, 0 is returned. As a special case for zero
4734 sized containers, classes[0] will be NO_CLASS and 1 is returned.
4736 BIT_OFFSET is used internally for handling records and specifies the
4737 offset in bits modulo 256 to avoid overflow cases.
4739 See the x86-64 PS ABI for details.
4742 static int
4743 classify_argument (enum machine_mode mode, const_tree type,
4744 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
4746 HOST_WIDE_INT bytes =
4747 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
4748 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4750 /* Variable sized entities are always passed/returned in memory. */
4751 if (bytes < 0)
4752 return 0;
4754 if (mode != VOIDmode
4755 && targetm.calls.must_pass_in_stack (mode, type))
4756 return 0;
4758 if (type && AGGREGATE_TYPE_P (type))
4760 int i;
4761 tree field;
4762 enum x86_64_reg_class subclasses[MAX_CLASSES];
4764 /* On x86-64 we pass structures larger than 16 bytes on the stack. */
4765 if (bytes > 16)
4766 return 0;
4768 for (i = 0; i < words; i++)
4769 classes[i] = X86_64_NO_CLASS;
4771 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
4772 signal the memory class, so handle this as a special case. */
4773 if (!words)
4775 classes[0] = X86_64_NO_CLASS;
4776 return 1;
4779 /* Classify each field of record and merge classes. */
4780 switch (TREE_CODE (type))
4782 case RECORD_TYPE:
4783 /* And now merge the fields of structure. */
4784 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4786 if (TREE_CODE (field) == FIELD_DECL)
4788 int num;
4790 if (TREE_TYPE (field) == error_mark_node)
4791 continue;
4793 /* Bitfields are always classified as integer. Handle them
4794 early, since later code would consider them to be
4795 misaligned integers. */
4796 if (DECL_BIT_FIELD (field))
4798 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4799 i < ((int_bit_position (field) + (bit_offset % 64))
4800 + tree_low_cst (DECL_SIZE (field), 0)
4801 + 63) / 8 / 8; i++)
4802 classes[i] =
4803 merge_classes (X86_64_INTEGER_CLASS,
4804 classes[i]);
4806 else
4808 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
4809 TREE_TYPE (field), subclasses,
4810 (int_bit_position (field)
4811 + bit_offset) % 256);
4812 if (!num)
4813 return 0;
4814 for (i = 0; i < num; i++)
4816 int pos =
4817 (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
4818 classes[i + pos] =
4819 merge_classes (subclasses[i], classes[i + pos]);
4824 break;
4826 case ARRAY_TYPE:
4827 /* Arrays are handled as small records. */
4829 int num;
4830 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
4831 TREE_TYPE (type), subclasses, bit_offset);
4832 if (!num)
4833 return 0;
4835 /* The partial classes are now full classes. */
4836 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
4837 subclasses[0] = X86_64_SSE_CLASS;
4838 if (subclasses[0] == X86_64_INTEGERSI_CLASS && bytes != 4)
4839 subclasses[0] = X86_64_INTEGER_CLASS;
4841 for (i = 0; i < words; i++)
4842 classes[i] = subclasses[i % num];
4844 break;
4846 case UNION_TYPE:
4847 case QUAL_UNION_TYPE:
4848 /* Unions are similar to RECORD_TYPE but offset is always 0. */
4850 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4852 if (TREE_CODE (field) == FIELD_DECL)
4854 int num;
4856 if (TREE_TYPE (field) == error_mark_node)
4857 continue;
4859 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
4860 TREE_TYPE (field), subclasses,
4861 bit_offset);
4862 if (!num)
4863 return 0;
4864 for (i = 0; i < num; i++)
4865 classes[i] = merge_classes (subclasses[i], classes[i]);
4868 break;
4870 default:
4871 gcc_unreachable ();
4874 /* Final merger cleanup. */
4875 for (i = 0; i < words; i++)
4877 /* If one class is MEMORY, everything should be passed in
4878 memory. */
4879 if (classes[i] == X86_64_MEMORY_CLASS)
4880 return 0;
4882 /* The X86_64_SSEUP_CLASS should always be preceded by
4883 X86_64_SSE_CLASS. */
4884 if (classes[i] == X86_64_SSEUP_CLASS
4885 && (i == 0 || classes[i - 1] != X86_64_SSE_CLASS))
4886 classes[i] = X86_64_SSE_CLASS;
4888 /* X86_64_X87UP_CLASS should be preceded by X86_64_X87_CLASS. */
4889 if (classes[i] == X86_64_X87UP_CLASS
4890 && (i == 0 || classes[i - 1] != X86_64_X87_CLASS))
4891 classes[i] = X86_64_SSE_CLASS;
4893 return words;
4896 /* Compute alignment needed. We align all types to natural boundaries with
4897 the exception of XFmode, which is aligned to 64 bits. */
4898 if (mode != VOIDmode && mode != BLKmode)
4900 int mode_alignment = GET_MODE_BITSIZE (mode);
4902 if (mode == XFmode)
4903 mode_alignment = 128;
4904 else if (mode == XCmode)
4905 mode_alignment = 256;
4906 if (COMPLEX_MODE_P (mode))
4907 mode_alignment /= 2;
4908 /* Misaligned fields are always returned in memory. */
4909 if (bit_offset % mode_alignment)
4910 return 0;
4913 /* For V1xx modes, just use the base mode. */
4914 if (VECTOR_MODE_P (mode) && mode != V1DImode
4915 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
4916 mode = GET_MODE_INNER (mode);
4918 /* Classification of atomic types. */
4919 switch (mode)
4921 case SDmode:
4922 case DDmode:
4923 classes[0] = X86_64_SSE_CLASS;
4924 return 1;
4925 case TDmode:
4926 classes[0] = X86_64_SSE_CLASS;
4927 classes[1] = X86_64_SSEUP_CLASS;
4928 return 2;
4929 case DImode:
4930 case SImode:
4931 case HImode:
4932 case QImode:
4933 case CSImode:
4934 case CHImode:
4935 case CQImode:
4936 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
4937 classes[0] = X86_64_INTEGERSI_CLASS;
4938 else
4939 classes[0] = X86_64_INTEGER_CLASS;
4940 return 1;
4941 case CDImode:
4942 case TImode:
4943 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
4944 return 2;
4945 case CTImode:
4946 return 0;
4947 case SFmode:
4948 if (!(bit_offset % 64))
4949 classes[0] = X86_64_SSESF_CLASS;
4950 else
4951 classes[0] = X86_64_SSE_CLASS;
4952 return 1;
4953 case DFmode:
4954 classes[0] = X86_64_SSEDF_CLASS;
4955 return 1;
4956 case XFmode:
4957 classes[0] = X86_64_X87_CLASS;
4958 classes[1] = X86_64_X87UP_CLASS;
4959 return 2;
4960 case TFmode:
4961 classes[0] = X86_64_SSE_CLASS;
4962 classes[1] = X86_64_SSEUP_CLASS;
4963 return 2;
4964 case SCmode:
4965 classes[0] = X86_64_SSE_CLASS;
4966 return 1;
4967 case DCmode:
4968 classes[0] = X86_64_SSEDF_CLASS;
4969 classes[1] = X86_64_SSEDF_CLASS;
4970 return 2;
4971 case XCmode:
4972 classes[0] = X86_64_COMPLEX_X87_CLASS;
4973 return 1;
4974 case TCmode:
4975 /* This mode is larger than 16 bytes. */
4976 return 0;
4977 case V4SFmode:
4978 case V4SImode:
4979 case V16QImode:
4980 case V8HImode:
4981 case V2DFmode:
4982 case V2DImode:
4983 classes[0] = X86_64_SSE_CLASS;
4984 classes[1] = X86_64_SSEUP_CLASS;
4985 return 2;
4986 case V1DImode:
4987 case V2SFmode:
4988 case V2SImode:
4989 case V4HImode:
4990 case V8QImode:
4991 classes[0] = X86_64_SSE_CLASS;
4992 return 1;
4993 case BLKmode:
4994 case VOIDmode:
4995 return 0;
4996 default:
4997 gcc_assert (VECTOR_MODE_P (mode));
4999 if (bytes > 16)
5000 return 0;
5002 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5004 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5005 classes[0] = X86_64_INTEGERSI_CLASS;
5006 else
5007 classes[0] = X86_64_INTEGER_CLASS;
5008 classes[1] = X86_64_INTEGER_CLASS;
5009 return 1 + (bytes > 8);
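/* Editor's note (illustrative classifications, assuming the standard
   x86-64 psABI rules implemented above):

       struct { long a;   long b;   }  -> { INTEGER, INTEGER }  two GPRs
       struct { double x; double y; }  -> { SSEDF, SSEDF }      two SSE regs
       struct { long a;   double x; }  -> { INTEGER, SSEDF }    one of each
       struct { long double v; }       -> X87 classes; passed in memory

   Aggregates larger than 16 bytes, or any chunk classified MEMORY, make
   the function return 0 and the argument is passed on the stack.  */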
5013 /* Examine the argument and return the number of registers required in
5014 each class. Return 0 iff the parameter should be passed in memory. */
5015 static int
5016 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5017 int *int_nregs, int *sse_nregs)
5019 enum x86_64_reg_class regclass[MAX_CLASSES];
5020 int n = classify_argument (mode, type, regclass, 0);
5022 *int_nregs = 0;
5023 *sse_nregs = 0;
5024 if (!n)
5025 return 0;
5026 for (n--; n >= 0; n--)
5027 switch (regclass[n])
5029 case X86_64_INTEGER_CLASS:
5030 case X86_64_INTEGERSI_CLASS:
5031 (*int_nregs)++;
5032 break;
5033 case X86_64_SSE_CLASS:
5034 case X86_64_SSESF_CLASS:
5035 case X86_64_SSEDF_CLASS:
5036 (*sse_nregs)++;
5037 break;
5038 case X86_64_NO_CLASS:
5039 case X86_64_SSEUP_CLASS:
5040 break;
5041 case X86_64_X87_CLASS:
5042 case X86_64_X87UP_CLASS:
5043 if (!in_return)
5044 return 0;
5045 break;
5046 case X86_64_COMPLEX_X87_CLASS:
5047 return in_return ? 2 : 0;
5048 case X86_64_MEMORY_CLASS:
5049 gcc_unreachable ();
5051 return 1;
5054 /* Construct a container for the argument as used by the GCC interface. See
5055 FUNCTION_ARG for the detailed description. */
5057 static rtx
5058 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5059 const_tree type, int in_return, int nintregs, int nsseregs,
5060 const int *intreg, int sse_regno)
5062 /* The following variables hold the static issued_error state. */
5063 static bool issued_sse_arg_error;
5064 static bool issued_sse_ret_error;
5065 static bool issued_x87_ret_error;
5067 enum machine_mode tmpmode;
5068 int bytes =
5069 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5070 enum x86_64_reg_class regclass[MAX_CLASSES];
5071 int n;
5072 int i;
5073 int nexps = 0;
5074 int needed_sseregs, needed_intregs;
5075 rtx exp[MAX_CLASSES];
5076 rtx ret;
5078 n = classify_argument (mode, type, regclass, 0);
5079 if (!n)
5080 return NULL;
5081 if (!examine_argument (mode, type, in_return, &needed_intregs,
5082 &needed_sseregs))
5083 return NULL;
5084 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5085 return NULL;
5087 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5088 some less clueful developer tries to use floating-point anyway. */
5089 if (needed_sseregs && !TARGET_SSE)
5091 if (in_return)
5093 if (!issued_sse_ret_error)
5095 error ("SSE register return with SSE disabled");
5096 issued_sse_ret_error = true;
5099 else if (!issued_sse_arg_error)
5101 error ("SSE register argument with SSE disabled");
5102 issued_sse_arg_error = true;
5104 return NULL;
5107 /* Likewise, error if the ABI requires us to return values in the
5108 x87 registers and the user specified -mno-80387. */
5109 if (!TARGET_80387 && in_return)
5110 for (i = 0; i < n; i++)
5111 if (regclass[i] == X86_64_X87_CLASS
5112 || regclass[i] == X86_64_X87UP_CLASS
5113 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5115 if (!issued_x87_ret_error)
5117 error ("x87 register return with x87 disabled");
5118 issued_x87_ret_error = true;
5120 return NULL;
5123 /* First construct simple cases. Avoid SCmode, since we want to use
5124 a single register to pass this type. */
5125 if (n == 1 && mode != SCmode)
5126 switch (regclass[0])
5128 case X86_64_INTEGER_CLASS:
5129 case X86_64_INTEGERSI_CLASS:
5130 return gen_rtx_REG (mode, intreg[0]);
5131 case X86_64_SSE_CLASS:
5132 case X86_64_SSESF_CLASS:
5133 case X86_64_SSEDF_CLASS:
5134 return gen_reg_or_parallel (mode, orig_mode, SSE_REGNO (sse_regno));
5135 case X86_64_X87_CLASS:
5136 case X86_64_COMPLEX_X87_CLASS:
5137 return gen_rtx_REG (mode, FIRST_STACK_REG);
5138 case X86_64_NO_CLASS:
5139 /* Zero sized array, struct or class. */
5140 return NULL;
5141 default:
5142 gcc_unreachable ();
5144 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5145 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5146 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5148 if (n == 2
5149 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5150 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5151 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5152 && regclass[1] == X86_64_INTEGER_CLASS
5153 && (mode == CDImode || mode == TImode || mode == TFmode)
5154 && intreg[0] + 1 == intreg[1])
5155 return gen_rtx_REG (mode, intreg[0]);
5157 /* Otherwise figure out the entries of the PARALLEL. */
5158 for (i = 0; i < n; i++)
5160 switch (regclass[i])
5162 case X86_64_NO_CLASS:
5163 break;
5164 case X86_64_INTEGER_CLASS:
5165 case X86_64_INTEGERSI_CLASS:
5166 /* Merge TImodes on aligned occasions here too. */
5167 if (i * 8 + 8 > bytes)
5168 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5169 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5170 tmpmode = SImode;
5171 else
5172 tmpmode = DImode;
5173 /* The requested chunk size may have no integer mode (e.g. 24 bits); use DImode then. */
5174 if (tmpmode == BLKmode)
5175 tmpmode = DImode;
5176 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5177 gen_rtx_REG (tmpmode, *intreg),
5178 GEN_INT (i*8));
5179 intreg++;
5180 break;
5181 case X86_64_SSESF_CLASS:
5182 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5183 gen_rtx_REG (SFmode,
5184 SSE_REGNO (sse_regno)),
5185 GEN_INT (i*8));
5186 sse_regno++;
5187 break;
5188 case X86_64_SSEDF_CLASS:
5189 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5190 gen_rtx_REG (DFmode,
5191 SSE_REGNO (sse_regno)),
5192 GEN_INT (i*8));
5193 sse_regno++;
5194 break;
5195 case X86_64_SSE_CLASS:
5196 if (i < n - 1 && regclass[i + 1] == X86_64_SSEUP_CLASS)
5197 tmpmode = TImode;
5198 else
5199 tmpmode = DImode;
5200 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5201 gen_rtx_REG (tmpmode,
5202 SSE_REGNO (sse_regno)),
5203 GEN_INT (i*8));
5204 if (tmpmode == TImode)
5205 i++;
5206 sse_regno++;
5207 break;
5208 default:
5209 gcc_unreachable ();
5213 /* Empty aligned struct, union or class. */
5214 if (nexps == 0)
5215 return NULL;
5217 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5218 for (i = 0; i < nexps; i++)
5219 XVECEXP (ret, 0, i) = exp [i];
5220 return ret;
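/* Editorial illustration (not in the original source): for the
   struct s { long l; double d; } example, passed as the first argument,
   construct_container builds roughly

     (parallel:BLK [(expr_list (reg:DI di) (const_int 0))
                    (expr_list (reg:DF xmm0) (const_int 8))])

   i.e. the first eightbyte travels in %rdi and the second in %xmm0. */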
5223 /* Update the data in CUM to advance over an argument of mode MODE
5224 and data type TYPE. (TYPE is null for libcalls where that information
5225 may not be available.) */
5227 static void
5228 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5229 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5231 switch (mode)
5233 default:
5234 break;
5236 case BLKmode:
5237 if (bytes < 0)
5238 break;
5239 /* FALLTHRU */
5241 case DImode:
5242 case SImode:
5243 case HImode:
5244 case QImode:
5245 cum->words += words;
5246 cum->nregs -= words;
5247 cum->regno += words;
5249 if (cum->nregs <= 0)
5251 cum->nregs = 0;
5252 cum->regno = 0;
5254 break;
5256 case DFmode:
5257 if (cum->float_in_sse < 2)
5258 break;
5259 case SFmode:
5260 if (cum->float_in_sse < 1)
5261 break;
5262 /* FALLTHRU */
5264 case TImode:
5265 case V16QImode:
5266 case V8HImode:
5267 case V4SImode:
5268 case V2DImode:
5269 case V4SFmode:
5270 case V2DFmode:
5271 if (!type || !AGGREGATE_TYPE_P (type))
5273 cum->sse_words += words;
5274 cum->sse_nregs -= 1;
5275 cum->sse_regno += 1;
5276 if (cum->sse_nregs <= 0)
5278 cum->sse_nregs = 0;
5279 cum->sse_regno = 0;
5282 break;
5284 case V8QImode:
5285 case V4HImode:
5286 case V2SImode:
5287 case V2SFmode:
5288 case V1DImode:
5289 if (!type || !AGGREGATE_TYPE_P (type))
5291 cum->mmx_words += words;
5292 cum->mmx_nregs -= 1;
5293 cum->mmx_regno += 1;
5294 if (cum->mmx_nregs <= 0)
5296 cum->mmx_nregs = 0;
5297 cum->mmx_regno = 0;
5300 break;
5304 static void
5305 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5306 tree type, HOST_WIDE_INT words)
5308 int int_nregs, sse_nregs;
5310 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5311 cum->words += words;
5312 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5314 cum->nregs -= int_nregs;
5315 cum->sse_nregs -= sse_nregs;
5316 cum->regno += int_nregs;
5317 cum->sse_regno += sse_nregs;
5319 else
5320 cum->words += words;
5323 static void
5324 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5325 HOST_WIDE_INT words)
5327 /* Otherwise, this should be passed indirect. */
5328 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5330 cum->words += words;
5331 if (cum->nregs > 0)
5333 cum->nregs -= 1;
5334 cum->regno += 1;
5338 void
5339 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5340 tree type, int named ATTRIBUTE_UNUSED)
5342 HOST_WIDE_INT bytes, words;
5344 if (mode == BLKmode)
5345 bytes = int_size_in_bytes (type);
5346 else
5347 bytes = GET_MODE_SIZE (mode);
5348 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5350 if (type)
5351 mode = type_natural_mode (type);
5353 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5354 function_arg_advance_ms_64 (cum, bytes, words);
5355 else if (TARGET_64BIT)
5356 function_arg_advance_64 (cum, mode, type, words);
5357 else
5358 function_arg_advance_32 (cum, mode, type, bytes, words);
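/* Editorial note (not in the original source): on 64-bit SysV targets,
   advancing past two named integer arguments leaves cum->regno == 2, so
   the next integer argument is taken from
   x86_64_int_parameter_registers[2], i.e. %rdx (the GPR order being
   %rdi, %rsi, %rdx, %rcx, %r8, %r9). */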
5361 /* Define where to put the arguments to a function.
5362 Value is zero to push the argument on the stack,
5363 or a hard register in which to store the argument.
5365 MODE is the argument's machine mode.
5366 TYPE is the data type of the argument (as a tree).
5367 This is null for libcalls where that information may
5368 not be available.
5369 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5370 the preceding args and about the function being called.
5371 NAMED is nonzero if this argument is a named parameter
5372 (otherwise it is an extra parameter matching an ellipsis). */
5374 static rtx
5375 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5376 enum machine_mode orig_mode, tree type,
5377 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5379 static bool warnedsse, warnedmmx;
5381 /* Avoid the AL settings for the Unix64 ABI. */
5382 if (mode == VOIDmode)
5383 return constm1_rtx;
5385 switch (mode)
5387 default:
5388 break;
5390 case BLKmode:
5391 if (bytes < 0)
5392 break;
5393 /* FALLTHRU */
5394 case DImode:
5395 case SImode:
5396 case HImode:
5397 case QImode:
5398 if (words <= cum->nregs)
5400 int regno = cum->regno;
5402 /* Fastcall allocates the first two DWORD (SImode) or
5403 smaller arguments to ECX and EDX, provided the argument
5404 is not an aggregate type. */
5405 if (cum->fastcall)
5407 if (mode == BLKmode
5408 || mode == DImode
5409 || (type && AGGREGATE_TYPE_P (type)))
5410 break;
5412 /* ECX, not EAX, is the first allocated register. */
5413 if (regno == AX_REG)
5414 regno = CX_REG;
5416 return gen_rtx_REG (mode, regno);
5418 break;
5420 case DFmode:
5421 if (cum->float_in_sse < 2)
5422 break;
5423 case SFmode:
5424 if (cum->float_in_sse < 1)
5425 break;
5426 /* FALLTHRU */
5427 case TImode:
5428 case V16QImode:
5429 case V8HImode:
5430 case V4SImode:
5431 case V2DImode:
5432 case V4SFmode:
5433 case V2DFmode:
5434 if (!type || !AGGREGATE_TYPE_P (type))
5436 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5438 warnedsse = true;
5439 warning (0, "SSE vector argument without SSE enabled "
5440 "changes the ABI");
5442 if (cum->sse_nregs)
5443 return gen_reg_or_parallel (mode, orig_mode,
5444 cum->sse_regno + FIRST_SSE_REG);
5446 break;
5448 case V8QImode:
5449 case V4HImode:
5450 case V2SImode:
5451 case V2SFmode:
5452 case V1DImode:
5453 if (!type || !AGGREGATE_TYPE_P (type))
5455 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
5457 warnedmmx = true;
5458 warning (0, "MMX vector argument without MMX enabled "
5459 "changes the ABI");
5461 if (cum->mmx_nregs)
5462 return gen_reg_or_parallel (mode, orig_mode,
5463 cum->mmx_regno + FIRST_MMX_REG);
5465 break;
5468 return NULL_RTX;
5471 static rtx
5472 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5473 enum machine_mode orig_mode, tree type)
5475 /* Handle a hidden AL argument containing number of registers
5476 for varargs x86-64 functions. */
5477 if (mode == VOIDmode)
5478 return GEN_INT (cum->maybe_vaarg
5479 ? (cum->sse_nregs < 0
5480 ? (cum->call_abi == DEFAULT_ABI
5481 ? SSE_REGPARM_MAX
5482 : (DEFAULT_ABI != SYSV_ABI ? X86_64_SSE_REGPARM_MAX
5483 : X64_SSE_REGPARM_MAX))
5484 : cum->sse_regno)
5485 : -1);
5487 return construct_container (mode, orig_mode, type, 0, cum->nregs,
5488 cum->sse_nregs,
5489 &x86_64_int_parameter_registers [cum->regno],
5490 cum->sse_regno);
5493 static rtx
5494 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5495 enum machine_mode orig_mode, int named,
5496 HOST_WIDE_INT bytes)
5498 unsigned int regno;
5500 /* Avoid the AL settings for the Unix64 ABI. */
5501 if (mode == VOIDmode)
5502 return constm1_rtx;
5504 /* If we've run out of registers, it goes on the stack. */
5505 if (cum->nregs == 0)
5506 return NULL_RTX;
5508 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
5510 /* Only floating point modes are passed in anything but integer regs. */
5511 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
5513 if (named)
5514 regno = cum->regno + FIRST_SSE_REG;
5515 else
5517 rtx t1, t2;
5519 /* Unnamed floating parameters are passed in both the
5520 SSE and integer registers. */
5521 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
5522 t2 = gen_rtx_REG (mode, regno);
5523 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
5524 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
5525 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
5528 /* Handle aggregated types passed in register. */
5529 if (orig_mode == BLKmode)
5531 if (bytes > 0 && bytes <= 8)
5532 mode = (bytes > 4 ? DImode : SImode);
5533 if (mode == BLKmode)
5534 mode = DImode;
5537 return gen_reg_or_parallel (mode, orig_mode, regno);
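/* Editorial illustration (not in the original source): under the MS ABI,
   an unnamed double in the second argument slot gets the PARALLEL built
   above, pairing the value in %xmm1 with a copy in %rdx so a varargs
   callee can fetch it from either register. */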
5540 rtx
5541 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
5542 tree type, int named)
5544 enum machine_mode mode = omode;
5545 HOST_WIDE_INT bytes, words;
5547 if (mode == BLKmode)
5548 bytes = int_size_in_bytes (type);
5549 else
5550 bytes = GET_MODE_SIZE (mode);
5551 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5553 /* To simplify the code below, represent vector types with a vector mode
5554 even if MMX/SSE are not active. */
5555 if (type && TREE_CODE (type) == VECTOR_TYPE)
5556 mode = type_natural_mode (type);
5558 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5559 return function_arg_ms_64 (cum, mode, omode, named, bytes);
5560 else if (TARGET_64BIT)
5561 return function_arg_64 (cum, mode, omode, type);
5562 else
5563 return function_arg_32 (cum, mode, omode, type, bytes, words);
5566 /* A C expression that indicates when an argument must be passed by
5567 reference. If nonzero for an argument, a copy of that argument is
5568 made in memory and a pointer to the argument is passed instead of
5569 the argument itself. The pointer is passed in whatever way is
5570 appropriate for passing a pointer to that type. */
5572 static bool
5573 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5574 enum machine_mode mode ATTRIBUTE_UNUSED,
5575 const_tree type, bool named ATTRIBUTE_UNUSED)
5577 /* See Windows x64 Software Convention. */
5578 if (TARGET_64BIT && (cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
5580 int msize = (int) GET_MODE_SIZE (mode);
5581 if (type)
5583 /* Arrays are passed by reference. */
5584 if (TREE_CODE (type) == ARRAY_TYPE)
5585 return true;
5587 if (AGGREGATE_TYPE_P (type))
5589 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
5590 are passed by reference. */
5591 msize = int_size_in_bytes (type);
5595 /* __m128 is passed by reference. */
5596 switch (msize) {
5597 case 1: case 2: case 4: case 8:
5598 break;
5599 default:
5600 return true;
5603 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
5604 return 1;
5606 return 0;
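/* Editorial note (not in the original source): under the MS ABI check
   above, aggregates of exactly 1, 2, 4 or 8 bytes travel by value in a
   register, while arrays, a 12-byte struct, and the 16-byte __m128 are
   all passed by reference. */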
5609 /* Return true when TYPE should be 128bit aligned for 32bit argument passing
5610 ABI. */
5611 static bool
5612 contains_aligned_value_p (tree type)
5614 enum machine_mode mode = TYPE_MODE (type);
5615 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
5616 || mode == TDmode
5617 || mode == TFmode
5618 || mode == TCmode)
5619 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
5620 return true;
5621 if (TYPE_ALIGN (type) < 128)
5622 return false;
5624 if (AGGREGATE_TYPE_P (type))
5626 /* Walk the aggregates recursively. */
5627 switch (TREE_CODE (type))
5629 case RECORD_TYPE:
5630 case UNION_TYPE:
5631 case QUAL_UNION_TYPE:
5633 tree field;
5635 /* Walk all the structure fields. */
5636 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5638 if (TREE_CODE (field) == FIELD_DECL
5639 && contains_aligned_value_p (TREE_TYPE (field)))
5640 return true;
5642 break;
5645 case ARRAY_TYPE:
5646 /* Just for use if some languages pass arrays by value. */
5647 if (contains_aligned_value_p (TREE_TYPE (type)))
5648 return true;
5649 break;
5651 default:
5652 gcc_unreachable ();
5655 return false;
5658 /* Gives the alignment boundary, in bits, of an argument with the
5659 specified mode and type. */
5661 int
5662 ix86_function_arg_boundary (enum machine_mode mode, tree type)
5664 int align;
5665 if (type)
5667 /* Since the canonical type is used for the call, convert to the
5668 canonical type if needed. */
5669 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
5670 type = TYPE_CANONICAL (type);
5671 align = TYPE_ALIGN (type);
5673 else
5674 align = GET_MODE_ALIGNMENT (mode);
5675 if (align < PARM_BOUNDARY)
5676 align = PARM_BOUNDARY;
5677 /* In 32bit, only _Decimal128 and __float128 are aligned to their
5678 natural boundaries. */
5679 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
5681 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
5682 make an exception for SSE modes since these require 128bit
5683 alignment.
5685 The handling here differs from field_alignment. ICC aligns MMX
5686 arguments to 4 byte boundaries, while structure fields are aligned
5687 to 8 byte boundaries. */
5688 if (!type)
5690 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
5691 align = PARM_BOUNDARY;
5693 else
5695 if (!contains_aligned_value_p (type))
5696 align = PARM_BOUNDARY;
5699 if (align > BIGGEST_ALIGNMENT)
5700 align = BIGGEST_ALIGNMENT;
5701 return align;
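/* Editorial note (not in the original source): on 32-bit targets a double
   argument therefore gets only PARM_BOUNDARY (32-bit) stack alignment
   despite its 64-bit natural alignment, whereas an SSE type such as
   __m128 keeps its full 128-bit alignment. */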
5704 /* Return true if REGNO is a possible register number of a function value. */
5706 bool
5707 ix86_function_value_regno_p (int regno)
5709 switch (regno)
5711 case 0:
5712 return true;
5714 case FIRST_FLOAT_REG:
5715 /* TODO: The function should depend on current function ABI but
5716 builtins.c would need updating then. Therefore we use the
5717 default ABI. */
5718 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
5719 return false;
5720 return TARGET_FLOAT_RETURNS_IN_80387;
5722 case FIRST_SSE_REG:
5723 return TARGET_SSE;
5725 case FIRST_MMX_REG:
5726 if (TARGET_MACHO || TARGET_64BIT)
5727 return false;
5728 return TARGET_MMX;
5731 return false;
5734 /* Define how to find the value returned by a function.
5735 VALTYPE is the data type of the value (as a tree).
5736 If the precise function being called is known, FUNC is its FUNCTION_DECL;
5737 otherwise, FUNC is 0. */
5739 static rtx
5740 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
5741 const_tree fntype, const_tree fn)
5743 unsigned int regno;
5745 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
5746 we normally prevent this case when mmx is not available. However
5747 some ABIs may require the result to be returned like DImode. */
5748 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
5749 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
5751 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
5752 we prevent this case when sse is not available. However some ABIs
5753 may require the result to be returned like integer TImode. */
5754 else if (mode == TImode
5755 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
5756 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
5758 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
5759 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
5760 regno = FIRST_FLOAT_REG;
5761 else
5762 /* Most things go in %eax. */
5763 regno = AX_REG;
5765 /* Override FP return register with %xmm0 for local functions when
5766 SSE math is enabled or for functions with sseregparm attribute. */
5767 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
5769 int sse_level = ix86_function_sseregparm (fntype, fn, false);
5770 if ((sse_level >= 1 && mode == SFmode)
5771 || (sse_level == 2 && mode == DFmode))
5772 regno = FIRST_SSE_REG;
5775 return gen_rtx_REG (orig_mode, regno);
5778 static rtx
5779 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
5780 const_tree valtype)
5782 rtx ret;
5784 /* Handle libcalls, which don't provide a type node. */
5785 if (valtype == NULL)
5787 switch (mode)
5789 case SFmode:
5790 case SCmode:
5791 case DFmode:
5792 case DCmode:
5793 case TFmode:
5794 case SDmode:
5795 case DDmode:
5796 case TDmode:
5797 return gen_rtx_REG (mode, FIRST_SSE_REG);
5798 case XFmode:
5799 case XCmode:
5800 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
5801 case TCmode:
5802 return NULL;
5803 default:
5804 return gen_rtx_REG (mode, AX_REG);
5808 ret = construct_container (mode, orig_mode, valtype, 1,
5809 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
5810 x86_64_int_return_registers, 0);
5812 /* For zero sized structures, construct_container returns NULL, but we
5813 need to keep the rest of the compiler happy by returning a meaningful value. */
5814 if (!ret)
5815 ret = gen_rtx_REG (orig_mode, AX_REG);
5817 return ret;
5820 static rtx
5821 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
5823 unsigned int regno = AX_REG;
5825 if (TARGET_SSE)
5827 switch (GET_MODE_SIZE (mode))
5829 case 16:
5830 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
5831 && !COMPLEX_MODE_P (mode))
5832 regno = FIRST_SSE_REG;
5833 break;
5834 case 8:
5835 case 4:
5836 if (mode == SFmode || mode == DFmode)
5837 regno = FIRST_SSE_REG;
5838 break;
5839 default:
5840 break;
5843 return gen_rtx_REG (orig_mode, regno);
5846 static rtx
5847 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
5848 enum machine_mode orig_mode, enum machine_mode mode)
5850 const_tree fn, fntype;
5852 fn = NULL_TREE;
5853 if (fntype_or_decl && DECL_P (fntype_or_decl))
5854 fn = fntype_or_decl;
5855 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
5857 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
5858 return function_value_ms_64 (orig_mode, mode);
5859 else if (TARGET_64BIT)
5860 return function_value_64 (orig_mode, mode, valtype);
5861 else
5862 return function_value_32 (orig_mode, mode, fntype, fn);
5865 static rtx
5866 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
5867 bool outgoing ATTRIBUTE_UNUSED)
5869 enum machine_mode mode, orig_mode;
5871 orig_mode = TYPE_MODE (valtype);
5872 mode = type_natural_mode (valtype);
5873 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
5876 rtx
5877 ix86_libcall_value (enum machine_mode mode)
5879 return ix86_function_value_1 (NULL, NULL, mode, mode);
5882 /* Return true iff type is returned in memory. */
5884 static int ATTRIBUTE_UNUSED
5885 return_in_memory_32 (const_tree type, enum machine_mode mode)
5887 HOST_WIDE_INT size;
5889 if (mode == BLKmode)
5890 return 1;
5892 size = int_size_in_bytes (type);
5894 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
5895 return 0;
5897 if (VECTOR_MODE_P (mode) || mode == TImode)
5899 /* User-created vectors small enough to fit in EAX. */
5900 if (size < 8)
5901 return 0;
5903 /* MMX/3dNow values are returned in MM0,
5904 except when it doesn't exist. */
5905 if (size == 8)
5906 return (TARGET_MMX ? 0 : 1);
5908 /* SSE values are returned in XMM0, except when it doesn't exist. */
5909 if (size == 16)
5910 return (TARGET_SSE ? 0 : 1);
5913 if (mode == XFmode)
5914 return 0;
5916 if (size > 12)
5917 return 1;
5918 return 0;
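/* Editorial illustration (not in the original source): by the rules above,
   _Complex float (SCmode, 8 bytes) is returned in registers, _Complex
   double (DCmode, 16 bytes) in memory, and long double (XFmode) in
   %st(0) even though it is 12 bytes wide. */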
5921 static int ATTRIBUTE_UNUSED
5922 return_in_memory_64 (const_tree type, enum machine_mode mode)
5924 int needed_intregs, needed_sseregs;
5925 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
5928 static int ATTRIBUTE_UNUSED
5929 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
5931 HOST_WIDE_INT size = int_size_in_bytes (type);
5933 /* __m128 is returned in xmm0. */
5934 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
5935 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
5936 return 0;
5938 /* Otherwise, the size must be exactly 1, 2, 4, or 8 bytes. */
5939 return (size != 1 && size != 2 && size != 4 && size != 8);
5942 static bool
5943 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5945 #ifdef SUBTARGET_RETURN_IN_MEMORY
5946 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
5947 #else
5948 const enum machine_mode mode = type_natural_mode (type);
5950 if (TARGET_64BIT_MS_ABI)
5951 return return_in_memory_ms_64 (type, mode);
5952 else if (TARGET_64BIT)
5953 return return_in_memory_64 (type, mode);
5954 else
5955 return return_in_memory_32 (type, mode);
5956 #endif
5959 /* Return true iff TYPE is returned in memory. This version is used
5960 on Solaris 10. It is similar to the generic ix86_return_in_memory,
5961 but differs notably in that when MMX is available, 8-byte vectors
5962 are returned in memory, rather than in MMX registers. */
5964 bool
5965 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5967 int size;
5968 enum machine_mode mode = type_natural_mode (type);
5970 if (TARGET_64BIT)
5971 return return_in_memory_64 (type, mode);
5973 if (mode == BLKmode)
5974 return 1;
5976 size = int_size_in_bytes (type);
5978 if (VECTOR_MODE_P (mode))
5980 /* Return in memory only if MMX registers *are* available. This
5981 seems backwards, but it is consistent with the existing
5982 Solaris x86 ABI. */
5983 if (size == 8)
5984 return TARGET_MMX;
5985 if (size == 16)
5986 return !TARGET_SSE;
5988 else if (mode == TImode)
5989 return !TARGET_SSE;
5990 else if (mode == XFmode)
5991 return 0;
5993 return size > 12;
5996 /* When returning SSE vector types, we have a choice of either
5997 (1) being ABI incompatible with a -march switch, or
5998 (2) generating an error.
5999 Given no good solution, I think the safest thing is one warning.
6000 The user won't be able to use -Werror, but....
6002 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6003 called in response to actually generating a caller or callee that
6004 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6005 via aggregate_value_p for general type probing from tree-ssa. */
6007 static rtx
6008 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6010 static bool warnedsse, warnedmmx;
6012 if (!TARGET_64BIT && type)
6014 /* Look at the return type of the function, not the function type. */
6015 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6017 if (!TARGET_SSE && !warnedsse)
6019 if (mode == TImode
6020 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6022 warnedsse = true;
6023 warning (0, "SSE vector return without SSE enabled "
6024 "changes the ABI");
6028 if (!TARGET_MMX && !warnedmmx)
6030 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6032 warnedmmx = true;
6033 warning (0, "MMX vector return without MMX enabled "
6034 "changes the ABI");
6039 return NULL;
6043 /* Create the va_list data type. */
6045 /* Returns the calling convention specific va_list data type.
6046 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6048 static tree
6049 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6051 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6053 /* For i386 we use a plain pointer to the argument area. */
6054 if (!TARGET_64BIT || abi == MS_ABI)
6055 return build_pointer_type (char_type_node);
6057 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6058 type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
6060 f_gpr = build_decl (FIELD_DECL, get_identifier ("gp_offset"),
6061 unsigned_type_node);
6062 f_fpr = build_decl (FIELD_DECL, get_identifier ("fp_offset"),
6063 unsigned_type_node);
6064 f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
6065 ptr_type_node);
6066 f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
6067 ptr_type_node);
6069 va_list_gpr_counter_field = f_gpr;
6070 va_list_fpr_counter_field = f_fpr;
6072 DECL_FIELD_CONTEXT (f_gpr) = record;
6073 DECL_FIELD_CONTEXT (f_fpr) = record;
6074 DECL_FIELD_CONTEXT (f_ovf) = record;
6075 DECL_FIELD_CONTEXT (f_sav) = record;
6077 TREE_CHAIN (record) = type_decl;
6078 TYPE_NAME (record) = type_decl;
6079 TYPE_FIELDS (record) = f_gpr;
6080 TREE_CHAIN (f_gpr) = f_fpr;
6081 TREE_CHAIN (f_fpr) = f_ovf;
6082 TREE_CHAIN (f_ovf) = f_sav;
6084 layout_type (record);
6086 /* The correct type is an array type of one element. */
6087 return build_array_type (record, build_index_type (size_zero_node));
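/* Editorial illustration (not in the original source): the record built
   above corresponds to the familiar SysV x86-64 declaration

     typedef struct __va_list_tag {
       unsigned int gp_offset;      // bytes into reg_save_area for GPR args
       unsigned int fp_offset;      // bytes into reg_save_area for SSE args
       void *overflow_arg_area;     // next stack-passed argument
       void *reg_save_area;         // where the prologue dumped the registers
     } va_list[1];
*/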
6090 /* Set up the builtin va_list data type and, for 64-bit, the additional
6091 calling-convention-specific va_list data types. */
6093 static tree
6094 ix86_build_builtin_va_list (void)
6096 tree ret = ix86_build_builtin_va_list_abi (DEFAULT_ABI);
6098 /* Initialize the ABI-specific va_list builtin types. */
6099 if (TARGET_64BIT)
6101 tree t;
6102 if (DEFAULT_ABI == MS_ABI)
6104 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6105 if (TREE_CODE (t) != RECORD_TYPE)
6106 t = build_variant_type_copy (t);
6107 sysv_va_list_type_node = t;
6109 else
6111 t = ret;
6112 if (TREE_CODE (t) != RECORD_TYPE)
6113 t = build_variant_type_copy (t);
6114 sysv_va_list_type_node = t;
6116 if (DEFAULT_ABI != MS_ABI)
6118 t = ix86_build_builtin_va_list_abi (MS_ABI);
6119 if (TREE_CODE (t) != RECORD_TYPE)
6120 t = build_variant_type_copy (t);
6121 ms_va_list_type_node = t;
6123 else
6125 t = ret;
6126 if (TREE_CODE (t) != RECORD_TYPE)
6127 t = build_variant_type_copy (t);
6128 ms_va_list_type_node = t;
6132 return ret;
6135 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6137 static void
6138 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6140 rtx save_area, mem;
6141 rtx label;
6142 rtx label_ref;
6143 rtx tmp_reg;
6144 rtx nsse_reg;
6145 alias_set_type set;
6146 int i;
6147 int regparm = ix86_regparm;
6149 if ((cum ? cum->call_abi : ix86_cfun_abi ()) != DEFAULT_ABI)
6150 regparm = DEFAULT_ABI != SYSV_ABI ? X86_64_REGPARM_MAX : X64_REGPARM_MAX;
6152 if (! cfun->va_list_gpr_size && ! cfun->va_list_fpr_size)
6153 return;
6156 /* Indicate that stack space must be allocated for the varargs save area. */
6156 ix86_save_varrargs_registers = 1;
6157 /* We need 16-byte stack alignment to save SSE registers. If the user
6158 asked for a lower preferred_stack_boundary, let's just hope they know
6159 what they are doing and won't pass SSE values through varargs.
6161 We may also end up assuming that only 64-bit values are stored in SSE
6162 registers, which lets some floating point programs work. */
6163 if (ix86_preferred_stack_boundary >= BIGGEST_ALIGNMENT)
6164 crtl->stack_alignment_needed = BIGGEST_ALIGNMENT;
6166 save_area = frame_pointer_rtx;
6167 set = get_varargs_alias_set ();
6169 for (i = cum->regno;
6170 i < regparm
6171 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6172 i++)
6174 mem = gen_rtx_MEM (Pmode,
6175 plus_constant (save_area, i * UNITS_PER_WORD));
6176 MEM_NOTRAP_P (mem) = 1;
6177 set_mem_alias_set (mem, set);
6178 emit_move_insn (mem, gen_rtx_REG (Pmode,
6179 x86_64_int_parameter_registers[i]));
6182 if (cum->sse_nregs && cfun->va_list_fpr_size)
6184 /* Now emit code to save SSE registers. The AX parameter contains the
6185 number of SSE parameter registers used to call this function. We use
6186 the sse_prologue_save insn template, which produces a computed jump
6187 across the SSE saves; some preparation work is needed to get it working. */
6189 label = gen_label_rtx ();
6190 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6192 /* Compute the address to jump to:
6193 label - eax*4 + n_named_sse_arguments*4. */
6194 tmp_reg = gen_reg_rtx (Pmode);
6195 nsse_reg = gen_reg_rtx (Pmode);
6196 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6197 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6198 gen_rtx_MULT (Pmode, nsse_reg,
6199 GEN_INT (4))));
6200 if (cum->sse_regno)
6201 emit_move_insn
6202 (nsse_reg,
6203 gen_rtx_CONST (DImode,
6204 gen_rtx_PLUS (DImode,
6205 label_ref,
6206 GEN_INT (cum->sse_regno * 4))));
6207 else
6208 emit_move_insn (nsse_reg, label_ref);
6209 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6211 /* Compute the address of the memory block we save into. We always use a
6212 pointer pointing 127 bytes after the first byte to store; this keeps
6213 each instruction's size limited to 4 bytes. */
6214 tmp_reg = gen_reg_rtx (Pmode);
6215 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6216 plus_constant (save_area,
6217 8 * X86_64_REGPARM_MAX + 127)));
6218 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6219 MEM_NOTRAP_P (mem) = 1;
6220 set_mem_alias_set (mem, set);
6221 set_mem_align (mem, BITS_PER_WORD);
6223 /* And finally do the dirty job! */
6224 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6225 GEN_INT (cum->sse_regno), label));
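/* Editorial note (not in the original source): the computed jump set up
   above depends on each SSE save emitted by the sse_prologue_save
   template being exactly 4 bytes long (hence the 127-byte-biased
   pointer); jumping to label - eax*4 + n*4 skips the saves for SSE
   registers already consumed by named arguments. */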
6229 static void
6230 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6232 alias_set_type set = get_varargs_alias_set ();
6233 int i;
6235 for (i = cum->regno; i < X64_REGPARM_MAX; i++)
6237 rtx reg, mem;
6239 mem = gen_rtx_MEM (Pmode,
6240 plus_constant (virtual_incoming_args_rtx,
6241 i * UNITS_PER_WORD));
6242 MEM_NOTRAP_P (mem) = 1;
6243 set_mem_alias_set (mem, set);
6245 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6246 emit_move_insn (mem, reg);
6250 static void
6251 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6252 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6253 int no_rtl)
6255 CUMULATIVE_ARGS next_cum;
6256 tree fntype;
6258 /* This argument doesn't appear to be used anymore. Which is good,
6259 because the old code here didn't suppress rtl generation. */
6260 gcc_assert (!no_rtl);
6262 if (!TARGET_64BIT)
6263 return;
6265 fntype = TREE_TYPE (current_function_decl);
6267 /* For varargs, we do not want to skip the dummy va_dcl argument.
6268 For stdargs, we do want to skip the last named argument. */
6269 next_cum = *cum;
6270 if (stdarg_p (fntype))
6271 function_arg_advance (&next_cum, mode, type, 1);
6273 if ((cum ? cum->call_abi : DEFAULT_ABI) == MS_ABI)
6274 setup_incoming_varargs_ms_64 (&next_cum);
6275 else
6276 setup_incoming_varargs_64 (&next_cum);
6279 /* Check whether TYPE is the char * kind of va_list. */
6281 static bool
6282 is_va_list_char_pointer (tree type)
6284 tree canonic;
6286 /* For 32-bit it is always true. */
6287 if (!TARGET_64BIT)
6288 return true;
6289 canonic = ix86_canonical_va_list_type (type);
6290 return (canonic == ms_va_list_type_node
6291 || (DEFAULT_ABI == MS_ABI && canonic == va_list_type_node));
6294 /* Implement va_start. */
6296 static void
6297 ix86_va_start (tree valist, rtx nextarg)
6299 HOST_WIDE_INT words, n_gpr, n_fpr;
6300 tree f_gpr, f_fpr, f_ovf, f_sav;
6301 tree gpr, fpr, ovf, sav, t;
6302 tree type;
6304 /* Only the 64-bit target needs something special. */
6305 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6307 std_expand_builtin_va_start (valist, nextarg);
6308 return;
6311 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6312 f_fpr = TREE_CHAIN (f_gpr);
6313 f_ovf = TREE_CHAIN (f_fpr);
6314 f_sav = TREE_CHAIN (f_ovf);
6316 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6317 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6318 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6319 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6320 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6322 /* Count number of gp and fp argument registers used. */
6323 words = crtl->args.info.words;
6324 n_gpr = crtl->args.info.regno;
6325 n_fpr = crtl->args.info.sse_regno;
6327 if (cfun->va_list_gpr_size)
6329 type = TREE_TYPE (gpr);
6330 t = build2 (MODIFY_EXPR, type,
6331 gpr, build_int_cst (type, n_gpr * 8));
6332 TREE_SIDE_EFFECTS (t) = 1;
6333 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6336 if (cfun->va_list_fpr_size)
6338 type = TREE_TYPE (fpr);
6339 t = build2 (MODIFY_EXPR, type, fpr,
6340 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6341 TREE_SIDE_EFFECTS (t) = 1;
6342 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6345 /* Find the overflow area. */
6346 type = TREE_TYPE (ovf);
6347 t = make_tree (type, virtual_incoming_args_rtx);
6348 if (words != 0)
6349 t = build2 (POINTER_PLUS_EXPR, type, t,
6350 size_int (words * UNITS_PER_WORD));
6351 t = build2 (MODIFY_EXPR, type, ovf, t);
6352 TREE_SIDE_EFFECTS (t) = 1;
6353 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6355 if (cfun->va_list_gpr_size || cfun->va_list_fpr_size)
6357 /* Find the register save area.
6358 The function prologue saves it right above the stack frame. */
6359 type = TREE_TYPE (sav);
6360 t = make_tree (type, frame_pointer_rtx);
6361 t = build2 (MODIFY_EXPR, type, sav, t);
6362 TREE_SIDE_EFFECTS (t) = 1;
6363 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
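/* Editorial illustration (not in the original source): for
   int f (int a, const char *b, ...), named arguments consume two GPRs
   and no SSE registers, so va_start stores gp_offset = 2*8 = 16 and
   fp_offset = 0*16 + 8*X86_64_REGPARM_MAX = 48. */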
6367 /* Implement va_arg. */
6369 static tree
6370 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6371 gimple_seq *post_p)
6373 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6374 tree f_gpr, f_fpr, f_ovf, f_sav;
6375 tree gpr, fpr, ovf, sav, t;
6376 int size, rsize;
6377 tree lab_false, lab_over = NULL_TREE;
6378 tree addr, t2;
6379 rtx container;
6380 int indirect_p = 0;
6381 tree ptrtype;
6382 enum machine_mode nat_mode;
6383 int arg_boundary;
6385 /* Only the 64-bit target needs something special. */
6386 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6387 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6389 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6390 f_fpr = TREE_CHAIN (f_gpr);
6391 f_ovf = TREE_CHAIN (f_fpr);
6392 f_sav = TREE_CHAIN (f_ovf);
6394 valist = build_va_arg_indirect_ref (valist);
6395 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6396 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6397 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6398 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6400 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6401 if (indirect_p)
6402 type = build_pointer_type (type);
6403 size = int_size_in_bytes (type);
6404 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6406 nat_mode = type_natural_mode (type);
6407 container = construct_container (nat_mode, TYPE_MODE (type), type, 0,
6408 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6409 intreg, 0);
6411 /* Pull the value out of the saved registers. */
6413 addr = create_tmp_var (ptr_type_node, "addr");
6414 DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
6416 if (container)
6418 int needed_intregs, needed_sseregs;
6419 bool need_temp;
6420 tree int_addr, sse_addr;
6422 lab_false = create_artificial_label ();
6423 lab_over = create_artificial_label ();
6425 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
6427 need_temp = (!REG_P (container)
6428 && ((needed_intregs && TYPE_ALIGN (type) > 64)
6429 || TYPE_ALIGN (type) > 128));
6431 /* If we are passing a structure, verify that it occupies a consecutive
6432 block of the register save area. If not, we need to do moves. */
6433 if (!need_temp && !REG_P (container))
6435 /* Verify that all registers are strictly consecutive. */
6436 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
6438 int i;
6440 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6442 rtx slot = XVECEXP (container, 0, i);
6443 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
6444 || INTVAL (XEXP (slot, 1)) != i * 16)
6445 need_temp = 1;
6448 else
6450 int i;
6452 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
6454 rtx slot = XVECEXP (container, 0, i);
6455 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
6456 || INTVAL (XEXP (slot, 1)) != i * 8)
6457 need_temp = 1;
6461 if (!need_temp)
6463 int_addr = addr;
6464 sse_addr = addr;
6466 else
6468 int_addr = create_tmp_var (ptr_type_node, "int_addr");
6469 DECL_POINTER_ALIAS_SET (int_addr) = get_varargs_alias_set ();
6470 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
6471 DECL_POINTER_ALIAS_SET (sse_addr) = get_varargs_alias_set ();
6474 /* First ensure that we fit completely in registers. */
6475 if (needed_intregs)
6477 t = build_int_cst (TREE_TYPE (gpr),
6478 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
6479 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
6480 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6481 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6482 gimplify_and_add (t, pre_p);
6484 if (needed_sseregs)
6486 t = build_int_cst (TREE_TYPE (fpr),
6487 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
6488 + X86_64_REGPARM_MAX * 8);
6489 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
6490 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
6491 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
6492 gimplify_and_add (t, pre_p);
6495 /* Compute index to start of area used for integer regs. */
6496 if (needed_intregs)
6498 /* int_addr = gpr + sav; */
6499 t = fold_convert (sizetype, gpr);
6500 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6501 gimplify_assign (int_addr, t, pre_p);
6503 if (needed_sseregs)
6505 /* sse_addr = fpr + sav; */
6506 t = fold_convert (sizetype, fpr);
6507 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
6508 gimplify_assign (sse_addr, t, pre_p);
6510 if (need_temp)
6512 int i;
6513 tree temp = create_tmp_var (type, "va_arg_tmp");
6515 /* addr = &temp; */
6516 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
6517 gimplify_assign (addr, t, pre_p);
6519 for (i = 0; i < XVECLEN (container, 0); i++)
6521 rtx slot = XVECEXP (container, 0, i);
6522 rtx reg = XEXP (slot, 0);
6523 enum machine_mode mode = GET_MODE (reg);
6524 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
6525 tree addr_type = build_pointer_type (piece_type);
6526 tree src_addr, src;
6527 int src_offset;
6528 tree dest_addr, dest;
6530 if (SSE_REGNO_P (REGNO (reg)))
6532 src_addr = sse_addr;
6533 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
6535 else
6537 src_addr = int_addr;
6538 src_offset = REGNO (reg) * 8;
6540 src_addr = fold_convert (addr_type, src_addr);
6541 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
6542 size_int (src_offset));
6543 src = build_va_arg_indirect_ref (src_addr);
6545 dest_addr = fold_convert (addr_type, addr);
6546 dest_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, dest_addr,
6547 size_int (INTVAL (XEXP (slot, 1))));
6548 dest = build_va_arg_indirect_ref (dest_addr);
6550 gimplify_assign (dest, src, pre_p);
6554 if (needed_intregs)
6556 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
6557 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
6558 gimplify_assign (gpr, t, pre_p);
6561 if (needed_sseregs)
6563 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
6564 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
6565 gimplify_assign (fpr, t, pre_p);
6568 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
6570 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
6573 /* ... otherwise out of the overflow area. */
6575 /* When the caller aligns a parameter on the stack, if the parameter
6576 alignment is beyond PREFERRED_STACK_BOUNDARY it will be aligned at
6577 PREFERRED_STACK_BOUNDARY only. We match the callee here with the
6578 caller. */
6579 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
6580 if ((unsigned int) arg_boundary > PREFERRED_STACK_BOUNDARY)
6581 arg_boundary = PREFERRED_STACK_BOUNDARY;
6583 /* Care for on-stack alignment if needed. */
6584 if (arg_boundary <= 64
6585 || integer_zerop (TYPE_SIZE (type)))
6586 t = ovf;
6587 else
6589 HOST_WIDE_INT align = arg_boundary / 8;
6590 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
6591 size_int (align - 1));
6592 t = fold_convert (sizetype, t);
6593 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
6594 size_int (-align));
6595 t = fold_convert (TREE_TYPE (ovf), t);
6597 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
6598 gimplify_assign (addr, t, pre_p);
6600 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
6601 size_int (rsize * UNITS_PER_WORD));
6602 gimplify_assign (unshare_expr (ovf), t, pre_p);
6604 if (container)
6605 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
6607 ptrtype = build_pointer_type (type);
6608 addr = fold_convert (ptrtype, addr);
6610 if (indirect_p)
6611 addr = build_va_arg_indirect_ref (addr);
6612 return build_va_arg_indirect_ref (addr);
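/* Editorial illustration (not in the original source): for
   va_arg (ap, int) the code above expands to roughly

     if (ap->gp_offset >= 48)                    // (6 - 1 + 1) * 8
       addr = ap->overflow_arg_area, ap->overflow_arg_area += 8;
     else
       addr = ap->reg_save_area + ap->gp_offset, ap->gp_offset += 8;
     result = *(int *) addr;
*/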
6615 /* Return nonzero if OPNUM's MEM should be matched
6616 in movabs* patterns. */
6618 int
6619 ix86_check_movabs (rtx insn, int opnum)
6621 rtx set, mem;
6623 set = PATTERN (insn);
6624 if (GET_CODE (set) == PARALLEL)
6625 set = XVECEXP (set, 0, 0);
6626 gcc_assert (GET_CODE (set) == SET);
6627 mem = XEXP (set, opnum);
6628 while (GET_CODE (mem) == SUBREG)
6629 mem = SUBREG_REG (mem);
6630 gcc_assert (MEM_P (mem));
6631 return (volatile_ok || !MEM_VOLATILE_P (mem));
6634 /* Initialize the table of extra 80387 mathematical constants. */
6636 static void
6637 init_ext_80387_constants (void)
6639 static const char * cst[5] =
6641 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
6642 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
6643 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
6644 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
6645 "3.1415926535897932385128089594061862044", /* 4: fldpi */
6647 int i;
6649 for (i = 0; i < 5; i++)
6651 real_from_string (&ext_80387_constants_table[i], cst[i]);
6652 /* Ensure each constant is rounded to XFmode precision. */
6653 real_convert (&ext_80387_constants_table[i],
6654 XFmode, &ext_80387_constants_table[i]);
6657 ext_80387_constants_init = 1;
6660 /* Return a nonzero code if the constant can be loaded with a special
6661 instruction; 0 if not; -1 if X is not an 80387 constant at all. */
6663 int
6664 standard_80387_constant_p (rtx x)
6666 enum machine_mode mode = GET_MODE (x);
6668 REAL_VALUE_TYPE r;
6670 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
6671 return -1;
6673 if (x == CONST0_RTX (mode))
6674 return 1;
6675 if (x == CONST1_RTX (mode))
6676 return 2;
6678 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
6680 /* For XFmode constants, try to find a special 80387 instruction when
6681 optimizing for size or on those CPUs that benefit from them. */
6682 if (mode == XFmode
6683 && (optimize_size || TARGET_EXT_80387_CONSTANTS))
6685 int i;
6687 if (! ext_80387_constants_init)
6688 init_ext_80387_constants ();
6690 for (i = 0; i < 5; i++)
6691 if (real_identical (&r, &ext_80387_constants_table[i]))
6692 return i + 3;
6695 /* Load of the constant -0.0 or -1.0 will be split as
6696 fldz;fchs or fld1;fchs sequence. */
6697 if (real_isnegzero (&r))
6698 return 8;
6699 if (real_identical (&r, &dconstm1))
6700 return 9;
6702 return 0;
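/* Editorial note (not in the original source): e.g. an XFmode CONST_DOUBLE
   holding pi makes this function return 7, which
   standard_80387_constant_opcode below maps to "fldpi". */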
6705 /* Return the opcode of the special instruction to be used to load
6706 the constant X. */
6708 const char *
6709 standard_80387_constant_opcode (rtx x)
6711 switch (standard_80387_constant_p (x))
6713 case 1:
6714 return "fldz";
6715 case 2:
6716 return "fld1";
6717 case 3:
6718 return "fldlg2";
6719 case 4:
6720 return "fldln2";
6721 case 5:
6722 return "fldl2e";
6723 case 6:
6724 return "fldl2t";
6725 case 7:
6726 return "fldpi";
6727 case 8:
6728 case 9:
6729 return "#";
6730 default:
6731 gcc_unreachable ();
6735 /* Return the CONST_DOUBLE representing the 80387 constant that is
6736 loaded by the specified special instruction. The argument IDX
6737 matches the return value from standard_80387_constant_p. */
6739 rtx
6740 standard_80387_constant_rtx (int idx)
6742 int i;
6744 if (! ext_80387_constants_init)
6745 init_ext_80387_constants ();
6747 switch (idx)
6749 case 3:
6750 case 4:
6751 case 5:
6752 case 6:
6753 case 7:
6754 i = idx - 3;
6755 break;
6757 default:
6758 gcc_unreachable ();
6761 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
6762 XFmode);
6765 /* Return 1 if mode is a valid mode for sse. */
6766 static int
6767 standard_sse_mode_p (enum machine_mode mode)
6769 switch (mode)
6771 case V16QImode:
6772 case V8HImode:
6773 case V4SImode:
6774 case V2DImode:
6775 case V4SFmode:
6776 case V2DFmode:
6777 return 1;
6779 default:
6780 return 0;
6784 /* Return 1 if X is an FP constant we can load into an SSE register
6785 without using memory. */
6786 int
6787 standard_sse_constant_p (rtx x)
6789 enum machine_mode mode = GET_MODE (x);
6791 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
6792 return 1;
6793 if (vector_all_ones_operand (x, mode)
6794 && standard_sse_mode_p (mode))
6795 return TARGET_SSE2 ? 2 : -1;
6797 return 0;
6800 /* Return the opcode of the special instruction to be used to load
6801 the constant X. */
6803 const char *
6804 standard_sse_constant_opcode (rtx insn, rtx x)
6806 switch (standard_sse_constant_p (x))
6808 case 1:
6809 if (get_attr_mode (insn) == MODE_V4SF)
6810 return "xorps\t%0, %0";
6811 else if (get_attr_mode (insn) == MODE_V2DF)
6812 return "xorpd\t%0, %0";
6813 else
6814 return "pxor\t%0, %0";
6815 case 2:
6816 return "pcmpeqd\t%0, %0";
6818 gcc_unreachable ();
6821 /* Return 1 if OP contains a symbol reference. */
6823 int
6824 symbolic_reference_mentioned_p (rtx op)
6826 const char *fmt;
6827 int i;
6829 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
6830 return 1;
6832 fmt = GET_RTX_FORMAT (GET_CODE (op));
6833 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
6835 if (fmt[i] == 'E')
6837 int j;
6839 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
6840 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
6841 return 1;
6844 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
6845 return 1;
6848 return 0;
6851 /* Return 1 if it is appropriate to emit `ret' instructions in the
6852 body of a function. Do this only if the epilogue is simple, needing a
6853 couple of insns. Prior to reloading, we can't tell how many registers
6854 must be saved, so return 0 then. Return 0 if there is no frame
6855 marker to de-allocate. */
6857 int
6858 ix86_can_use_return_insn_p (void)
6860 struct ix86_frame frame;
6862 if (! reload_completed || frame_pointer_needed)
6863 return 0;
6865 /* Don't allow more than 32k bytes of popped arguments, since that's all
6866 we can do with one instruction. */
6867 if (crtl->args.pops_args
6868 && crtl->args.size >= 32768)
6869 return 0;
6871 ix86_compute_frame_layout (&frame);
6872 return frame.to_allocate == 0 && frame.nregs == 0;
6875 /* Value should be nonzero if functions must have frame pointers.
6876 Zero means the frame pointer need not be set up (and parms may
6877 be accessed via the stack pointer) in functions that seem suitable. */
6879 int
6880 ix86_frame_pointer_required (void)
6882 /* If we accessed previous frames, then the generated code expects
6883 to be able to access the saved ebp value in our frame. */
6884 if (cfun->machine->accesses_prev_frame)
6885 return 1;
6887 /* Several x86 OSes need a frame pointer for other reasons,
6888 usually pertaining to setjmp. */
6889 if (SUBTARGET_FRAME_POINTER_REQUIRED)
6890 return 1;
6892 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
6893 the frame pointer by default. Turn it back on now if we've not
6894 got a leaf function. */
6895 if (TARGET_OMIT_LEAF_FRAME_POINTER
6896 && (!current_function_is_leaf
6897 || ix86_current_function_calls_tls_descriptor))
6898 return 1;
6900 if (crtl->profile)
6901 return 1;
6903 return 0;
6906 /* Record that the current function accesses previous call frames. */
6908 void
6909 ix86_setup_frame_addresses (void)
6911 cfun->machine->accesses_prev_frame = 1;
6914 #if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
6915 # define USE_HIDDEN_LINKONCE 1
6916 #else
6917 # define USE_HIDDEN_LINKONCE 0
6918 #endif
6920 static int pic_labels_used;
6922 /* Fills in the label name that should be used for a pc thunk for
6923 the given register. */
6925 static void
6926 get_pc_thunk_name (char name[32], unsigned int regno)
6928 gcc_assert (!TARGET_64BIT);
6930 if (USE_HIDDEN_LINKONCE)
6931 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
6932 else
6933 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
6937 /* This function generates the pc thunks used by -fpic code: each thunk
6938 loads its register with the return address of the caller and returns. */
6940 void
6941 ix86_file_end (void)
6943 rtx xops[2];
6944 int regno;
6946 for (regno = 0; regno < 8; ++regno)
6948 char name[32];
6950 if (! ((pic_labels_used >> regno) & 1))
6951 continue;
6953 get_pc_thunk_name (name, regno);
6955 #if TARGET_MACHO
6956 if (TARGET_MACHO)
6958 switch_to_section (darwin_sections[text_coal_section]);
6959 fputs ("\t.weak_definition\t", asm_out_file);
6960 assemble_name (asm_out_file, name);
6961 fputs ("\n\t.private_extern\t", asm_out_file);
6962 assemble_name (asm_out_file, name);
6963 fputs ("\n", asm_out_file);
6964 ASM_OUTPUT_LABEL (asm_out_file, name);
6966 else
6967 #endif
6968 if (USE_HIDDEN_LINKONCE)
6970 tree decl;
6972 decl = build_decl (FUNCTION_DECL, get_identifier (name),
6973 error_mark_node);
6974 TREE_PUBLIC (decl) = 1;
6975 TREE_STATIC (decl) = 1;
6976 DECL_ONE_ONLY (decl) = 1;
6978 (*targetm.asm_out.unique_section) (decl, 0);
6979 switch_to_section (get_named_section (decl, NULL, 0));
6981 (*targetm.asm_out.globalize_label) (asm_out_file, name);
6982 fputs ("\t.hidden\t", asm_out_file);
6983 assemble_name (asm_out_file, name);
6984 fputc ('\n', asm_out_file);
6985 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
6987 else
6989 switch_to_section (text_section);
6990 ASM_OUTPUT_LABEL (asm_out_file, name);
6993 xops[0] = gen_rtx_REG (Pmode, regno);
6994 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
6995 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
6996 output_asm_insn ("ret", xops);
6999 if (NEED_INDICATE_EXEC_STACK)
7000 file_end_indicate_exec_stack ();
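/* Editorial illustration (not in the original source): for %ebx the loop
   above emits a thunk equivalent to

     __i686.get_pc_thunk.bx:
             movl (%esp), %ebx
             ret
*/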
7003 /* Emit code for the SET_GOT patterns. */
7005 const char *
7006 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7008 rtx xops[3];
7010 xops[0] = dest;
7012 if (TARGET_VXWORKS_RTP && flag_pic)
7014 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7015 xops[2] = gen_rtx_MEM (Pmode,
7016 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7017 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7019 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7020 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7021 an unadorned address. */
7022 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7023 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7024 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7025 return "";
7028 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7030 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7032 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
7034 if (!flag_pic)
7035 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7036 else
7037 output_asm_insn ("call\t%a2", xops);
7039 #if TARGET_MACHO
7040 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7041 is what will be referenced by the Mach-O PIC subsystem. */
7042 if (!label)
7043 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
7044 #endif
7046 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7047 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7049 if (flag_pic)
7050 output_asm_insn ("pop%z0\t%0", xops);
7052 else
7054 char name[32];
7055 get_pc_thunk_name (name, REGNO (dest));
7056 pic_labels_used |= 1 << REGNO (dest);
7058 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7059 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7060 output_asm_insn ("call\t%X2", xops);
7061 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7062 is what will be referenced by the Mach-O PIC subsystem. */
7063 #if TARGET_MACHO
7064 if (!label)
7065 ASM_OUTPUT_LABEL (asm_out_file, machopic_function_base_name ());
7066 else
7067 targetm.asm_out.internal_label (asm_out_file, "L",
7068 CODE_LABEL_NUMBER (label));
7069 #endif
7072 if (TARGET_MACHO)
7073 return "";
7075 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7076 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7077 else
7078 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7080 return "";
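/* Editorial illustration (not in the original source): with PIC enabled
   and no deep branch prediction, the code above prints the classic
   sequence (AT&T syntax, %ebx as destination)

       call .L2
   .L2: popl %ebx
       addl $_GLOBAL_OFFSET_TABLE_+(.-.L2), %ebx
*/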
7083 /* Generate a "push" pattern for input ARG. */
7085 static rtx
7086 gen_push (rtx arg)
7088 return gen_rtx_SET (VOIDmode,
7089 gen_rtx_MEM (Pmode,
7090 gen_rtx_PRE_DEC (Pmode,
7091 stack_pointer_rtx)),
7092 arg);
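/* Editorial note (not in the original source): on 32-bit targets the RTX
   built above is

     (set (mem:SI (pre_dec:SI (reg:SI sp))) arg)

   which matches the "push" instruction pattern. */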
7095 /* Return >= 0 if there is an unused call-clobbered register available
7096 for the entire function. */
7098 static unsigned int
7099 ix86_select_alt_pic_regnum (void)
7101 if (current_function_is_leaf && !crtl->profile
7102 && !ix86_current_function_calls_tls_descriptor)
7104 int i;
7105 for (i = 2; i >= 0; --i)
7106 if (!df_regs_ever_live_p (i))
7107 return i;
7110 return INVALID_REGNUM;
7113 /* Return 1 if we need to save REGNO. */
7114 static int
7115 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7117 if (pic_offset_table_rtx
7118 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7119 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7120 || crtl->profile
7121 || crtl->calls_eh_return
7122 || crtl->uses_const_pool))
7124 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7125 return 0;
7126 return 1;
7129 if (crtl->calls_eh_return && maybe_eh_return)
7131 unsigned i;
7132 for (i = 0; ; i++)
7134 unsigned test = EH_RETURN_DATA_REGNO (i);
7135 if (test == INVALID_REGNUM)
7136 break;
7137 if (test == regno)
7138 return 1;
7142 if (cfun->machine->force_align_arg_pointer
7143 && regno == REGNO (cfun->machine->force_align_arg_pointer))
7144 return 1;
7146 return (df_regs_ever_live_p (regno)
7147 && !call_used_regs[regno]
7148 && !fixed_regs[regno]
7149 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7152 /* Return number of registers to be saved on the stack. */
7154 static int
7155 ix86_nsaved_regs (void)
7157 int nregs = 0;
7158 int regno;
7160 for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; regno--)
7161 if (ix86_save_reg (regno, true))
7162 nregs++;
7163 return nregs;
7166 /* Return the offset between two registers, one to be eliminated, and the other
7167 its replacement, at the start of a routine. */
7169 HOST_WIDE_INT
7170 ix86_initial_elimination_offset (int from, int to)
7172 struct ix86_frame frame;
7173 ix86_compute_frame_layout (&frame);
7175 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7176 return frame.hard_frame_pointer_offset;
7177 else if (from == FRAME_POINTER_REGNUM
7178 && to == HARD_FRAME_POINTER_REGNUM)
7179 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7180 else
7182 gcc_assert (to == STACK_POINTER_REGNUM);
7184 if (from == ARG_POINTER_REGNUM)
7185 return frame.stack_pointer_offset;
7187 gcc_assert (from == FRAME_POINTER_REGNUM);
7188 return frame.stack_pointer_offset - frame.frame_pointer_offset;
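/* Editorial illustration (not in the original source): the layout computed
   by ix86_compute_frame_layout below, from higher to lower addresses, is
   roughly

     [return address]                  <- entry stack pointer
     [saved frame pointer, if needed]  <- hard_frame_pointer_offset
     [saved registers]
     [va-arg register save area]
     [padding1]                        <- frame_pointer_offset
     [local variables]
     [outgoing argument area]
     [padding2]                        <- stack_pointer_offset
*/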
7192 /* Fill in the ix86_frame structure for the current function's frame. */
7194 static void
7195 ix86_compute_frame_layout (struct ix86_frame *frame)
7197 HOST_WIDE_INT total_size;
7198 unsigned int stack_alignment_needed;
7199 HOST_WIDE_INT offset;
7200 unsigned int preferred_alignment;
7201 HOST_WIDE_INT size = get_frame_size ();
7203 frame->nregs = ix86_nsaved_regs ();
7204 total_size = size;
7206 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7207 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7209 /* During reload the number of registers saved can change. Recompute
7210 the value as needed. Do not recompute when the number of registers
7211 did not change, as reload makes multiple calls to this function and
7212 does not expect the decision to change within a single iteration. */
7213 if (!optimize_size
7214 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7216 int count = frame->nregs;
7218 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7219 /* The fast prologue uses moves instead of pushes to save registers. This
7220 is significantly longer, but also executes faster, as modern hardware
7221 can execute the moves in parallel but cannot do so for push/pop.
7223 Be careful about choosing which prologue to emit: when a function takes
7224 many instructions to execute, we may as well use the slow version, and
7225 likewise when the function is known to be outside any hot spot (known
7226 only with feedback). Weight the size of the function by the number of
7227 registers to save, as it is cheap to use one or two push instructions
7228 but very slow to use many of them. */
7229 if (count)
7230 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7231 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7232 || (flag_branch_probabilities
7233 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7234 cfun->machine->use_fast_prologue_epilogue = false;
7235 else
7236 cfun->machine->use_fast_prologue_epilogue
7237 = !expensive_function_p (count);
7239 if (TARGET_PROLOGUE_USING_MOVE
7240 && cfun->machine->use_fast_prologue_epilogue)
7241 frame->save_regs_using_mov = true;
7242 else
7243 frame->save_regs_using_mov = false;
7246 /* Skip return address and saved base pointer. */
7247 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
7249 frame->hard_frame_pointer_offset = offset;
7251 /* Do some sanity checking of stack_alignment_needed and
7252 preferred_alignment, since the i386 port is the only one using these
7253 features, and they may break easily. */
7255 gcc_assert (!size || stack_alignment_needed);
7256 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7257 gcc_assert (preferred_alignment <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
7258 gcc_assert (stack_alignment_needed
7259 <= PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT);
7261 if (stack_alignment_needed < STACK_BOUNDARY / BITS_PER_UNIT)
7262 stack_alignment_needed = STACK_BOUNDARY / BITS_PER_UNIT;
7264 /* Register save area */
7265 offset += frame->nregs * UNITS_PER_WORD;
7267 /* Va-arg area */
7268 if (ix86_save_varrargs_registers)
7270 offset += X86_64_VARARGS_SIZE;
7271 frame->va_arg_size = X86_64_VARARGS_SIZE;
7273 else
7274 frame->va_arg_size = 0;
7276 /* Align start of frame for local function. */
7277 frame->padding1 = ((offset + stack_alignment_needed - 1)
7278 & -stack_alignment_needed) - offset;
7280 offset += frame->padding1;
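/* A worked example of the round-up idiom above, assuming
   stack_alignment_needed == 16: for offset == 20 we get
   (20 + 15) & -16 == 32, hence padding1 == 12 and the frame start
   lands on a 16-byte boundary; an already-aligned offset yields
   padding1 == 0.  */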
7282 /* Frame pointer points here. */
7283 frame->frame_pointer_offset = offset;
7285 offset += size;
7287 /* Add the outgoing arguments area.  It can be skipped if we eliminated
7288 all function calls as dead code.
7289 Skipping is impossible, however, when the function calls alloca: the
7290 alloca expander assumes that the last crtl->outgoing_args_size bytes
7291 of the stack frame are unused. */
7292 if (ACCUMULATE_OUTGOING_ARGS
7293 && (!current_function_is_leaf || cfun->calls_alloca
7294 || ix86_current_function_calls_tls_descriptor))
7296 offset += crtl->outgoing_args_size;
7297 frame->outgoing_arguments_size = crtl->outgoing_args_size;
7299 else
7300 frame->outgoing_arguments_size = 0;
7302 /* Align stack boundary. Only needed if we're calling another function
7303 or using alloca. */
7304 if (!current_function_is_leaf || cfun->calls_alloca
7305 || ix86_current_function_calls_tls_descriptor)
7306 frame->padding2 = ((offset + preferred_alignment - 1)
7307 & -preferred_alignment) - offset;
7308 else
7309 frame->padding2 = 0;
7311 offset += frame->padding2;
7313 /* We've reached end of stack frame. */
7314 frame->stack_pointer_offset = offset;
7316 /* Size the prologue needs to allocate. */
7317 frame->to_allocate =
7318 (size + frame->padding1 + frame->padding2
7319 + frame->outgoing_arguments_size + frame->va_arg_size);
7321 if ((!frame->to_allocate && frame->nregs <= 1)
7322 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
7323 frame->save_regs_using_mov = false;
7325 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && current_function_sp_is_unchanging
7326 && current_function_is_leaf
7327 && !ix86_current_function_calls_tls_descriptor)
7329 frame->red_zone_size = frame->to_allocate;
7330 if (frame->save_regs_using_mov)
7331 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
7332 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
7333 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
7335 else
7336 frame->red_zone_size = 0;
7337 frame->to_allocate -= frame->red_zone_size;
7338 frame->stack_pointer_offset -= frame->red_zone_size;
7339 #if 0
7340 fprintf (stderr, "\n");
7341 fprintf (stderr, "nregs: %ld\n", (long)frame->nregs);
7342 fprintf (stderr, "size: %ld\n", (long)size);
7343 fprintf (stderr, "alignment1: %ld\n", (long)stack_alignment_needed);
7344 fprintf (stderr, "padding1: %ld\n", (long)frame->padding1);
7345 fprintf (stderr, "va_arg: %ld\n", (long)frame->va_arg_size);
7346 fprintf (stderr, "padding2: %ld\n", (long)frame->padding2);
7347 fprintf (stderr, "to_allocate: %ld\n", (long)frame->to_allocate);
7348 fprintf (stderr, "red_zone_size: %ld\n", (long)frame->red_zone_size);
7349 fprintf (stderr, "frame_pointer_offset: %ld\n", (long)frame->frame_pointer_offset);
7350 fprintf (stderr, "hard_frame_pointer_offset: %ld\n",
7351 (long)frame->hard_frame_pointer_offset);
7352 fprintf (stderr, "stack_pointer_offset: %ld\n", (long)frame->stack_pointer_offset);
7353 fprintf (stderr, "current_function_is_leaf: %ld\n", (long)current_function_is_leaf);
7354 fprintf (stderr, "cfun->calls_alloca: %ld\n", (long)cfun->calls_alloca);
7355 fprintf (stderr, "x86_current_function_calls_tls_descriptor: %ld\n", (long)ix86_current_function_calls_tls_descriptor);
7356 #endif
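/* A worked example of the layout computed above (an editorial sketch,
   not output from a real compilation): a 32-bit leaf function with a
   frame pointer, two saved registers, 40 bytes of locals, 16-byte
   alignment and no outgoing arguments gets
   hard_frame_pointer_offset == 8, an 8-byte register save area,
   padding1 == 0, frame_pointer_offset == 16,
   stack_pointer_offset == 56 and to_allocate == 40.  */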
7359 /* Emit code to save registers in the prologue. */
7361 static void
7362 ix86_emit_save_regs (void)
7364 unsigned int regno;
7365 rtx insn;
7367 for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0; )
7368 if (ix86_save_reg (regno, true))
7370 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
7371 RTX_FRAME_RELATED_P (insn) = 1;
7375 /* Emit code to save registers using MOV insns.  The first register
7376 is saved at POINTER + OFFSET. */
7377 static void
7378 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7380 unsigned int regno;
7381 rtx insn;
7383 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7384 if (ix86_save_reg (regno, true))
7386 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
7387 Pmode, offset),
7388 gen_rtx_REG (Pmode, regno));
7389 RTX_FRAME_RELATED_P (insn) = 1;
7390 offset += UNITS_PER_WORD;
7394 /* Expand a prologue or epilogue stack adjustment.
7395 The pattern exists to put a dependency on all ebp-based memory accesses.
7396 STYLE should be negative if the instructions should be marked as frame
7397 related, zero if the %r11 register is live and cannot be freely used,
7398 and positive otherwise. */
7400 static void
7401 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset, int style)
7403 rtx insn;
7405 if (! TARGET_64BIT)
7406 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
7407 else if (x86_64_immediate_operand (offset, DImode))
7408 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
7409 else
7411 rtx r11;
7412 /* r11 is used by indirect sibcall return as well, set before the
7413 epilogue and used after the epilogue.  At the moment an indirect
7414 sibcall shouldn't be used together with huge frame sizes in one
7415 function because of the frame_size check in sibcall.c. */
7416 gcc_assert (style);
7417 r11 = gen_rtx_REG (DImode, R11_REG);
7418 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
7419 if (style < 0)
7420 RTX_FRAME_RELATED_P (insn) = 1;
7421 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
7422 offset));
7424 if (style < 0)
7425 RTX_FRAME_RELATED_P (insn) = 1;
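/* Usage note (drawn from the prologue code below): the allocation step
   calls pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
   GEN_INT (-allocate), -1); the negative style marks the resulting
   subtraction as frame related so that unwind info tracks the CFA.  */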
7428 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
7430 static rtx
7431 ix86_internal_arg_pointer (void)
7433 bool has_force_align_arg_pointer =
7434 (0 != lookup_attribute (ix86_force_align_arg_pointer_string,
7435 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))));
7436 if ((FORCE_PREFERRED_STACK_BOUNDARY_IN_MAIN
7437 && DECL_NAME (current_function_decl)
7438 && MAIN_NAME_P (DECL_NAME (current_function_decl))
7439 && DECL_FILE_SCOPE_P (current_function_decl))
7440 || ix86_force_align_arg_pointer
7441 || has_force_align_arg_pointer)
7443 /* Nested functions can't realign the stack due to a register
7444 conflict. */
7445 if (DECL_CONTEXT (current_function_decl)
7446 && TREE_CODE (DECL_CONTEXT (current_function_decl)) == FUNCTION_DECL)
7448 if (ix86_force_align_arg_pointer)
7449 warning (0, "-mstackrealign ignored for nested functions");
7450 if (has_force_align_arg_pointer)
7451 error ("%s not supported for nested functions",
7452 ix86_force_align_arg_pointer_string);
7453 return virtual_incoming_args_rtx;
7455 cfun->machine->force_align_arg_pointer = gen_rtx_REG (Pmode, CX_REG);
7456 return copy_to_reg (cfun->machine->force_align_arg_pointer);
7458 else
7459 return virtual_incoming_args_rtx;
7462 /* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
7463 This is called from dwarf2out.c to emit call frame instructions
7464 for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs. */
7465 static void
7466 ix86_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
7468 rtx unspec = SET_SRC (pattern);
7469 gcc_assert (GET_CODE (unspec) == UNSPEC);
7471 switch (index)
7473 case UNSPEC_REG_SAVE:
7474 dwarf2out_reg_save_reg (label, XVECEXP (unspec, 0, 0),
7475 SET_DEST (pattern));
7476 break;
7477 case UNSPEC_DEF_CFA:
7478 dwarf2out_def_cfa (label, REGNO (SET_DEST (pattern)),
7479 INTVAL (XVECEXP (unspec, 0, 0)));
7480 break;
7481 default:
7482 gcc_unreachable ();
7486 /* Expand the prologue into a bunch of separate insns. */
7488 void
7489 ix86_expand_prologue (void)
7491 rtx insn;
7492 bool pic_reg_used;
7493 struct ix86_frame frame;
7494 HOST_WIDE_INT allocate;
7496 ix86_compute_frame_layout (&frame);
7498 if (cfun->machine->force_align_arg_pointer)
7500 rtx x, y;
7502 /* Grab the argument pointer. */
7503 x = plus_constant (stack_pointer_rtx, 4);
7504 y = cfun->machine->force_align_arg_pointer;
7505 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
7506 RTX_FRAME_RELATED_P (insn) = 1;
7508 /* The unwind info consists of two parts: install the fafp as the cfa,
7509 and record the fafp as the "save register" of the stack pointer.
7510 The latter is there so that the unwinder can see where it
7511 should restore the stack pointer across the "and" insn. */
7512 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_DEF_CFA);
7513 x = gen_rtx_SET (VOIDmode, y, x);
7514 RTX_FRAME_RELATED_P (x) = 1;
7515 y = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, stack_pointer_rtx),
7516 UNSPEC_REG_SAVE);
7517 y = gen_rtx_SET (VOIDmode, cfun->machine->force_align_arg_pointer, y);
7518 RTX_FRAME_RELATED_P (y) = 1;
7519 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y));
7520 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
7521 REG_NOTES (insn) = x;
7523 /* Align the stack. */
7524 emit_insn (gen_andsi3 (stack_pointer_rtx, stack_pointer_rtx,
7525 GEN_INT (-16)));
7527 /* And here we cheat like madmen with the unwind info. We force the
7528 cfa register back to sp+4, which is exactly what it was at the
7529 start of the function. Re-pushing the return address results in
7530 the return at the same spot relative to the cfa, and thus is
7531 correct wrt the unwind info. */
7532 x = cfun->machine->force_align_arg_pointer;
7533 x = gen_frame_mem (Pmode, plus_constant (x, -4));
7534 insn = emit_insn (gen_push (x));
7535 RTX_FRAME_RELATED_P (insn) = 1;
7537 x = GEN_INT (4);
7538 x = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, x), UNSPEC_DEF_CFA);
7539 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
7540 x = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, x, NULL);
7541 REG_NOTES (insn) = x;
7544 /* Note: AT&T enter does NOT have reversed args. Enter is probably
7545 slower on all targets. Also sdb doesn't like it. */
7547 if (frame_pointer_needed)
7549 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
7550 RTX_FRAME_RELATED_P (insn) = 1;
7552 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7553 RTX_FRAME_RELATED_P (insn) = 1;
7556 allocate = frame.to_allocate;
7558 if (!frame.save_regs_using_mov)
7559 ix86_emit_save_regs ();
7560 else
7561 allocate += frame.nregs * UNITS_PER_WORD;
7563 /* When using the red zone we may start saving registers before
7564 allocating the stack frame, saving one cycle of the prologue.  However,
7565 avoid doing this if we are going to have to probe the stack, since
7566 at least on x86_64 the stack probe can turn into a call that clobbers
7567 a red zone location. */
7568 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
7569 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
7570 ix86_emit_save_regs_using_mov (frame_pointer_needed ? hard_frame_pointer_rtx
7571 : stack_pointer_rtx,
7572 -frame.nregs * UNITS_PER_WORD);
7574 if (allocate == 0)
7575 ;
7576 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
7577 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
7578 GEN_INT (-allocate), -1);
7579 else
7581 /* Only valid for Windows targets. */
7582 rtx eax = gen_rtx_REG (Pmode, AX_REG);
7583 bool eax_live;
7584 rtx t;
7586 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
7588 if (cfun->machine->call_abi == MS_ABI)
7589 eax_live = false;
7590 else
7591 eax_live = ix86_eax_live_at_start_p ();
7593 if (eax_live)
7595 emit_insn (gen_push (eax));
7596 allocate -= UNITS_PER_WORD;
7599 emit_move_insn (eax, GEN_INT (allocate));
7601 if (TARGET_64BIT)
7602 insn = gen_allocate_stack_worker_64 (eax);
7603 else
7604 insn = gen_allocate_stack_worker_32 (eax);
7605 insn = emit_insn (insn);
7606 RTX_FRAME_RELATED_P (insn) = 1;
7607 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
7608 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
7609 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7610 t, REG_NOTES (insn));
7612 if (eax_live)
7614 if (frame_pointer_needed)
7615 t = plus_constant (hard_frame_pointer_rtx,
7616 allocate
7617 - frame.to_allocate
7618 - frame.nregs * UNITS_PER_WORD);
7619 else
7620 t = plus_constant (stack_pointer_rtx, allocate);
7621 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
7625 if (frame.save_regs_using_mov
7626 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
7627 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
7629 if (!frame_pointer_needed || !frame.to_allocate)
7630 ix86_emit_save_regs_using_mov (stack_pointer_rtx, frame.to_allocate);
7631 else
7632 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
7633 -frame.nregs * UNITS_PER_WORD);
7636 pic_reg_used = false;
7637 if (pic_offset_table_rtx
7638 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7639 || crtl->profile))
7641 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
7643 if (alt_pic_reg_used != INVALID_REGNUM)
7644 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
7646 pic_reg_used = true;
7649 if (pic_reg_used)
7651 if (TARGET_64BIT)
7653 if (ix86_cmodel == CM_LARGE_PIC)
7655 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
7656 rtx label = gen_label_rtx ();
7657 emit_label (label);
7658 LABEL_PRESERVE_P (label) = 1;
7659 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
7660 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
7661 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
7662 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
7663 pic_offset_table_rtx, tmp_reg));
7665 else
7666 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
7668 else
7669 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
7672 /* Prevent function calls from being scheduled before the call to mcount.
7673 In the pic_reg_used case, make sure that the got load isn't deleted. */
7674 if (crtl->profile)
7676 if (pic_reg_used)
7677 emit_insn (gen_prologue_use (pic_offset_table_rtx));
7678 emit_insn (gen_blockage ());
7681 /* Emit cld instruction if stringops are used in the function. */
7682 if (TARGET_CLD && ix86_current_function_needs_cld)
7683 emit_insn (gen_cld ());
7686 /* Emit code to restore saved registers using MOV insns. First register
7687 is restored from POINTER + OFFSET. */
7688 static void
7689 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
7690 int maybe_eh_return)
7692 int regno;
7693 rtx base_address = gen_rtx_MEM (Pmode, pointer);
7695 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7696 if (ix86_save_reg (regno, maybe_eh_return))
7698 /* Ensure that adjust_address won't be forced to produce a pointer
7699 outside the range allowed by the x86-64 instruction set. */
7700 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
7702 rtx r11;
7704 r11 = gen_rtx_REG (DImode, R11_REG);
7705 emit_move_insn (r11, GEN_INT (offset));
7706 emit_insn (gen_adddi3 (r11, r11, pointer));
7707 base_address = gen_rtx_MEM (Pmode, r11);
7708 offset = 0;
7710 emit_move_insn (gen_rtx_REG (Pmode, regno),
7711 adjust_address (base_address, Pmode, offset));
7712 offset += UNITS_PER_WORD;
7716 /* Restore function stack, frame, and registers. */
7718 void
7719 ix86_expand_epilogue (int style)
7721 int regno;
7722 int sp_valid = !frame_pointer_needed || current_function_sp_is_unchanging;
7723 struct ix86_frame frame;
7724 HOST_WIDE_INT offset;
7726 ix86_compute_frame_layout (&frame);
7728 /* Calculate start of saved registers relative to ebp. Special care
7729 must be taken for the normal return case of a function using
7730 eh_return: the eax and edx registers are marked as saved, but not
7731 restored along this path. */
7732 offset = frame.nregs;
7733 if (crtl->calls_eh_return && style != 2)
7734 offset -= 2;
7735 offset *= -UNITS_PER_WORD;
7737 /* If we're only restoring one register and sp is not valid, then
7738 use a move instruction to restore the register, since it's
7739 less work than reloading sp and popping the register.
7741 The default code results in a stack adjustment using an add/lea
7742 instruction, while this code results in a LEAVE instruction (or its
7743 discrete equivalent), so it is profitable in some other cases as well,
7744 especially when there are no registers to restore.  We also use this
7745 code when TARGET_USE_LEAVE and there is exactly one register to pop.
7746 This heuristic may need some tuning in the future. */
7747 if ((!sp_valid && frame.nregs <= 1)
7748 || (TARGET_EPILOGUE_USING_MOVE
7749 && cfun->machine->use_fast_prologue_epilogue
7750 && (frame.nregs > 1 || frame.to_allocate))
7751 || (frame_pointer_needed && !frame.nregs && frame.to_allocate)
7752 || (frame_pointer_needed && TARGET_USE_LEAVE
7753 && cfun->machine->use_fast_prologue_epilogue
7754 && frame.nregs == 1)
7755 || crtl->calls_eh_return)
7757 /* Restore registers.  We can use ebp or esp to address the memory
7758 locations.  If both are available, default to ebp, since offsets
7759 are known to be small.  The only exception is esp pointing directly
7760 to the end of the block of saved registers, where we may simplify
7761 the addressing mode. */
7763 if (!frame_pointer_needed || (sp_valid && !frame.to_allocate))
7764 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
7765 frame.to_allocate, style == 2);
7766 else
7767 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
7768 offset, style == 2);
7770 /* eh_return epilogues need %ecx added to the stack pointer. */
7771 if (style == 2)
7773 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
7775 if (frame_pointer_needed)
7777 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
7778 tmp = plus_constant (tmp, UNITS_PER_WORD);
7779 emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
7781 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
7782 emit_move_insn (hard_frame_pointer_rtx, tmp);
7784 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
7785 const0_rtx, style);
7787 else
7789 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
7790 tmp = plus_constant (tmp, (frame.to_allocate
7791 + frame.nregs * UNITS_PER_WORD));
7792 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
7795 else if (!frame_pointer_needed)
7796 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
7797 GEN_INT (frame.to_allocate
7798 + frame.nregs * UNITS_PER_WORD),
7799 style);
7800 /* If not an i386, mov & pop is faster than "leave". */
7801 else if (TARGET_USE_LEAVE || optimize_size
7802 || !cfun->machine->use_fast_prologue_epilogue)
7803 emit_insn ((*ix86_gen_leave) ());
7804 else
7806 pro_epilogue_adjust_stack (stack_pointer_rtx,
7807 hard_frame_pointer_rtx,
7808 const0_rtx, style);
7810 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
7813 else
7815 /* First step is to deallocate the stack frame so that we can
7816 pop the registers. */
7817 if (!sp_valid)
7819 gcc_assert (frame_pointer_needed);
7820 pro_epilogue_adjust_stack (stack_pointer_rtx,
7821 hard_frame_pointer_rtx,
7822 GEN_INT (offset), style);
7824 else if (frame.to_allocate)
7825 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
7826 GEN_INT (frame.to_allocate), style);
7828 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7829 if (ix86_save_reg (regno, false))
7830 emit_insn ((*ix86_gen_pop1) (gen_rtx_REG (Pmode, regno)));
7831 if (frame_pointer_needed)
7833 /* Leave results in shorter dependency chains on CPUs that are
7834 able to grok it fast. */
7835 if (TARGET_USE_LEAVE)
7836 emit_insn ((*ix86_gen_leave) ());
7837 else
7838 emit_insn ((*ix86_gen_pop1) (hard_frame_pointer_rtx));
7842 if (cfun->machine->force_align_arg_pointer)
7844 emit_insn (gen_addsi3 (stack_pointer_rtx,
7845 cfun->machine->force_align_arg_pointer,
7846 GEN_INT (-4)));
7849 /* Sibcall epilogues don't want a return instruction. */
7850 if (style == 0)
7851 return;
7853 if (crtl->args.pops_args && crtl->args.size)
7855 rtx popc = GEN_INT (crtl->args.pops_args);
7857 /* i386 can only pop 64K bytes.  If asked to pop more, pop the
7858 return address, do an explicit add, and jump indirectly to the
7859 caller. */
7861 if (crtl->args.pops_args >= 65536)
7863 rtx ecx = gen_rtx_REG (SImode, CX_REG);
7865 /* There is no "pascal" calling convention in any 64bit ABI. */
7866 gcc_assert (!TARGET_64BIT);
7868 emit_insn (gen_popsi1 (ecx));
7869 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, popc));
7870 emit_jump_insn (gen_return_indirect_internal (ecx));
7872 else
7873 emit_jump_insn (gen_return_pop_internal (popc));
7875 else
7876 emit_jump_insn (gen_return_internal ());
7879 /* Reset state that compiling the function may have modified. */
7881 static void
7882 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7883 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7885 if (pic_offset_table_rtx)
7886 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
7887 #if TARGET_MACHO
7888 /* Mach-O doesn't support labels at the end of objects, so if
7889 it looks like we might want one, insert a NOP. */
7891 rtx insn = get_last_insn ();
7892 while (insn
7893 && NOTE_P (insn)
7894 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
7895 insn = PREV_INSN (insn);
7896 if (insn
7897 && (LABEL_P (insn)
7898 || (NOTE_P (insn)
7899 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
7900 fputs ("\tnop\n", file);
7902 #endif
7906 /* Extract the parts of an RTL expression that is a valid memory address
7907 for an instruction.  Return 0 if the structure of the address is
7908 grossly off.  Return -1 if the address contains an ASHIFT, so it is not
7909 strictly valid, but is still used for computing the length of a lea instruction. */
7911 int
7912 ix86_decompose_address (rtx addr, struct ix86_address *out)
7914 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
7915 rtx base_reg, index_reg;
7916 HOST_WIDE_INT scale = 1;
7917 rtx scale_rtx = NULL_RTX;
7918 int retval = 1;
7919 enum ix86_address_seg seg = SEG_DEFAULT;
7921 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
7922 base = addr;
7923 else if (GET_CODE (addr) == PLUS)
7925 rtx addends[4], op;
7926 int n = 0, i;
7928 op = addr;
7929 do
7931 if (n >= 4)
7932 return 0;
7933 addends[n++] = XEXP (op, 1);
7934 op = XEXP (op, 0);
7936 while (GET_CODE (op) == PLUS);
7937 if (n >= 4)
7938 return 0;
7939 addends[n] = op;
7941 for (i = n; i >= 0; --i)
7943 op = addends[i];
7944 switch (GET_CODE (op))
7946 case MULT:
7947 if (index)
7948 return 0;
7949 index = XEXP (op, 0);
7950 scale_rtx = XEXP (op, 1);
7951 break;
7953 case UNSPEC:
7954 if (XINT (op, 1) == UNSPEC_TP
7955 && TARGET_TLS_DIRECT_SEG_REFS
7956 && seg == SEG_DEFAULT)
7957 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
7958 else
7959 return 0;
7960 break;
7962 case REG:
7963 case SUBREG:
7964 if (!base)
7965 base = op;
7966 else if (!index)
7967 index = op;
7968 else
7969 return 0;
7970 break;
7972 case CONST:
7973 case CONST_INT:
7974 case SYMBOL_REF:
7975 case LABEL_REF:
7976 if (disp)
7977 return 0;
7978 disp = op;
7979 break;
7981 default:
7982 return 0;
7986 else if (GET_CODE (addr) == MULT)
7988 index = XEXP (addr, 0); /* index*scale */
7989 scale_rtx = XEXP (addr, 1);
7991 else if (GET_CODE (addr) == ASHIFT)
7993 rtx tmp;
7995 /* We're called for lea too, which implements ashift on occasion. */
7996 index = XEXP (addr, 0);
7997 tmp = XEXP (addr, 1);
7998 if (!CONST_INT_P (tmp))
7999 return 0;
8000 scale = INTVAL (tmp);
8001 if ((unsigned HOST_WIDE_INT) scale > 3)
8002 return 0;
8003 scale = 1 << scale;
8004 retval = -1;
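/* E.g. an lea operand (ashift (reg) (const_int 3)) denotes reg * 8:
   the shift count 3 becomes scale 1 << 3 == 8, and the -1 return value
   records that the form is not a strictly valid address.  */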
8006 else
8007 disp = addr; /* displacement */
8009 /* Extract the integral value of scale. */
8010 if (scale_rtx)
8012 if (!CONST_INT_P (scale_rtx))
8013 return 0;
8014 scale = INTVAL (scale_rtx);
8017 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
8018 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
8020 /* Allow the arg pointer and stack pointer as index if there is no scaling. */
8021 if (base_reg && index_reg && scale == 1
8022 && (index_reg == arg_pointer_rtx
8023 || index_reg == frame_pointer_rtx
8024 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
8026 rtx tmp;
8027 tmp = base, base = index, index = tmp;
8028 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
8031 /* Special case: %ebp cannot be encoded as a base without a displacement. */
8032 if ((base_reg == hard_frame_pointer_rtx
8033 || base_reg == frame_pointer_rtx
8034 || base_reg == arg_pointer_rtx) && !disp)
8035 disp = const0_rtx;
8037 /* Special case: on K6, [%esi] makes the instruction vector decoded.
8038 Avoid this by transforming to [%esi+0]. */
8039 if (TARGET_K6 && !optimize_size
8040 && base_reg && !index_reg && !disp
8041 && REG_P (base_reg)
8042 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
8043 disp = const0_rtx;
8045 /* Special case: encode reg+reg instead of reg*2. */
8046 if (!base && index && scale && scale == 2)
8047 base = index, base_reg = index_reg, scale = 1;
8049 /* Special case: scaling cannot be encoded without base or displacement. */
8050 if (!base && !disp && index && scale != 1)
8051 disp = const0_rtx;
8053 out->base = base;
8054 out->index = index;
8055 out->disp = disp;
8056 out->scale = scale;
8057 out->seg = seg;
8059 return retval;
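/* Decomposition example (illustrative): the canonical address
   (plus (plus (mult (reg B) (const_int 4)) (reg A)) (const_int 12)),
   i.e. 12(A,B,4), fills OUT with base = A, index = B, scale = 4,
   disp = 12 and seg = SEG_DEFAULT, and returns 1.  */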
8062 /* Return the cost of the memory address X.
8063 For i386, it is better to use a complex address than let gcc copy
8064 the address into a reg and make a new pseudo.  But not if the address
8065 requires two regs - that would mean more pseudos with longer
8066 lifetimes. */
8067 static int
8068 ix86_address_cost (rtx x)
8070 struct ix86_address parts;
8071 int cost = 1;
8072 int ok = ix86_decompose_address (x, &parts);
8074 gcc_assert (ok);
8076 if (parts.base && GET_CODE (parts.base) == SUBREG)
8077 parts.base = SUBREG_REG (parts.base);
8078 if (parts.index && GET_CODE (parts.index) == SUBREG)
8079 parts.index = SUBREG_REG (parts.index);
8081 /* Attempt to minimize number of registers in the address. */
8082 if ((parts.base
8083 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
8084 || (parts.index
8085 && (!REG_P (parts.index)
8086 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
8087 cost++;
8089 if (parts.base
8090 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
8091 && parts.index
8092 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
8093 && parts.base != parts.index)
8094 cost++;
8096 /* The AMD-K6 doesn't like addresses with the ModR/M byte set to
8097 00_xxx_100b, since its predecode logic can't detect the length of
8098 instructions and decoding degenerates to vector decoded.  Increase the
8099 cost of such addresses here.  The penalty is at least 2 cycles.  It may
8100 be worthwhile to split such addresses or even refuse them altogether.
8102 The following addressing modes are affected:
8103 [base+scale*index]
8104 [scale*index+disp]
8105 [base+index]
8107 The first and last cases may be avoidable by explicitly coding a zero
8108 displacement in the memory address, but I don't have an AMD-K6 machine
8109 handy to check this theory. */
8111 if (TARGET_K6
8112 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
8113 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
8114 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
8115 cost += 10;
8117 return cost;
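/* Cost example (a sketch): an address whose base is a hard register,
   such as 8(%ebx), keeps the base cost of 1; a base that is still a
   pseudo costs 2, and combining two distinct pseudos as base and index
   adds one more, steering the optimizers toward fewer address
   registers.  */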
8120 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
8121 this is used to form addresses to local data when -fPIC is in
8122 use. */
8124 static bool
8125 darwin_local_data_pic (rtx disp)
8127 if (GET_CODE (disp) == MINUS)
8129 if (GET_CODE (XEXP (disp, 0)) == LABEL_REF
8130 || GET_CODE (XEXP (disp, 0)) == SYMBOL_REF)
8131 if (GET_CODE (XEXP (disp, 1)) == SYMBOL_REF)
8133 const char *sym_name = XSTR (XEXP (disp, 1), 0);
8134 if (! strcmp (sym_name, "<pic base>"))
8135 return true;
8139 return false;
8142 /* Determine if a given RTX is a valid constant. We already know this
8143 satisfies CONSTANT_P. */
8145 bool
8146 legitimate_constant_p (rtx x)
8148 switch (GET_CODE (x))
8150 case CONST:
8151 x = XEXP (x, 0);
8153 if (GET_CODE (x) == PLUS)
8155 if (!CONST_INT_P (XEXP (x, 1)))
8156 return false;
8157 x = XEXP (x, 0);
8160 if (TARGET_MACHO && darwin_local_data_pic (x))
8161 return true;
8163 /* Only some unspecs are valid as "constants". */
8164 if (GET_CODE (x) == UNSPEC)
8165 switch (XINT (x, 1))
8167 case UNSPEC_GOT:
8168 case UNSPEC_GOTOFF:
8169 case UNSPEC_PLTOFF:
8170 return TARGET_64BIT;
8171 case UNSPEC_TPOFF:
8172 case UNSPEC_NTPOFF:
8173 x = XVECEXP (x, 0, 0);
8174 return (GET_CODE (x) == SYMBOL_REF
8175 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
8176 case UNSPEC_DTPOFF:
8177 x = XVECEXP (x, 0, 0);
8178 return (GET_CODE (x) == SYMBOL_REF
8179 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
8180 default:
8181 return false;
8184 /* We must have drilled down to a symbol. */
8185 if (GET_CODE (x) == LABEL_REF)
8186 return true;
8187 if (GET_CODE (x) != SYMBOL_REF)
8188 return false;
8189 /* FALLTHRU */
8191 case SYMBOL_REF:
8192 /* TLS symbols are never valid. */
8193 if (SYMBOL_REF_TLS_MODEL (x))
8194 return false;
8196 /* DLLIMPORT symbols are never valid. */
8197 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
8198 && SYMBOL_REF_DLLIMPORT_P (x))
8199 return false;
8200 break;
8202 case CONST_DOUBLE:
8203 if (GET_MODE (x) == TImode
8204 && x != CONST0_RTX (TImode)
8205 && !TARGET_64BIT)
8206 return false;
8207 break;
8209 case CONST_VECTOR:
8210 if (x == CONST0_RTX (GET_MODE (x)))
8211 return true;
8212 return false;
8214 default:
8215 break;
8218 /* Otherwise we handle everything else in the move patterns. */
8219 return true;
8222 /* Determine if it's legal to put X into the constant pool. This
8223 is not possible for the address of thread-local symbols, which
8224 is checked above. */
8226 static bool
8227 ix86_cannot_force_const_mem (rtx x)
8229 /* We can always put integral constants and vectors in memory. */
8230 switch (GET_CODE (x))
8232 case CONST_INT:
8233 case CONST_DOUBLE:
8234 case CONST_VECTOR:
8235 return false;
8237 default:
8238 break;
8240 return !legitimate_constant_p (x);
8243 /* Determine if a given RTX is a valid constant address. */
8245 bool
8246 constant_address_p (rtx x)
8248 return CONSTANT_P (x) && legitimate_address_p (Pmode, x, 1);
8251 /* Nonzero if the constant value X is a legitimate general operand
8252 when generating PIC code. It is given that flag_pic is on and
8253 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
8255 bool
8256 legitimate_pic_operand_p (rtx x)
8258 rtx inner;
8260 switch (GET_CODE (x))
8262 case CONST:
8263 inner = XEXP (x, 0);
8264 if (GET_CODE (inner) == PLUS
8265 && CONST_INT_P (XEXP (inner, 1)))
8266 inner = XEXP (inner, 0);
8268 /* Only some unspecs are valid as "constants". */
8269 if (GET_CODE (inner) == UNSPEC)
8270 switch (XINT (inner, 1))
8272 case UNSPEC_GOT:
8273 case UNSPEC_GOTOFF:
8274 case UNSPEC_PLTOFF:
8275 return TARGET_64BIT;
8276 case UNSPEC_TPOFF:
8277 x = XVECEXP (inner, 0, 0);
8278 return (GET_CODE (x) == SYMBOL_REF
8279 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
8280 default:
8281 return false;
8283 /* FALLTHRU */
8285 case SYMBOL_REF:
8286 case LABEL_REF:
8287 return legitimate_pic_address_disp_p (x);
8289 default:
8290 return true;
8294 /* Determine if a given CONST RTX is a valid memory displacement
8295 in PIC mode. */
8297 int
8298 legitimate_pic_address_disp_p (rtx disp)
8300 bool saw_plus;
8302 /* In 64bit mode we can allow direct addresses of symbols and labels
8303 when they are not dynamic symbols. */
8304 if (TARGET_64BIT)
8306 rtx op0 = disp, op1;
8308 switch (GET_CODE (disp))
8310 case LABEL_REF:
8311 return true;
8313 case CONST:
8314 if (GET_CODE (XEXP (disp, 0)) != PLUS)
8315 break;
8316 op0 = XEXP (XEXP (disp, 0), 0);
8317 op1 = XEXP (XEXP (disp, 0), 1);
8318 if (!CONST_INT_P (op1)
8319 || INTVAL (op1) >= 16*1024*1024
8320 || INTVAL (op1) < -16*1024*1024)
8321 break;
8322 if (GET_CODE (op0) == LABEL_REF)
8323 return true;
8324 if (GET_CODE (op0) != SYMBOL_REF)
8325 break;
8326 /* FALLTHRU */
8328 case SYMBOL_REF:
8329 /* TLS references should always be enclosed in UNSPEC. */
8330 if (SYMBOL_REF_TLS_MODEL (op0))
8331 return false;
8332 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
8333 && ix86_cmodel != CM_LARGE_PIC)
8334 return true;
8335 break;
8337 default:
8338 break;
8341 if (GET_CODE (disp) != CONST)
8342 return 0;
8343 disp = XEXP (disp, 0);
8345 if (TARGET_64BIT)
8347 /* It is unsafe to allow PLUS expressions; this limits the allowed
8348 distance of GOT references.  We should not need these anyway. */
8349 if (GET_CODE (disp) != UNSPEC
8350 || (XINT (disp, 1) != UNSPEC_GOTPCREL
8351 && XINT (disp, 1) != UNSPEC_GOTOFF
8352 && XINT (disp, 1) != UNSPEC_PLTOFF))
8353 return 0;
8355 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
8356 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
8357 return 0;
8358 return 1;
8361 saw_plus = false;
8362 if (GET_CODE (disp) == PLUS)
8364 if (!CONST_INT_P (XEXP (disp, 1)))
8365 return 0;
8366 disp = XEXP (disp, 0);
8367 saw_plus = true;
8370 if (TARGET_MACHO && darwin_local_data_pic (disp))
8371 return 1;
8373 if (GET_CODE (disp) != UNSPEC)
8374 return 0;
8376 switch (XINT (disp, 1))
8378 case UNSPEC_GOT:
8379 if (saw_plus)
8380 return false;
8381 /* We need to check for both symbols and labels because VxWorks loads
8382 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
8383 details. */
8384 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
8385 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
8386 case UNSPEC_GOTOFF:
8387 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
8388 While the ABI also specifies a 32bit relocation, we don't produce
8389 it in the small PIC model at all. */
8390 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
8391 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
8392 && !TARGET_64BIT)
8393 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
8394 return false;
8395 case UNSPEC_GOTTPOFF:
8396 case UNSPEC_GOTNTPOFF:
8397 case UNSPEC_INDNTPOFF:
8398 if (saw_plus)
8399 return false;
8400 disp = XVECEXP (disp, 0, 0);
8401 return (GET_CODE (disp) == SYMBOL_REF
8402 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
8403 case UNSPEC_NTPOFF:
8404 disp = XVECEXP (disp, 0, 0);
8405 return (GET_CODE (disp) == SYMBOL_REF
8406 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
8407 case UNSPEC_DTPOFF:
8408 disp = XVECEXP (disp, 0, 0);
8409 return (GET_CODE (disp) == SYMBOL_REF
8410 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
8413 return 0;
8416 /* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression that is a valid
8417 memory address for an instruction. The MODE argument is the machine mode
8418 for the MEM expression that wants to use this address.
8420 It only recognizes addresses in canonical form.  LEGITIMIZE_ADDRESS should
8421 convert common non-canonical forms to canonical form so that they will
8422 be recognized. */
8424 int
8425 legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
8426 rtx addr, int strict)
8428 struct ix86_address parts;
8429 rtx base, index, disp;
8430 HOST_WIDE_INT scale;
8431 const char *reason = NULL;
8432 rtx reason_rtx = NULL_RTX;
8434 if (ix86_decompose_address (addr, &parts) <= 0)
8436 reason = "decomposition failed";
8437 goto report_error;
8440 base = parts.base;
8441 index = parts.index;
8442 disp = parts.disp;
8443 scale = parts.scale;
8445 /* Validate base register.
8447 Don't allow SUBREG's that span more than a word here. It can lead to spill
8448 failures when the base is one word out of a two word structure, which is
8449 represented internally as a DImode int. */
8451 if (base)
8453 rtx reg;
8454 reason_rtx = base;
8456 if (REG_P (base))
8457 reg = base;
8458 else if (GET_CODE (base) == SUBREG
8459 && REG_P (SUBREG_REG (base))
8460 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
8461 <= UNITS_PER_WORD)
8462 reg = SUBREG_REG (base);
8463 else
8465 reason = "base is not a register";
8466 goto report_error;
8469 if (GET_MODE (base) != Pmode)
8471 reason = "base is not in Pmode";
8472 goto report_error;
8475 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
8476 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
8478 reason = "base is not valid";
8479 goto report_error;
8483 /* Validate index register.
8485 Don't allow SUBREG's that span more than a word here -- same as above. */
8487 if (index)
8489 rtx reg;
8490 reason_rtx = index;
8492 if (REG_P (index))
8493 reg = index;
8494 else if (GET_CODE (index) == SUBREG
8495 && REG_P (SUBREG_REG (index))
8496 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
8497 <= UNITS_PER_WORD)
8498 reg = SUBREG_REG (index);
8499 else
8501 reason = "index is not a register";
8502 goto report_error;
8505 if (GET_MODE (index) != Pmode)
8507 reason = "index is not in Pmode";
8508 goto report_error;
8511 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
8512 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
8514 reason = "index is not valid";
8515 goto report_error;
8519 /* Validate scale factor. */
8520 if (scale != 1)
8522 reason_rtx = GEN_INT (scale);
8523 if (!index)
8525 reason = "scale without index";
8526 goto report_error;
8529 if (scale != 2 && scale != 4 && scale != 8)
8531 reason = "scale is not a valid multiplier";
8532 goto report_error;
8536 /* Validate displacement. */
8537 if (disp)
8539 reason_rtx = disp;
8541 if (GET_CODE (disp) == CONST
8542 && GET_CODE (XEXP (disp, 0)) == UNSPEC)
8543 switch (XINT (XEXP (disp, 0), 1))
8545 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit
8546 when used.  While the ABI also specifies 32bit relocations, we
8547 don't produce them at all and use IP relative addressing instead. */
8548 case UNSPEC_GOT:
8549 case UNSPEC_GOTOFF:
8550 gcc_assert (flag_pic);
8551 if (!TARGET_64BIT)
8552 goto is_legitimate_pic;
8553 reason = "64bit address unspec";
8554 goto report_error;
8556 case UNSPEC_GOTPCREL:
8557 gcc_assert (flag_pic);
8558 goto is_legitimate_pic;
8560 case UNSPEC_GOTTPOFF:
8561 case UNSPEC_GOTNTPOFF:
8562 case UNSPEC_INDNTPOFF:
8563 case UNSPEC_NTPOFF:
8564 case UNSPEC_DTPOFF:
8565 break;
8567 default:
8568 reason = "invalid address unspec";
8569 goto report_error;
8572 else if (SYMBOLIC_CONST (disp)
8573 && (flag_pic
8574 || (TARGET_MACHO
8575 #if TARGET_MACHO
8576 && MACHOPIC_INDIRECT
8577 && !machopic_operand_p (disp)
8578 #endif
8579 )))
8582 is_legitimate_pic:
8583 if (TARGET_64BIT && (index || base))
8585 /* foo@dtpoff(%rX) is ok. */
8586 if (GET_CODE (disp) != CONST
8587 || GET_CODE (XEXP (disp, 0)) != PLUS
8588 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
8589 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
8590 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
8591 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
8593 reason = "non-constant pic memory reference";
8594 goto report_error;
8597 else if (! legitimate_pic_address_disp_p (disp))
8599 reason = "displacement is an invalid pic construct";
8600 goto report_error;
8603 /* This code used to verify that a symbolic pic displacement
8604 includes the pic_offset_table_rtx register.
8606 While this is a good idea, unfortunately these constructs may
8607 be created by the "adds using lea" optimization for incorrect
8608 code like:
8610 int a;
8611 int foo(int i)
8612 {
8613 return *(&a+i);
8614 }
8616 This code is nonsensical, but results in addressing the
8617 GOT table with a pic_offset_table_rtx base.  We can't
8618 just refuse it easily, since it gets matched by the
8619 "addsi3" pattern, which later gets split to lea when the
8620 output register differs from the input.  While this
8621 could be handled by a separate addsi pattern for this case,
8622 one that never results in lea, disabling this test seems to be
8623 the easier and correct fix for the crash. */
8625 else if (GET_CODE (disp) != LABEL_REF
8626 && !CONST_INT_P (disp)
8627 && (GET_CODE (disp) != CONST
8628 || !legitimate_constant_p (disp))
8629 && (GET_CODE (disp) != SYMBOL_REF
8630 || !legitimate_constant_p (disp)))
8632 reason = "displacement is not constant";
8633 goto report_error;
8635 else if (TARGET_64BIT
8636 && !x86_64_immediate_operand (disp, VOIDmode))
8638 reason = "displacement is out of range";
8639 goto report_error;
8643 /* Everything looks valid. */
8644 return TRUE;
8646 report_error:
8647 return FALSE;
8650 /* Return a unique alias set for the GOT. */
8652 static alias_set_type
8653 ix86_GOT_alias_set (void)
8655 static alias_set_type set = -1;
8656 if (set == -1)
8657 set = new_alias_set ();
8658 return set;
8661 /* Return a legitimate reference for ORIG (an address) using the
8662 register REG. If REG is 0, a new pseudo is generated.
8664 There are two types of references that must be handled:
8666 1. Global data references must load the address from the GOT, via
8667 the PIC reg. An insn is emitted to do this load, and the reg is
8668 returned.
8670 2. Static data references, constant pool addresses, and code labels
8671 compute the address as an offset from the GOT, whose base is in
8672 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
8673 differentiate them from global data objects. The returned
8674 address is the PIC reg + an unspec constant.
8676 GO_IF_LEGITIMATE_ADDRESS rejects symbolic references unless the PIC
8677 reg also appears in the address. */
8679 static rtx
8680 legitimize_pic_address (rtx orig, rtx reg)
8682 rtx addr = orig;
8683 rtx new_rtx = orig;
8684 rtx base;
8686 #if TARGET_MACHO
8687 if (TARGET_MACHO && !TARGET_64BIT)
8689 if (reg == 0)
8690 reg = gen_reg_rtx (Pmode);
8691 /* Use the generic Mach-O PIC machinery. */
8692 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
8694 #endif
8696 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
8697 new_rtx = addr;
8698 else if (TARGET_64BIT
8699 && ix86_cmodel != CM_SMALL_PIC
8700 && gotoff_operand (addr, Pmode))
8702 rtx tmpreg;
8703 /* This symbol may be referenced via a displacement from the PIC
8704 base address (@GOTOFF). */
8706 if (reload_in_progress)
8707 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
8708 if (GET_CODE (addr) == CONST)
8709 addr = XEXP (addr, 0);
8710 if (GET_CODE (addr) == PLUS)
8712 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
8713 UNSPEC_GOTOFF);
8714 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
8716 else
8717 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
8718 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
8719 if (!reg)
8720 tmpreg = gen_reg_rtx (Pmode);
8721 else
8722 tmpreg = reg;
8723 emit_move_insn (tmpreg, new_rtx);
8725 if (reg != 0)
8727 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
8728 tmpreg, 1, OPTAB_DIRECT);
8729 new_rtx = reg;
8731 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
8733 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
8735 /* This symbol may be referenced via a displacement from the PIC
8736 base address (@GOTOFF). */
8738 if (reload_in_progress)
8739 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
8740 if (GET_CODE (addr) == CONST)
8741 addr = XEXP (addr, 0);
8742 if (GET_CODE (addr) == PLUS)
8744 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
8745 UNSPEC_GOTOFF);
8746 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
8748 else
8749 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
8750 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
8751 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
8753 if (reg != 0)
8755 emit_move_insn (reg, new_rtx);
8756 new_rtx = reg;
8759 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
8760 /* We can't use @GOTOFF for text labels on VxWorks;
8761 see gotoff_operand. */
8762 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
8764 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
8766 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
8767 return legitimize_dllimport_symbol (addr, true);
8768 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
8769 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
8770 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
8772 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
8773 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
8777 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
8779 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
8780 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
8781 new_rtx = gen_const_mem (Pmode, new_rtx);
8782 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
8784 if (reg == 0)
8785 reg = gen_reg_rtx (Pmode);
8786 /* Use gen_movsi directly, otherwise the address is loaded
8787 into a register for CSE.  We don't want to CSE these addresses;
8788 instead we CSE addresses from the GOT table, so skip this. */
8789 emit_insn (gen_movsi (reg, new_rtx));
8790 new_rtx = reg;
8792 else
8794 /* This symbol must be referenced via a load from the
8795 Global Offset Table (@GOT). */
8797 if (reload_in_progress)
8798 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
8799 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
8800 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
8801 if (TARGET_64BIT)
8802 new_rtx = force_reg (Pmode, new_rtx);
8803 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
8804 new_rtx = gen_const_mem (Pmode, new_rtx);
8805 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
8807 if (reg == 0)
8808 reg = gen_reg_rtx (Pmode);
8809 emit_move_insn (reg, new_rtx);
8810 new_rtx = reg;
8813 else
8815 if (CONST_INT_P (addr)
8816 && !x86_64_immediate_operand (addr, VOIDmode))
8818 if (reg)
8820 emit_move_insn (reg, addr);
8821 new_rtx = reg;
8823 else
8824 new_rtx = force_reg (Pmode, addr);
8826 else if (GET_CODE (addr) == CONST)
8828 addr = XEXP (addr, 0);
8830 /* We must match stuff we generate before. Assume the only
8831 unspecs that can get here are ours. Not that we could do
8832 anything with them anyway.... */
8833 if (GET_CODE (addr) == UNSPEC
8834 || (GET_CODE (addr) == PLUS
8835 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
8836 return orig;
8837 gcc_assert (GET_CODE (addr) == PLUS);
8839 if (GET_CODE (addr) == PLUS)
8841 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
8843 /* Check first to see if this is a constant offset from a @GOTOFF
8844 symbol reference. */
8845 if (gotoff_operand (op0, Pmode)
8846 && CONST_INT_P (op1))
8848 if (!TARGET_64BIT)
8850 if (reload_in_progress)
8851 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
8852 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
8853 UNSPEC_GOTOFF);
8854 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
8855 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
8856 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
8858 if (reg != 0)
8860 emit_move_insn (reg, new_rtx);
8861 new_rtx = reg;
8864 else
8866 if (INTVAL (op1) < -16*1024*1024
8867 || INTVAL (op1) >= 16*1024*1024)
8869 if (!x86_64_immediate_operand (op1, Pmode))
8870 op1 = force_reg (Pmode, op1);
8871 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
8875 else
8877 base = legitimize_pic_address (XEXP (addr, 0), reg);
8878 new_rtx = legitimize_pic_address (XEXP (addr, 1),
8879 base == reg ? NULL_RTX : reg);
8881 if (CONST_INT_P (new_rtx))
8882 new_rtx = plus_constant (base, INTVAL (new_rtx));
8883 else
8885 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
8887 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
8888 new_rtx = XEXP (new_rtx, 1);
8890 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
8895 return new_rtx;
8898 /* Load the thread pointer. If TO_REG is true, force it into a register. */
8900 static rtx
8901 get_thread_pointer (int to_reg)
8903 rtx tp, reg, insn;
8905 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
8906 if (!to_reg)
8907 return tp;
8909 reg = gen_reg_rtx (Pmode);
8910 insn = gen_rtx_SET (VOIDmode, reg, tp);
8911 insn = emit_insn (insn);
8913 return reg;
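/* Illustrative note: the UNSPEC_TP wrapper built here is the same form
   that ix86_decompose_address recognizes above to select the %fs (64bit)
   or %gs (32bit) segment, so with TARGET_TLS_DIRECT_SEG_REFS a TLS
   access can fold the thread pointer into the memory operand rather
   than occupying a register.  */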
8916 /* A subroutine of legitimize_address and ix86_expand_move. FOR_MOV is
8917 false if we expect this to be used for a memory address and true if
8918 we expect to load the address into a register. */
8920 static rtx
8921 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
8923 rtx dest, base, off, pic, tp;
8924 int type;
8926 switch (model)
8928 case TLS_MODEL_GLOBAL_DYNAMIC:
8929 dest = gen_reg_rtx (Pmode);
8930 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
8932 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
8934 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
8936 start_sequence ();
8937 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
8938 insns = get_insns ();
8939 end_sequence ();
8941 RTL_CONST_CALL_P (insns) = 1;
8942 emit_libcall_block (insns, dest, rax, x);
8944 else if (TARGET_64BIT && TARGET_GNU2_TLS)
8945 emit_insn (gen_tls_global_dynamic_64 (dest, x));
8946 else
8947 emit_insn (gen_tls_global_dynamic_32 (dest, x));
8949 if (TARGET_GNU2_TLS)
8951 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
8953 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
8955 break;
8957 case TLS_MODEL_LOCAL_DYNAMIC:
8958 base = gen_reg_rtx (Pmode);
8959 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
8961 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
8963 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
8965 start_sequence ();
8966 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
8967 insns = get_insns ();
8968 end_sequence ();
8970 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
8971 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
8972 RTL_CONST_CALL_P (insns) = 1;
8973 emit_libcall_block (insns, base, rax, note);
8975 else if (TARGET_64BIT && TARGET_GNU2_TLS)
8976 emit_insn (gen_tls_local_dynamic_base_64 (base));
8977 else
8978 emit_insn (gen_tls_local_dynamic_base_32 (base));
8980 if (TARGET_GNU2_TLS)
8982 rtx x = ix86_tls_module_base ();
8984 set_unique_reg_note (get_last_insn (), REG_EQUIV,
8985 gen_rtx_MINUS (Pmode, x, tp));
8988 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
8989 off = gen_rtx_CONST (Pmode, off);
8991 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
8993 if (TARGET_GNU2_TLS)
8995 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
8997 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
9000 break;
9002 case TLS_MODEL_INITIAL_EXEC:
9003 if (TARGET_64BIT)
9005 pic = NULL;
9006 type = UNSPEC_GOTNTPOFF;
9008 else if (flag_pic)
9010 if (reload_in_progress)
9011 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9012 pic = pic_offset_table_rtx;
9013 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
9015 else if (!TARGET_ANY_GNU_TLS)
9017 pic = gen_reg_rtx (Pmode);
9018 emit_insn (gen_set_got (pic));
9019 type = UNSPEC_GOTTPOFF;
9021 else
9023 pic = NULL;
9024 type = UNSPEC_INDNTPOFF;
9027 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
9028 off = gen_rtx_CONST (Pmode, off);
9029 if (pic)
9030 off = gen_rtx_PLUS (Pmode, pic, off);
9031 off = gen_const_mem (Pmode, off);
9032 set_mem_alias_set (off, ix86_GOT_alias_set ());
9034 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9036 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9037 off = force_reg (Pmode, off);
9038 return gen_rtx_PLUS (Pmode, base, off);
9040 else
9042 base = get_thread_pointer (true);
9043 dest = gen_reg_rtx (Pmode);
9044 emit_insn (gen_subsi3 (dest, base, off));
9046 break;
9048 case TLS_MODEL_LOCAL_EXEC:
9049 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
9050 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9051 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
9052 off = gen_rtx_CONST (Pmode, off);
9054 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
9056 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
9057 return gen_rtx_PLUS (Pmode, base, off);
9059 else
9061 base = get_thread_pointer (true);
9062 dest = gen_reg_rtx (Pmode);
9063 emit_insn (gen_subsi3 (dest, base, off));
9065 break;
9067 default:
9068 gcc_unreachable ();
9071 return dest;
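/* Orientation sketch (assumed typical GNU/Linux code generation, not a
   guarantee of the exact insns chosen): local-exec produces sequences
   along the lines of "movl %gs:0, %eax" plus an @ntpoff addend,
   initial-exec first loads the offset from the GOT (@gottpoff), and
   the two dynamic models above end up calling __tls_get_addr.  */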
9074 /* Create or return the unique __imp_DECL dllimport symbol corresponding
9075 to symbol DECL. */
9077 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
9078 htab_t dllimport_map;
9080 static tree
9081 get_dllimport_decl (tree decl)
9083 struct tree_map *h, in;
9084 void **loc;
9085 const char *name;
9086 const char *prefix;
9087 size_t namelen, prefixlen;
9088 char *imp_name;
9089 tree to;
9090 rtx rtl;
9092 if (!dllimport_map)
9093 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
9095 in.hash = htab_hash_pointer (decl);
9096 in.base.from = decl;
9097 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
9098 h = (struct tree_map *) *loc;
9099 if (h)
9100 return h->to;
9102 *loc = h = GGC_NEW (struct tree_map);
9103 h->hash = in.hash;
9104 h->base.from = decl;
9105 h->to = to = build_decl (VAR_DECL, NULL, ptr_type_node);
9106 DECL_ARTIFICIAL (to) = 1;
9107 DECL_IGNORED_P (to) = 1;
9108 DECL_EXTERNAL (to) = 1;
9109 TREE_READONLY (to) = 1;
9111 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
9112 name = targetm.strip_name_encoding (name);
9113 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
9114 ? "*__imp_" : "*__imp__";
9115 namelen = strlen (name);
9116 prefixlen = strlen (prefix);
9117 imp_name = (char *) alloca (namelen + prefixlen + 1);
9118 memcpy (imp_name, prefix, prefixlen);
9119 memcpy (imp_name + prefixlen, name, namelen + 1);
9121 name = ggc_alloc_string (imp_name, namelen + prefixlen);
9122 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
9123 SET_SYMBOL_REF_DECL (rtl, to);
9124 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
9126 rtl = gen_const_mem (Pmode, rtl);
9127 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
9129 SET_DECL_RTL (to, rtl);
9130 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
9132 return to;
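/* Naming example (illustrative): for a dllimport symbol bar on a target
   whose user_label_prefix is "_", the code above builds "*__imp__bar";
   with an empty user_label_prefix it builds "*__imp_bar".  The leading
   '*' tells the assembler-name machinery not to prefix it again.  */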
9135 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
9136 true if we require the result be a register. */
9138 static rtx
9139 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
9141 tree imp_decl;
9142 rtx x;
9144 gcc_assert (SYMBOL_REF_DECL (symbol));
9145 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
9147 x = DECL_RTL (imp_decl);
9148 if (want_reg)
9149 x = force_reg (Pmode, x);
9150 return x;
9153 /* Try machine-dependent ways of modifying an illegitimate address
9154 to be legitimate. If we find one, return the new, valid address.
9155 This macro is used in only one place: `memory_address' in explow.c.
9157 OLDX is the address as it was before break_out_memory_refs was called.
9158 In some cases it is useful to look at this to decide what needs to be done.
9160 MODE and WIN are passed so that this macro can use
9161 GO_IF_LEGITIMATE_ADDRESS.
9163 It is always safe for this macro to do nothing. It exists to recognize
9164 opportunities to optimize the output.
9166 For the 80386, we handle X+REG by loading X into a register R and
9167 using R+REG. R will go in a general reg and indexing will be used.
9168 However, if REG is a broken-out memory address or multiplication,
9169 nothing needs to be done because REG can certainly go in a general reg.
9171 When -fpic is used, special handling is needed for symbolic references.
9172 See comments by legitimize_pic_address in i386.c for details. */
9174 rtx
9175 legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
9177 int changed = 0;
9178 unsigned log;
9180 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
9181 if (log)
9182 return legitimize_tls_address (x, (enum tls_model) log, false);
9183 if (GET_CODE (x) == CONST
9184 && GET_CODE (XEXP (x, 0)) == PLUS
9185 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9186 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
9188 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
9189 (enum tls_model) log, false);
9190 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9193 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9195 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
9196 return legitimize_dllimport_symbol (x, true);
9197 if (GET_CODE (x) == CONST
9198 && GET_CODE (XEXP (x, 0)) == PLUS
9199 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9200 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
9202 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
9203 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
9207 if (flag_pic && SYMBOLIC_CONST (x))
9208 return legitimize_pic_address (x, 0);
9210 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
9211 if (GET_CODE (x) == ASHIFT
9212 && CONST_INT_P (XEXP (x, 1))
9213 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
9215 changed = 1;
9216 log = INTVAL (XEXP (x, 1));
9217 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
9218 GEN_INT (1 << log));
9221 if (GET_CODE (x) == PLUS)
9223 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
9225 if (GET_CODE (XEXP (x, 0)) == ASHIFT
9226 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9227 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
9229 changed = 1;
9230 log = INTVAL (XEXP (XEXP (x, 0), 1));
9231 XEXP (x, 0) = gen_rtx_MULT (Pmode,
9232 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
9233 GEN_INT (1 << log));
9236 if (GET_CODE (XEXP (x, 1)) == ASHIFT
9237 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
9238 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
9240 changed = 1;
9241 log = INTVAL (XEXP (XEXP (x, 1), 1));
9242 XEXP (x, 1) = gen_rtx_MULT (Pmode,
9243 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
9244 GEN_INT (1 << log));
9247 /* Put multiply first if it isn't already. */
9248 if (GET_CODE (XEXP (x, 1)) == MULT)
9250 rtx tmp = XEXP (x, 0);
9251 XEXP (x, 0) = XEXP (x, 1);
9252 XEXP (x, 1) = tmp;
9253 changed = 1;
9256 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
9257 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
9258 created by virtual register instantiation, register elimination, and
9259 similar optimizations. */
9260 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
9262 changed = 1;
9263 x = gen_rtx_PLUS (Pmode,
9264 gen_rtx_PLUS (Pmode, XEXP (x, 0),
9265 XEXP (XEXP (x, 1), 0)),
9266 XEXP (XEXP (x, 1), 1));
9269 /* Canonicalize
9270 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
9271 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
9272 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
9273 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
9274 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
9275 && CONSTANT_P (XEXP (x, 1)))
9277 rtx constant;
9278 rtx other = NULL_RTX;
9280 if (CONST_INT_P (XEXP (x, 1)))
9282 constant = XEXP (x, 1);
9283 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
9285 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
9287 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
9288 other = XEXP (x, 1);
9290 else
9291 constant = 0;
9293 if (constant)
9295 changed = 1;
9296 x = gen_rtx_PLUS (Pmode,
9297 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
9298 XEXP (XEXP (XEXP (x, 0), 1), 0)),
9299 plus_constant (other, INTVAL (constant)));
9303 if (changed && legitimate_address_p (mode, x, FALSE))
9304 return x;
9306 if (GET_CODE (XEXP (x, 0)) == MULT)
9308 changed = 1;
9309 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
9312 if (GET_CODE (XEXP (x, 1)) == MULT)
9314 changed = 1;
9315 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
9318 if (changed
9319 && REG_P (XEXP (x, 1))
9320 && REG_P (XEXP (x, 0)))
9321 return x;
9323 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
9325 changed = 1;
9326 x = legitimize_pic_address (x, 0);
9329 if (changed && legitimate_address_p (mode, x, FALSE))
9330 return x;
9332 if (REG_P (XEXP (x, 0)))
9334 rtx temp = gen_reg_rtx (Pmode);
9335 rtx val = force_operand (XEXP (x, 1), temp);
9336 if (val != temp)
9337 emit_move_insn (temp, val);
9339 XEXP (x, 1) = temp;
9340 return x;
9343 else if (REG_P (XEXP (x, 1)))
9345 rtx temp = gen_reg_rtx (Pmode);
9346 rtx val = force_operand (XEXP (x, 0), temp);
9347 if (val != temp)
9348 emit_move_insn (temp, val);
9350 XEXP (x, 0) = temp;
9351 return x;
9355 return x;
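/* A worked example with made-up pseudo register numbers: given
   (plus (reg:SI 58) (ashift (reg:SI 59) (const_int 2))), the ASHIFT is
   rewritten as (mult (reg:SI 59) (const_int 4)), the multiply is swapped
   into the first position, and the result
   (plus (mult (reg:SI 59) (const_int 4)) (reg:SI 58))
   is a legitimate base + index*scale address, so it is returned as soon
   as legitimate_address_p accepts it.  */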
9358 /* Print an integer constant expression in assembler syntax. Addition
9359 and subtraction are the only arithmetic that may appear in these
9360 expressions. FILE is the stdio stream to write to, X is the rtx, and
9361 CODE is the operand print code from the output string. */
9363 static void
9364 output_pic_addr_const (FILE *file, rtx x, int code)
9366 char buf[256];
9368 switch (GET_CODE (x))
9370 case PC:
9371 gcc_assert (flag_pic);
9372 putc ('.', file);
9373 break;
9375 case SYMBOL_REF:
9376 if (! TARGET_MACHO || TARGET_64BIT)
9377 output_addr_const (file, x);
9378 else
9380 const char *name = XSTR (x, 0);
9382 /* Mark the decl as referenced so that cgraph will
9383 output the function. */
9384 if (SYMBOL_REF_DECL (x))
9385 mark_decl_referenced (SYMBOL_REF_DECL (x));
9387 #if TARGET_MACHO
9388 if (MACHOPIC_INDIRECT
9389 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
9390 name = machopic_indirection_name (x, /*stub_p=*/true);
9391 #endif
9392 assemble_name (file, name);
9394 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
9395 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
9396 fputs ("@PLT", file);
9397 break;
9399 case LABEL_REF:
9400 x = XEXP (x, 0);
9401 /* FALLTHRU */
9402 case CODE_LABEL:
9403 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
9404 assemble_name (asm_out_file, buf);
9405 break;
9407 case CONST_INT:
9408 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
9409 break;
9411 case CONST:
9412 /* This used to output parentheses around the expression,
9413 but that does not work on the 386 (either ATT or BSD assembler). */
9414 output_pic_addr_const (file, XEXP (x, 0), code);
9415 break;
9417 case CONST_DOUBLE:
9418 if (GET_MODE (x) == VOIDmode)
9420 /* We can use %d if the number is <32 bits and positive. */
9421 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
9422 fprintf (file, "0x%lx%08lx",
9423 (unsigned long) CONST_DOUBLE_HIGH (x),
9424 (unsigned long) CONST_DOUBLE_LOW (x));
9425 else
9426 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
9428 else
9429 /* We can't handle floating point constants;
9430 PRINT_OPERAND must handle them. */
9431 output_operand_lossage ("floating constant misused");
9432 break;
9434 case PLUS:
9435 /* Some assemblers need integer constants to appear first. */
9436 if (CONST_INT_P (XEXP (x, 0)))
9438 output_pic_addr_const (file, XEXP (x, 0), code);
9439 putc ('+', file);
9440 output_pic_addr_const (file, XEXP (x, 1), code);
9442 else
9444 gcc_assert (CONST_INT_P (XEXP (x, 1)));
9445 output_pic_addr_const (file, XEXP (x, 1), code);
9446 putc ('+', file);
9447 output_pic_addr_const (file, XEXP (x, 0), code);
9449 break;
9451 case MINUS:
9452 if (!TARGET_MACHO)
9453 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
9454 output_pic_addr_const (file, XEXP (x, 0), code);
9455 putc ('-', file);
9456 output_pic_addr_const (file, XEXP (x, 1), code);
9457 if (!TARGET_MACHO)
9458 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
9459 break;
9461 case UNSPEC:
9462 gcc_assert (XVECLEN (x, 0) == 1);
9463 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
9464 switch (XINT (x, 1))
9466 case UNSPEC_GOT:
9467 fputs ("@GOT", file);
9468 break;
9469 case UNSPEC_GOTOFF:
9470 fputs ("@GOTOFF", file);
9471 break;
9472 case UNSPEC_PLTOFF:
9473 fputs ("@PLTOFF", file);
9474 break;
9475 case UNSPEC_GOTPCREL:
9476 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9477 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
9478 break;
9479 case UNSPEC_GOTTPOFF:
9480 /* FIXME: This might be @TPOFF in Sun ld too. */
9481 fputs ("@GOTTPOFF", file);
9482 break;
9483 case UNSPEC_TPOFF:
9484 fputs ("@TPOFF", file);
9485 break;
9486 case UNSPEC_NTPOFF:
9487 if (TARGET_64BIT)
9488 fputs ("@TPOFF", file);
9489 else
9490 fputs ("@NTPOFF", file);
9491 break;
9492 case UNSPEC_DTPOFF:
9493 fputs ("@DTPOFF", file);
9494 break;
9495 case UNSPEC_GOTNTPOFF:
9496 if (TARGET_64BIT)
9497 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
9498 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
9499 else
9500 fputs ("@GOTNTPOFF", file);
9501 break;
9502 case UNSPEC_INDNTPOFF:
9503 fputs ("@INDNTPOFF", file);
9504 break;
9505 default:
9506 output_operand_lossage ("invalid UNSPEC as operand");
9507 break;
9509 break;
9511 default:
9512 output_operand_lossage ("invalid expression as operand");
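/* Some sample outputs (illustrative): a non-local function symbol foo
   printed with code 'P' comes out as "foo@PLT"; a reference wrapped in
   UNSPEC_GOTOFF comes out as "foo@GOTOFF"; and an UNSPEC_GOTPCREL
   reference prints as "foo@GOTPCREL(%rip)" in AT&T syntax.  */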
9516 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
9517 We need to emit DTP-relative relocations. */
9519 static void ATTRIBUTE_UNUSED
9520 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
9522 fputs (ASM_LONG, file);
9523 output_addr_const (file, x);
9524 fputs ("@DTPOFF", file);
9525 switch (size)
9527 case 4:
9528 break;
9529 case 8:
9530 fputs (", 0", file);
9531 break;
9532 default:
9533 gcc_unreachable ();
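/* For example (assuming ASM_LONG expands to the usual "\t.long\t"):
   a 4-byte DTP-relative reference to symbol x is emitted as
   ".long x@DTPOFF", and the 8-byte variant appends a zero upper half:
   ".long x@DTPOFF, 0".  */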
9537 /* In the name of slightly smaller debug output, and to cater to
9538 general assembler lossage, recognize PIC+GOTOFF and turn it back
9539 into a direct symbol reference.
9541 On Darwin, this is necessary to avoid a crash, because Darwin
9542 has a different PIC label for each routine but the DWARF debugging
9543 information is not associated with any particular routine, so it's
9544 necessary to remove references to the PIC label from RTL stored by
9545 the DWARF output code. */
9547 static rtx
9548 ix86_delegitimize_address (rtx orig_x)
9550 rtx x = orig_x;
9551 /* reg_addend is NULL or a multiple of some register. */
9552 rtx reg_addend = NULL_RTX;
9553 /* const_addend is NULL or a const_int. */
9554 rtx const_addend = NULL_RTX;
9555 /* This is the result, or NULL. */
9556 rtx result = NULL_RTX;
9558 if (MEM_P (x))
9559 x = XEXP (x, 0);
9561 if (TARGET_64BIT)
9563 if (GET_CODE (x) != CONST
9564 || GET_CODE (XEXP (x, 0)) != UNSPEC
9565 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
9566 || !MEM_P (orig_x))
9567 return orig_x;
9568 return XVECEXP (XEXP (x, 0), 0, 0);
9571 if (GET_CODE (x) != PLUS
9572 || GET_CODE (XEXP (x, 1)) != CONST)
9573 return orig_x;
9575 if (REG_P (XEXP (x, 0))
9576 && REGNO (XEXP (x, 0)) == PIC_OFFSET_TABLE_REGNUM)
9577 /* %ebx + GOT/GOTOFF */
9579 else if (GET_CODE (XEXP (x, 0)) == PLUS)
9581 /* %ebx + %reg * scale + GOT/GOTOFF */
9582 reg_addend = XEXP (x, 0);
9583 if (REG_P (XEXP (reg_addend, 0))
9584 && REGNO (XEXP (reg_addend, 0)) == PIC_OFFSET_TABLE_REGNUM)
9585 reg_addend = XEXP (reg_addend, 1);
9586 else if (REG_P (XEXP (reg_addend, 1))
9587 && REGNO (XEXP (reg_addend, 1)) == PIC_OFFSET_TABLE_REGNUM)
9588 reg_addend = XEXP (reg_addend, 0);
9589 else
9590 return orig_x;
9591 if (!REG_P (reg_addend)
9592 && GET_CODE (reg_addend) != MULT
9593 && GET_CODE (reg_addend) != ASHIFT)
9594 return orig_x;
9596 else
9597 return orig_x;
9599 x = XEXP (XEXP (x, 1), 0);
9600 if (GET_CODE (x) == PLUS
9601 && CONST_INT_P (XEXP (x, 1)))
9603 const_addend = XEXP (x, 1);
9604 x = XEXP (x, 0);
9607 if (GET_CODE (x) == UNSPEC
9608 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
9609 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
9610 result = XVECEXP (x, 0, 0);
9612 if (TARGET_MACHO && darwin_local_data_pic (x)
9613 && !MEM_P (orig_x))
9614 result = XEXP (x, 0);
9616 if (! result)
9617 return orig_x;
9619 if (const_addend)
9620 result = gen_rtx_PLUS (Pmode, result, const_addend);
9621 if (reg_addend)
9622 result = gen_rtx_PLUS (Pmode, reg_addend, result);
9623 return result;
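/* An illustrative round trip: the 32-bit PIC address
   (plus (reg:SI ebx) (const (unspec [(symbol_ref "foo")] UNSPEC_GOTOFF)))
   delegitimizes back to (symbol_ref "foo"), with any index term and
   constant addend re-wrapped around the result; on x86_64,
   (mem (const (unspec [(symbol_ref "foo")] UNSPEC_GOTPCREL))) likewise
   yields the bare symbol.  */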
9626 /* If X is a machine specific address (i.e. a symbol or label being
9627 referenced as a displacement from the GOT implemented using an
9628 UNSPEC), then return the base term. Otherwise return X. */
9631 ix86_find_base_term (rtx x)
9633 rtx term;
9635 if (TARGET_64BIT)
9637 if (GET_CODE (x) != CONST)
9638 return x;
9639 term = XEXP (x, 0);
9640 if (GET_CODE (term) == PLUS
9641 && (CONST_INT_P (XEXP (term, 1))
9642 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
9643 term = XEXP (term, 0);
9644 if (GET_CODE (term) != UNSPEC
9645 || XINT (term, 1) != UNSPEC_GOTPCREL)
9646 return x;
9648 term = XVECEXP (term, 0, 0);
9650 if (GET_CODE (term) != SYMBOL_REF
9651 && GET_CODE (term) != LABEL_REF)
9652 return x;
9654 return term;
9657 term = ix86_delegitimize_address (x);
9659 if (GET_CODE (term) != SYMBOL_REF
9660 && GET_CODE (term) != LABEL_REF)
9661 return x;
9663 return term;
9666 static void
9667 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
9668 int fp, FILE *file)
9670 const char *suffix;
9672 if (mode == CCFPmode || mode == CCFPUmode)
9674 enum rtx_code second_code, bypass_code;
9675 ix86_fp_comparison_codes (code, &bypass_code, &code, &second_code);
9676 gcc_assert (bypass_code == UNKNOWN && second_code == UNKNOWN);
9677 code = ix86_fp_compare_code_to_integer (code);
9678 mode = CCmode;
9680 if (reverse)
9681 code = reverse_condition (code);
9683 switch (code)
9685 case EQ:
9686 switch (mode)
9688 case CCAmode:
9689 suffix = "a";
9690 break;
9692 case CCCmode:
9693 suffix = "c";
9694 break;
9696 case CCOmode:
9697 suffix = "o";
9698 break;
9700 case CCSmode:
9701 suffix = "s";
9702 break;
9704 default:
9705 suffix = "e";
9707 break;
9708 case NE:
9709 switch (mode)
9711 case CCAmode:
9712 suffix = "na";
9713 break;
9715 case CCCmode:
9716 suffix = "nc";
9717 break;
9719 case CCOmode:
9720 suffix = "no";
9721 break;
9723 case CCSmode:
9724 suffix = "ns";
9725 break;
9727 default:
9728 suffix = "ne";
9730 break;
9731 case GT:
9732 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
9733 suffix = "g";
9734 break;
9735 case GTU:
9736 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
9737 Those same assemblers have the same but opposite lossage on cmov. */
9738 if (mode == CCmode)
9739 suffix = fp ? "nbe" : "a";
9740 else if (mode == CCCmode)
9741 suffix = "b";
9742 else
9743 gcc_unreachable ();
9744 break;
9745 case LT:
9746 switch (mode)
9748 case CCNOmode:
9749 case CCGOCmode:
9750 suffix = "s";
9751 break;
9753 case CCmode:
9754 case CCGCmode:
9755 suffix = "l";
9756 break;
9758 default:
9759 gcc_unreachable ();
9761 break;
9762 case LTU:
9763 gcc_assert (mode == CCmode || mode == CCCmode);
9764 suffix = "b";
9765 break;
9766 case GE:
9767 switch (mode)
9769 case CCNOmode:
9770 case CCGOCmode:
9771 suffix = "ns";
9772 break;
9774 case CCmode:
9775 case CCGCmode:
9776 suffix = "ge";
9777 break;
9779 default:
9780 gcc_unreachable ();
9782 break;
9783 case GEU:
9784 /* ??? As above. */
9785 gcc_assert (mode == CCmode || mode == CCCmode);
9786 suffix = fp ? "nb" : "ae";
9787 break;
9788 case LE:
9789 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
9790 suffix = "le";
9791 break;
9792 case LEU:
9793 /* ??? As above. */
9794 if (mode == CCmode)
9795 suffix = "be";
9796 else if (mode == CCCmode)
9797 suffix = fp ? "nb" : "ae";
9798 else
9799 gcc_unreachable ();
9800 break;
9801 case UNORDERED:
9802 suffix = fp ? "u" : "p";
9803 break;
9804 case ORDERED:
9805 suffix = fp ? "nu" : "np";
9806 break;
9807 default:
9808 gcc_unreachable ();
9810 fputs (suffix, file);
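/* For instance: (GT, CCGCmode) prints "g"; (GTU, CCmode) prints "a"
   (or "nbe" for fcmov, see above); and with REVERSE set, an
   (LTU, CCmode) test comes out as "ae", since reverse_condition (LTU)
   is GEU.  */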
9813 /* Print the name of register X to FILE based on its machine mode and number.
9814 If CODE is 'w', pretend the mode is HImode.
9815 If CODE is 'b', pretend the mode is QImode.
9816 If CODE is 'k', pretend the mode is SImode.
9817 If CODE is 'q', pretend the mode is DImode.
9818 If CODE is 'h', pretend the reg is the 'high' byte register.
9819 If CODE is 'y', print "st(0)" instead of "st", if the reg is stack op. */
9821 void
9822 print_reg (rtx x, int code, FILE *file)
9824 gcc_assert (x == pc_rtx
9825 || (REGNO (x) != ARG_POINTER_REGNUM
9826 && REGNO (x) != FRAME_POINTER_REGNUM
9827 && REGNO (x) != FLAGS_REG
9828 && REGNO (x) != FPSR_REG
9829 && REGNO (x) != FPCR_REG));
9831 if (ASSEMBLER_DIALECT == ASM_ATT)
9832 putc ('%', file);
9834 if (x == pc_rtx)
9836 gcc_assert (TARGET_64BIT);
9837 fputs ("rip", file);
9838 return;
9841 if (code == 'w' || MMX_REG_P (x))
9842 code = 2;
9843 else if (code == 'b')
9844 code = 1;
9845 else if (code == 'k')
9846 code = 4;
9847 else if (code == 'q')
9848 code = 8;
9849 else if (code == 'y')
9850 code = 3;
9851 else if (code == 'h')
9852 code = 0;
9853 else
9854 code = GET_MODE_SIZE (GET_MODE (x));
9856 /* Irritatingly, AMD extended registers use a different naming convention
9857 from the normal registers. */
9858 if (REX_INT_REG_P (x))
9860 gcc_assert (TARGET_64BIT);
9861 switch (code)
9863 case 0:
9864 error ("extended registers have no high halves");
9865 break;
9866 case 1:
9867 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
9868 break;
9869 case 2:
9870 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
9871 break;
9872 case 4:
9873 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
9874 break;
9875 case 8:
9876 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
9877 break;
9878 default:
9879 error ("unsupported operand size for extended register");
9880 break;
9882 return;
9884 switch (code)
9886 case 3:
9887 if (STACK_TOP_P (x))
9889 fputs ("st(0)", file);
9890 break;
9892 /* FALLTHRU */
9893 case 8:
9894 case 4:
9895 case 12:
9896 if (! ANY_FP_REG_P (x))
9897 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
9898 /* FALLTHRU */
9899 case 16:
9900 case 2:
9901 normal:
9902 fputs (hi_reg_name[REGNO (x)], file);
9903 break;
9904 case 1:
9905 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
9906 goto normal;
9907 fputs (qi_reg_name[REGNO (x)], file);
9908 break;
9909 case 0:
9910 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
9911 goto normal;
9912 fputs (qi_high_reg_name[REGNO (x)], file);
9913 break;
9914 default:
9915 gcc_unreachable ();
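/* Sample outputs in the AT&T dialect: for (reg:SI ax) the codes 'k',
   'w', 'b' and 'h' print "%eax", "%ax", "%al" and "%ah" respectively,
   and 'q' prints "%rax" when TARGET_64BIT; for the extended
   (reg:DI r8) the code 'k' prints "%r8d" via the REX branch above.  */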
9919 /* Locate some local-dynamic symbol still in use by this function
9920 so that we can print its name in some tls_local_dynamic_base
9921 pattern. */
9923 static int
9924 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
9926 rtx x = *px;
9928 if (GET_CODE (x) == SYMBOL_REF
9929 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
9931 cfun->machine->some_ld_name = XSTR (x, 0);
9932 return 1;
9935 return 0;
9938 static const char *
9939 get_some_local_dynamic_name (void)
9941 rtx insn;
9943 if (cfun->machine->some_ld_name)
9944 return cfun->machine->some_ld_name;
9946 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
9947 if (INSN_P (insn)
9948 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
9949 return cfun->machine->some_ld_name;
9951 gcc_unreachable ();
9954 /* Meaning of CODE:
9955 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
9956 C -- print opcode suffix for set/cmov insn.
9957 c -- like C, but print reversed condition
9958 E,e -- likewise, but for compare-and-branch fused insn.
9959 F,f -- likewise, but for floating-point.
9960 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
9961 otherwise nothing
9962 R -- print the prefix for register names.
9963 z -- print the opcode suffix for the size of the current operand.
9964 * -- print a star (in certain assembler syntax)
9965 A -- print an absolute memory reference.
9966 w -- print the operand as if it's a "word" (HImode) even if it isn't.
9967 s -- print a shift double count, followed by the assembler's argument
9968 delimiter.
9969 b -- print the QImode name of the register for the indicated operand.
9970 %b0 would print %al if operands[0] is reg 0.
9971 w -- likewise, print the HImode name of the register.
9972 k -- likewise, print the SImode name of the register.
9973 q -- likewise, print the DImode name of the register.
9974 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
9975 y -- print "st(0)" instead of "st" as a register.
9976 D -- print condition for SSE cmp instruction.
9977 P -- if PIC, print an @PLT suffix.
9978 X -- don't print any sort of PIC '@' suffix for a symbol.
9979 & -- print some in-use local-dynamic symbol name.
9980 H -- print a memory address offset by 8; used for sse high-parts
9981 Y -- print condition for SSE5 com* instruction.
9982 + -- print a branch hint as 'cs' or 'ds' prefix
9983 ; -- print a semicolon (after prefixes due to bug in older gas).
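/* A few illustrative uses in (hypothetical) insn templates: "%z1" on an
   SImode memory operand appends the AT&T suffix 'l'; "set%C2\t%b0"
   turns an EQ comparison into "sete %al" when operands[0] is reg 0;
   and "%H1" prints operand 1's memory address offset by 8, for the
   SSE high parts mentioned above.  */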
9986 void
9987 print_operand (FILE *file, rtx x, int code)
9989 if (code)
9991 switch (code)
9993 case '*':
9994 if (ASSEMBLER_DIALECT == ASM_ATT)
9995 putc ('*', file);
9996 return;
9998 case '&':
9999 assemble_name (file, get_some_local_dynamic_name ());
10000 return;
10002 case 'A':
10003 switch (ASSEMBLER_DIALECT)
10005 case ASM_ATT:
10006 putc ('*', file);
10007 break;
10009 case ASM_INTEL:
10010 /* Intel syntax. For absolute addresses, registers should not
10011 be surrounded by brackets. */
10012 if (!REG_P (x))
10014 putc ('[', file);
10015 PRINT_OPERAND (file, x, 0);
10016 putc (']', file);
10017 return;
10019 break;
10021 default:
10022 gcc_unreachable ();
10025 PRINT_OPERAND (file, x, 0);
10026 return;
10029 case 'L':
10030 if (ASSEMBLER_DIALECT == ASM_ATT)
10031 putc ('l', file);
10032 return;
10034 case 'W':
10035 if (ASSEMBLER_DIALECT == ASM_ATT)
10036 putc ('w', file);
10037 return;
10039 case 'B':
10040 if (ASSEMBLER_DIALECT == ASM_ATT)
10041 putc ('b', file);
10042 return;
10044 case 'Q':
10045 if (ASSEMBLER_DIALECT == ASM_ATT)
10046 putc ('l', file);
10047 return;
10049 case 'S':
10050 if (ASSEMBLER_DIALECT == ASM_ATT)
10051 putc ('s', file);
10052 return;
10054 case 'T':
10055 if (ASSEMBLER_DIALECT == ASM_ATT)
10056 putc ('t', file);
10057 return;
10059 case 'z':
10060 /* 387 opcodes don't get size suffixes if the operands are
10061 registers. */
10062 if (STACK_REG_P (x))
10063 return;
10065 /* Likewise if using Intel opcodes. */
10066 if (ASSEMBLER_DIALECT == ASM_INTEL)
10067 return;
10069 /* Derive the opcode suffix from the size of the operand. */
10070 switch (GET_MODE_SIZE (GET_MODE (x)))
10072 case 1:
10073 putc ('b', file);
10074 return;
10076 case 2:
10077 if (MEM_P (x))
10079 #ifdef HAVE_GAS_FILDS_FISTS
10080 putc ('s', file);
10081 #endif
10082 return;
10084 else
10085 putc ('w', file);
10086 return;
10088 case 4:
10089 if (GET_MODE (x) == SFmode)
10091 putc ('s', file);
10092 return;
10094 else
10095 putc ('l', file);
10096 return;
10098 case 12:
10099 case 16:
10100 putc ('t', file);
10101 return;
10103 case 8:
10104 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
10106 if (MEM_P (x))
10108 #ifdef GAS_MNEMONICS
10109 putc ('q', file);
10110 #else
10111 putc ('l', file);
10112 putc ('l', file);
10113 #endif
10115 else
10116 putc ('q', file);
10118 else
10119 putc ('l', file);
10120 return;
10122 default:
10123 gcc_unreachable ();
10126 case 'b':
10127 case 'w':
10128 case 'k':
10129 case 'q':
10130 case 'h':
10131 case 'y':
10132 case 'X':
10133 case 'P':
10134 break;
10136 case 's':
10137 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
10139 PRINT_OPERAND (file, x, 0);
10140 fputs (", ", file);
10142 return;
10144 case 'D':
10145 /* A little bit of braindamage here. The SSE compare instructions
10146 use completely different names for the comparisons than the
10147 fp conditional moves do. */
10148 switch (GET_CODE (x))
10150 case EQ:
10151 case UNEQ:
10152 fputs ("eq", file);
10153 break;
10154 case LT:
10155 case UNLT:
10156 fputs ("lt", file);
10157 break;
10158 case LE:
10159 case UNLE:
10160 fputs ("le", file);
10161 break;
10162 case UNORDERED:
10163 fputs ("unord", file);
10164 break;
10165 case NE:
10166 case LTGT:
10167 fputs ("neq", file);
10168 break;
10169 case UNGE:
10170 case GE:
10171 fputs ("nlt", file);
10172 break;
10173 case UNGT:
10174 case GT:
10175 fputs ("nle", file);
10176 break;
10177 case ORDERED:
10178 fputs ("ord", file);
10179 break;
10180 default:
10181 gcc_unreachable ();
10183 return;
10184 case 'O':
10185 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10186 if (ASSEMBLER_DIALECT == ASM_ATT)
10188 switch (GET_MODE (x))
10190 case HImode: putc ('w', file); break;
10191 case SImode:
10192 case SFmode: putc ('l', file); break;
10193 case DImode:
10194 case DFmode: putc ('q', file); break;
10195 default: gcc_unreachable ();
10197 putc ('.', file);
10199 #endif
10200 return;
10201 case 'C':
10202 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
10203 return;
10204 case 'F':
10205 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10206 if (ASSEMBLER_DIALECT == ASM_ATT)
10207 putc ('.', file);
10208 #endif
10209 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
10210 return;
10212 /* Like above, but reverse condition */
10213 case 'c':
10214 /* Check to see if argument to %c is really a constant
10215 and not a condition code which needs to be reversed. */
10216 if (!COMPARISON_P (x))
10218 output_operand_lossage ("operand is neither a constant nor a condition code, invalid operand code 'c'");
10219 return;
10221 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
10222 return;
10223 case 'f':
10224 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
10225 if (ASSEMBLER_DIALECT == ASM_ATT)
10226 putc ('.', file);
10227 #endif
10228 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
10229 return;
10231 case 'E':
10232 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
10233 return;
10235 case 'e':
10236 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
10237 return;
10239 case 'H':
10240 /* It doesn't actually matter what mode we use here, as we're
10241 only going to use this for printing. */
10242 x = adjust_address_nv (x, DImode, 8);
10243 break;
10245 case '+':
10247 rtx x;
10249 if (!optimize || optimize_size || !TARGET_BRANCH_PREDICTION_HINTS)
10250 return;
10252 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
10253 if (x)
10255 int pred_val = INTVAL (XEXP (x, 0));
10257 if (pred_val < REG_BR_PROB_BASE * 45 / 100
10258 || pred_val > REG_BR_PROB_BASE * 55 / 100)
10260 int taken = pred_val > REG_BR_PROB_BASE / 2;
10261 int cputaken = final_forward_branch_p (current_output_insn) == 0;
10263 /* Emit hints only in the case where the default branch prediction
10264 heuristics would fail. */
10265 if (taken != cputaken)
10267 /* We use 3e (DS) prefix for taken branches and
10268 2e (CS) prefix for not taken branches. */
10269 if (taken)
10270 fputs ("ds ; ", file);
10271 else
10272 fputs ("cs ; ", file);
10276 return;
10279 case 'Y':
10280 switch (GET_CODE (x))
10282 case NE:
10283 fputs ("neq", file);
10284 break;
10285 case EQ:
10286 fputs ("eq", file);
10287 break;
10288 case GE:
10289 case GEU:
10290 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "ge" : "unlt", file);
10291 break;
10292 case GT:
10293 case GTU:
10294 fputs (INTEGRAL_MODE_P (GET_MODE (x)) ? "gt" : "unle", file);
10295 break;
10296 case LE:
10297 case LEU:
10298 fputs ("le", file);
10299 break;
10300 case LT:
10301 case LTU:
10302 fputs ("lt", file);
10303 break;
10304 case UNORDERED:
10305 fputs ("unord", file);
10306 break;
10307 case ORDERED:
10308 fputs ("ord", file);
10309 break;
10310 case UNEQ:
10311 fputs ("ueq", file);
10312 break;
10313 case UNGE:
10314 fputs ("nlt", file);
10315 break;
10316 case UNGT:
10317 fputs ("nle", file);
10318 break;
10319 case UNLE:
10320 fputs ("ule", file);
10321 break;
10322 case UNLT:
10323 fputs ("ult", file);
10324 break;
10325 case LTGT:
10326 fputs ("une", file);
10327 break;
10328 default:
10329 gcc_unreachable ();
10331 return;
10333 case ';':
10334 #if TARGET_MACHO
10335 fputs (" ; ", file);
10336 #else
10337 fputc (' ', file);
10338 #endif
10339 return;
10341 default:
10342 output_operand_lossage ("invalid operand code '%c'", code);
10346 if (REG_P (x))
10347 print_reg (x, code, file);
10349 else if (MEM_P (x))
10351 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
10352 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
10353 && GET_MODE (x) != BLKmode)
10355 const char * size;
10356 switch (GET_MODE_SIZE (GET_MODE (x)))
10358 case 1: size = "BYTE"; break;
10359 case 2: size = "WORD"; break;
10360 case 4: size = "DWORD"; break;
10361 case 8: size = "QWORD"; break;
10362 case 12: size = "XWORD"; break;
10363 case 16:
10364 if (GET_MODE (x) == XFmode)
10365 size = "XWORD";
10366 else
10367 size = "XMMWORD";
10368 break;
10369 default:
10370 gcc_unreachable ();
10373 /* Check for explicit size override (codes 'b', 'w' and 'k') */
10374 if (code == 'b')
10375 size = "BYTE";
10376 else if (code == 'w')
10377 size = "WORD";
10378 else if (code == 'k')
10379 size = "DWORD";
10381 fputs (size, file);
10382 fputs (" PTR ", file);
10385 x = XEXP (x, 0);
10386 /* Avoid (%rip) for call operands. */
10387 if (CONSTANT_ADDRESS_P (x) && code == 'P'
10388 && !CONST_INT_P (x))
10389 output_addr_const (file, x);
10390 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
10391 output_operand_lossage ("invalid constraints for operand");
10392 else
10393 output_address (x);
10396 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
10398 REAL_VALUE_TYPE r;
10399 long l;
10401 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
10402 REAL_VALUE_TO_TARGET_SINGLE (r, l);
10404 if (ASSEMBLER_DIALECT == ASM_ATT)
10405 putc ('$', file);
10406 fprintf (file, "0x%08lx", (long unsigned int) l);
10409 /* These float cases don't actually occur as immediate operands. */
10410 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
10412 char dstr[30];
10414 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
10415 fprintf (file, "%s", dstr);
10418 else if (GET_CODE (x) == CONST_DOUBLE
10419 && GET_MODE (x) == XFmode)
10421 char dstr[30];
10423 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
10424 fprintf (file, "%s", dstr);
10427 else
10429 /* We have patterns that allow zero sets of memory, for instance.
10430 In 64-bit mode, we should probably support all 8-byte vectors,
10431 since we can in fact encode that into an immediate. */
10432 if (GET_CODE (x) == CONST_VECTOR)
10434 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
10435 x = const0_rtx;
10438 if (code != 'P')
10440 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
10442 if (ASSEMBLER_DIALECT == ASM_ATT)
10443 putc ('$', file);
10445 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
10446 || GET_CODE (x) == LABEL_REF)
10448 if (ASSEMBLER_DIALECT == ASM_ATT)
10449 putc ('$', file);
10450 else
10451 fputs ("OFFSET FLAT:", file);
10454 if (CONST_INT_P (x))
10455 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10456 else if (flag_pic)
10457 output_pic_addr_const (file, x, code);
10458 else
10459 output_addr_const (file, x);
10463 /* Print a memory operand whose address is ADDR. */
10465 void
10466 print_operand_address (FILE *file, rtx addr)
10468 struct ix86_address parts;
10469 rtx base, index, disp;
10470 int scale;
10471 int ok = ix86_decompose_address (addr, &parts);
10473 gcc_assert (ok);
10475 base = parts.base;
10476 index = parts.index;
10477 disp = parts.disp;
10478 scale = parts.scale;
10480 switch (parts.seg)
10482 case SEG_DEFAULT:
10483 break;
10484 case SEG_FS:
10485 case SEG_GS:
10486 if (ASSEMBLER_DIALECT == ASM_ATT)
10487 putc ('%', file);
10488 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
10489 break;
10490 default:
10491 gcc_unreachable ();
10494 /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode. */
10495 if (TARGET_64BIT && !base && !index)
10497 rtx symbol = disp;
10499 if (GET_CODE (disp) == CONST
10500 && GET_CODE (XEXP (disp, 0)) == PLUS
10501 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
10502 symbol = XEXP (XEXP (disp, 0), 0);
10504 if (GET_CODE (symbol) == LABEL_REF
10505 || (GET_CODE (symbol) == SYMBOL_REF
10506 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
10507 base = pc_rtx;
10509 if (!base && !index)
10511 /* A displacement-only address requires special attention. */
10513 if (CONST_INT_P (disp))
10515 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
10516 fputs ("ds:", file);
10517 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
10519 else if (flag_pic)
10520 output_pic_addr_const (file, disp, 0);
10521 else
10522 output_addr_const (file, disp);
10524 else
10526 if (ASSEMBLER_DIALECT == ASM_ATT)
10528 if (disp)
10530 if (flag_pic)
10531 output_pic_addr_const (file, disp, 0);
10532 else if (GET_CODE (disp) == LABEL_REF)
10533 output_asm_label (disp);
10534 else
10535 output_addr_const (file, disp);
10538 putc ('(', file);
10539 if (base)
10540 print_reg (base, 0, file);
10541 if (index)
10543 putc (',', file);
10544 print_reg (index, 0, file);
10545 if (scale != 1)
10546 fprintf (file, ",%d", scale);
10548 putc (')', file);
10550 else
10552 rtx offset = NULL_RTX;
10554 if (disp)
10556 /* Pull out the offset of a symbol; print any symbol itself. */
10557 if (GET_CODE (disp) == CONST
10558 && GET_CODE (XEXP (disp, 0)) == PLUS
10559 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
10561 offset = XEXP (XEXP (disp, 0), 1);
10562 disp = gen_rtx_CONST (VOIDmode,
10563 XEXP (XEXP (disp, 0), 0));
10566 if (flag_pic)
10567 output_pic_addr_const (file, disp, 0);
10568 else if (GET_CODE (disp) == LABEL_REF)
10569 output_asm_label (disp);
10570 else if (CONST_INT_P (disp))
10571 offset = disp;
10572 else
10573 output_addr_const (file, disp);
10576 putc ('[', file);
10577 if (base)
10579 print_reg (base, 0, file);
10580 if (offset)
10582 if (INTVAL (offset) >= 0)
10583 putc ('+', file);
10584 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
10587 else if (offset)
10588 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
10589 else
10590 putc ('0', file);
10592 if (index)
10594 putc ('+', file);
10595 print_reg (index, 0, file);
10596 if (scale != 1)
10597 fprintf (file, "*%d", scale);
10599 putc (']', file);
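/* A worked example: base %ebp, index %eax, scale 4 and displacement 16
   print as "16(%ebp,%eax,4)" in the AT&T dialect and as
   "[ebp+16+eax*4]" in the Intel dialect; a 64-bit displacement-only
   reference to a non-TLS symbol foo instead gets %rip as its base and
   prints as "foo(%rip)".  */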
10604 bool
10605 output_addr_const_extra (FILE *file, rtx x)
10607 rtx op;
10609 if (GET_CODE (x) != UNSPEC)
10610 return false;
10612 op = XVECEXP (x, 0, 0);
10613 switch (XINT (x, 1))
10615 case UNSPEC_GOTTPOFF:
10616 output_addr_const (file, op);
10617 /* FIXME: This might be @TPOFF in Sun ld. */
10618 fputs ("@GOTTPOFF", file);
10619 break;
10620 case UNSPEC_TPOFF:
10621 output_addr_const (file, op);
10622 fputs ("@TPOFF", file);
10623 break;
10624 case UNSPEC_NTPOFF:
10625 output_addr_const (file, op);
10626 if (TARGET_64BIT)
10627 fputs ("@TPOFF", file);
10628 else
10629 fputs ("@NTPOFF", file);
10630 break;
10631 case UNSPEC_DTPOFF:
10632 output_addr_const (file, op);
10633 fputs ("@DTPOFF", file);
10634 break;
10635 case UNSPEC_GOTNTPOFF:
10636 output_addr_const (file, op);
10637 if (TARGET_64BIT)
10638 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10639 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
10640 else
10641 fputs ("@GOTNTPOFF", file);
10642 break;
10643 case UNSPEC_INDNTPOFF:
10644 output_addr_const (file, op);
10645 fputs ("@INDNTPOFF", file);
10646 break;
10648 default:
10649 return false;
10652 return true;
10655 /* Split one or more DImode RTL references into pairs of SImode
10656 references. The RTL can be REG, offsettable MEM, integer constant, or
10657 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
10658 split and "num" is its length. lo_half and hi_half are output arrays
10659 that parallel "operands". */
10661 void
10662 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
10664 while (num--)
10666 rtx op = operands[num];
10668 /* simplify_subreg refuses to split volatile memory addresses,
10669 but we still have to handle them. */
10670 if (MEM_P (op))
10672 lo_half[num] = adjust_address (op, SImode, 0);
10673 hi_half[num] = adjust_address (op, SImode, 4);
10675 else
10677 lo_half[num] = simplify_gen_subreg (SImode, op,
10678 GET_MODE (op) == VOIDmode
10679 ? DImode : GET_MODE (op), 0);
10680 hi_half[num] = simplify_gen_subreg (SImode, op,
10681 GET_MODE (op) == VOIDmode
10682 ? DImode : GET_MODE (op), 4);
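/* For example, (mem:DI (reg:SI bx)) splits into the pair
   (mem:SI (reg:SI bx)) and (mem:SI (plus (reg:SI bx) (const_int 4))),
   while a VOIDmode (const_int 1) splits into (const_int 1) and
   (const_int 0) through simplify_gen_subreg.  */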
10686 /* Split one or more TImode RTL references into pairs of DImode
10687 references. The RTL can be REG, offsettable MEM, integer constant, or
10688 CONST_DOUBLE. "operands" is a pointer to an array of TImode RTL to
10689 split and "num" is its length. lo_half and hi_half are output arrays
10690 that parallel "operands". */
10692 void
10693 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
10695 while (num--)
10697 rtx op = operands[num];
10699 /* simplify_subreg refuses to split volatile memory addresses, but we
10700 still have to handle them. */
10701 if (MEM_P (op))
10703 lo_half[num] = adjust_address (op, DImode, 0);
10704 hi_half[num] = adjust_address (op, DImode, 8);
10706 else
10708 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
10709 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
10714 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
10715 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
10716 is the expression of the binary operation. The output may either be
10717 emitted here, or returned to the caller, like all output_* functions.
10719 There is no guarantee that the operands are the same mode, as they
10720 might be within FLOAT or FLOAT_EXTEND expressions. */
10722 #ifndef SYSV386_COMPAT
10723 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
10724 wants to fix the assemblers because that causes incompatibility
10725 with gcc. No-one wants to fix gcc because that causes
10726 incompatibility with assemblers... You can use the option of
10727 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
10728 #define SYSV386_COMPAT 1
10729 #endif
10731 const char *
10732 output_387_binary_op (rtx insn, rtx *operands)
10734 static char buf[30];
10735 const char *p;
10736 const char *ssep;
10737 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
10739 #ifdef ENABLE_CHECKING
10740 /* Even if we do not want to check the inputs, this documents the input
10741 constraints, which helps in understanding the following code. */
10742 if (STACK_REG_P (operands[0])
10743 && ((REG_P (operands[1])
10744 && REGNO (operands[0]) == REGNO (operands[1])
10745 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
10746 || (REG_P (operands[2])
10747 && REGNO (operands[0]) == REGNO (operands[2])
10748 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
10749 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
10750 ; /* ok */
10751 else
10752 gcc_assert (is_sse);
10753 #endif
10755 switch (GET_CODE (operands[3]))
10757 case PLUS:
10758 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
10759 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
10760 p = "fiadd";
10761 else
10762 p = "fadd";
10763 ssep = "add";
10764 break;
10766 case MINUS:
10767 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
10768 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
10769 p = "fisub";
10770 else
10771 p = "fsub";
10772 ssep = "sub";
10773 break;
10775 case MULT:
10776 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
10777 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
10778 p = "fimul";
10779 else
10780 p = "fmul";
10781 ssep = "mul";
10782 break;
10784 case DIV:
10785 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
10786 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
10787 p = "fidiv";
10788 else
10789 p = "fdiv";
10790 ssep = "div";
10791 break;
10793 default:
10794 gcc_unreachable ();
10797 if (is_sse)
10799 strcpy (buf, ssep);
10800 if (GET_MODE (operands[0]) == SFmode)
10801 strcat (buf, "ss\t{%2, %0|%0, %2}");
10802 else
10803 strcat (buf, "sd\t{%2, %0|%0, %2}");
10804 return buf;
10806 strcpy (buf, p);
10808 switch (GET_CODE (operands[3]))
10810 case MULT:
10811 case PLUS:
10812 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
10814 rtx temp = operands[2];
10815 operands[2] = operands[1];
10816 operands[1] = temp;
10819 /* We know operands[0] == operands[1] here. */
10821 if (MEM_P (operands[2]))
10823 p = "%z2\t%2";
10824 break;
10827 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
10829 if (STACK_TOP_P (operands[0]))
10830 /* How is it that we are storing to a dead operand[2]?
10831 Well, presumably operands[1] is dead too. We can't
10832 store the result to st(0) as st(0) gets popped on this
10833 instruction. Instead store to operands[2] (which I
10834 think has to be st(1)). st(1) will be popped later.
10835 gcc <= 2.8.1 didn't have this check and generated
10836 assembly code that the Unixware assembler rejected. */
10837 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
10838 else
10839 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
10840 break;
10843 if (STACK_TOP_P (operands[0]))
10844 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
10845 else
10846 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
10847 break;
10849 case MINUS:
10850 case DIV:
10851 if (MEM_P (operands[1]))
10853 p = "r%z1\t%1";
10854 break;
10857 if (MEM_P (operands[2]))
10859 p = "%z2\t%2";
10860 break;
10863 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
10865 #if SYSV386_COMPAT
10866 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
10867 derived assemblers, confusingly reverse the direction of
10868 the operation for fsub{r} and fdiv{r} when the
10869 destination register is not st(0). The Intel assembler
10870 doesn't have this brain damage. Read !SYSV386_COMPAT to
10871 figure out what the hardware really does. */
10872 if (STACK_TOP_P (operands[0]))
10873 p = "{p\t%0, %2|rp\t%2, %0}";
10874 else
10875 p = "{rp\t%2, %0|p\t%0, %2}";
10876 #else
10877 if (STACK_TOP_P (operands[0]))
10878 /* As above for fmul/fadd, we can't store to st(0). */
10879 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
10880 else
10881 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
10882 #endif
10883 break;
10886 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
10888 #if SYSV386_COMPAT
10889 if (STACK_TOP_P (operands[0]))
10890 p = "{rp\t%0, %1|p\t%1, %0}";
10891 else
10892 p = "{p\t%1, %0|rp\t%0, %1}";
10893 #else
10894 if (STACK_TOP_P (operands[0]))
10895 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
10896 else
10897 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
10898 #endif
10899 break;
10902 if (STACK_TOP_P (operands[0]))
10904 if (STACK_TOP_P (operands[1]))
10905 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
10906 else
10907 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
10908 break;
10910 else if (STACK_TOP_P (operands[1]))
10912 #if SYSV386_COMPAT
10913 p = "{\t%1, %0|r\t%0, %1}";
10914 #else
10915 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
10916 #endif
10918 else
10920 #if SYSV386_COMPAT
10921 p = "{r\t%2, %0|\t%0, %2}";
10922 #else
10923 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
10924 #endif
10926 break;
10928 default:
10929 gcc_unreachable ();
10932 strcat (buf, p);
10933 return buf;
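/* Two sample returns (illustrative): with SSE operands in SFmode the
   function returns "addss\t{%2, %0|%0, %2}", and for a 387 add whose
   second operand is in memory it returns "fadd%z2\t%2", where %z2
   supplies the memory operand's size suffix.  */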
10936 /* Return needed mode for entity in optimize_mode_switching pass. */
10939 ix86_mode_needed (int entity, rtx insn)
10941 enum attr_i387_cw mode;
10943 /* The mode UNINITIALIZED is used to store the control word after a
10944 function call or ASM pattern. The mode ANY specifies that the function
10945 has no requirements on the control word and makes no changes to the
10946 bits we are interested in. */
10948 if (CALL_P (insn)
10949 || (NONJUMP_INSN_P (insn)
10950 && (asm_noperands (PATTERN (insn)) >= 0
10951 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
10952 return I387_CW_UNINITIALIZED;
10954 if (recog_memoized (insn) < 0)
10955 return I387_CW_ANY;
10957 mode = get_attr_i387_cw (insn);
10959 switch (entity)
10961 case I387_TRUNC:
10962 if (mode == I387_CW_TRUNC)
10963 return mode;
10964 break;
10966 case I387_FLOOR:
10967 if (mode == I387_CW_FLOOR)
10968 return mode;
10969 break;
10971 case I387_CEIL:
10972 if (mode == I387_CW_CEIL)
10973 return mode;
10974 break;
10976 case I387_MASK_PM:
10977 if (mode == I387_CW_MASK_PM)
10978 return mode;
10979 break;
10981 default:
10982 gcc_unreachable ();
10985 return I387_CW_ANY;
10988 /* Output code to initialize control word copies used by trunc?f?i and
10989 rounding patterns. The current control word is saved to SLOT_CW_STORED,
10990 while the stack slot selected by MODE receives the new control word. */
10992 void
10993 emit_i387_cw_initialization (int mode)
10995 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
10996 rtx new_mode;
10998 enum ix86_stack_slot slot;
11000 rtx reg = gen_reg_rtx (HImode);
11002 emit_insn (gen_x86_fnstcw_1 (stored_mode));
11003 emit_move_insn (reg, copy_rtx (stored_mode));
11005 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL || optimize_size)
11007 switch (mode)
11009 case I387_CW_TRUNC:
11010 /* round toward zero (truncate) */
11011 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
11012 slot = SLOT_CW_TRUNC;
11013 break;
11015 case I387_CW_FLOOR:
11016 /* round down toward -oo */
11017 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11018 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
11019 slot = SLOT_CW_FLOOR;
11020 break;
11022 case I387_CW_CEIL:
11023 /* round up toward +oo */
11024 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
11025 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
11026 slot = SLOT_CW_CEIL;
11027 break;
11029 case I387_CW_MASK_PM:
11030 /* mask precision exception for nearbyint() */
11031 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11032 slot = SLOT_CW_MASK_PM;
11033 break;
11035 default:
11036 gcc_unreachable ();
11039 else
11041 switch (mode)
11043 case I387_CW_TRUNC:
11044 /* round toward zero (truncate) */
11045 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
11046 slot = SLOT_CW_TRUNC;
11047 break;
11049 case I387_CW_FLOOR:
11050 /* round down toward -oo */
11051 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
11052 slot = SLOT_CW_FLOOR;
11053 break;
11055 case I387_CW_CEIL:
11056 /* round up toward +oo */
11057 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
11058 slot = SLOT_CW_CEIL;
11059 break;
11061 case I387_CW_MASK_PM:
11062 /* mask precision exception for nearbyint() */
11063 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
11064 slot = SLOT_CW_MASK_PM;
11065 break;
11067 default:
11068 gcc_unreachable ();
11072 gcc_assert (slot < MAX_386_STACK_LOCALS);
11074 new_mode = assign_386_stack_local (HImode, slot);
11075 emit_move_insn (new_mode, reg);
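/* For reference, the i387 control word bits used above: bits 10-11 are
   the rounding control field (00 = to nearest, 01 = down, 10 = up,
   11 = truncate), so ORing in 0x0c00, 0x0400 or 0x0800 selects
   truncation, floor or ceiling, and bit 5 (0x0020) is the precision
   exception mask used for nearbyint.  */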
11078 /* Output code for INSN to convert a float to a signed int. OPERANDS
11079 are the insn operands. The output may be [HSD]Imode and the input
11080 operand may be [SDX]Fmode. */
11082 const char *
11083 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
11085 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
11086 int dimode_p = GET_MODE (operands[0]) == DImode;
11087 int round_mode = get_attr_i387_cw (insn);
11089 /* Jump through a hoop or two for DImode, since the hardware has no
11090 non-popping instruction. We used to do this a different way, but
11091 that was somewhat fragile and broke with post-reload splitters. */
11092 if ((dimode_p || fisttp) && !stack_top_dies)
11093 output_asm_insn ("fld\t%y1", operands);
11095 gcc_assert (STACK_TOP_P (operands[1]));
11096 gcc_assert (MEM_P (operands[0]));
11097 gcc_assert (GET_MODE (operands[1]) != TFmode);
11099 if (fisttp)
11100 output_asm_insn ("fisttp%z0\t%0", operands);
11101 else
11103 if (round_mode != I387_CW_ANY)
11104 output_asm_insn ("fldcw\t%3", operands);
11105 if (stack_top_dies || dimode_p)
11106 output_asm_insn ("fistp%z0\t%0", operands);
11107 else
11108 output_asm_insn ("fist%z0\t%0", operands);
11109 if (round_mode != I387_CW_ANY)
11110 output_asm_insn ("fldcw\t%2", operands);
11113 return "";
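/* Roughly, a DImode conversion whose stack top stays live emits a
   sequence like
   fld %st(0)     (duplicate the value, since fistp pops)
   fldcw %3       (switch to the truncating control word)
   fistp{ll|q} %0
   fldcw %2       (restore the saved control word)
   -- an illustrative sketch of the strings produced above.  */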
11116 /* Output code for x87 ffreep insn. The OPNO argument, which may only
11117 have the values zero or one, indicates the ffreep insn's operand
11118 from the OPERANDS array. */
11120 static const char *
11121 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
11123 if (TARGET_USE_FFREEP)
11124 #if HAVE_AS_IX86_FFREEP
11125 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
11126 #else
11128 static char retval[] = ".word\t0xc_df";
11129 int regno = REGNO (operands[opno]);
11131 gcc_assert (FP_REGNO_P (regno));
11133 retval[9] = '0' + (regno - FIRST_STACK_REG);
11134 return retval;
11136 #endif
11138 return opno ? "fstp\t%y1" : "fstp\t%y0";
11142 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
11143 should be used. UNORDERED_P is true when fucom should be used. */
11145 const char *
11146 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
11148 int stack_top_dies;
11149 rtx cmp_op0, cmp_op1;
11150 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
11152 if (eflags_p)
11154 cmp_op0 = operands[0];
11155 cmp_op1 = operands[1];
11157 else
11159 cmp_op0 = operands[1];
11160 cmp_op1 = operands[2];
11163 if (is_sse)
11165 if (GET_MODE (operands[0]) == SFmode)
11166 if (unordered_p)
11167 return "ucomiss\t{%1, %0|%0, %1}";
11168 else
11169 return "comiss\t{%1, %0|%0, %1}";
11170 else
11171 if (unordered_p)
11172 return "ucomisd\t{%1, %0|%0, %1}";
11173 else
11174 return "comisd\t{%1, %0|%0, %1}";
11177 gcc_assert (STACK_TOP_P (cmp_op0));
11179 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
11181 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
11183 if (stack_top_dies)
11185 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
11186 return output_387_ffreep (operands, 1);
11188 else
11189 return "ftst\n\tfnstsw\t%0";
11192 if (STACK_REG_P (cmp_op1)
11193 && stack_top_dies
11194 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
11195 && REGNO (cmp_op1) != FIRST_STACK_REG)
11197 /* If the top of the 387 stack dies, and the other operand
11198 is also a stack register that dies, then this must be an
11199 `fcompp' float compare. */
11201 if (eflags_p)
11203 /* There is no double popping fcomi variant. Fortunately,
11204 eflags is immune from the fstp's cc clobbering. */
11205 if (unordered_p)
11206 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
11207 else
11208 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
11209 return output_387_ffreep (operands, 0);
11211 else
11213 if (unordered_p)
11214 return "fucompp\n\tfnstsw\t%0";
11215 else
11216 return "fcompp\n\tfnstsw\t%0";
11219 else
11221 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
11223 static const char * const alt[16] =
11225 "fcom%z2\t%y2\n\tfnstsw\t%0",
11226 "fcomp%z2\t%y2\n\tfnstsw\t%0",
11227 "fucom%z2\t%y2\n\tfnstsw\t%0",
11228 "fucomp%z2\t%y2\n\tfnstsw\t%0",
11230 "ficom%z2\t%y2\n\tfnstsw\t%0",
11231 "ficomp%z2\t%y2\n\tfnstsw\t%0",
11232 NULL,
11233 NULL,
11235 "fcomi\t{%y1, %0|%0, %y1}",
11236 "fcomip\t{%y1, %0|%0, %y1}",
11237 "fucomi\t{%y1, %0|%0, %y1}",
11238 "fucomip\t{%y1, %0|%0, %y1}",
11240 NULL,
11241 NULL,
11242 NULL,
11243 NULL
11246 int mask;
11247 const char *ret;
11249 mask = eflags_p << 3;
11250 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
11251 mask |= unordered_p << 1;
11252 mask |= stack_top_dies;
11254 gcc_assert (mask < 16);
11255 ret = alt[mask];
11256 gcc_assert (ret);
11258 return ret;
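/* Example of the encoding above: eflags_p = 1, a floating-point cmp_op1
   (so the intmode bit is 0), unordered_p = 1 and a dying stack top give
   mask = 8 + 0 + 2 + 1 = 11, selecting "fucomip\t{%y1, %0|%0, %y1}".  */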
11262 void
11263 ix86_output_addr_vec_elt (FILE *file, int value)
11265 const char *directive = ASM_LONG;
11267 #ifdef ASM_QUAD
11268 if (TARGET_64BIT)
11269 directive = ASM_QUAD;
11270 #else
11271 gcc_assert (!TARGET_64BIT);
11272 #endif
11274 fprintf (file, "%s%s%d\n", directive, LPREFIX, value);
11277 void
11278 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
11280 const char *directive = ASM_LONG;
11282 #ifdef ASM_QUAD
11283 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
11284 directive = ASM_QUAD;
11285 #else
11286 gcc_assert (!TARGET_64BIT);
11287 #endif
11288 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
11289 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
11290 fprintf (file, "%s%s%d-%s%d\n",
11291 directive, LPREFIX, value, LPREFIX, rel);
11292 else if (HAVE_AS_GOTOFF_IN_DATA)
11293 fprintf (file, "%s%s%d@GOTOFF\n", ASM_LONG, LPREFIX, value);
11294 #if TARGET_MACHO
11295 else if (TARGET_MACHO)
11297 fprintf (file, "%s%s%d-", ASM_LONG, LPREFIX, value);
11298 machopic_output_function_base_name (file);
11299 fprintf(file, "\n");
11301 #endif
11302 else
11303 asm_fprintf (file, "%s%U%s+[.-%s%d]\n",
11304 ASM_LONG, GOT_SYMBOL_NAME, LPREFIX, value);
11307 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
11308 for the target. */
11310 void
11311 ix86_expand_clear (rtx dest)
11313 rtx tmp;
11315 /* We play register width games, which are only valid after reload. */
11316 gcc_assert (reload_completed);
11318 /* Avoid HImode and its attendant prefix byte. */
11319 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
11320 dest = gen_rtx_REG (SImode, REGNO (dest));
11321 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
11323 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
11324 if (reload_completed && (!TARGET_USE_MOV0 || optimize_size))
11326 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
11327 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
11330 emit_insn (tmp);
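/* Illustration: clearing %eax normally emits the two-byte
   "xorl %eax, %eax" (with the flags-register clobber added above so the
   xor pattern matches), but falls back to "movl $0, %eax" when
   TARGET_USE_MOV0 asks to preserve the flags and we are not optimizing
   for size.  */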
11333 /* X is an unchanging MEM. If it is a constant pool reference, return
11334 the constant pool rtx, else NULL. */
11337 maybe_get_pool_constant (rtx x)
11339 x = ix86_delegitimize_address (XEXP (x, 0));
11341 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
11342 return get_pool_constant (x);
11344 return NULL_RTX;
11347 void
11348 ix86_expand_move (enum machine_mode mode, rtx operands[])
11350 rtx op0, op1;
11351 enum tls_model model;
11353 op0 = operands[0];
11354 op1 = operands[1];
11356 if (GET_CODE (op1) == SYMBOL_REF)
11358 model = SYMBOL_REF_TLS_MODEL (op1);
11359 if (model)
11361 op1 = legitimize_tls_address (op1, model, true);
11362 op1 = force_operand (op1, op0);
11363 if (op1 == op0)
11364 return;
11366 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11367 && SYMBOL_REF_DLLIMPORT_P (op1))
11368 op1 = legitimize_dllimport_symbol (op1, false);
11370 else if (GET_CODE (op1) == CONST
11371 && GET_CODE (XEXP (op1, 0)) == PLUS
11372 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
11374 rtx addend = XEXP (XEXP (op1, 0), 1);
11375 rtx symbol = XEXP (XEXP (op1, 0), 0);
11376 rtx tmp = NULL;
11378 model = SYMBOL_REF_TLS_MODEL (symbol);
11379 if (model)
11380 tmp = legitimize_tls_address (symbol, model, true);
11381 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
11382 && SYMBOL_REF_DLLIMPORT_P (symbol))
11383 tmp = legitimize_dllimport_symbol (symbol, true);
11385 if (tmp)
11387 tmp = force_operand (tmp, NULL);
11388 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
11389 op0, 1, OPTAB_DIRECT);
11390 if (tmp == op0)
11391 return;
11395 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
11397 if (TARGET_MACHO && !TARGET_64BIT)
11399 #if TARGET_MACHO
11400 if (MACHOPIC_PURE)
11402 rtx temp = ((reload_in_progress
11403 || ((op0 && REG_P (op0))
11404 && mode == Pmode))
11405 ? op0 : gen_reg_rtx (Pmode));
11406 op1 = machopic_indirect_data_reference (op1, temp);
11407 op1 = machopic_legitimize_pic_address (op1, mode,
11408 temp == op1 ? 0 : temp);
11410 else if (MACHOPIC_INDIRECT)
11411 op1 = machopic_indirect_data_reference (op1, 0);
11412 if (op0 == op1)
11413 return;
11414 #endif
11416 else
11418 if (MEM_P (op0))
11419 op1 = force_reg (Pmode, op1);
11420 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
11422 rtx reg = !can_create_pseudo_p () ? op0 : NULL_RTX;
11423 op1 = legitimize_pic_address (op1, reg);
11424 if (op0 == op1)
11425 return;
11429 else
11431 if (MEM_P (op0)
11432 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
11433 || !push_operand (op0, mode))
11434 && MEM_P (op1))
11435 op1 = force_reg (mode, op1);
11437 if (push_operand (op0, mode)
11438 && ! general_no_elim_operand (op1, mode))
11439 op1 = copy_to_mode_reg (mode, op1);
11441 /* Force large constants in 64-bit compilation into a register
11442 to get them CSEed. */
11443 if (can_create_pseudo_p ()
11444 && (mode == DImode) && TARGET_64BIT
11445 && immediate_operand (op1, mode)
11446 && !x86_64_zext_immediate_operand (op1, VOIDmode)
11447 && !register_operand (op0, mode)
11448 && optimize)
11449 op1 = copy_to_mode_reg (mode, op1);
11451 if (can_create_pseudo_p ()
11452 && FLOAT_MODE_P (mode)
11453 && GET_CODE (op1) == CONST_DOUBLE)
11455 /* If we are loading a floating point constant to a register,
11456 force the value to memory now, since we'll get better code
11457 out of the back end. */
11459 op1 = validize_mem (force_const_mem (mode, op1));
11460 if (!register_operand (op0, mode))
11462 rtx temp = gen_reg_rtx (mode);
11463 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
11464 emit_move_insn (op0, temp);
11465 return;
11470 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
void
ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];
  unsigned int align = GET_MODE_ALIGNMENT (mode);

  /* Force constants other than zero into memory.  We do not know how
     the instructions used to build constants modify the upper 64 bits
     of the register; once we have that information we may be able
     to handle some of them more efficiently.  */
  if (can_create_pseudo_p ()
      && register_operand (op0, mode)
      && (CONSTANT_P (op1)
          || (GET_CODE (op1) == SUBREG
              && CONSTANT_P (SUBREG_REG (op1))))
      && standard_sse_constant_p (op1) <= 0)
    op1 = validize_mem (force_const_mem (mode, op1));

  /* We need to check memory alignment for SSE modes since the align
     attribute can make operands unaligned.  */
  if (can_create_pseudo_p ()
      && SSE_REG_MODE_P (mode)
      && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
          || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
    {
      rtx tmp[2];

      /* ix86_expand_vector_move_misalign() does not like constants ... */
      if (CONSTANT_P (op1)
          || (GET_CODE (op1) == SUBREG
              && CONSTANT_P (SUBREG_REG (op1))))
        op1 = validize_mem (force_const_mem (mode, op1));

      /* ... nor both arguments in memory.  */
      if (!register_operand (op0, mode)
          && !register_operand (op1, mode))
        op1 = force_reg (mode, op1);

      tmp[0] = op0; tmp[1] = op1;
      ix86_expand_vector_move_misalign (mode, tmp);
      return;
    }

  /* Make operand1 a register if it isn't already.  */
  if (can_create_pseudo_p ()
      && !register_operand (op0, mode)
      && !register_operand (op1, mode))
    {
      emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
      return;
    }

  emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
}
/* Implement the movmisalign patterns for SSE.  Non-SSE modes go
   straight to ix86_expand_vector_move.  */
/* Code generation for scalar reg-reg moves of single and double precision data:
     if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
       movaps reg, reg
     else
       movss reg, reg
     if (x86_sse_partial_reg_dependency == true)
       movapd reg, reg
     else
       movsd reg, reg

   Code generation for scalar loads of double precision data:
     if (x86_sse_split_regs == true)
       movlpd mem, reg      (gas syntax)
     else
       movsd mem, reg

   Code generation for unaligned packed loads of single precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
     if (x86_sse_unaligned_move_optimal)
       movups mem, reg

     if (x86_sse_partial_reg_dependency == true)
       {
         xorps  reg, reg
         movlps mem, reg
         movhps mem+8, reg
       }
     else
       {
         movlps mem, reg
         movhps mem+8, reg
       }

   Code generation for unaligned packed loads of double precision data
   (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
     if (x86_sse_unaligned_move_optimal)
       movupd mem, reg

     if (x86_sse_split_regs == true)
       {
         movlpd mem, reg
         movhpd mem+8, reg
       }
     else
       {
         movsd  mem, reg
         movhpd mem+8, reg
       }
 */
void
ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
{
  rtx op0, op1, m;

  op0 = operands[0];
  op1 = operands[1];

  if (MEM_P (op1))
    {
      /* If we're optimizing for size, movups is the smallest.  */
      if (optimize_size)
        {
          op0 = gen_lowpart (V4SFmode, op0);
          op1 = gen_lowpart (V4SFmode, op1);
          emit_insn (gen_sse_movups (op0, op1));
          return;
        }

      /* ??? If we have typed data, then it would appear that using
         movdqu is the only way to get unaligned data loaded with
         integer type.  */
      if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          op0 = gen_lowpart (V16QImode, op0);
          op1 = gen_lowpart (V16QImode, op1);
          emit_insn (gen_sse2_movdqu (op0, op1));
          return;
        }

      if (TARGET_SSE2 && mode == V2DFmode)
        {
          rtx zero;

          if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
            {
              op0 = gen_lowpart (V2DFmode, op0);
              op1 = gen_lowpart (V2DFmode, op1);
              emit_insn (gen_sse2_movupd (op0, op1));
              return;
            }

          /* When SSE registers are split into halves, we can avoid
             writing to the top half twice.  */
          if (TARGET_SSE_SPLIT_REGS)
            {
              emit_clobber (op0);
              zero = op0;
            }
          else
            {
              /* ??? Not sure about the best option for the Intel chips.
                 The following would seem to satisfy; the register is
                 entirely cleared, breaking the dependency chain.  We
                 then store to the upper half, with a dependency depth
                 of one.  A rumor has it that Intel recommends two movsd
                 followed by an unpacklpd, but this is unconfirmed.  And
                 given that the dependency depth of the unpacklpd would
                 still be one, I'm not sure why this would be better.  */
              zero = CONST0_RTX (V2DFmode);
            }

          m = adjust_address (op1, DFmode, 0);
          emit_insn (gen_sse2_loadlpd (op0, zero, m));
          m = adjust_address (op1, DFmode, 8);
          emit_insn (gen_sse2_loadhpd (op0, op0, m));
        }
      else
        {
          if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
            {
              op0 = gen_lowpart (V4SFmode, op0);
              op1 = gen_lowpart (V4SFmode, op1);
              emit_insn (gen_sse_movups (op0, op1));
              return;
            }

          if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
            emit_move_insn (op0, CONST0_RTX (mode));
          else
            emit_clobber (op0);

          if (mode != V4SFmode)
            op0 = gen_lowpart (V4SFmode, op0);
          m = adjust_address (op1, V2SFmode, 0);
          emit_insn (gen_sse_loadlps (op0, op0, m));
          m = adjust_address (op1, V2SFmode, 8);
          emit_insn (gen_sse_loadhps (op0, op0, m));
        }
    }
  else if (MEM_P (op0))
    {
      /* If we're optimizing for size, movups is the smallest.  */
      if (optimize_size)
        {
          op0 = gen_lowpart (V4SFmode, op0);
          op1 = gen_lowpart (V4SFmode, op1);
          emit_insn (gen_sse_movups (op0, op1));
          return;
        }

      /* ??? Similar to above, only less clear because of quote
         typeless stores unquote.  */
      if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
          && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        {
          op0 = gen_lowpart (V16QImode, op0);
          op1 = gen_lowpart (V16QImode, op1);
          emit_insn (gen_sse2_movdqu (op0, op1));
          return;
        }

      if (TARGET_SSE2 && mode == V2DFmode)
        {
          m = adjust_address (op0, DFmode, 0);
          emit_insn (gen_sse2_storelpd (m, op1));
          m = adjust_address (op0, DFmode, 8);
          emit_insn (gen_sse2_storehpd (m, op1));
        }
      else
        {
          if (mode != V4SFmode)
            op1 = gen_lowpart (V4SFmode, op1);
          m = adjust_address (op0, V2SFmode, 0);
          emit_insn (gen_sse_storelps (m, op1));
          m = adjust_address (op0, V2SFmode, 8);
          emit_insn (gen_sse_storehps (m, op1));
        }
    }
  else
    gcc_unreachable ();
}
/* Expand a push in MODE.  This is some mode for which we do not support
   proper push instructions, at least from the registers that we expect
   the value to live in.  */

void
ix86_expand_push (enum machine_mode mode, rtx x)
{
  rtx tmp;

  tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
                             GEN_INT (-GET_MODE_SIZE (mode)),
                             stack_pointer_rtx, 1, OPTAB_DIRECT);
  if (tmp != stack_pointer_rtx)
    emit_move_insn (stack_pointer_rtx, tmp);

  tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
  emit_move_insn (tmp, x);
}
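/* For example (illustrative only, register choice assumed): pushing a
   DFmode value living in an SSE register on a 32-bit target emits
   roughly

        sub     $8, %esp
        movsd   %xmm0, (%esp)

   i.e. an explicit stack-pointer adjustment followed by an ordinary
   move, since no push instruction can take the value directly from
   such a register.  */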
/* Helper function of ix86_fixup_binary_operands to canonicalize
   operand order.  Returns true if the operands should be swapped.  */

static bool
ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
                             rtx operands[])
{
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];

  /* If the operation is not commutative, we can't do anything.  */
  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
    return false;

  /* Highest priority is that src1 should match dst.  */
  if (rtx_equal_p (dst, src1))
    return false;
  if (rtx_equal_p (dst, src2))
    return true;

  /* Next highest priority is that immediate constants come second.  */
  if (immediate_operand (src2, mode))
    return false;
  if (immediate_operand (src1, mode))
    return true;

  /* Lowest priority is that memory references should come second.  */
  if (MEM_P (src2))
    return false;
  if (MEM_P (src1))
    return true;

  return false;
}
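/* For example: given a commutative PLUS of (const_int 5) and (reg x),
   the predicate above requests a swap so that the immediate comes
   second ("x + 5"); likewise a memory operand is moved to the second
   slot, where the binary insn patterns can accept it.  */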
/* Fix up OPERANDS to satisfy ix86_binary_operator_ok.  Return the
   destination to use for the operation.  If different from the true
   destination in operands[0], a copy operation will be required.  */

rtx
ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
                            rtx operands[])
{
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];

  /* Canonicalize operand order.  */
  if (ix86_swap_binary_operands_p (code, mode, operands))
    {
      rtx temp;

      /* It is invalid to swap operands of different modes.  */
      gcc_assert (GET_MODE (src1) == GET_MODE (src2));

      temp = src1;
      src1 = src2;
      src2 = temp;
    }

  /* Both source operands cannot be in memory.  */
  if (MEM_P (src1) && MEM_P (src2))
    {
      /* Optimization: Only read from memory once.  */
      if (rtx_equal_p (src1, src2))
        {
          src2 = force_reg (mode, src2);
          src1 = src2;
        }
      else
        src2 = force_reg (mode, src2);
    }

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  if (MEM_P (dst) && !rtx_equal_p (dst, src1))
    dst = gen_reg_rtx (mode);

  /* Source 1 cannot be a constant.  */
  if (CONSTANT_P (src1))
    src1 = force_reg (mode, src1);

  /* Source 1 cannot be a non-matching memory.  */
  if (MEM_P (src1) && !rtx_equal_p (dst, src1))
    src1 = force_reg (mode, src1);

  operands[1] = src1;
  operands[2] = src2;
  return dst;
}
/* Similarly, but assume that the destination has already been
   set up properly.  */

void
ix86_fixup_binary_operands_no_copy (enum rtx_code code,
                                    enum machine_mode mode, rtx operands[])
{
  rtx dst = ix86_fixup_binary_operands (code, mode, operands);
  gcc_assert (dst == operands[0]);
}
/* Attempt to expand a binary operator.  Make the expansion closer to the
   actual machine than just general_operand, which would allow 3 separate
   memory references (one output, two input) in a single insn.  */

void
ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
                             rtx operands[])
{
  rtx src1, src2, dst, op, clob;

  dst = ix86_fixup_binary_operands (code, mode, operands);
  src1 = operands[1];
  src2 = operands[2];

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
  if (reload_in_progress)
    {
      /* Reload doesn't know about the flags register, and doesn't know that
         it doesn't want to clobber it.  We can only do this with PLUS.  */
      gcc_assert (code == PLUS);
      emit_insn (op);
    }
  else
    {
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
    }

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
}
/* Return TRUE or FALSE depending on whether the binary operator meets the
   appropriate constraints.  */

int
ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
                         rtx operands[3])
{
  rtx dst = operands[0];
  rtx src1 = operands[1];
  rtx src2 = operands[2];

  /* Both source operands cannot be in memory.  */
  if (MEM_P (src1) && MEM_P (src2))
    return 0;

  /* Canonicalize operand order for commutative operators.  */
  if (ix86_swap_binary_operands_p (code, mode, operands))
    {
      rtx temp = src1;
      src1 = src2;
      src2 = temp;
    }

  /* If the destination is memory, we must have a matching source operand.  */
  if (MEM_P (dst) && !rtx_equal_p (dst, src1))
    return 0;

  /* Source 1 cannot be a constant.  */
  if (CONSTANT_P (src1))
    return 0;

  /* Source 1 cannot be a non-matching memory.  */
  if (MEM_P (src1) && !rtx_equal_p (dst, src1))
    return 0;

  return 1;
}
/* Attempt to expand a unary operator.  Make the expansion closer to the
   actual machine than just general_operand, which would allow 2 separate
   memory references (one output, one input) in a single insn.  */

void
ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
                            rtx operands[])
{
  int matching_memory;
  rtx src, dst, op, clob;

  dst = operands[0];
  src = operands[1];

  /* If the destination is memory, and we do not have matching source
     operands, do things in registers.  */
  matching_memory = 0;
  if (MEM_P (dst))
    {
      if (rtx_equal_p (dst, src))
        matching_memory = 1;
      else
        dst = gen_reg_rtx (mode);
    }

  /* When source operand is memory, destination must match.  */
  if (MEM_P (src) && !matching_memory)
    src = force_reg (mode, src);

  /* Emit the instruction.  */

  op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
  if (reload_in_progress || code == NOT)
    {
      /* Reload doesn't know about the flags register, and doesn't know that
         it doesn't want to clobber it.  */
      gcc_assert (code == NOT);
      emit_insn (op);
    }
  else
    {
      clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
    }

  /* Fix up the destination if needed.  */
  if (dst != operands[0])
    emit_move_insn (operands[0], dst);
}
/* Return TRUE or FALSE depending on whether the unary operator meets the
   appropriate constraints.  */

int
ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
                        enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx operands[2] ATTRIBUTE_UNUSED)
{
  /* If one of operands is memory, source and destination must match.  */
  if ((MEM_P (operands[0])
       || MEM_P (operands[1]))
      && ! rtx_equal_p (operands[0], operands[1]))
    return FALSE;
  return TRUE;
}
/* Post-reload splitter for converting an SF or DFmode value in an
   SSE register into an unsigned SImode.  */

void
ix86_split_convert_uns_si_sse (rtx operands[])
{
  enum machine_mode vecmode;
  rtx value, large, zero_or_two31, input, two31, x;

  large = operands[1];
  zero_or_two31 = operands[2];
  input = operands[3];
  two31 = operands[4];
  vecmode = GET_MODE (large);
  value = gen_rtx_REG (vecmode, REGNO (operands[0]));

  /* Load up the value into the low element.  We must ensure that the other
     elements are valid floats -- zero is the easiest such value.  */
  if (MEM_P (input))
    {
      if (vecmode == V4SFmode)
        emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
      else
        emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
    }
  else
    {
      input = gen_rtx_REG (vecmode, REGNO (input));
      emit_move_insn (value, CONST0_RTX (vecmode));
      if (vecmode == V4SFmode)
        emit_insn (gen_sse_movss (value, value, input));
      else
        emit_insn (gen_sse2_movsd (value, value, input));
    }

  emit_move_insn (large, two31);
  emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);

  x = gen_rtx_fmt_ee (LE, vecmode, large, value);
  emit_insn (gen_rtx_SET (VOIDmode, large, x));

  x = gen_rtx_AND (vecmode, zero_or_two31, large);
  emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));

  x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
  emit_insn (gen_rtx_SET (VOIDmode, value, x));

  large = gen_rtx_REG (V4SImode, REGNO (large));
  emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));

  x = gen_rtx_REG (V4SImode, REGNO (value));
  if (vecmode == V4SFmode)
    emit_insn (gen_sse2_cvttps2dq (x, value));
  else
    emit_insn (gen_sse2_cvttpd2dq (x, value));
  value = x;

  emit_insn (gen_xorv4si3 (value, value, large));
}
/* Convert an unsigned DImode value into a DFmode, using only SSE.
   Expects the 64-bit DImode to be supplied in a pair of integral
   registers.  Requires SSE2; will use SSE3 if available.  For x86_32,
   -mfpmath=sse, !optimize_size only.  */

void
ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
  rtx int_xmm, fp_xmm;
  rtx biases, exponents;
  rtx x;

  int_xmm = gen_reg_rtx (V4SImode);
  if (TARGET_INTER_UNIT_MOVES)
    emit_insn (gen_movdi_to_sse (int_xmm, input));
  else if (TARGET_SSE_SPLIT_REGS)
    {
      emit_clobber (int_xmm);
      emit_move_insn (gen_lowpart (DImode, int_xmm), input);
    }
  else
    {
      x = gen_reg_rtx (V2DImode);
      ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
      emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
    }

  x = gen_rtx_CONST_VECTOR (V4SImode,
                            gen_rtvec (4, GEN_INT (0x43300000UL),
                                       GEN_INT (0x45300000UL),
                                       const0_rtx, const0_rtx));
  exponents = validize_mem (force_const_mem (V4SImode, x));

  /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
  emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));

  /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
     yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
     Similarly (0x45300000UL ## fp_value_hi_xmm) yields
     (0x1.0p84 + double(fp_value_hi_xmm)).
     Note these exponents differ by 32.  */

  fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));

  /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
     in [0,2**32-1] and [0]+[2**32,2**64-1] respectively.  */
  real_ldexp (&bias_lo_rvt, &dconst1, 52);
  real_ldexp (&bias_hi_rvt, &dconst1, 84);
  biases = const_double_from_real_value (bias_lo_rvt, DFmode);
  x = const_double_from_real_value (bias_hi_rvt, DFmode);
  biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
  biases = validize_mem (force_const_mem (V2DFmode, biases));
  emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));

  /* Add the upper and lower DFmode values together.  */
  if (TARGET_SSE3)
    emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
  else
    {
      x = copy_to_mode_reg (V2DFmode, fp_xmm);
      emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
      emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
    }

  ix86_expand_vector_extract (false, target, fp_xmm, 0);
}
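/* Worked example for the sequence above (illustrative): for input
   hi:lo = 2:3, the punpckldq forms the bit patterns
   0x43300000:00000003 = 0x1.0p52 + 3.0 and
   0x45300000:00000002 = 0x1.0p84 + 2.0 * 0x1.0p32; subtracting the
   two biases leaves 3.0 and 2.0 * 0x1.0p32, and the final add yields
   8589934595.0 == 2 * 2**32 + 3, the desired DFmode value.  */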
/* Not used, but eases macroization of patterns.  */
void
ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
                                  rtx input ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
/* Convert an unsigned SImode value into a DFmode.  Only currently used
   for SSE, but applicable anywhere.  */

void
ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE TWO31r;
  rtx x, fp;

  x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
                           NULL, 1, OPTAB_DIRECT);

  fp = gen_reg_rtx (DFmode);
  emit_insn (gen_floatsidf2 (fp, x));

  real_ldexp (&TWO31r, &dconst1, 31);
  x = const_double_from_real_value (TWO31r, DFmode);

  x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
  if (x != target)
    emit_move_insn (target, x);
}
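/* Worked example (illustrative): for input 0x80000005 = 2**31 + 5,
   the signed PLUS of -2**31 wraps to 5, floatsidf gives 5.0, and
   adding back the 0x1.0p31 constant yields 2147483653.0 as
   required.  */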
/* Convert a signed DImode value into a DFmode.  Only used for SSE in
   32-bit mode; otherwise we have a direct convert instruction.  */

void
ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE TWO32r;
  rtx fp_lo, fp_hi, x;

  fp_lo = gen_reg_rtx (DFmode);
  fp_hi = gen_reg_rtx (DFmode);

  emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));

  real_ldexp (&TWO32r, &dconst1, 32);
  x = const_double_from_real_value (TWO32r, DFmode);
  fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);

  ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));

  x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
                           0, OPTAB_DIRECT);
  if (x != target)
    emit_move_insn (target, x);
}
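/* The decomposition used above: a signed 64-bit value v with signed
   high word HI and unsigned low word LO satisfies
   v = HI * 2**32 + LO, so the conversion is computed as
   float (HI) * 0x1.0p32 + unsigned_float (LO).  */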
/* Convert an unsigned SImode value into a SFmode, using only SSE.
   For x86_32, -mfpmath=sse, !optimize_size only.  */
void
ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
{
  REAL_VALUE_TYPE ONE16r;
  rtx fp_hi, fp_lo, int_hi, int_lo, x;

  real_ldexp (&ONE16r, &dconst1, 16);
  x = const_double_from_real_value (ONE16r, SFmode);
  int_lo = expand_simple_binop (SImode, AND, input, GEN_INT (0xffff),
                                NULL, 0, OPTAB_DIRECT);
  int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT (16),
                                NULL, 0, OPTAB_DIRECT);
  fp_hi = gen_reg_rtx (SFmode);
  fp_lo = gen_reg_rtx (SFmode);
  emit_insn (gen_floatsisf2 (fp_hi, int_hi));
  emit_insn (gen_floatsisf2 (fp_lo, int_lo));
  fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
                               0, OPTAB_DIRECT);
  fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
                               0, OPTAB_DIRECT);
  if (!rtx_equal_p (target, fp_hi))
    emit_move_insn (target, fp_hi);
}
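/* Illustrative example: input 0x12345678 is computed as
   (float) 0x1234 * 0x1.0p16 + (float) 0x5678; each half fits in 16
   bits, well within SFmode's 24-bit mantissa, so both floatsisf
   conversions are exact and only the final addition rounds.  */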
/* A subroutine of ix86_build_signbit_mask.  If VECT is true,
   then replicate the value for all elements of the vector
   register.  */

rtx
ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
{
  rtvec v;
  switch (mode)
    {
    case SImode:
      gcc_assert (vect);
      v = gen_rtvec (4, value, value, value, value);
      return gen_rtx_CONST_VECTOR (V4SImode, v);

    case DImode:
      gcc_assert (vect);
      v = gen_rtvec (2, value, value);
      return gen_rtx_CONST_VECTOR (V2DImode, v);

    case SFmode:
      if (vect)
        v = gen_rtvec (4, value, value, value, value);
      else
        v = gen_rtvec (4, value, CONST0_RTX (SFmode),
                       CONST0_RTX (SFmode), CONST0_RTX (SFmode));
      return gen_rtx_CONST_VECTOR (V4SFmode, v);

    case DFmode:
      if (vect)
        v = gen_rtvec (2, value, value);
      else
        v = gen_rtvec (2, value, CONST0_RTX (DFmode));
      return gen_rtx_CONST_VECTOR (V2DFmode, v);

    default:
      gcc_unreachable ();
    }
}
/* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
   and ix86_expand_int_vcond.  Create a mask for the sign bit in MODE
   for an SSE register.  If VECT is true, then replicate the mask for
   all elements of the vector register.  If INVERT is true, then create
   a mask excluding the sign bit.  */

rtx
ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
{
  enum machine_mode vec_mode, imode;
  HOST_WIDE_INT hi, lo;
  int shift = 63;
  rtx v;
  rtx mask;

  /* Find the sign bit, sign extended to 2*HWI.  */
  switch (mode)
    {
    case SImode:
    case SFmode:
      imode = SImode;
      vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
      lo = 0x80000000, hi = lo < 0;
      break;

    case DImode:
    case DFmode:
      imode = DImode;
      vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
      if (HOST_BITS_PER_WIDE_INT >= 64)
        lo = (HOST_WIDE_INT)1 << shift, hi = -1;
      else
        lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
      break;

    case TImode:
    case TFmode:
      vec_mode = VOIDmode;
      if (HOST_BITS_PER_WIDE_INT >= 64)
        {
          imode = TImode;
          lo = 0, hi = (HOST_WIDE_INT)1 << shift;
        }
      else
        {
          rtvec vec;

          imode = DImode;
          lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);

          if (invert)
            {
              lo = ~lo, hi = ~hi;
              v = constm1_rtx;
            }
          else
            v = const0_rtx;

          mask = immed_double_const (lo, hi, imode);

          vec = gen_rtvec (2, v, mask);
          v = gen_rtx_CONST_VECTOR (V2DImode, vec);
          v = copy_to_mode_reg (mode, gen_lowpart (mode, v));

          return v;
        }
      break;

    default:
      gcc_unreachable ();
    }

  if (invert)
    lo = ~lo, hi = ~hi;

  /* Force this value into the low part of a fp vector constant.  */
  mask = immed_double_const (lo, hi, imode);
  mask = gen_lowpart (mode, mask);

  if (vec_mode == VOIDmode)
    return force_reg (mode, mask);

  v = ix86_build_const_vector (mode, vect, mask);
  return force_reg (vec_mode, v);
}
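/* Illustrative mask values: for SFmode the vector constant built here
   is { 0x80000000, ... } (just the sign bits) and, with INVERT, its
   complement { 0x7fffffff, ... }.  The absneg expander below then
   implements NEG as an XOR with the former and ABS as an AND with the
   latter.  */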
/* Generate code for floating point ABS or NEG.  */

void
ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
                                rtx operands[])
{
  rtx mask, set, use, clob, dst, src;
  bool use_sse = false;
  bool vector_mode = VECTOR_MODE_P (mode);
  enum machine_mode elt_mode = mode;

  if (vector_mode)
    {
      elt_mode = GET_MODE_INNER (mode);
      use_sse = true;
    }
  else if (mode == TFmode)
    use_sse = true;
  else if (TARGET_SSE_MATH)
    use_sse = SSE_FLOAT_MODE_P (mode);

  /* NEG and ABS performed with SSE use bitwise mask operations.
     Create the appropriate mask now.  */
  if (use_sse)
    mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
  else
    mask = NULL_RTX;

  dst = operands[0];
  src = operands[1];

  if (vector_mode)
    {
      set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
      set = gen_rtx_SET (VOIDmode, dst, set);
      emit_insn (set);
    }
  else
    {
      set = gen_rtx_fmt_e (code, mode, src);
      set = gen_rtx_SET (VOIDmode, dst, set);
      if (mask)
        {
          use = gen_rtx_USE (VOIDmode, mask);
          clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
          emit_insn (gen_rtx_PARALLEL (VOIDmode,
                                       gen_rtvec (3, set, use, clob)));
        }
      else
        emit_insn (set);
    }
}
/* Expand a copysign operation.  Special case operand 0 being a constant.  */

void
ix86_expand_copysign (rtx operands[])
{
  enum machine_mode mode;
  rtx dest, op0, op1, mask, nmask;

  dest = operands[0];
  op0 = operands[1];
  op1 = operands[2];

  mode = GET_MODE (dest);

  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      rtx (*copysign_insn)(rtx, rtx, rtx, rtx);

      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
        op0 = simplify_unary_operation (ABS, mode, op0, mode);

      if (mode == SFmode || mode == DFmode)
        {
          enum machine_mode vmode;

          vmode = mode == SFmode ? V4SFmode : V2DFmode;

          if (op0 == CONST0_RTX (mode))
            op0 = CONST0_RTX (vmode);
          else
            {
              rtvec v;

              if (mode == SFmode)
                v = gen_rtvec (4, op0, CONST0_RTX (SFmode),
                               CONST0_RTX (SFmode), CONST0_RTX (SFmode));
              else
                v = gen_rtvec (2, op0, CONST0_RTX (DFmode));

              op0 = force_reg (vmode, gen_rtx_CONST_VECTOR (vmode, v));
            }
        }
      else if (op0 != CONST0_RTX (mode))
        op0 = force_reg (mode, op0);

      mask = ix86_build_signbit_mask (mode, 0, 0);

      if (mode == SFmode)
        copysign_insn = gen_copysignsf3_const;
      else if (mode == DFmode)
        copysign_insn = gen_copysigndf3_const;
      else
        copysign_insn = gen_copysigntf3_const;

      emit_insn (copysign_insn (dest, op0, op1, mask));
    }
  else
    {
      rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);

      nmask = ix86_build_signbit_mask (mode, 0, 1);
      mask = ix86_build_signbit_mask (mode, 0, 0);

      if (mode == SFmode)
        copysign_insn = gen_copysignsf3_var;
      else if (mode == DFmode)
        copysign_insn = gen_copysigndf3_var;
      else
        copysign_insn = gen_copysigntf3_var;

      emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
    }
}
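/* The split routines below implement the usual bitwise identity,
   roughly copysign (x, y) = (x & ~signbit) | (y & signbit), where
   MASK is the sign-bit mask and NMASK its complement, both built by
   ix86_build_signbit_mask.  */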
/* Deconstruct a copysign operation into bit masks.  Operand 0 is known to
   be a constant, and so has already been expanded into a vector constant.  */

void
ix86_split_copysign_const (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, op0, op1, mask, x;

  dest = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  mask = operands[3];

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  dest = simplify_gen_subreg (vmode, dest, mode, 0);
  x = gen_rtx_AND (vmode, dest, mask);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));

  if (op0 != CONST0_RTX (vmode))
    {
      x = gen_rtx_IOR (vmode, dest, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
}
/* Deconstruct a copysign operation into bit masks.  Operand 0 is variable,
   so we have to do two masks.  */

void
ix86_split_copysign_var (rtx operands[])
{
  enum machine_mode mode, vmode;
  rtx dest, scratch, op0, op1, mask, nmask, x;

  dest = operands[0];
  scratch = operands[1];
  op0 = operands[2];
  op1 = operands[3];
  nmask = operands[4];
  mask = operands[5];

  mode = GET_MODE (dest);
  vmode = GET_MODE (mask);

  if (rtx_equal_p (op0, op1))
    {
      /* Shouldn't happen often (it's useless, obviously), but when it does
         we'd generate incorrect code if we continue below.  */
      emit_move_insn (dest, op0);
      return;
    }

  if (REG_P (mask) && REGNO (dest) == REGNO (mask))     /* alternative 0 */
    {
      gcc_assert (REGNO (op1) == REGNO (scratch));

      x = gen_rtx_AND (vmode, scratch, mask);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      dest = mask;
      op0 = simplify_gen_subreg (vmode, op0, mode, 0);
      x = gen_rtx_NOT (vmode, dest);
      x = gen_rtx_AND (vmode, x, op0);
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }
  else
    {
      if (REGNO (op1) == REGNO (scratch))               /* alternative 1,3 */
        {
          x = gen_rtx_AND (vmode, scratch, mask);
        }
      else                                              /* alternative 2,4 */
        {
          gcc_assert (REGNO (mask) == REGNO (scratch));
          op1 = simplify_gen_subreg (vmode, op1, mode, 0);
          x = gen_rtx_AND (vmode, scratch, op1);
        }
      emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

      if (REGNO (op0) == REGNO (dest))                  /* alternative 1,2 */
        {
          dest = simplify_gen_subreg (vmode, op0, mode, 0);
          x = gen_rtx_AND (vmode, dest, nmask);
        }
      else                                              /* alternative 3,4 */
        {
          gcc_assert (REGNO (nmask) == REGNO (dest));
          dest = nmask;
          op0 = simplify_gen_subreg (vmode, op0, mode, 0);
          x = gen_rtx_AND (vmode, dest, op0);
        }
      emit_insn (gen_rtx_SET (VOIDmode, dest, x));
    }

  x = gen_rtx_IOR (vmode, dest, scratch);
  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
}
/* Return TRUE or FALSE depending on whether the first SET in INSN
   has source and destination with matching CC modes, and that the
   CC mode is at least as constrained as REQ_MODE.  */

int
ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
{
  rtx set;
  enum machine_mode set_mode;

  set = PATTERN (insn);
  if (GET_CODE (set) == PARALLEL)
    set = XVECEXP (set, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);

  set_mode = GET_MODE (SET_DEST (set));
  switch (set_mode)
    {
    case CCNOmode:
      if (req_mode != CCNOmode
          && (req_mode != CCmode
              || XEXP (SET_SRC (set), 1) != const0_rtx))
        return 0;
      break;
    case CCmode:
      if (req_mode == CCGCmode)
        return 0;
      /* FALLTHRU */
    case CCGCmode:
      if (req_mode == CCGOCmode || req_mode == CCNOmode)
        return 0;
      /* FALLTHRU */
    case CCGOCmode:
      if (req_mode == CCZmode)
        return 0;
      /* FALLTHRU */
    case CCZmode:
      break;

    default:
      gcc_unreachable ();
    }

  return (GET_MODE (SET_SRC (set)) == set_mode);
}
/* Generate insn patterns to do an integer compare of OPERANDS.  */

static rtx
ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode cmpmode;
  rtx tmp, flags;

  cmpmode = SELECT_CC_MODE (code, op0, op1);
  flags = gen_rtx_REG (cmpmode, FLAGS_REG);

  /* This is very simple, but making the interface the same as in the
     FP case makes the rest of the code easier.  */
  tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
}
/* Figure out whether to use ordered or unordered fp comparisons.
   Return the appropriate mode to use.  */

enum machine_mode
ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
{
  /* ??? In order to make all comparisons reversible, we do all comparisons
     non-trapping when compiling for IEEE.  Once gcc is able to distinguish
     trapping from nontrapping forms of comparisons, we can make inequality
     comparisons trapping again, since it results in better code when using
     FCOM based compares.  */
  return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
}
enum machine_mode
ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (SCALAR_FLOAT_MODE_P (mode))
    {
      gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
      return ix86_fp_compare_mode (code);
    }

  switch (code)
    {
      /* Only zero flag is needed.  */
    case EQ:                    /* ZF=0 */
    case NE:                    /* ZF!=0 */
      return CCZmode;
      /* Codes needing carry flag.  */
    case GEU:                   /* CF=0 */
    case LTU:                   /* CF=1 */
      /* Detect overflow checks.  They need just the carry flag.  */
      if (GET_CODE (op0) == PLUS
          && rtx_equal_p (op1, XEXP (op0, 0)))
        return CCCmode;
      else
        return CCmode;
    case GTU:                   /* CF=0 & ZF=0 */
    case LEU:                   /* CF=1 | ZF=1 */
      /* Detect overflow checks.  They need just the carry flag.  */
      if (GET_CODE (op0) == MINUS
          && rtx_equal_p (op1, XEXP (op0, 0)))
        return CCCmode;
      else
        return CCmode;
      /* Codes possibly doable only with sign flag when
         comparing against zero.  */
    case GE:                    /* SF=OF   or   SF=0 */
    case LT:                    /* SF<>OF  or   SF=1 */
      if (op1 == const0_rtx)
        return CCGOCmode;
      else
        /* For other cases Carry flag is not required.  */
        return CCGCmode;
      /* Codes doable only with sign flag when comparing
         against zero, but we miss jump instruction for it
         so we need to use relational tests against overflow
         that thus needs to be zero.  */
    case GT:                    /* ZF=0 & SF=OF */
    case LE:                    /* ZF=1 | SF<>OF */
      if (op1 == const0_rtx)
        return CCNOmode;
      else
        return CCGCmode;
      /* The strcmp pattern does (use flags), and combine may ask us
         for the proper mode.  */
    case USE:
      return CCmode;
    default:
      gcc_unreachable ();
    }
}
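/* For example: an "x == y" test only needs ZF and so gets CCZmode,
   while the overflow idiom "x + y < x" (an LTU whose first operand is
   the PLUS itself) only needs the carry flag and gets CCCmode.  */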
/* Return the fixed registers used for condition codes.  */

static bool
ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
{
  *p1 = FLAGS_REG;
  *p2 = FPSR_REG;
  return true;
}
/* If two condition code modes are compatible, return a condition code
   mode which is compatible with both.  Otherwise, return
   VOIDmode.  */

static enum machine_mode
ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
{
  if (m1 == m2)
    return m1;

  if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
    return VOIDmode;

  if ((m1 == CCGCmode && m2 == CCGOCmode)
      || (m1 == CCGOCmode && m2 == CCGCmode))
    return CCGCmode;

  switch (m1)
    {
    default:
      gcc_unreachable ();

    case CCmode:
    case CCGCmode:
    case CCGOCmode:
    case CCNOmode:
    case CCAmode:
    case CCCmode:
    case CCOmode:
    case CCSmode:
    case CCZmode:
      switch (m2)
        {
        default:
          return VOIDmode;

        case CCmode:
        case CCGCmode:
        case CCGOCmode:
        case CCNOmode:
        case CCAmode:
        case CCCmode:
        case CCOmode:
        case CCSmode:
        case CCZmode:
          return CCmode;
        }

    case CCFPmode:
    case CCFPUmode:
      /* These are only compatible with themselves, which we already
         checked above.  */
      return VOIDmode;
    }
}
/* Split comparison code CODE into comparisons we can do using branch
   instructions.  BYPASS_CODE is comparison code for branch that will
   branch around FIRST_CODE and SECOND_CODE.  If one of the branches
   is not required, set the corresponding value to UNKNOWN.
   We never require more than two branches.  */

void
ix86_fp_comparison_codes (enum rtx_code code, enum rtx_code *bypass_code,
                          enum rtx_code *first_code,
                          enum rtx_code *second_code)
{
  *first_code = code;
  *bypass_code = UNKNOWN;
  *second_code = UNKNOWN;

  /* The fcomi comparison sets flags as follows:

     cmp    ZF PF CF
     >      0  0  0
     <      0  0  1
     =      1  0  0
     un     1  1  1 */

  switch (code)
    {
    case GT:                    /* GTU - CF=0 & ZF=0 */
    case GE:                    /* GEU - CF=0 */
    case ORDERED:               /* PF=0 */
    case UNORDERED:             /* PF=1 */
    case UNEQ:                  /* EQ - ZF=1 */
    case UNLT:                  /* LTU - CF=1 */
    case UNLE:                  /* LEU - CF=1 | ZF=1 */
    case LTGT:                  /* EQ - ZF=0 */
      break;
    case LT:                    /* LTU - CF=1 - fails on unordered */
      *first_code = UNLT;
      *bypass_code = UNORDERED;
      break;
    case LE:                    /* LEU - CF=1 | ZF=1 - fails on unordered */
      *first_code = UNLE;
      *bypass_code = UNORDERED;
      break;
    case EQ:                    /* EQ - ZF=1 - fails on unordered */
      *first_code = UNEQ;
      *bypass_code = UNORDERED;
      break;
    case NE:                    /* NE - ZF=0 - fails on unordered */
      *first_code = LTGT;
      *second_code = UNORDERED;
      break;
    case UNGE:                  /* GEU - CF=0 - fails on unordered */
      *first_code = GE;
      *second_code = UNORDERED;
      break;
    case UNGT:                  /* GTU - CF=0 & ZF=0 - fails on unordered */
      *first_code = GT;
      *second_code = UNORDERED;
      break;
    default:
      gcc_unreachable ();
    }
  if (!TARGET_IEEE_FP)
    {
      *second_code = UNKNOWN;
      *bypass_code = UNKNOWN;
    }
}
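/* Illustrative use of the splitting above (IEEE mode): LT becomes
   first_code = UNLT with an UNORDERED bypass, i.e. roughly

        fucomip %st(1), %st
        jp      .Lskip          # bypass branch: unordered
        jb      .Ltarget        # UNLT: CF=1
   .Lskip:

   whereas NE becomes LTGT plus a second UNORDERED branch, both
   jumping to the target.  */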
/* Return cost of comparison done using fcom + arithmetic operations on AX.
   All following functions use the number of instructions as a cost metric.
   In future this should be tweaked to compute bytes for optimize_size and
   take into account performance of various instructions on various CPUs.  */
static int
ix86_fp_comparison_arithmetics_cost (enum rtx_code code)
{
  if (!TARGET_IEEE_FP)
    return 4;
  /* The cost of code output by ix86_expand_fp_compare.  */
  switch (code)
    {
    case UNLE:
    case UNLT:
    case LTGT:
    case GT:
    case GE:
    case UNORDERED:
    case ORDERED:
    case UNEQ:
      return 4;
    case LT:
    case NE:
    case EQ:
    case UNGE:
      return 5;
    case LE:
    case UNGT:
      return 6;
    default:
      gcc_unreachable ();
    }
}
/* Return cost of comparison done using fcomi operation.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */
static int
ix86_fp_comparison_fcomi_cost (enum rtx_code code)
{
  enum rtx_code bypass_code, first_code, second_code;
  /* Return arbitrarily high cost when instruction is not supported - this
     prevents gcc from using it.  */
  if (!TARGET_CMOVE)
    return 1024;
  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 2;
}
/* Return cost of comparison done using sahf operation.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */
static int
ix86_fp_comparison_sahf_cost (enum rtx_code code)
{
  enum rtx_code bypass_code, first_code, second_code;
  /* Return arbitrarily high cost when instruction is not preferred - this
     prevents gcc from using it.  */
  if (!(TARGET_SAHF && (TARGET_USE_SAHF || optimize_size)))
    return 1024;
  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return (bypass_code != UNKNOWN || second_code != UNKNOWN) + 3;
}
/* Compute cost of the comparison done using any method.
   See ix86_fp_comparison_arithmetics_cost for the metrics.  */
static int
ix86_fp_comparison_cost (enum rtx_code code)
{
  int fcomi_cost, sahf_cost, arithmetics_cost = 1024;
  int min;

  fcomi_cost = ix86_fp_comparison_fcomi_cost (code);
  sahf_cost = ix86_fp_comparison_sahf_cost (code);

  min = arithmetics_cost = ix86_fp_comparison_arithmetics_cost (code);
  if (min > sahf_cost)
    min = sahf_cost;
  if (min > fcomi_cost)
    min = fcomi_cost;
  return min;
}
/* Return true if we should use an FCOMI instruction for this
   fp comparison.  */

int
ix86_use_fcomi_compare (enum rtx_code code ATTRIBUTE_UNUSED)
{
  enum rtx_code swapped_code = swap_condition (code);

  return ((ix86_fp_comparison_cost (code)
           == ix86_fp_comparison_fcomi_cost (code))
          || (ix86_fp_comparison_cost (swapped_code)
              == ix86_fp_comparison_fcomi_cost (swapped_code)));
}
/* Swap, force into registers, or otherwise massage the two operands
   to a fp comparison.  The operands are updated in place; the new
   comparison code is returned.  */

static enum rtx_code
ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
{
  enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
  rtx op0 = *pop0, op1 = *pop1;
  enum machine_mode op_mode = GET_MODE (op0);
  int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);

  /* All of the unordered compare instructions only work on registers.
     The same is true of the fcomi compare instructions.  The XFmode
     compare instructions require registers except when comparing
     against zero or when converting operand 1 from fixed point to
     floating point.  */

  if (!is_sse
      && (fpcmp_mode == CCFPUmode
          || (op_mode == XFmode
              && ! (standard_80387_constant_p (op0) == 1
                    || standard_80387_constant_p (op1) == 1)
              && GET_CODE (op1) != FLOAT)
          || ix86_use_fcomi_compare (code)))
    {
      op0 = force_reg (op_mode, op0);
      op1 = force_reg (op_mode, op1);
    }
  else
    {
      /* %%% We only allow op1 in memory; op0 must be st(0).  So swap
         things around if they appear profitable, otherwise force op0
         into a register.  */

      if (standard_80387_constant_p (op0) == 0
          || (MEM_P (op0)
              && ! (standard_80387_constant_p (op1) == 0
                    || MEM_P (op1))))
        {
          rtx tmp;
          tmp = op0, op0 = op1, op1 = tmp;
          code = swap_condition (code);
        }

      if (!REG_P (op0))
        op0 = force_reg (op_mode, op0);

      if (CONSTANT_P (op1))
        {
          int tmp = standard_80387_constant_p (op1);
          if (tmp == 0)
            op1 = validize_mem (force_const_mem (op_mode, op1));
          else if (tmp == 1)
            {
              if (TARGET_CMOVE)
                op1 = force_reg (op_mode, op1);
            }
          else
            op1 = force_reg (op_mode, op1);
        }
    }

  /* Try to rearrange the comparison to make it cheaper.  */
  if (ix86_fp_comparison_cost (code)
      > ix86_fp_comparison_cost (swap_condition (code))
      && (REG_P (op1) || can_create_pseudo_p ()))
    {
      rtx tmp;
      tmp = op0, op0 = op1, op1 = tmp;
      code = swap_condition (code);
      if (!REG_P (op0))
        op0 = force_reg (op_mode, op0);
    }

  *pop0 = op0;
  *pop1 = op1;
  return code;
}
/* Convert comparison codes we use to represent FP comparison to integer
   code that will result in proper branch.  Return UNKNOWN if no such code
   is available.  */

enum rtx_code
ix86_fp_compare_code_to_integer (enum rtx_code code)
{
  switch (code)
    {
    case GT:
      return GTU;
    case GE:
      return GEU;
    case ORDERED:
    case UNORDERED:
      return code;
    case UNEQ:
      return EQ;
    case UNLT:
      return LTU;
    case UNLE:
      return LEU;
    case LTGT:
      return NE;
    default:
      return UNKNOWN;
    }
}
/* Generate insn patterns to do a floating point compare of OPERANDS.  */

static rtx
ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch,
                        rtx *second_test, rtx *bypass_test)
{
  enum machine_mode fpcmp_mode, intcmp_mode;
  rtx tmp, tmp2;
  int cost = ix86_fp_comparison_cost (code);
  enum rtx_code bypass_code, first_code, second_code;

  fpcmp_mode = ix86_fp_compare_mode (code);
  code = ix86_prepare_fp_compare_args (code, &op0, &op1);

  if (second_test)
    *second_test = NULL_RTX;
  if (bypass_test)
    *bypass_test = NULL_RTX;

  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);

  /* Do fcomi/sahf based test when profitable.  */
  if (ix86_fp_comparison_arithmetics_cost (code) > cost
      && (bypass_code == UNKNOWN || bypass_test)
      && (second_code == UNKNOWN || second_test))
    {
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
                         tmp);
      if (TARGET_CMOVE)
        emit_insn (tmp);
      else
        {
          gcc_assert (TARGET_SAHF);

          if (!scratch)
            scratch = gen_reg_rtx (HImode);
          tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);

          emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
        }

      /* The FP codes work out to act like unsigned.  */
      intcmp_mode = fpcmp_mode;
      code = first_code;
      if (bypass_code != UNKNOWN)
        *bypass_test = gen_rtx_fmt_ee (bypass_code, VOIDmode,
                                       gen_rtx_REG (intcmp_mode, FLAGS_REG),
                                       const0_rtx);
      if (second_code != UNKNOWN)
        *second_test = gen_rtx_fmt_ee (second_code, VOIDmode,
                                       gen_rtx_REG (intcmp_mode, FLAGS_REG),
                                       const0_rtx);
    }
  else
    {
      /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first.  */
      tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
      tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
      if (!scratch)
        scratch = gen_reg_rtx (HImode);
      emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));

      /* In the unordered case, we have to check C2 for NaN's, which
         doesn't happen to work out to anything nice combination-wise.
         So do some bit twiddling on the value we've got in AH to come
         up with an appropriate set of condition codes.  */
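      /* After fnstsw, AH holds bits 8-15 of the FP status word, so
         the condition bits appear in AH as C0 = 0x01, C2 = 0x04 and
         C3 = 0x40; the 0x45 masks used below therefore test C3, C2
         and C0 together.  */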
      intcmp_mode = CCNOmode;
      switch (code)
        {
        case GT:
        case UNGT:
          if (code == GT || !TARGET_IEEE_FP)
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
              code = EQ;
            }
          else
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
              intcmp_mode = CCmode;
              code = GEU;
            }
          break;
        case LT:
        case UNLT:
          if (code == LT && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x01)));
              intcmp_mode = CCmode;
              code = EQ;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x01)));
              code = NE;
            }
          break;
        case GE:
        case UNGE:
          if (code == GE || !TARGET_IEEE_FP)
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
              code = EQ;
            }
          else
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
                                             GEN_INT (0x01)));
              code = NE;
            }
          break;
        case LE:
        case UNLE:
          if (code == LE && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
              intcmp_mode = CCmode;
              code = LTU;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
              code = NE;
            }
          break;
        case EQ:
        case UNEQ:
          if (code == EQ && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
              intcmp_mode = CCmode;
              code = EQ;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
              code = NE;
            }
          break;
        case NE:
        case LTGT:
          if (code == NE && TARGET_IEEE_FP)
            {
              emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
              emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
                                             GEN_INT (0x40)));
              code = NE;
            }
          else
            {
              emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
              code = EQ;
            }
          break;

        case UNORDERED:
          emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
          code = NE;
          break;
        case ORDERED:
          emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
          code = EQ;
          break;

        default:
          gcc_unreachable ();
        }
    }

  /* Return the test that should be put into the flags user, i.e.
     the bcc, scc, or cmov instruction.  */
  return gen_rtx_fmt_ee (code, VOIDmode,
                         gen_rtx_REG (intcmp_mode, FLAGS_REG),
                         const0_rtx);
}
rtx
ix86_expand_compare (enum rtx_code code, rtx *second_test, rtx *bypass_test)
{
  rtx op0, op1, ret;
  op0 = ix86_compare_op0;
  op1 = ix86_compare_op1;

  if (second_test)
    *second_test = NULL_RTX;
  if (bypass_test)
    *bypass_test = NULL_RTX;

  if (ix86_compare_emitted)
    {
      ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_emitted, const0_rtx);
      ix86_compare_emitted = NULL_RTX;
    }
  else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
    {
      gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
      ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
                                    second_test, bypass_test);
    }
  else
    ret = ix86_expand_int_compare (code, op0, op1);

  return ret;
}
/* Return true if the CODE will result in a nontrivial jump sequence.  */
bool
ix86_fp_jump_nontrivial_p (enum rtx_code code)
{
  enum rtx_code bypass_code, first_code, second_code;
  if (!TARGET_CMOVE)
    return true;
  ix86_fp_comparison_codes (code, &bypass_code, &first_code, &second_code);
  return bypass_code != UNKNOWN || second_code != UNKNOWN;
}
void
ix86_expand_branch (enum rtx_code code, rtx label)
{
  rtx tmp;

  /* If we have emitted a compare insn, go straight to simple.
     ix86_expand_compare won't emit anything if ix86_compare_emitted
     is non NULL.  */
  if (ix86_compare_emitted)
    goto simple;

  switch (GET_MODE (ix86_compare_op0))
    {
    case QImode:
    case HImode:
    case SImode:
      simple:
      tmp = ix86_expand_compare (code, NULL, NULL);
      tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                  gen_rtx_LABEL_REF (VOIDmode, label),
                                  pc_rtx);
      emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
      return;

    case SFmode:
    case DFmode:
    case XFmode:
      {
        rtvec vec;
        int use_fcomi;
        enum rtx_code bypass_code, first_code, second_code;

        code = ix86_prepare_fp_compare_args (code, &ix86_compare_op0,
                                             &ix86_compare_op1);

        ix86_fp_comparison_codes (code, &bypass_code, &first_code,
                                  &second_code);

        /* Check whether we will use the natural sequence with one jump.  If
           so, we can expand the jump early.  Otherwise delay expansion by
           creating a compound insn, so as not to confuse optimizers.  */
        if (bypass_code == UNKNOWN && second_code == UNKNOWN)
          {
            ix86_split_fp_branch (code, ix86_compare_op0, ix86_compare_op1,
                                  gen_rtx_LABEL_REF (VOIDmode, label),
                                  pc_rtx, NULL_RTX, NULL_RTX);
          }
        else
          {
            tmp = gen_rtx_fmt_ee (code, VOIDmode,
                                  ix86_compare_op0, ix86_compare_op1);
            tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
                                        gen_rtx_LABEL_REF (VOIDmode, label),
                                        pc_rtx);
            tmp = gen_rtx_SET (VOIDmode, pc_rtx, tmp);

            use_fcomi = ix86_use_fcomi_compare (code);
            vec = rtvec_alloc (3 + !use_fcomi);
            RTVEC_ELT (vec, 0) = tmp;
            RTVEC_ELT (vec, 1)
              = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FPSR_REG));
            RTVEC_ELT (vec, 2)
              = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCFPmode, FLAGS_REG));
            if (! use_fcomi)
              RTVEC_ELT (vec, 3)
                = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (HImode));

            emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, vec));
          }
        return;
      }

    case DImode:
      if (TARGET_64BIT)
        goto simple;
      /* FALLTHRU */
    case TImode:
      /* Expand DImode branch into multiple compare+branch.  */
      {
        rtx lo[2], hi[2], label2;
        enum rtx_code code1, code2, code3;
        enum machine_mode submode;

        if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
          {
            tmp = ix86_compare_op0;
            ix86_compare_op0 = ix86_compare_op1;
            ix86_compare_op1 = tmp;
            code = swap_condition (code);
          }
        if (GET_MODE (ix86_compare_op0) == DImode)
          {
            split_di (&ix86_compare_op0, 1, lo+0, hi+0);
            split_di (&ix86_compare_op1, 1, lo+1, hi+1);
            submode = SImode;
          }
        else
          {
            split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
            split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
            submode = DImode;
          }

        /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
           avoid two branches.  This costs one extra insn, so disable when
           optimizing for size.  */

        if ((code == EQ || code == NE)
            && (!optimize_size
                || hi[1] == const0_rtx || lo[1] == const0_rtx))
          {
            rtx xor0, xor1;

            xor1 = hi[0];
            if (hi[1] != const0_rtx)
              xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            xor0 = lo[0];
            if (lo[1] != const0_rtx)
              xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
                                   NULL_RTX, 0, OPTAB_WIDEN);

            tmp = expand_binop (submode, ior_optab, xor1, xor0,
                                NULL_RTX, 0, OPTAB_WIDEN);

            ix86_compare_op0 = tmp;
            ix86_compare_op1 = const0_rtx;
            ix86_expand_branch (code, label);
            return;
          }

        /* Otherwise, if we are doing less-than or greater-or-equal-than,
           op1 is a constant and the low word is zero, then we can just
           examine the high word.  Similarly for low word -1 and
           less-or-equal-than or greater-than.  */

        if (CONST_INT_P (hi[1]))
          switch (code)
            {
            case LT: case LTU: case GE: case GEU:
              if (lo[1] == const0_rtx)
                {
                  ix86_compare_op0 = hi[0];
                  ix86_compare_op1 = hi[1];
                  ix86_expand_branch (code, label);
                  return;
                }
              break;
            case LE: case LEU: case GT: case GTU:
              if (lo[1] == constm1_rtx)
                {
                  ix86_compare_op0 = hi[0];
                  ix86_compare_op1 = hi[1];
                  ix86_expand_branch (code, label);
                  return;
                }
              break;
            default:
              break;
            }

        /* Otherwise, we need two or three jumps.  */

        label2 = gen_label_rtx ();

        code1 = code;
        code2 = swap_condition (code);
        code3 = unsigned_condition (code);

        switch (code)
          {
          case LT: case GT: case LTU: case GTU:
            break;

          case LE:   code1 = LT;  code2 = GT;  break;
          case GE:   code1 = GT;  code2 = LT;  break;
          case LEU:  code1 = LTU; code2 = GTU; break;
          case GEU:  code1 = GTU; code2 = LTU; break;

          case EQ:   code1 = UNKNOWN; code2 = NE;  break;
          case NE:   code2 = UNKNOWN; break;

          default:
            gcc_unreachable ();
          }

        /*
         * a < b =>
         *    if (hi(a) < hi(b)) goto true;
         *    if (hi(a) > hi(b)) goto false;
         *    if (lo(a) < lo(b)) goto true;
         *  false:
         */

        ix86_compare_op0 = hi[0];
        ix86_compare_op1 = hi[1];

        if (code1 != UNKNOWN)
          ix86_expand_branch (code1, label);
        if (code2 != UNKNOWN)
          ix86_expand_branch (code2, label2);

        ix86_compare_op0 = lo[0];
        ix86_compare_op1 = lo[1];
        ix86_expand_branch (code3, label);

        if (code2 != UNKNOWN)
          emit_label (label2);
        return;
      }

    default:
      gcc_unreachable ();
    }
}
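/* Illustrative 32-bit expansion of the equality trick above for a
   DImode "a == b" (the xors and or go into fresh pseudos; rough asm
   sketch with hypothetical register names):

        mov     hi(a), %tmp1
        xor     hi(b), %tmp1
        mov     lo(a), %tmp2
        xor     lo(b), %tmp2
        or      %tmp2, %tmp1
        je      .Ltarget
 */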
/* Split branch based on floating point condition.  */
void
ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
                      rtx target1, rtx target2, rtx tmp, rtx pushed)
{
  rtx second, bypass;
  rtx label = NULL_RTX;
  rtx condition;
  int bypass_probability = -1, second_probability = -1, probability = -1;
  rtx i;

  if (target2 != pc_rtx)
    {
      rtx tmp = target2;
      code = reverse_condition_maybe_unordered (code);
      target2 = target1;
      target1 = tmp;
    }

  condition = ix86_expand_fp_compare (code, op1, op2,
                                      tmp, &second, &bypass);

  /* Remove pushed operand from stack.  */
  if (pushed)
    ix86_free_from_memory (GET_MODE (pushed));

  if (split_branch_probability >= 0)
    {
      /* Distribute the probabilities across the jumps.
         Assume that BYPASS and SECOND are always tests
         for UNORDERED.  */
      probability = split_branch_probability;

      /* A value of 1 is low enough that the probability needs no
         further update.  Later we may run some experiments and see
         if unordered values are more frequent in practice.  */
      if (bypass)
        bypass_probability = 1;
      if (second)
        second_probability = 1;
    }
  if (bypass != NULL_RTX)
    {
      label = gen_label_rtx ();
      i = emit_jump_insn (gen_rtx_SET
                          (VOIDmode, pc_rtx,
                           gen_rtx_IF_THEN_ELSE (VOIDmode,
                                                 bypass,
                                                 gen_rtx_LABEL_REF (VOIDmode,
                                                                    label),
                                                 pc_rtx)));
      if (bypass_probability >= 0)
        REG_NOTES (i)
          = gen_rtx_EXPR_LIST (REG_BR_PROB,
                               GEN_INT (bypass_probability),
                               REG_NOTES (i));
    }
  i = emit_jump_insn (gen_rtx_SET
                      (VOIDmode, pc_rtx,
                       gen_rtx_IF_THEN_ELSE (VOIDmode,
                                             condition, target1, target2)));
  if (probability >= 0)
    REG_NOTES (i)
      = gen_rtx_EXPR_LIST (REG_BR_PROB,
                           GEN_INT (probability),
                           REG_NOTES (i));
  if (second != NULL_RTX)
    {
      i = emit_jump_insn (gen_rtx_SET
                          (VOIDmode, pc_rtx,
                           gen_rtx_IF_THEN_ELSE (VOIDmode, second, target1,
                                                 target2)));
      if (second_probability >= 0)
        REG_NOTES (i)
          = gen_rtx_EXPR_LIST (REG_BR_PROB,
                               GEN_INT (second_probability),
                               REG_NOTES (i));
    }
  if (label != NULL_RTX)
    emit_label (label);
}
13553 int
13554 ix86_expand_setcc (enum rtx_code code, rtx dest)
13556 rtx ret, tmp, tmpreg, equiv;
13557 rtx second_test, bypass_test;
13559 if (GET_MODE (ix86_compare_op0) == (TARGET_64BIT ? TImode : DImode))
13560 return 0; /* FAIL */
13562 gcc_assert (GET_MODE (dest) == QImode);
13564 ret = ix86_expand_compare (code, &second_test, &bypass_test);
13565 PUT_MODE (ret, QImode);
13567 tmp = dest;
13568 tmpreg = dest;
13570 emit_insn (gen_rtx_SET (VOIDmode, tmp, ret));
13571 if (bypass_test || second_test)
13573 rtx test = second_test;
13574 int bypass = 0;
13575 rtx tmp2 = gen_reg_rtx (QImode);
13576 if (bypass_test)
13578 gcc_assert (!second_test);
13579 test = bypass_test;
13580 bypass = 1;
13581 PUT_CODE (test, reverse_condition_maybe_unordered (GET_CODE (test)));
13583 PUT_MODE (test, QImode);
13584 emit_insn (gen_rtx_SET (VOIDmode, tmp2, test));
13586 if (bypass)
13587 emit_insn (gen_andqi3 (tmp, tmpreg, tmp2));
13588 else
13589 emit_insn (gen_iorqi3 (tmp, tmpreg, tmp2));
13592 /* Attach a REG_EQUAL note describing the comparison result. */
13593 if (ix86_compare_op0 && ix86_compare_op1)
13595 equiv = simplify_gen_relational (code, QImode,
13596 GET_MODE (ix86_compare_op0),
13597 ix86_compare_op0, ix86_compare_op1);
13598 set_unique_reg_note (get_last_insn (), REG_EQUAL, equiv);
13601 return 1; /* DONE */
13604 /* Expand a comparison setting or clearing the carry flag. Return
13605 true when successful, and set *POP to the comparison operation. */
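/* Only LTU and GEU map directly onto the carry flag, so the cases
   below either convert the comparison into one of those two codes
   or give up. */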
13606 static bool
13607 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
13609 enum machine_mode mode =
13610 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
13612 /* Do not handle double-word compares, which go through a special path. */
13613 if (mode == (TARGET_64BIT ? TImode : DImode))
13614 return false;
13616 if (SCALAR_FLOAT_MODE_P (mode))
13618 rtx second_test = NULL, bypass_test = NULL;
13619 rtx compare_op, compare_seq;
13621 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
13623 /* Shortcut: following common codes never translate
13624 into carry flag compares. */
13625 if (code == EQ || code == NE || code == UNEQ || code == LTGT
13626 || code == ORDERED || code == UNORDERED)
13627 return false;
13629 /* These comparisons require the zero flag; swap the operands so they no longer do. */
13630 if ((code == GT || code == UNLE || code == LE || code == UNGT)
13631 && !TARGET_IEEE_FP)
13633 rtx tmp = op0;
13634 op0 = op1;
13635 op1 = tmp;
13636 code = swap_condition (code);
13639 /* Try to expand the comparison and verify that we end up with
13640 a carry-flag-based comparison. This fails only when we decide
13641 to expand the comparison using arithmetic, which is not a
13642 common scenario. */
13643 start_sequence ();
13644 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX,
13645 &second_test, &bypass_test);
13646 compare_seq = get_insns ();
13647 end_sequence ();
13649 if (second_test || bypass_test)
13650 return false;
13652 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
13653 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
13654 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
13655 else
13656 code = GET_CODE (compare_op);
13658 if (code != LTU && code != GEU)
13659 return false;
13661 emit_insn (compare_seq);
13662 *pop = compare_op;
13663 return true;
13666 if (!INTEGRAL_MODE_P (mode))
13667 return false;
13669 switch (code)
13671 case LTU:
13672 case GEU:
13673 break;
13675 /* Convert a==0 into (unsigned)a<1. */
13676 case EQ:
13677 case NE:
13678 if (op1 != const0_rtx)
13679 return false;
13680 op1 = const1_rtx;
13681 code = (code == EQ ? LTU : GEU);
13682 break;
13684 /* Convert a>b into b<a or a>=b+1. */
13685 case GTU:
13686 case LEU:
13687 if (CONST_INT_P (op1))
13689 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
13690 /* Bail out on overflow. We could still swap the operands, but
13691 that would force loading the constant into a register. */
13692 if (op1 == const0_rtx
13693 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
13694 return false;
13695 code = (code == GTU ? GEU : LTU);
13697 else
13699 rtx tmp = op1;
13700 op1 = op0;
13701 op0 = tmp;
13702 code = (code == GTU ? LTU : GEU);
13704 break;
13706 /* Convert a>=0 into (unsigned)a<0x80000000. */
13707 case LT:
13708 case GE:
13709 if (mode == DImode || op1 != const0_rtx)
13710 return false;
13711 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
13712 code = (code == LT ? GEU : LTU);
13713 break;
13714 case LE:
13715 case GT:
13716 if (mode == DImode || op1 != constm1_rtx)
13717 return false;
13718 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
13719 code = (code == LE ? GEU : LTU);
13720 break;
13722 default:
13723 return false;
13725 /* Swapping operands may cause the constant to appear as the first operand. */
13726 if (!nonimmediate_operand (op0, VOIDmode))
13728 if (!can_create_pseudo_p ())
13729 return false;
13730 op0 = force_reg (mode, op0);
13732 ix86_compare_op0 = op0;
13733 ix86_compare_op1 = op1;
13734 *pop = ix86_expand_compare (code, NULL, NULL);
13735 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
13736 return true;
13739 int
13740 ix86_expand_int_movcc (rtx operands[])
13742 enum rtx_code code = GET_CODE (operands[1]), compare_code;
13743 rtx compare_seq, compare_op;
13744 rtx second_test, bypass_test;
13745 enum machine_mode mode = GET_MODE (operands[0]);
13746 bool sign_bit_compare_p = false;
13748 start_sequence ();
13749 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
13750 compare_seq = get_insns ();
13751 end_sequence ();
13753 compare_code = GET_CODE (compare_op);
13755 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
13756 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
13757 sign_bit_compare_p = true;
13759 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
13760 HImode insns, we'd be swallowed in word prefix ops. */
13762 if ((mode != HImode || TARGET_FAST_PREFIX)
13763 && (mode != (TARGET_64BIT ? TImode : DImode))
13764 && CONST_INT_P (operands[2])
13765 && CONST_INT_P (operands[3]))
13767 rtx out = operands[0];
13768 HOST_WIDE_INT ct = INTVAL (operands[2]);
13769 HOST_WIDE_INT cf = INTVAL (operands[3]);
13770 HOST_WIDE_INT diff;
13772 diff = ct - cf;
13773 /* Sign bit compares are better done using shifts than by
13774 using sbb. */
13775 if (sign_bit_compare_p
13776 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
13777 ix86_compare_op1, &compare_op))
13779 /* Detect overlap between destination and compare sources. */
13780 rtx tmp = out;
13782 if (!sign_bit_compare_p)
13784 bool fpcmp = false;
13786 compare_code = GET_CODE (compare_op);
13788 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
13789 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
13791 fpcmp = true;
13792 compare_code = ix86_fp_compare_code_to_integer (compare_code);
13795 /* To simplify the rest of the code, restrict to the GEU case. */
13796 if (compare_code == LTU)
13798 HOST_WIDE_INT tmp = ct;
13799 ct = cf;
13800 cf = tmp;
13801 compare_code = reverse_condition (compare_code);
13802 code = reverse_condition (code);
13804 else
13806 if (fpcmp)
13807 PUT_CODE (compare_op,
13808 reverse_condition_maybe_unordered
13809 (GET_CODE (compare_op)));
13810 else
13811 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
13813 diff = ct - cf;
13815 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
13816 || reg_overlap_mentioned_p (out, ix86_compare_op1))
13817 tmp = gen_reg_rtx (mode);
13819 if (mode == DImode)
13820 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
13821 else
13822 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
13824 else
13826 if (code == GT || code == GE)
13827 code = reverse_condition (code);
13828 else
13830 HOST_WIDE_INT tmp = ct;
13831 ct = cf;
13832 cf = tmp;
13833 diff = ct - cf;
13835 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
13836 ix86_compare_op1, VOIDmode, 0, -1);
13839 if (diff == 1)
13842 * cmpl op0,op1
13843 * sbbl dest,dest
13844 * [addl dest, ct]
13846 * Size 5 - 8.
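 *
 * The sbb leaves dest equal to -1 or 0 depending on the carry,
 * and since diff == 1 means ct == cf + 1, adding ct maps those
 * two values onto cf and ct respectively.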
13848 if (ct)
13849 tmp = expand_simple_binop (mode, PLUS,
13850 tmp, GEN_INT (ct),
13851 copy_rtx (tmp), 1, OPTAB_DIRECT);
13853 else if (cf == -1)
13856 * cmpl op0,op1
13857 * sbbl dest,dest
13858 * orl $ct, dest
13860 * Size 8.
13862 tmp = expand_simple_binop (mode, IOR,
13863 tmp, GEN_INT (ct),
13864 copy_rtx (tmp), 1, OPTAB_DIRECT);
13866 else if (diff == -1 && ct)
13869 * cmpl op0,op1
13870 * sbbl dest,dest
13871 * notl dest
13872 * [addl dest, cf]
13874 * Size 8 - 11.
13876 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
13877 if (cf)
13878 tmp = expand_simple_binop (mode, PLUS,
13879 copy_rtx (tmp), GEN_INT (cf),
13880 copy_rtx (tmp), 1, OPTAB_DIRECT);
13882 else
13885 * cmpl op0,op1
13886 * sbbl dest,dest
13887 * [notl dest]
13888 * andl cf - ct, dest
13889 * [addl dest, ct]
13891 * Size 8 - 11.
13894 if (cf == 0)
13896 cf = ct;
13897 ct = 0;
13898 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
13901 tmp = expand_simple_binop (mode, AND,
13902 copy_rtx (tmp),
13903 gen_int_mode (cf - ct, mode),
13904 copy_rtx (tmp), 1, OPTAB_DIRECT);
13905 if (ct)
13906 tmp = expand_simple_binop (mode, PLUS,
13907 copy_rtx (tmp), GEN_INT (ct),
13908 copy_rtx (tmp), 1, OPTAB_DIRECT);
13911 if (!rtx_equal_p (tmp, out))
13912 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
13914 return 1; /* DONE */
13917 if (diff < 0)
13919 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
13921 HOST_WIDE_INT tmp;
13922 tmp = ct, ct = cf, cf = tmp;
13923 diff = -diff;
13925 if (SCALAR_FLOAT_MODE_P (cmp_mode))
13927 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
13929 /* We may be reversing an unordered compare to a normal compare,
13930 which is not valid in general (we may convert a non-trapping
13931 condition into a trapping one); however, on i386 we currently
13932 emit all comparisons unordered. */
13933 compare_code = reverse_condition_maybe_unordered (compare_code);
13934 code = reverse_condition_maybe_unordered (code);
13936 else
13938 compare_code = reverse_condition (compare_code);
13939 code = reverse_condition (code);
13943 compare_code = UNKNOWN;
13944 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
13945 && CONST_INT_P (ix86_compare_op1))
13947 if (ix86_compare_op1 == const0_rtx
13948 && (code == LT || code == GE))
13949 compare_code = code;
13950 else if (ix86_compare_op1 == constm1_rtx)
13952 if (code == LE)
13953 compare_code = LT;
13954 else if (code == GT)
13955 compare_code = GE;
13959 /* Optimize dest = (op0 < 0) ? -1 : cf. */
13960 if (compare_code != UNKNOWN
13961 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
13962 && (cf == -1 || ct == -1))
13964 /* If lea code below could be used, only optimize
13965 if it results in a 2 insn sequence. */
13967 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
13968 || diff == 3 || diff == 5 || diff == 9)
13969 || (compare_code == LT && ct == -1)
13970 || (compare_code == GE && cf == -1))
13973 * notl op1 (if necessary)
13974 * sarl $31, op1
13975 * orl cf, op1
13977 if (ct != -1)
13979 cf = ct;
13980 ct = -1;
13981 code = reverse_condition (code);
13984 out = emit_store_flag (out, code, ix86_compare_op0,
13985 ix86_compare_op1, VOIDmode, 0, -1);
13987 out = expand_simple_binop (mode, IOR,
13988 out, GEN_INT (cf),
13989 out, 1, OPTAB_DIRECT);
13990 if (out != operands[0])
13991 emit_move_insn (operands[0], out);
13993 return 1; /* DONE */
13998 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
13999 || diff == 3 || diff == 5 || diff == 9)
14000 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
14001 && (mode != DImode
14002 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
14005 * xorl dest,dest
14006 * cmpl op1,op2
14007 * setcc dest
14008 * lea cf(dest*(ct-cf)),dest
14010 * Size 14.
14012 * This also catches the degenerate setcc-only case.
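 *
 * For example, diff == 5 lets the lea compute dest * 5 + cf in a
 * single instruction via cf(dest,dest,4); diff == 1 with cf == 0
 * degenerates to the bare setcc result.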
14015 rtx tmp;
14016 int nops;
14018 out = emit_store_flag (out, code, ix86_compare_op0,
14019 ix86_compare_op1, VOIDmode, 0, 1);
14021 nops = 0;
14022 /* On x86_64 the lea instruction operates on Pmode, so we need
14023 to get the arithmetic done in the proper mode to match. */
14024 if (diff == 1)
14025 tmp = copy_rtx (out);
14026 else
14028 rtx out1;
14029 out1 = copy_rtx (out);
14030 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
14031 nops++;
14032 if (diff & 1)
14034 tmp = gen_rtx_PLUS (mode, tmp, out1);
14035 nops++;
14038 if (cf != 0)
14040 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
14041 nops++;
14043 if (!rtx_equal_p (tmp, out))
14045 if (nops == 1)
14046 out = force_operand (tmp, copy_rtx (out));
14047 else
14048 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
14050 if (!rtx_equal_p (out, operands[0]))
14051 emit_move_insn (operands[0], copy_rtx (out));
14053 return 1; /* DONE */
14057 * General case: Jumpful:
14058 * xorl dest,dest cmpl op1, op2
14059 * cmpl op1, op2 movl ct, dest
14060 * setcc dest jcc 1f
14061 * decl dest movl cf, dest
14062 * andl (cf-ct),dest 1:
14063 * addl ct,dest
14065 * Size 20. Size 14.
14067 * This is reasonably steep, but branch mispredict costs are
14068 * high on modern cpus, so consider failing only if optimizing
14069 * for space.
14072 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
14073 && BRANCH_COST >= 2)
14075 if (cf == 0)
14077 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
14079 cf = ct;
14080 ct = 0;
14082 if (SCALAR_FLOAT_MODE_P (cmp_mode))
14084 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
14086 /* We may be reversing an unordered compare to a normal compare,
14087 which is not valid in general (we may convert a non-trapping
14088 condition into a trapping one); however, on i386 we
14089 currently emit all comparisons unordered. */
14090 code = reverse_condition_maybe_unordered (code);
14092 else
14094 code = reverse_condition (code);
14095 if (compare_code != UNKNOWN)
14096 compare_code = reverse_condition (compare_code);
14100 if (compare_code != UNKNOWN)
14102 /* notl op1 (if needed)
14103 sarl $31, op1
14104 andl (cf-ct), op1
14105 addl ct, op1
14107 For x < 0 (resp. x <= -1) there will be no notl,
14108 so if possible swap the constants to get rid of the
14109 complement.
14110 True/false will be -1/0 while code below (store flag
14111 followed by decrement) is 0/-1, so the constants need
14112 to be exchanged once more. */
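/* The mask is -1 or 0, so (mask & (cf - ct)) + ct evaluates to
   cf when the mask is set and to ct otherwise. */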
14114 if (compare_code == GE || !cf)
14116 code = reverse_condition (code);
14117 compare_code = LT;
14119 else
14121 HOST_WIDE_INT tmp = cf;
14122 cf = ct;
14123 ct = tmp;
14126 out = emit_store_flag (out, code, ix86_compare_op0,
14127 ix86_compare_op1, VOIDmode, 0, -1);
14129 else
14131 out = emit_store_flag (out, code, ix86_compare_op0,
14132 ix86_compare_op1, VOIDmode, 0, 1);
14134 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
14135 copy_rtx (out), 1, OPTAB_DIRECT);
14138 out = expand_simple_binop (mode, AND, copy_rtx (out),
14139 gen_int_mode (cf - ct, mode),
14140 copy_rtx (out), 1, OPTAB_DIRECT);
14141 if (ct)
14142 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
14143 copy_rtx (out), 1, OPTAB_DIRECT);
14144 if (!rtx_equal_p (out, operands[0]))
14145 emit_move_insn (operands[0], copy_rtx (out));
14147 return 1; /* DONE */
14151 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
14153 /* Try a few things more with specific constants and a variable. */
14155 optab op;
14156 rtx var, orig_out, out, tmp;
14158 if (BRANCH_COST <= 2)
14159 return 0; /* FAIL */
14161 /* If one of the two operands is an interesting constant, load a
14162 constant with the above and mask it in with a logical operation. */
14164 if (CONST_INT_P (operands[2]))
14166 var = operands[3];
14167 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
14168 operands[3] = constm1_rtx, op = and_optab;
14169 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
14170 operands[3] = const0_rtx, op = ior_optab;
14171 else
14172 return 0; /* FAIL */
14174 else if (CONST_INT_P (operands[3]))
14176 var = operands[2];
14177 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
14178 operands[2] = constm1_rtx, op = and_optab;
14179 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
14180 operands[2] = const0_rtx, op = ior_optab;
14181 else
14182 return 0; /* FAIL */
14184 else
14185 return 0; /* FAIL */
14187 orig_out = operands[0];
14188 tmp = gen_reg_rtx (mode);
14189 operands[0] = tmp;
14191 /* Recurse to get the constant loaded. */
14192 if (ix86_expand_int_movcc (operands) == 0)
14193 return 0; /* FAIL */
14195 /* Mask in the interesting variable. */
14196 out = expand_binop (mode, op, var, tmp, orig_out, 0,
14197 OPTAB_WIDEN);
14198 if (!rtx_equal_p (out, orig_out))
14199 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
14201 return 1; /* DONE */
14205 * For comparison with above,
14207 * movl cf,dest
14208 * movl ct,tmp
14209 * cmpl op1,op2
14210 * cmovcc tmp,dest
14212 * Size 15.
14215 if (! nonimmediate_operand (operands[2], mode))
14216 operands[2] = force_reg (mode, operands[2]);
14217 if (! nonimmediate_operand (operands[3], mode))
14218 operands[3] = force_reg (mode, operands[3]);
14220 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
14222 rtx tmp = gen_reg_rtx (mode);
14223 emit_move_insn (tmp, operands[3]);
14224 operands[3] = tmp;
14226 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
14228 rtx tmp = gen_reg_rtx (mode);
14229 emit_move_insn (tmp, operands[2]);
14230 operands[2] = tmp;
14233 if (! register_operand (operands[2], VOIDmode)
14234 && (mode == QImode
14235 || ! register_operand (operands[3], VOIDmode)))
14236 operands[2] = force_reg (mode, operands[2]);
14238 if (mode == QImode
14239 && ! register_operand (operands[3], VOIDmode))
14240 operands[3] = force_reg (mode, operands[3]);
14242 emit_insn (compare_seq);
14243 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
14244 gen_rtx_IF_THEN_ELSE (mode,
14245 compare_op, operands[2],
14246 operands[3])));
14247 if (bypass_test)
14248 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
14249 gen_rtx_IF_THEN_ELSE (mode,
14250 bypass_test,
14251 copy_rtx (operands[3]),
14252 copy_rtx (operands[0]))));
14253 if (second_test)
14254 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (operands[0]),
14255 gen_rtx_IF_THEN_ELSE (mode,
14256 second_test,
14257 copy_rtx (operands[2]),
14258 copy_rtx (operands[0]))));
14260 return 1; /* DONE */
14263 /* Swap, force into registers, or otherwise massage the two operands
14264 to an sse comparison with a mask result. Thus we differ a bit from
14265 ix86_prepare_fp_compare_args which expects to produce a flags result.
14267 The DEST operand exists to help determine whether to commute commutative
14268 operators. The POP0/POP1 operands are updated in place. The new
14269 comparison code is returned, or UNKNOWN if not implementable. */
14271 static enum rtx_code
14272 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
14273 rtx *pop0, rtx *pop1)
14275 rtx tmp;
14277 switch (code)
14279 case LTGT:
14280 case UNEQ:
14281 /* We have no LTGT as an operator. We could implement it with
14282 NE & ORDERED, but this requires an extra temporary. It's
14283 not clear that it's worth it. */
14284 return UNKNOWN;
14286 case LT:
14287 case LE:
14288 case UNGT:
14289 case UNGE:
14290 /* These are supported directly. */
14291 break;
14293 case EQ:
14294 case NE:
14295 case UNORDERED:
14296 case ORDERED:
14297 /* For commutative operators, try to canonicalize the destination
14298 operand to be first in the comparison - this helps reload to
14299 avoid extra moves. */
14300 if (!dest || !rtx_equal_p (dest, *pop1))
14301 break;
14302 /* FALLTHRU */
14304 case GE:
14305 case GT:
14306 case UNLE:
14307 case UNLT:
14308 /* These are not supported directly. Swap the comparison operands
14309 to transform into something that is supported. */
14310 tmp = *pop0;
14311 *pop0 = *pop1;
14312 *pop1 = tmp;
14313 code = swap_condition (code);
14314 break;
14316 default:
14317 gcc_unreachable ();
14320 return code;
14323 /* Detect conditional moves that exactly match min/max operational
14324 semantics. Note that this is IEEE safe, as long as we don't
14325 interchange the operands.
14327 Returns FALSE if this conditional move doesn't match a MIN/MAX,
14328 and TRUE if the operation is successful and instructions are emitted. */
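/* Only the LT form (or UNGE, with the arms swapped) is accepted:
   SSE min/max return their second operand when the comparison is
   unordered, so "a < b ? a : b" matches the hardware semantics
   only if the operand order is left untouched. */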
14330 static bool
14331 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
14332 rtx cmp_op1, rtx if_true, rtx if_false)
14334 enum machine_mode mode;
14335 bool is_min;
14336 rtx tmp;
14338 if (code == LT)
14339 ;
14340 else if (code == UNGE)
14342 tmp = if_true;
14343 if_true = if_false;
14344 if_false = tmp;
14346 else
14347 return false;
14349 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
14350 is_min = true;
14351 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
14352 is_min = false;
14353 else
14354 return false;
14356 mode = GET_MODE (dest);
14358 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
14359 but MODE may be a vector mode and thus not appropriate. */
14360 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
14362 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
14363 rtvec v;
14365 if_true = force_reg (mode, if_true);
14366 v = gen_rtvec (2, if_true, if_false);
14367 tmp = gen_rtx_UNSPEC (mode, v, u);
14369 else
14371 code = is_min ? SMIN : SMAX;
14372 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
14375 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
14376 return true;
14379 /* Expand an sse vector comparison. Return the register with the result. */
14381 static rtx
14382 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
14383 rtx op_true, rtx op_false)
14385 enum machine_mode mode = GET_MODE (dest);
14386 rtx x;
14388 cmp_op0 = force_reg (mode, cmp_op0);
14389 if (!nonimmediate_operand (cmp_op1, mode))
14390 cmp_op1 = force_reg (mode, cmp_op1);
14392 if (optimize
14393 || reg_overlap_mentioned_p (dest, op_true)
14394 || reg_overlap_mentioned_p (dest, op_false))
14395 dest = gen_reg_rtx (mode);
14397 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
14398 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14400 return dest;
14403 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
14404 operations. This is used for both scalar and vector conditional moves. */
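/* CMP is an element-wise all-ones/all-zeros mask, so the general
   case computes (CMP & OP_TRUE) | (~CMP & OP_FALSE); the early
   cases skip the andnot/ior when one arm is zero, and SSE5 can
   use its pcmov instruction directly. */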
14406 static void
14407 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
14409 enum machine_mode mode = GET_MODE (dest);
14410 rtx t2, t3, x;
14412 if (op_false == CONST0_RTX (mode))
14414 op_true = force_reg (mode, op_true);
14415 x = gen_rtx_AND (mode, cmp, op_true);
14416 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14418 else if (op_true == CONST0_RTX (mode))
14420 op_false = force_reg (mode, op_false);
14421 x = gen_rtx_NOT (mode, cmp);
14422 x = gen_rtx_AND (mode, x, op_false);
14423 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14425 else if (TARGET_SSE5)
14427 rtx pcmov = gen_rtx_SET (VOIDmode, dest,
14428 gen_rtx_IF_THEN_ELSE (mode, cmp,
14429 op_true,
14430 op_false));
14431 emit_insn (pcmov);
14433 else
14435 op_true = force_reg (mode, op_true);
14436 op_false = force_reg (mode, op_false);
14438 t2 = gen_reg_rtx (mode);
14439 if (optimize)
14440 t3 = gen_reg_rtx (mode);
14441 else
14442 t3 = dest;
14444 x = gen_rtx_AND (mode, op_true, cmp);
14445 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
14447 x = gen_rtx_NOT (mode, cmp);
14448 x = gen_rtx_AND (mode, x, op_false);
14449 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
14451 x = gen_rtx_IOR (mode, t3, t2);
14452 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14456 /* Expand a floating-point conditional move. Return true if successful. */
14458 int
14459 ix86_expand_fp_movcc (rtx operands[])
14461 enum machine_mode mode = GET_MODE (operands[0]);
14462 enum rtx_code code = GET_CODE (operands[1]);
14463 rtx tmp, compare_op, second_test, bypass_test;
14465 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
14467 enum machine_mode cmode;
14469 /* Since we have no cmove for SSE registers, don't force bad
14470 register allocation just to gain access to it. Deny movcc when
14471 the comparison mode doesn't match the move mode. */
14472 cmode = GET_MODE (ix86_compare_op0);
14473 if (cmode == VOIDmode)
14474 cmode = GET_MODE (ix86_compare_op1);
14475 if (cmode != mode)
14476 return 0;
14478 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
14479 &ix86_compare_op0,
14480 &ix86_compare_op1);
14481 if (code == UNKNOWN)
14482 return 0;
14484 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
14485 ix86_compare_op1, operands[2],
14486 operands[3]))
14487 return 1;
14489 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
14490 ix86_compare_op1, operands[2], operands[3]);
14491 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
14492 return 1;
14495 /* The floating point conditional move instructions don't directly
14496 support conditions resulting from a signed integer comparison. */
14498 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
14503 if (!fcmov_comparison_operator (compare_op, VOIDmode))
14505 gcc_assert (!second_test && !bypass_test);
14506 tmp = gen_reg_rtx (QImode);
14507 ix86_expand_setcc (code, tmp);
14508 code = NE;
14509 ix86_compare_op0 = tmp;
14510 ix86_compare_op1 = const0_rtx;
14511 compare_op = ix86_expand_compare (code, &second_test, &bypass_test);
14513 if (bypass_test && reg_overlap_mentioned_p (operands[0], operands[3]))
14515 tmp = gen_reg_rtx (mode);
14516 emit_move_insn (tmp, operands[3]);
14517 operands[3] = tmp;
14519 if (second_test && reg_overlap_mentioned_p (operands[0], operands[2]))
14521 tmp = gen_reg_rtx (mode);
14522 emit_move_insn (tmp, operands[2]);
14523 operands[2] = tmp;
14526 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
14527 gen_rtx_IF_THEN_ELSE (mode, compare_op,
14528 operands[2], operands[3])));
14529 if (bypass_test)
14530 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
14531 gen_rtx_IF_THEN_ELSE (mode, bypass_test,
14532 operands[3], operands[0])));
14533 if (second_test)
14534 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
14535 gen_rtx_IF_THEN_ELSE (mode, second_test,
14536 operands[2], operands[0])));
14538 return 1;
14541 /* Expand a floating-point vector conditional move; a vcond operation
14542 rather than a movcc operation. */
14544 bool
14545 ix86_expand_fp_vcond (rtx operands[])
14547 enum rtx_code code = GET_CODE (operands[3]);
14548 rtx cmp;
14550 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
14551 &operands[4], &operands[5]);
14552 if (code == UNKNOWN)
14553 return false;
14555 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
14556 operands[5], operands[1], operands[2]))
14557 return true;
14559 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
14560 operands[1], operands[2]);
14561 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
14562 return true;
14565 /* Expand a signed/unsigned integral vector conditional move. */
14567 bool
14568 ix86_expand_int_vcond (rtx operands[])
14570 enum machine_mode mode = GET_MODE (operands[0]);
14571 enum rtx_code code = GET_CODE (operands[3]);
14572 bool negate = false;
14573 rtx x, cop0, cop1;
14575 cop0 = operands[4];
14576 cop1 = operands[5];
14578 /* SSE5 supports all of the comparisons on all vector int types. */
14579 if (!TARGET_SSE5)
14581 /* Canonicalize the comparison to EQ, GT, GTU. */
14582 switch (code)
14584 case EQ:
14585 case GT:
14586 case GTU:
14587 break;
14589 case NE:
14590 case LE:
14591 case LEU:
14592 code = reverse_condition (code);
14593 negate = true;
14594 break;
14596 case GE:
14597 case GEU:
14598 code = reverse_condition (code);
14599 negate = true;
14600 /* FALLTHRU */
14602 case LT:
14603 case LTU:
14604 code = swap_condition (code);
14605 x = cop0, cop0 = cop1, cop1 = x;
14606 break;
14608 default:
14609 gcc_unreachable ();
14612 /* Only SSE4.1/SSE4.2 can handle V2DImode comparisons. */
14613 if (mode == V2DImode)
14615 switch (code)
14617 case EQ:
14618 /* SSE4.1 supports EQ. */
14619 if (!TARGET_SSE4_1)
14620 return false;
14621 break;
14623 case GT:
14624 case GTU:
14625 /* SSE4.2 supports GT/GTU. */
14626 if (!TARGET_SSE4_2)
14627 return false;
14628 break;
14630 default:
14631 gcc_unreachable ();
14635 /* Unsigned parallel compare is not supported by the hardware. Play some
14636 tricks to turn this into a signed comparison. */
14637 if (code == GTU)
14639 cop0 = force_reg (mode, cop0);
14641 switch (mode)
14643 case V4SImode:
14644 case V2DImode:
14646 rtx t1, t2, mask;
14648 /* Flip the sign bit of both operands: a >u b is
14649 equivalent to (a ^ msb) >s (b ^ msb), so the unsigned
14650 compare becomes a signed compare the hardware supports. */
14651 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
14652 true, false);
14654 t1 = gen_reg_rtx (mode);
14655 emit_insn ((mode == V4SImode
14656 ? gen_xorv4si3
14657 : gen_xorv2di3) (t1, cop0, mask));
14659 t2 = gen_reg_rtx (mode);
14660 emit_insn ((mode == V4SImode
14661 ? gen_xorv4si3
14662 : gen_xorv2di3) (t2, cop1, mask));
14664 cop0 = t1;
14665 cop1 = t2;
14667 code = GT;
14671 break;
14673 case V16QImode:
14674 case V8HImode:
14675 /* Perform a parallel unsigned saturating subtraction. */
14676 x = gen_reg_rtx (mode);
14677 emit_insn (gen_rtx_SET (VOIDmode, x,
14678 gen_rtx_US_MINUS (mode, cop0, cop1)));
14680 code = EQ;
14681 negate = !negate;
14682 break;
14684 default:
14685 gcc_unreachable ();
14688 /* The V16QI/V8HI cases test the saturating difference X against
14689 zero; the wider cases already set up COP0 and COP1 above. */
14690 if (code == EQ)
14691 cop0 = x, cop1 = CONST0_RTX (mode);
14693 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
14694 operands[1+negate], operands[2-negate]);
14696 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
14697 operands[2-negate]);
14698 return true;
14701 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
14702 true if we should do zero extension, else sign extension. HIGH_P is
14703 true if we want the N/2 high elements, else the low elements. */
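/* The unpack is an interleave of the source with either zeros
   (zero extension) or with a mask of sign-bit copies obtained
   from the signed comparison 0 > operands[1] (sign extension). */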
14705 void
14706 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
14708 enum machine_mode imode = GET_MODE (operands[1]);
14709 rtx (*unpack)(rtx, rtx, rtx);
14710 rtx se, dest;
14712 switch (imode)
14714 case V16QImode:
14715 if (high_p)
14716 unpack = gen_vec_interleave_highv16qi;
14717 else
14718 unpack = gen_vec_interleave_lowv16qi;
14719 break;
14720 case V8HImode:
14721 if (high_p)
14722 unpack = gen_vec_interleave_highv8hi;
14723 else
14724 unpack = gen_vec_interleave_lowv8hi;
14725 break;
14726 case V4SImode:
14727 if (high_p)
14728 unpack = gen_vec_interleave_highv4si;
14729 else
14730 unpack = gen_vec_interleave_lowv4si;
14731 break;
14732 default:
14733 gcc_unreachable ();
14736 dest = gen_lowpart (imode, operands[0]);
14738 if (unsigned_p)
14739 se = force_reg (imode, CONST0_RTX (imode));
14740 else
14741 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
14742 operands[1], pc_rtx, pc_rtx);
14744 emit_insn (unpack (dest, operands[1], se));
14747 /* This function performs the same task as ix86_expand_sse_unpack,
14748 but with SSE4.1 instructions. */
14750 void
14751 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
14753 enum machine_mode imode = GET_MODE (operands[1]);
14754 rtx (*unpack)(rtx, rtx);
14755 rtx src, dest;
14757 switch (imode)
14759 case V16QImode:
14760 if (unsigned_p)
14761 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
14762 else
14763 unpack = gen_sse4_1_extendv8qiv8hi2;
14764 break;
14765 case V8HImode:
14766 if (unsigned_p)
14767 unpack = gen_sse4_1_zero_extendv4hiv4si2;
14768 else
14769 unpack = gen_sse4_1_extendv4hiv4si2;
14770 break;
14771 case V4SImode:
14772 if (unsigned_p)
14773 unpack = gen_sse4_1_zero_extendv2siv2di2;
14774 else
14775 unpack = gen_sse4_1_extendv2siv2di2;
14776 break;
14777 default:
14778 gcc_unreachable ();
14781 dest = operands[0];
14782 if (high_p)
14784 /* Shift higher 8 bytes to lower 8 bytes. */
14785 src = gen_reg_rtx (imode);
14786 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
14787 gen_lowpart (TImode, operands[1]),
14788 GEN_INT (64)));
14790 else
14791 src = operands[1];
14793 emit_insn (unpack (dest, src));
14796 /* This function performs the same task as ix86_expand_sse_unpack,
14797 but with sse5 instructions. */
14799 void
14800 ix86_expand_sse5_unpack (rtx operands[2], bool unsigned_p, bool high_p)
14802 enum machine_mode imode = GET_MODE (operands[1]);
14803 int pperm_bytes[16];
14804 int i;
14805 int h = (high_p) ? 8 : 0;
14806 int h2;
14807 int sign_extend;
14808 rtvec v = rtvec_alloc (16);
14809 rtvec vs;
14810 rtx x, p;
14811 rtx op0 = operands[0], op1 = operands[1];
14813 switch (imode)
14815 case V16QImode:
14816 vs = rtvec_alloc (8);
14817 h2 = (high_p) ? 8 : 0;
14818 for (i = 0; i < 8; i++)
14820 pperm_bytes[2*i+0] = PPERM_SRC | PPERM_SRC2 | i | h;
14821 pperm_bytes[2*i+1] = ((unsigned_p)
14822 ? PPERM_ZERO
14823 : PPERM_SIGN | PPERM_SRC2 | i | h);
14826 for (i = 0; i < 16; i++)
14827 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
14829 for (i = 0; i < 8; i++)
14830 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
14832 p = gen_rtx_PARALLEL (VOIDmode, vs);
14833 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
14834 if (unsigned_p)
14835 emit_insn (gen_sse5_pperm_zero_v16qi_v8hi (op0, op1, p, x));
14836 else
14837 emit_insn (gen_sse5_pperm_sign_v16qi_v8hi (op0, op1, p, x));
14838 break;
14840 case V8HImode:
14841 vs = rtvec_alloc (4);
14842 h2 = (high_p) ? 4 : 0;
14843 for (i = 0; i < 4; i++)
14845 sign_extend = ((unsigned_p)
14846 ? PPERM_ZERO
14847 : PPERM_SIGN | PPERM_SRC2 | ((2*i) + 1 + h));
14848 pperm_bytes[4*i+0] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 0 + h);
14849 pperm_bytes[4*i+1] = PPERM_SRC | PPERM_SRC2 | ((2*i) + 1 + h);
14850 pperm_bytes[4*i+2] = sign_extend;
14851 pperm_bytes[4*i+3] = sign_extend;
14854 for (i = 0; i < 16; i++)
14855 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
14857 for (i = 0; i < 4; i++)
14858 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
14860 p = gen_rtx_PARALLEL (VOIDmode, vs);
14861 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
14862 if (unsigned_p)
14863 emit_insn (gen_sse5_pperm_zero_v8hi_v4si (op0, op1, p, x));
14864 else
14865 emit_insn (gen_sse5_pperm_sign_v8hi_v4si (op0, op1, p, x));
14866 break;
14868 case V4SImode:
14869 vs = rtvec_alloc (2);
14870 h2 = (high_p) ? 2 : 0;
14871 for (i = 0; i < 2; i++)
14873 sign_extend = ((unsigned_p)
14874 ? PPERM_ZERO
14875 : PPERM_SIGN | PPERM_SRC2 | ((4*i) + 3 + h));
14876 pperm_bytes[8*i+0] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 0 + h);
14877 pperm_bytes[8*i+1] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 1 + h);
14878 pperm_bytes[8*i+2] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 2 + h);
14879 pperm_bytes[8*i+3] = PPERM_SRC | PPERM_SRC2 | ((4*i) + 3 + h);
14880 pperm_bytes[8*i+4] = sign_extend;
14881 pperm_bytes[8*i+5] = sign_extend;
14882 pperm_bytes[8*i+6] = sign_extend;
14883 pperm_bytes[8*i+7] = sign_extend;
14886 for (i = 0; i < 16; i++)
14887 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
14889 for (i = 0; i < 2; i++)
14890 RTVEC_ELT (vs, i) = GEN_INT (i + h2);
14892 p = gen_rtx_PARALLEL (VOIDmode, vs);
14893 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
14894 if (unsigned_p)
14895 emit_insn (gen_sse5_pperm_zero_v4si_v2di (op0, op1, p, x));
14896 else
14897 emit_insn (gen_sse5_pperm_sign_v4si_v2di (op0, op1, p, x));
14898 break;
14900 default:
14901 gcc_unreachable ();
14904 return;
14907 /* Pack OPERANDS[1] into the low half and OPERANDS[2] into the high half
14908 of OPERANDS[0], truncating to the next narrower integer vector type. */
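/* Each selector byte picks the low-order bytes of a wider element,
   so the result's low half is the truncation of operands[1] and the
   high half the truncation of operands[2]; the selector constant is
   assembled in V, one entry per result byte. */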
14909 void
14910 ix86_expand_sse5_pack (rtx operands[3])
14912 enum machine_mode imode = GET_MODE (operands[0]);
14913 int pperm_bytes[16];
14914 int i;
14915 rtvec v = rtvec_alloc (16);
14916 rtx x;
14917 rtx op0 = operands[0];
14918 rtx op1 = operands[1];
14919 rtx op2 = operands[2];
14921 switch (imode)
14923 case V16QImode:
14924 for (i = 0; i < 8; i++)
14926 pperm_bytes[i+0] = PPERM_SRC | PPERM_SRC1 | (i*2);
14927 pperm_bytes[i+8] = PPERM_SRC | PPERM_SRC2 | (i*2);
14930 for (i = 0; i < 16; i++)
14931 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
14933 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
14934 emit_insn (gen_sse5_pperm_pack_v8hi_v16qi (op0, op1, op2, x));
14935 break;
14937 case V8HImode:
14938 for (i = 0; i < 4; i++)
14940 pperm_bytes[(2*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 0);
14941 pperm_bytes[(2*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*4) + 1);
14942 pperm_bytes[(2*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 0);
14943 pperm_bytes[(2*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*4) + 1);
14946 for (i = 0; i < 16; i++)
14947 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
14949 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
14950 emit_insn (gen_sse5_pperm_pack_v4si_v8hi (op0, op1, op2, x));
14951 break;
14953 case V4SImode:
14954 for (i = 0; i < 2; i++)
14956 pperm_bytes[(4*i)+0] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 0);
14957 pperm_bytes[(4*i)+1] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 1);
14958 pperm_bytes[(4*i)+2] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 2);
14959 pperm_bytes[(4*i)+3] = PPERM_SRC | PPERM_SRC1 | ((i*8) + 3);
14960 pperm_bytes[(4*i)+8] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 0);
14961 pperm_bytes[(4*i)+9] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 1);
14962 pperm_bytes[(4*i)+10] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 2);
14963 pperm_bytes[(4*i)+11] = PPERM_SRC | PPERM_SRC2 | ((i*8) + 3);
14966 for (i = 0; i < 16; i++)
14967 RTVEC_ELT (v, i) = GEN_INT (pperm_bytes[i]);
14969 x = force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, v));
14970 emit_insn (gen_sse5_pperm_pack_v2di_v4si (op0, op1, op2, x));
14971 break;
14973 default:
14974 gcc_unreachable ();
14977 return;
14980 /* Expand conditional increment or decrement using adc/sbb instructions.
14981 The default case using setcc followed by the conditional move
14982 can be done by generic code. */
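/* Only an increment or decrement by 1 qualifies here: the carry
   produced by the comparison is folded directly into an adc or
   sbb with a 0 or -1 addend. */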
14983 int
14984 ix86_expand_int_addcc (rtx operands[])
14986 enum rtx_code code = GET_CODE (operands[1]);
14987 rtx compare_op;
14988 rtx val = const0_rtx;
14989 bool fpcmp = false;
14990 enum machine_mode mode = GET_MODE (operands[0]);
14992 if (operands[3] != const1_rtx
14993 && operands[3] != constm1_rtx)
14994 return 0;
14995 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
14996 ix86_compare_op1, &compare_op))
14997 return 0;
14998 code = GET_CODE (compare_op);
15000 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15001 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15003 fpcmp = true;
15004 code = ix86_fp_compare_code_to_integer (code);
15007 if (code != LTU)
15009 val = constm1_rtx;
15010 if (fpcmp)
15011 PUT_CODE (compare_op,
15012 reverse_condition_maybe_unordered
15013 (GET_CODE (compare_op)));
15014 else
15015 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
15017 PUT_MODE (compare_op, mode);
15019 /* Construct either adc or sbb insn. */
15020 if ((code == LTU) == (operands[3] == constm1_rtx))
15022 switch (GET_MODE (operands[0]))
15024 case QImode:
15025 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
15026 break;
15027 case HImode:
15028 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
15029 break;
15030 case SImode:
15031 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
15032 break;
15033 case DImode:
15034 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
15035 break;
15036 default:
15037 gcc_unreachable ();
15040 else
15042 switch (GET_MODE (operands[0]))
15044 case QImode:
15045 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
15046 break;
15047 case HImode:
15048 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
15049 break;
15050 case SImode:
15051 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
15052 break;
15053 case DImode:
15054 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
15055 break;
15056 default:
15057 gcc_unreachable ();
15060 return 1; /* DONE */
15064 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
15065 works for floating-point parameters and non-offsettable memories.
15066 For pushes, it returns just stack offsets; the values will be saved
15067 in the right order. Maximally four parts are generated. */
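/* For example, on a 32-bit target an XFmode value comes back as
   three SImode parts, while a TFmode value on a 64-bit target is
   returned as two DImode parts. */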
15069 static int
15070 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
15072 int size;
15074 if (!TARGET_64BIT)
15075 size = mode==XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
15076 else
15077 size = (GET_MODE_SIZE (mode) + 4) / 8;
15079 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
15080 gcc_assert (size >= 2 && size <= 4);
15082 /* Optimize constant pool references to immediates. This is used by fp
15083 moves, which force all constants to memory to allow combining. */
15084 if (MEM_P (operand) && MEM_READONLY_P (operand))
15086 rtx tmp = maybe_get_pool_constant (operand);
15087 if (tmp)
15088 operand = tmp;
15091 if (MEM_P (operand) && !offsettable_memref_p (operand))
15093 /* The only non-offsettable memories we handle are pushes. */
15094 int ok = push_operand (operand, VOIDmode);
15096 gcc_assert (ok);
15098 operand = copy_rtx (operand);
15099 PUT_MODE (operand, Pmode);
15100 parts[0] = parts[1] = parts[2] = parts[3] = operand;
15101 return size;
15104 if (GET_CODE (operand) == CONST_VECTOR)
15106 enum machine_mode imode = int_mode_for_mode (mode);
15107 /* Caution: if we looked through a constant pool memory above,
15108 the operand may actually have a different mode now. That's
15109 ok, since we want to pun this all the way back to an integer. */
15110 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
15111 gcc_assert (operand != NULL);
15112 mode = imode;
15115 if (!TARGET_64BIT)
15117 if (mode == DImode)
15118 split_di (&operand, 1, &parts[0], &parts[1]);
15119 else
15121 int i;
15123 if (REG_P (operand))
15125 gcc_assert (reload_completed);
15126 for (i = 0; i < size; i++)
15127 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
15129 else if (offsettable_memref_p (operand))
15131 operand = adjust_address (operand, SImode, 0);
15132 parts[0] = operand;
15133 for (i = 1; i < size; i++)
15134 parts[i] = adjust_address (operand, SImode, 4 * i);
15136 else if (GET_CODE (operand) == CONST_DOUBLE)
15138 REAL_VALUE_TYPE r;
15139 long l[4];
15141 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
15142 switch (mode)
15144 case TFmode:
15145 real_to_target (l, &r, mode);
15146 parts[3] = gen_int_mode (l[3], SImode);
15147 parts[2] = gen_int_mode (l[2], SImode);
15148 break;
15149 case XFmode:
15150 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
15151 parts[2] = gen_int_mode (l[2], SImode);
15152 break;
15153 case DFmode:
15154 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
15155 break;
15156 default:
15157 gcc_unreachable ();
15159 parts[1] = gen_int_mode (l[1], SImode);
15160 parts[0] = gen_int_mode (l[0], SImode);
15162 else
15163 gcc_unreachable ();
15166 else
15168 if (mode == TImode)
15169 split_ti (&operand, 1, &parts[0], &parts[1]);
15170 if (mode == XFmode || mode == TFmode)
15172 enum machine_mode upper_mode = mode==XFmode ? SImode : DImode;
15173 if (REG_P (operand))
15175 gcc_assert (reload_completed);
15176 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
15177 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
15179 else if (offsettable_memref_p (operand))
15181 operand = adjust_address (operand, DImode, 0);
15182 parts[0] = operand;
15183 parts[1] = adjust_address (operand, upper_mode, 8);
15185 else if (GET_CODE (operand) == CONST_DOUBLE)
15187 REAL_VALUE_TYPE r;
15188 long l[4];
15190 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
15191 real_to_target (l, &r, mode);
15193 /* Do not use shift by 32 to avoid warning on 32bit systems. */
15194 if (HOST_BITS_PER_WIDE_INT >= 64)
15195 parts[0]
15196 = gen_int_mode
15197 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
15198 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
15199 DImode);
15200 else
15201 parts[0] = immed_double_const (l[0], l[1], DImode);
15203 if (upper_mode == SImode)
15204 parts[1] = gen_int_mode (l[2], SImode);
15205 else if (HOST_BITS_PER_WIDE_INT >= 64)
15206 parts[1]
15207 = gen_int_mode
15208 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
15209 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
15210 DImode);
15211 else
15212 parts[1] = immed_double_const (l[2], l[3], DImode);
15214 else
15215 gcc_unreachable ();
15219 return size;
15222 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
15223 All required insns are emitted here. Operands 2-5 are used to
15224 hold the destination parts and operands 6-9 the source parts,
15225 in the correct order. */
15227 void
15228 ix86_split_long_move (rtx operands[])
15230 rtx part[2][4];
15231 int nparts, i, j;
15232 int push = 0;
15233 int collisions = 0;
15234 enum machine_mode mode = GET_MODE (operands[0]);
15235 bool collisionparts[4];
15237 /* The DFmode expanders may ask us to move a double.
15238 For a 64-bit target this is a single move. By hiding the
15239 fact here we simplify the i386.md splitters. */
15240 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
15242 /* Optimize constant pool references to immediates. This is used by
15243 fp moves, which force all constants to memory to allow combining. */
15245 if (MEM_P (operands[1])
15246 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
15247 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
15248 operands[1] = get_pool_constant (XEXP (operands[1], 0));
15249 if (push_operand (operands[0], VOIDmode))
15251 operands[0] = copy_rtx (operands[0]);
15252 PUT_MODE (operands[0], Pmode);
15254 else
15255 operands[0] = gen_lowpart (DImode, operands[0]);
15256 operands[1] = gen_lowpart (DImode, operands[1]);
15257 emit_move_insn (operands[0], operands[1]);
15258 return;
15261 /* The only non-offsettable memory we handle is a push. */
15262 if (push_operand (operands[0], VOIDmode))
15263 push = 1;
15264 else
15265 gcc_assert (!MEM_P (operands[0])
15266 || offsettable_memref_p (operands[0]));
15268 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
15269 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
15271 /* When emitting a push, take care of source operands on the stack. */
15272 if (push && MEM_P (operands[1])
15273 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
15274 for (i = 0; i < nparts - 1; i++)
15275 part[1][i] = change_address (part[1][i],
15276 GET_MODE (part[1][i]),
15277 XEXP (part[1][i + 1], 0));
15279 /* We need to do the copy in the right order in case an address
15280 register of the source overlaps the destination. */
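/* Count the destination parts that are mentioned in the source
   address. A single collision in a middle part can be fixed by
   reordering the moves; anything worse falls back to the lea
   rewrite below. */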
15281 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
15283 rtx tmp;
15285 for (i = 0; i < nparts; i++)
15287 collisionparts[i]
15288 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
15289 if (collisionparts[i])
15290 collisions++;
15293 /* Collision in the middle part can be handled by reordering. */
15294 if (collisions == 1 && nparts == 3 && collisionparts [1])
15296 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
15297 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
15299 else if (collisions == 1
15300 && nparts == 4
15301 && (collisionparts [1] || collisionparts [2]))
15303 if (collisionparts [1])
15305 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
15306 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
15308 else
15310 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
15311 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
15315 /* If there are more collisions, we can't handle them by reordering.
15316 Do an lea to the last part and use only one colliding move. */
15317 else if (collisions > 1)
15319 rtx base;
15321 collisions = 1;
15323 base = part[0][nparts - 1];
15325 /* Handle the case when the last part isn't valid for lea.
15326 Happens in 64-bit mode storing the 12-byte XFmode. */
15327 if (GET_MODE (base) != Pmode)
15328 base = gen_rtx_REG (Pmode, REGNO (base));
15330 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
15331 part[1][0] = replace_equiv_address (part[1][0], base);
15332 for (i = 1; i < nparts; i++)
15334 tmp = plus_constant (base, UNITS_PER_WORD * i);
15335 part[1][i] = replace_equiv_address (part[1][i], tmp);
15340 if (push)
15342 if (!TARGET_64BIT)
15344 if (nparts == 3)
15346 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
15347 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-4)));
15348 emit_move_insn (part[0][2], part[1][2]);
15350 else if (nparts == 4)
15352 emit_move_insn (part[0][3], part[1][3]);
15353 emit_move_insn (part[0][2], part[1][2]);
15356 else
15358 /* In 64-bit mode we don't have a 32-bit push available. If this
15359 is a register, that is OK - we just use the larger counterpart.
15360 We also retype memory operands - these come from an attempt to
15361 avoid the REX prefix on moves of the second half of a TFmode value. */
15362 if (GET_MODE (part[1][1]) == SImode)
15364 switch (GET_CODE (part[1][1]))
15366 case MEM:
15367 part[1][1] = adjust_address (part[1][1], DImode, 0);
15368 break;
15370 case REG:
15371 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
15372 break;
15374 default:
15375 gcc_unreachable ();
15378 if (GET_MODE (part[1][0]) == SImode)
15379 part[1][0] = part[1][1];
15382 emit_move_insn (part[0][1], part[1][1]);
15383 emit_move_insn (part[0][0], part[1][0]);
15384 return;
15387 /* Choose correct order to not overwrite the source before it is copied. */
15388 if ((REG_P (part[0][0])
15389 && REG_P (part[1][1])
15390 && (REGNO (part[0][0]) == REGNO (part[1][1])
15391 || (nparts == 3
15392 && REGNO (part[0][0]) == REGNO (part[1][2]))
15393 || (nparts == 4
15394 && REGNO (part[0][0]) == REGNO (part[1][3]))))
15395 || (collisions > 0
15396 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
15398 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
15400 operands[2 + i] = part[0][j];
15401 operands[6 + i] = part[1][j];
15404 else
15406 for (i = 0; i < nparts; i++)
15408 operands[2 + i] = part[0][i];
15409 operands[6 + i] = part[1][i];
15413 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
15414 if (optimize_size)
15416 for (j = 0; j < nparts - 1; j++)
15417 if (CONST_INT_P (operands[6 + j])
15418 && operands[6 + j] != const0_rtx
15419 && REG_P (operands[2 + j]))
15420 for (i = j; i < nparts - 1; i++)
15421 if (CONST_INT_P (operands[7 + i])
15422 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
15423 operands[7 + i] = operands[2 + j];
15426 for (i = 0; i < nparts; i++)
15427 emit_move_insn (operands[2 + i], operands[6 + i]);
15429 return;
15432 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
15433 left shift by a constant, either using a single shift or
15434 a sequence of add instructions. */
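/* Note that MODE is the mode of the original double-word operand,
   so for a DImode split the adds and shifts below are emitted on
   the SImode halves, and for TImode on the DImode halves. */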
15436 static void
15437 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
15439 if (count == 1)
15441 emit_insn ((mode == DImode
15442 ? gen_addsi3
15443 : gen_adddi3) (operand, operand, operand));
15445 else if (!optimize_size
15446 && count * ix86_cost->add <= ix86_cost->shift_const)
15448 int i;
15449 for (i = 0; i < count; i++)
15451 emit_insn ((mode == DImode
15452 ? gen_addsi3
15453 : gen_adddi3) (operand, operand, operand));
15456 else
15457 emit_insn ((mode == DImode
15458 ? gen_ashlsi3
15459 : gen_ashldi3) (operand, operand, GEN_INT (count)));
15462 void
15463 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
15465 rtx low[2], high[2];
15466 int count;
15467 const int single_width = mode == DImode ? 32 : 64;
15469 if (CONST_INT_P (operands[2]))
15471 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
15472 count = INTVAL (operands[2]) & (single_width * 2 - 1);
15474 if (count >= single_width)
15476 emit_move_insn (high[0], low[1]);
15477 emit_move_insn (low[0], const0_rtx);
15479 if (count > single_width)
15480 ix86_expand_ashl_const (high[0], count - single_width, mode);
15482 else
15484 if (!rtx_equal_p (operands[0], operands[1]))
15485 emit_move_insn (operands[0], operands[1]);
15486 emit_insn ((mode == DImode
15487 ? gen_x86_shld
15488 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
15489 ix86_expand_ashl_const (low[0], count, mode);
15491 return;
15494 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
15496 if (operands[1] == const1_rtx)
15498 /* Assuming we've chosen QImode-capable registers, 1 << N
15499 can be done with two 32/64-bit shifts, no branches, no cmoves. */
15500 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
15502 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
15504 ix86_expand_clear (low[0]);
15505 ix86_expand_clear (high[0]);
15506 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
15508 d = gen_lowpart (QImode, low[0]);
15509 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
15510 s = gen_rtx_EQ (QImode, flags, const0_rtx);
15511 emit_insn (gen_rtx_SET (VOIDmode, d, s));
15513 d = gen_lowpart (QImode, high[0]);
15514 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
15515 s = gen_rtx_NE (QImode, flags, const0_rtx);
15516 emit_insn (gen_rtx_SET (VOIDmode, d, s));
15519 /* Otherwise, we can get the same results by manually performing
15520 a bit extract operation on bit 5/6, and then performing the two
15521 shifts. The two methods of getting 0/1 into low/high are exactly
15522 the same size. Avoiding the shift in the bit extract case helps
15523 pentium4 a bit; no one else seems to care much either way. */
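/* Concretely: high[0] = (count >> 5) & 1 for DImode (bit 6 for
   TImode), low[0] = high[0] ^ 1, so exactly one half holds the 1
   before both halves are shifted by the masked count. */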
15524 else
15526 rtx x;
15528 if (TARGET_PARTIAL_REG_STALL && !optimize_size)
15529 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
15530 else
15531 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
15532 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
15534 emit_insn ((mode == DImode
15535 ? gen_lshrsi3
15536 : gen_lshrdi3) (high[0], high[0], GEN_INT (mode == DImode ? 5 : 6)));
15537 emit_insn ((mode == DImode
15538 ? gen_andsi3
15539 : gen_anddi3) (high[0], high[0], GEN_INT (1)));
15540 emit_move_insn (low[0], high[0]);
15541 emit_insn ((mode == DImode
15542 ? gen_xorsi3
15543 : gen_xordi3) (low[0], low[0], GEN_INT (1)));
15546 emit_insn ((mode == DImode
15547 ? gen_ashlsi3
15548 : gen_ashldi3) (low[0], low[0], operands[2]));
15549 emit_insn ((mode == DImode
15550 ? gen_ashlsi3
15551 : gen_ashldi3) (high[0], high[0], operands[2]));
15552 return;
15555 if (operands[1] == constm1_rtx)
15557 /* For -1 << N, we can avoid the shld instruction, because we
15558 know that we're shifting 0...31/63 ones into a -1. */
15559 emit_move_insn (low[0], constm1_rtx);
15560 if (optimize_size)
15561 emit_move_insn (high[0], low[0]);
15562 else
15563 emit_move_insn (high[0], constm1_rtx);
15565 else
15567 if (!rtx_equal_p (operands[0], operands[1]))
15568 emit_move_insn (operands[0], operands[1]);
15570 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
15571 emit_insn ((mode == DImode
15572 ? gen_x86_shld
15573 : gen_x86_64_shld) (high[0], low[0], operands[2]));
15576 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
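/* Both halves were shifted assuming a count below the half width;
   the shift_adj patterns repair the result at run time when the
   count's high bit is set, with cmov when a scratch register is
   available and a conditional branch otherwise. */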
15578 if (TARGET_CMOVE && scratch)
15580 ix86_expand_clear (scratch);
15581 emit_insn ((mode == DImode
15582 ? gen_x86_shift_adj_1
15583 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
15584 scratch));
15586 else
15587 emit_insn ((mode == DImode
15588 ? gen_x86_shift_adj_2
15589 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
15592 void
15593 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
15595 rtx low[2], high[2];
15596 int count;
15597 const int single_width = mode == DImode ? 32 : 64;
15599 if (CONST_INT_P (operands[2]))
15601 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
15602 count = INTVAL (operands[2]) & (single_width * 2 - 1);
15604 if (count == single_width * 2 - 1)
15606 emit_move_insn (high[0], high[1]);
15607 emit_insn ((mode == DImode
15608 ? gen_ashrsi3
15609 : gen_ashrdi3) (high[0], high[0],
15610 GEN_INT (single_width - 1)));
15611 emit_move_insn (low[0], high[0]);
15614 else if (count >= single_width)
15616 emit_move_insn (low[0], high[1]);
15617 emit_move_insn (high[0], low[0]);
15618 emit_insn ((mode == DImode
15619 ? gen_ashrsi3
15620 : gen_ashrdi3) (high[0], high[0],
15621 GEN_INT (single_width - 1)));
15622 if (count > single_width)
15623 emit_insn ((mode == DImode
15624 ? gen_ashrsi3
15625 : gen_ashrdi3) (low[0], low[0],
15626 GEN_INT (count - single_width)));
15628 else
15630 if (!rtx_equal_p (operands[0], operands[1]))
15631 emit_move_insn (operands[0], operands[1]);
15632 emit_insn ((mode == DImode
15633 ? gen_x86_shrd
15634 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
15635 emit_insn ((mode == DImode
15636 ? gen_ashrsi3
15637 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
15640 else
15642 if (!rtx_equal_p (operands[0], operands[1]))
15643 emit_move_insn (operands[0], operands[1]);
15645 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
15647 emit_insn ((mode == DImode
15648 ? gen_x86_shrd
15649 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
15650 emit_insn ((mode == DImode
15651 ? gen_ashrsi3
15652 : gen_ashrdi3) (high[0], high[0], operands[2]));
15654 if (TARGET_CMOVE && scratch)
15656 emit_move_insn (scratch, high[0]);
15657 emit_insn ((mode == DImode
15658 ? gen_ashrsi3
15659 : gen_ashrdi3) (scratch, scratch,
15660 GEN_INT (single_width - 1)));
15661 emit_insn ((mode == DImode
15662 ? gen_x86_shift_adj_1
15663 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
15664 scratch));
15666 else
15667 emit_insn ((mode == DImode
15668 ? gen_x86_shift_adj_3
15669 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
15673 void
15674 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
15676 rtx low[2], high[2];
15677 int count;
15678 const int single_width = mode == DImode ? 32 : 64;
15680 if (CONST_INT_P (operands[2]))
15682 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
15683 count = INTVAL (operands[2]) & (single_width * 2 - 1);
15685 if (count >= single_width)
15687 emit_move_insn (low[0], high[1]);
15688 ix86_expand_clear (high[0]);
15690 if (count > single_width)
15691 emit_insn ((mode == DImode
15692 ? gen_lshrsi3
15693 : gen_lshrdi3) (low[0], low[0],
15694 GEN_INT (count - single_width)));
15696 else
15698 if (!rtx_equal_p (operands[0], operands[1]))
15699 emit_move_insn (operands[0], operands[1]);
15700 emit_insn ((mode == DImode
15701 ? gen_x86_shrd
15702 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
15703 emit_insn ((mode == DImode
15704 ? gen_lshrsi3
15705 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
15708 else
15710 if (!rtx_equal_p (operands[0], operands[1]))
15711 emit_move_insn (operands[0], operands[1]);
15713 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
15715 emit_insn ((mode == DImode
15716 ? gen_x86_shrd
15717 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
15718 emit_insn ((mode == DImode
15719 ? gen_lshrsi3
15720 : gen_lshrdi3) (high[0], high[0], operands[2]));
15722 /* Heh. By reversing the arguments, we can reuse this pattern. */
15723 if (TARGET_CMOVE && scratch)
15725 ix86_expand_clear (scratch);
15726 emit_insn ((mode == DImode
15727 ? gen_x86_shift_adj_1
15728 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
15729 scratch));
15731 else
15732 emit_insn ((mode == DImode
15733 ? gen_x86_shift_adj_2
15734 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
15738 /* Predict just emitted jump instruction to be taken with probability PROB. */
15739 static void
15740 predict_jump (int prob)
15742 rtx insn = get_last_insn ();
15743 gcc_assert (JUMP_P (insn));
15744 REG_NOTES (insn)
15745 = gen_rtx_EXPR_LIST (REG_BR_PROB,
15746 GEN_INT (prob),
15747 REG_NOTES (insn));
15750 /* Helper function for the string operations below. Test whether
15751 (VARIABLE & VALUE) is zero; if so, jump to the returned label. */
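/* Roughly (illustrative), ix86_expand_aligntest (ptr, 4, ...) emits
     tmp = ptr & 4;  if (tmp == 0) goto label;
   so the code emitted between this call and the label handles the case
   where that alignment bit is set.  */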
15752 static rtx
15753 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
15755 rtx label = gen_label_rtx ();
15756 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
15757 if (GET_MODE (variable) == DImode)
15758 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
15759 else
15760 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
15761 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
15762 1, label);
15763 if (epilogue)
15764 predict_jump (REG_BR_PROB_BASE * 50 / 100);
15765 else
15766 predict_jump (REG_BR_PROB_BASE * 90 / 100);
15767 return label;
15770 /* Decrement COUNTREG by VALUE. */
15771 static void
15772 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
15774 if (GET_MODE (countreg) == DImode)
15775 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
15776 else
15777 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
15780 /* Zero extend EXP (which may be in SImode) to a Pmode register. */
15782 ix86_zero_extend_to_Pmode (rtx exp)
15784 rtx r;
15785 if (GET_MODE (exp) == VOIDmode)
15786 return force_reg (Pmode, exp);
15787 if (GET_MODE (exp) == Pmode)
15788 return copy_to_mode_reg (Pmode, exp);
15789 r = gen_reg_rtx (Pmode);
15790 emit_insn (gen_zero_extendsidi2 (r, exp));
15791 return r;
15794 /* Divide COUNTREG by SCALE. */
15795 static rtx
15796 scale_counter (rtx countreg, int scale)
15798 rtx sc;
15799 rtx piece_size_mask;
15801 if (scale == 1)
15802 return countreg;
15803 if (CONST_INT_P (countreg))
15804 return GEN_INT (INTVAL (countreg) / scale);
15805 gcc_assert (REG_P (countreg));
15807 piece_size_mask = GEN_INT (scale - 1);
15808 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
15809 GEN_INT (exact_log2 (scale)),
15810 NULL, 1, OPTAB_DIRECT);
15811 return sc;
15814 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
15815 DImode for constant loop counts. */
15817 static enum machine_mode
15818 counter_mode (rtx count_exp)
15820 if (GET_MODE (count_exp) != VOIDmode)
15821 return GET_MODE (count_exp);
15822 if (GET_CODE (count_exp) != CONST_INT)
15823 return Pmode;
15824 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
15825 return DImode;
15826 return SImode;
15829 /* When SRCPTR is non-NULL, output a simple loop to move memory
15830 pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
15831 the overall size is COUNT, specified in bytes. When SRCPTR is NULL,
15832 output the equivalent loop to set memory to VALUE (assumed to be in MODE).
15834 The size is rounded down to a whole number of chunks moved at once.
15835 SRCMEM and DESTMEM provide MEM rtx to feed proper aliasing info. */
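/* The emitted code has roughly this shape (a sketch, not literal RTL):

     size = count & -piece;         piece = GET_MODE_SIZE (mode) * unroll
     if (size == 0) goto out;       emitted only when piece == 1
     iter = 0;
   top:
     move or set UNROLL chunks at destptr + iter (and srcptr + iter);
     iter += piece;
     if (iter < size) goto top;
     destptr += iter;  srcptr += iter;
   out:  */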
15838 static void
15839 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
15840 rtx destptr, rtx srcptr, rtx value,
15841 rtx count, enum machine_mode mode, int unroll,
15842 int expected_size)
15844 rtx out_label, top_label, iter, tmp;
15845 enum machine_mode iter_mode = counter_mode (count);
15846 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
15847 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
15848 rtx size;
15849 rtx x_addr;
15850 rtx y_addr;
15851 int i;
15853 top_label = gen_label_rtx ();
15854 out_label = gen_label_rtx ();
15855 iter = gen_reg_rtx (iter_mode);
15857 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
15858 NULL, 1, OPTAB_DIRECT);
15859 /* Those two should combine. */
15860 if (piece_size == const1_rtx)
15862 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
15863 true, out_label);
15864 predict_jump (REG_BR_PROB_BASE * 10 / 100);
15866 emit_move_insn (iter, const0_rtx);
15868 emit_label (top_label);
15870 tmp = convert_modes (Pmode, iter_mode, iter, true);
15871 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
15872 destmem = change_address (destmem, mode, x_addr);
15874 if (srcmem)
15876 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
15877 srcmem = change_address (srcmem, mode, y_addr);
15879 /* When unrolling for chips that reorder memory reads and writes,
15880 we can save registers by using a single temporary.
15881 Also, using four temporaries is overkill in 32-bit mode. */
15882 if (!TARGET_64BIT && 0)
15884 for (i = 0; i < unroll; i++)
15886 if (i)
15888 destmem =
15889 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
15890 srcmem =
15891 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
15893 emit_move_insn (destmem, srcmem);
15896 else
15898 rtx tmpreg[4];
15899 gcc_assert (unroll <= 4);
15900 for (i = 0; i < unroll; i++)
15902 tmpreg[i] = gen_reg_rtx (mode);
15903 if (i)
15905 srcmem =
15906 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
15908 emit_move_insn (tmpreg[i], srcmem);
15910 for (i = 0; i < unroll; i++)
15912 if (i)
15914 destmem =
15915 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
15917 emit_move_insn (destmem, tmpreg[i]);
15921 else
15922 for (i = 0; i < unroll; i++)
15924 if (i)
15925 destmem =
15926 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
15927 emit_move_insn (destmem, value);
15930 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
15931 true, OPTAB_LIB_WIDEN);
15932 if (tmp != iter)
15933 emit_move_insn (iter, tmp);
15935 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
15936 true, top_label);
15937 if (expected_size != -1)
15939 expected_size /= GET_MODE_SIZE (mode) * unroll;
15940 if (expected_size == 0)
15941 predict_jump (0);
15942 else if (expected_size > REG_BR_PROB_BASE)
15943 predict_jump (REG_BR_PROB_BASE - 1);
15944 else
15945 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
15947 else
15948 predict_jump (REG_BR_PROB_BASE * 80 / 100);
15949 iter = ix86_zero_extend_to_Pmode (iter);
15950 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
15951 true, OPTAB_LIB_WIDEN);
15952 if (tmp != destptr)
15953 emit_move_insn (destptr, tmp);
15954 if (srcptr)
15956 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
15957 true, OPTAB_LIB_WIDEN);
15958 if (tmp != srcptr)
15959 emit_move_insn (srcptr, tmp);
15961 emit_label (out_label);
15964 /* Output "rep; mov" instruction.
15965 Arguments have the same meaning as for the previous function. */
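/* Sketch of what is emitted (illustrative): the count register is set to
   COUNT / GET_MODE_SIZE (MODE), and a single rep movs{b,l,q} insn then both
   copies the block and leaves DESTPTR/SRCPTR pointing past its end, as
   described by DESTEXP/SRCEXP below.  */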
15966 static void
15967 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
15968 rtx destptr, rtx srcptr,
15969 rtx count,
15970 enum machine_mode mode)
15972 rtx destexp;
15973 rtx srcexp;
15974 rtx countreg;
15976 /* If the size is known to be divisible by 4, it is shorter to use rep movsl. */
15977 if (mode == QImode && CONST_INT_P (count)
15978 && !(INTVAL (count) & 3))
15979 mode = SImode;
15981 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
15982 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
15983 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
15984 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
15985 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
15986 if (mode != QImode)
15988 destexp = gen_rtx_ASHIFT (Pmode, countreg,
15989 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
15990 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
15991 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
15992 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
15993 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
15995 else
15997 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
15998 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
16000 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
16001 destexp, srcexp));
16004 /* Output "rep; stos" instruction.
16005 Arguments have the same meaning as for the previous function. */
16006 static void
16007 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
16008 rtx count,
16009 enum machine_mode mode)
16011 rtx destexp;
16012 rtx countreg;
16014 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
16015 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
16016 value = force_reg (mode, gen_lowpart (mode, value));
16017 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
16018 if (mode != QImode)
16020 destexp = gen_rtx_ASHIFT (Pmode, countreg,
16021 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
16022 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
16024 else
16025 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
16026 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
16029 static void
16030 emit_strmov (rtx destmem, rtx srcmem,
16031 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
16033 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
16034 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
16035 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16038 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
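/* For instance, a constant COUNT of 23 (binary 10111) with MAX_SIZE 32 on a
   64-bit target expands to two DImode moves (the 16-byte chunk), then one
   SImode, one HImode and one QImode move for the remaining 4 + 2 + 1 bytes,
   all at compile-time-known offsets.  */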
16039 static void
16040 expand_movmem_epilogue (rtx destmem, rtx srcmem,
16041 rtx destptr, rtx srcptr, rtx count, int max_size)
16043 rtx src, dest;
16044 if (CONST_INT_P (count))
16046 HOST_WIDE_INT countval = INTVAL (count);
16047 int offset = 0;
16049 if ((countval & 0x10) && max_size > 16)
16051 if (TARGET_64BIT)
16053 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
16054 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
16056 else
16057 gcc_unreachable ();
16058 offset += 16;
16060 if ((countval & 0x08) && max_size > 8)
16062 if (TARGET_64BIT)
16063 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
16064 else
16066 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
16067 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
16069 offset += 8;
16071 if ((countval & 0x04) && max_size > 4)
16073 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
16074 offset += 4;
16076 if ((countval & 0x02) && max_size > 2)
16078 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
16079 offset += 2;
16081 if ((countval & 0x01) && max_size > 1)
16083 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
16084 offset += 1;
16086 return;
16088 if (max_size > 8)
16090 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
16091 count, 1, OPTAB_DIRECT);
16092 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
16093 count, QImode, 1, 4);
16094 return;
16097 /* When there are stringops, we can cheaply advance the dest and src pointers.
16098 Otherwise we save code size by maintaining an offset (zero is readily
16099 available from the preceding rep operation) and using x86 addressing modes. */
16101 if (TARGET_SINGLE_STRINGOP)
16103 if (max_size > 4)
16105 rtx label = ix86_expand_aligntest (count, 4, true);
16106 src = change_address (srcmem, SImode, srcptr);
16107 dest = change_address (destmem, SImode, destptr);
16108 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16109 emit_label (label);
16110 LABEL_NUSES (label) = 1;
16112 if (max_size > 2)
16114 rtx label = ix86_expand_aligntest (count, 2, true);
16115 src = change_address (srcmem, HImode, srcptr);
16116 dest = change_address (destmem, HImode, destptr);
16117 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16118 emit_label (label);
16119 LABEL_NUSES (label) = 1;
16121 if (max_size > 1)
16123 rtx label = ix86_expand_aligntest (count, 1, true);
16124 src = change_address (srcmem, QImode, srcptr);
16125 dest = change_address (destmem, QImode, destptr);
16126 emit_insn (gen_strmov (destptr, dest, srcptr, src));
16127 emit_label (label);
16128 LABEL_NUSES (label) = 1;
16131 else
16133 rtx offset = force_reg (Pmode, const0_rtx);
16134 rtx tmp;
16136 if (max_size > 4)
16138 rtx label = ix86_expand_aligntest (count, 4, true);
16139 src = change_address (srcmem, SImode, srcptr);
16140 dest = change_address (destmem, SImode, destptr);
16141 emit_move_insn (dest, src);
16142 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
16143 true, OPTAB_LIB_WIDEN);
16144 if (tmp != offset)
16145 emit_move_insn (offset, tmp);
16146 emit_label (label);
16147 LABEL_NUSES (label) = 1;
16149 if (max_size > 2)
16151 rtx label = ix86_expand_aligntest (count, 2, true);
16152 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
16153 src = change_address (srcmem, HImode, tmp);
16154 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
16155 dest = change_address (destmem, HImode, tmp);
16156 emit_move_insn (dest, src);
16157 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
16158 true, OPTAB_LIB_WIDEN);
16159 if (tmp != offset)
16160 emit_move_insn (offset, tmp);
16161 emit_label (label);
16162 LABEL_NUSES (label) = 1;
16164 if (max_size > 1)
16166 rtx label = ix86_expand_aligntest (count, 1, true);
16167 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
16168 src = change_address (srcmem, QImode, tmp);
16169 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
16170 dest = change_address (destmem, QImode, tmp);
16171 emit_move_insn (dest, src);
16172 emit_label (label);
16173 LABEL_NUSES (label) = 1;
16178 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
16179 static void
16180 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
16181 rtx count, int max_size)
16183 count =
16184 expand_simple_binop (counter_mode (count), AND, count,
16185 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
16186 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
16187 gen_lowpart (QImode, value), count, QImode,
16188 1, max_size / 2);
16191 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
16192 static void
16193 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
16195 rtx dest;
16197 if (CONST_INT_P (count))
16199 HOST_WIDE_INT countval = INTVAL (count);
16200 int offset = 0;
16202 if ((countval & 0x10) && max_size > 16)
16204 if (TARGET_64BIT)
16206 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
16207 emit_insn (gen_strset (destptr, dest, value));
16208 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
16209 emit_insn (gen_strset (destptr, dest, value));
16211 else
16212 gcc_unreachable ();
16213 offset += 16;
16215 if ((countval & 0x08) && max_size > 8)
16217 if (TARGET_64BIT)
16219 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
16220 emit_insn (gen_strset (destptr, dest, value));
16222 else
16224 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
16225 emit_insn (gen_strset (destptr, dest, value));
16226 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
16227 emit_insn (gen_strset (destptr, dest, value));
16229 offset += 8;
16231 if ((countval & 0x04) && max_size > 4)
16233 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
16234 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
16235 offset += 4;
16237 if ((countval & 0x02) && max_size > 2)
16239 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
16240 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
16241 offset += 2;
16243 if ((countval & 0x01) && max_size > 1)
16245 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
16246 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
16247 offset += 1;
16249 return;
16251 if (max_size > 32)
16253 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
16254 return;
16256 if (max_size > 16)
16258 rtx label = ix86_expand_aligntest (count, 16, true);
16259 if (TARGET_64BIT)
16261 dest = change_address (destmem, DImode, destptr);
16262 emit_insn (gen_strset (destptr, dest, value));
16263 emit_insn (gen_strset (destptr, dest, value));
16265 else
16267 dest = change_address (destmem, SImode, destptr);
16268 emit_insn (gen_strset (destptr, dest, value));
16269 emit_insn (gen_strset (destptr, dest, value));
16270 emit_insn (gen_strset (destptr, dest, value));
16271 emit_insn (gen_strset (destptr, dest, value));
16273 emit_label (label);
16274 LABEL_NUSES (label) = 1;
16276 if (max_size > 8)
16278 rtx label = ix86_expand_aligntest (count, 8, true);
16279 if (TARGET_64BIT)
16281 dest = change_address (destmem, DImode, destptr);
16282 emit_insn (gen_strset (destptr, dest, value));
16284 else
16286 dest = change_address (destmem, SImode, destptr);
16287 emit_insn (gen_strset (destptr, dest, value));
16288 emit_insn (gen_strset (destptr, dest, value));
16290 emit_label (label);
16291 LABEL_NUSES (label) = 1;
16293 if (max_size > 4)
16295 rtx label = ix86_expand_aligntest (count, 4, true);
16296 dest = change_address (destmem, SImode, destptr);
16297 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
16298 emit_label (label);
16299 LABEL_NUSES (label) = 1;
16301 if (max_size > 2)
16303 rtx label = ix86_expand_aligntest (count, 2, true);
16304 dest = change_address (destmem, HImode, destptr);
16305 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
16306 emit_label (label);
16307 LABEL_NUSES (label) = 1;
16309 if (max_size > 1)
16311 rtx label = ix86_expand_aligntest (count, 1, true);
16312 dest = change_address (destmem, QImode, destptr);
16313 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
16314 emit_label (label);
16315 LABEL_NUSES (label) = 1;
16319 /* Copy enough bytes from SRC to DEST to align DEST, known to be aligned
16320 by ALIGN, to DESIRED_ALIGNMENT. */
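/* For instance, with ALIGN 1 and DESIRED_ALIGNMENT 8 this emits three
   conditional copies: one byte when (destptr & 1) is set, two bytes when
   (destptr & 2) is set and four bytes when (destptr & 4) is set, each
   guarded by ix86_expand_aligntest and decrementing COUNT accordingly.  */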
16321 static void
16322 expand_movmem_prologue (rtx destmem, rtx srcmem,
16323 rtx destptr, rtx srcptr, rtx count,
16324 int align, int desired_alignment)
16326 if (align <= 1 && desired_alignment > 1)
16328 rtx label = ix86_expand_aligntest (destptr, 1, false);
16329 srcmem = change_address (srcmem, QImode, srcptr);
16330 destmem = change_address (destmem, QImode, destptr);
16331 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16332 ix86_adjust_counter (count, 1);
16333 emit_label (label);
16334 LABEL_NUSES (label) = 1;
16336 if (align <= 2 && desired_alignment > 2)
16338 rtx label = ix86_expand_aligntest (destptr, 2, false);
16339 srcmem = change_address (srcmem, HImode, srcptr);
16340 destmem = change_address (destmem, HImode, destptr);
16341 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16342 ix86_adjust_counter (count, 2);
16343 emit_label (label);
16344 LABEL_NUSES (label) = 1;
16346 if (align <= 4 && desired_alignment > 4)
16348 rtx label = ix86_expand_aligntest (destptr, 4, false);
16349 srcmem = change_address (srcmem, SImode, srcptr);
16350 destmem = change_address (destmem, SImode, destptr);
16351 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
16352 ix86_adjust_counter (count, 4);
16353 emit_label (label);
16354 LABEL_NUSES (label) = 1;
16356 gcc_assert (desired_alignment <= 8);
16359 /* Store enough of VALUE at DEST to align DEST, known to be aligned
16360 by ALIGN, to DESIRED_ALIGNMENT. */
16361 static void
16362 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
16363 int align, int desired_alignment)
16365 if (align <= 1 && desired_alignment > 1)
16367 rtx label = ix86_expand_aligntest (destptr, 1, false);
16368 destmem = change_address (destmem, QImode, destptr);
16369 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
16370 ix86_adjust_counter (count, 1);
16371 emit_label (label);
16372 LABEL_NUSES (label) = 1;
16374 if (align <= 2 && desired_alignment > 2)
16376 rtx label = ix86_expand_aligntest (destptr, 2, false);
16377 destmem = change_address (destmem, HImode, destptr);
16378 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
16379 ix86_adjust_counter (count, 2);
16380 emit_label (label);
16381 LABEL_NUSES (label) = 1;
16383 if (align <= 4 && desired_alignment > 4)
16385 rtx label = ix86_expand_aligntest (destptr, 4, false);
16386 destmem = change_address (destmem, SImode, destptr);
16387 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
16388 ix86_adjust_counter (count, 4);
16389 emit_label (label);
16390 LABEL_NUSES (label) = 1;
16392 gcc_assert (desired_alignment <= 8);
16395 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
16396 static enum stringop_alg
16397 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
16398 int *dynamic_check)
16400 const struct stringop_algs * algs;
16401 /* Algorithms using the rep prefix want at least edi and ecx;
16402 additionally, memset wants eax and memcpy wants esi. Don't
16403 consider such algorithms if the user has appropriated those
16404 registers for their own purposes. */
16405 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
16406 || (memset
16407 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
16409 #define ALG_USABLE_P(alg) (rep_prefix_usable \
16410 || (alg != rep_prefix_1_byte \
16411 && alg != rep_prefix_4_byte \
16412 && alg != rep_prefix_8_byte))
16414 *dynamic_check = -1;
16415 if (memset)
16416 algs = &ix86_cost->memset[TARGET_64BIT != 0];
16417 else
16418 algs = &ix86_cost->memcpy[TARGET_64BIT != 0];
16419 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
16420 return stringop_alg;
16421 /* rep; movq or rep; movl is the smallest variant. */
16422 else if (optimize_size)
16424 if (!count || (count & 3))
16425 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
16426 else
16427 return rep_prefix_usable ? rep_prefix_4_byte : loop;
16429 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
16431 else if (expected_size != -1 && expected_size < 4)
16432 return loop_1_byte;
16433 else if (expected_size != -1)
16435 unsigned int i;
16436 enum stringop_alg alg = libcall;
16437 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
16439 /* We get here if the algorithms that were not libcall-based
16440 were rep-prefix based and we are unable to use rep prefixes
16441 based on global register usage. Break out of the loop and
16442 use the heuristic below. */
16443 if (algs->size[i].max == 0)
16444 break;
16445 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
16447 enum stringop_alg candidate = algs->size[i].alg;
16449 if (candidate != libcall && ALG_USABLE_P (candidate))
16450 alg = candidate;
16451 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
16452 last non-libcall inline algorithm. */
16453 if (TARGET_INLINE_ALL_STRINGOPS)
16455 /* When the current size is best to be copied by a libcall,
16456 but we are still forced to inline, run the heuristic below
16457 that will pick code for medium sized blocks. */
16458 if (alg != libcall)
16459 return alg;
16460 break;
16462 else if (ALG_USABLE_P (candidate))
16463 return candidate;
16466 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
16468 /* When asked to inline the call anyway, try to pick a meaningful choice.
16469 We look for the maximal size of a block that is faster to copy by hand and
16470 take blocks of at most that size, guessing that the average size will
16471 be roughly half of the block.
16473 If this turns out to be bad, we might simply specify the preferred
16474 choice in ix86_costs. */
16475 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
16476 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
16478 int max = -1;
16479 enum stringop_alg alg;
16480 int i;
16481 bool any_alg_usable_p = true;
16483 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
16485 enum stringop_alg candidate = algs->size[i].alg;
16486 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
16488 if (candidate != libcall && candidate
16489 && ALG_USABLE_P (candidate))
16490 max = algs->size[i].max;
16492 /* If there aren't any usable algorithms, then recursing on
16493 smaller sizes isn't going to find anything. Just return the
16494 simple byte-at-a-time copy loop. */
16495 if (!any_alg_usable_p)
16497 /* Pick something reasonable. */
16498 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
16499 *dynamic_check = 128;
16500 return loop_1_byte;
16502 if (max == -1)
16503 max = 4096;
16504 alg = decide_alg (count, max / 2, memset, dynamic_check);
16505 gcc_assert (*dynamic_check == -1);
16506 gcc_assert (alg != libcall);
16507 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
16508 *dynamic_check = max;
16509 return alg;
16511 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
16512 #undef ALG_USABLE_P
16515 /* Decide on alignment. We know that the operand is already aligned to ALIGN
16516 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
16517 static int
16518 decide_alignment (int align,
16519 enum stringop_alg alg,
16520 int expected_size)
16522 int desired_align = 0;
16523 switch (alg)
16525 case no_stringop:
16526 gcc_unreachable ();
16527 case loop:
16528 case unrolled_loop:
16529 desired_align = GET_MODE_SIZE (Pmode);
16530 break;
16531 case rep_prefix_8_byte:
16532 desired_align = 8;
16533 break;
16534 case rep_prefix_4_byte:
16535 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
16536 copying a whole cache line at once. */
16537 if (TARGET_PENTIUMPRO)
16538 desired_align = 8;
16539 else
16540 desired_align = 4;
16541 break;
16542 case rep_prefix_1_byte:
16543 /* PentiumPro has special logic triggering for 8-byte-aligned blocks,
16544 copying a whole cache line at once. */
16545 if (TARGET_PENTIUMPRO)
16546 desired_align = 8;
16547 else
16548 desired_align = 1;
16549 break;
16550 case loop_1_byte:
16551 desired_align = 1;
16552 break;
16553 case libcall:
16554 return 0;
16557 if (optimize_size)
16558 desired_align = 1;
16559 if (desired_align < align)
16560 desired_align = align;
16561 if (expected_size != -1 && expected_size < 4)
16562 desired_align = align;
16563 return desired_align;
16566 /* Return the smallest power of 2 greater than VAL. */
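/* E.g. 3 -> 4 and 4 -> 8; the result is strictly greater than VAL.  */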
16567 static int
16568 smallest_pow2_greater_than (int val)
16570 int ret = 1;
16571 while (ret <= val)
16572 ret <<= 1;
16573 return ret;
16576 /* Expand string move (memcpy) operation. Use i386 string operations when
16577 profitable. expand_setmem contains similar code. The code depends upon
16578 architecture, block size and alignment, but always has the same
16579 overall structure:
16581 1) Prologue guard: Conditional that jumps up to epilogues for small
16582 blocks that can be handled by epilogue alone. This is faster but
16583 also needed for correctness, since the prologue assumes the block is larger
16584 than the desired alignment.
16586 Optional dynamic check for size and libcall for large
16587 blocks is emitted here too, with -minline-stringops-dynamically.
16589 2) Prologue: copy first few bytes in order to get destination aligned
16590 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
16591 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
16592 We emit either a jump tree on power of two sized blocks, or a byte loop.
16594 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
16595 with specified algorithm.
16597 4) Epilogue: code copying tail of the block that is too small to be
16598 handled by main body (or up to size guarded by prologue guard). */
16601 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
16602 rtx expected_align_exp, rtx expected_size_exp)
16604 rtx destreg;
16605 rtx srcreg;
16606 rtx label = NULL;
16607 rtx tmp;
16608 rtx jump_around_label = NULL;
16609 HOST_WIDE_INT align = 1;
16610 unsigned HOST_WIDE_INT count = 0;
16611 HOST_WIDE_INT expected_size = -1;
16612 int size_needed = 0, epilogue_size_needed;
16613 int desired_align = 0;
16614 enum stringop_alg alg;
16615 int dynamic_check;
16617 if (CONST_INT_P (align_exp))
16618 align = INTVAL (align_exp);
16619 /* i386 can do misaligned access at a reasonably increased cost. */
16620 if (CONST_INT_P (expected_align_exp)
16621 && INTVAL (expected_align_exp) > align)
16622 align = INTVAL (expected_align_exp);
16623 if (CONST_INT_P (count_exp))
16624 count = expected_size = INTVAL (count_exp);
16625 if (CONST_INT_P (expected_size_exp) && count == 0)
16626 expected_size = INTVAL (expected_size_exp);
16628 /* Make sure we don't need to care about overflow later on. */
16629 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
16630 return 0;
16632 /* Step 0: Decide on preferred algorithm, desired alignment and
16633 size of chunks to be copied by main loop. */
16635 alg = decide_alg (count, expected_size, false, &dynamic_check);
16636 desired_align = decide_alignment (align, alg, expected_size);
16638 if (!TARGET_ALIGN_STRINGOPS)
16639 align = desired_align;
16641 if (alg == libcall)
16642 return 0;
16643 gcc_assert (alg != no_stringop);
16644 if (!count)
16645 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
16646 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
16647 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
16648 switch (alg)
16650 case libcall:
16651 case no_stringop:
16652 gcc_unreachable ();
16653 case loop:
16654 size_needed = GET_MODE_SIZE (Pmode);
16655 break;
16656 case unrolled_loop:
16657 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
16658 break;
16659 case rep_prefix_8_byte:
16660 size_needed = 8;
16661 break;
16662 case rep_prefix_4_byte:
16663 size_needed = 4;
16664 break;
16665 case rep_prefix_1_byte:
16666 case loop_1_byte:
16667 size_needed = 1;
16668 break;
16671 epilogue_size_needed = size_needed;
16673 /* Step 1: Prologue guard. */
16675 /* Alignment code needs count to be in a register. */
16676 if (CONST_INT_P (count_exp) && desired_align > align)
16677 count_exp = force_reg (counter_mode (count_exp), count_exp);
16678 gcc_assert (desired_align >= 1 && align >= 1);
16680 /* Ensure that alignment prologue won't copy past end of block. */
16681 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
16683 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
16684 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
16685 Make sure EPILOGUE_SIZE_NEEDED is a power of 2. */
16686 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
16688 if (CONST_INT_P (count_exp))
16690 if (UINTVAL (count_exp) < (unsigned HOST_WIDE_INT)epilogue_size_needed)
16691 goto epilogue;
16693 else
16695 label = gen_label_rtx ();
16696 emit_cmp_and_jump_insns (count_exp,
16697 GEN_INT (epilogue_size_needed),
16698 LTU, 0, counter_mode (count_exp), 1, label);
16699 if (expected_size == -1 || expected_size < epilogue_size_needed)
16700 predict_jump (REG_BR_PROB_BASE * 60 / 100);
16701 else
16702 predict_jump (REG_BR_PROB_BASE * 20 / 100);
16706 /* Emit code to decide at runtime whether a library call or inline code
16707 should be used. */
16708 if (dynamic_check != -1)
16710 if (CONST_INT_P (count_exp))
16712 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
16714 emit_block_move_via_libcall (dst, src, count_exp, false);
16715 count_exp = const0_rtx;
16716 goto epilogue;
16719 else
16721 rtx hot_label = gen_label_rtx ();
16722 jump_around_label = gen_label_rtx ();
16723 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
16724 LEU, 0, GET_MODE (count_exp), 1, hot_label);
16725 predict_jump (REG_BR_PROB_BASE * 90 / 100);
16726 emit_block_move_via_libcall (dst, src, count_exp, false);
16727 emit_jump (jump_around_label);
16728 emit_label (hot_label);
16732 /* Step 2: Alignment prologue. */
16734 if (desired_align > align)
16736 /* Except for the first move in the epilogue, we no longer know
16737 the constant offset in the aliasing info. It doesn't seem worth
16738 the pain to maintain it for the first move, so throw away
16739 the info early. */
16740 src = change_address (src, BLKmode, srcreg);
16741 dst = change_address (dst, BLKmode, destreg);
16742 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
16743 desired_align);
16745 if (label && size_needed == 1)
16747 emit_label (label);
16748 LABEL_NUSES (label) = 1;
16749 label = NULL;
16752 /* Step 3: Main loop. */
16754 switch (alg)
16756 case libcall:
16757 case no_stringop:
16758 gcc_unreachable ();
16759 case loop_1_byte:
16760 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
16761 count_exp, QImode, 1, expected_size);
16762 break;
16763 case loop:
16764 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
16765 count_exp, Pmode, 1, expected_size);
16766 break;
16767 case unrolled_loop:
16768 /* Unroll only by a factor of 2 in 32-bit mode, since we don't have enough
16769 registers for 4 temporaries anyway. */
16770 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
16771 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
16772 expected_size);
16773 break;
16774 case rep_prefix_8_byte:
16775 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
16776 DImode);
16777 break;
16778 case rep_prefix_4_byte:
16779 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
16780 SImode);
16781 break;
16782 case rep_prefix_1_byte:
16783 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
16784 QImode);
16785 break;
16787 /* Properly adjust the offsets of the src and dest memory for aliasing. */
16788 if (CONST_INT_P (count_exp))
16790 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
16791 (count / size_needed) * size_needed);
16792 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
16793 (count / size_needed) * size_needed);
16795 else
16797 src = change_address (src, BLKmode, srcreg);
16798 dst = change_address (dst, BLKmode, destreg);
16801 /* Step 4: Epilogue to copy the remaining bytes. */
16802 epilogue:
16803 if (label)
16805 /* When the main loop is done, COUNT_EXP might hold the original count,
16806 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
16807 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
16808 bytes. Compensate if needed. */
16810 if (size_needed < epilogue_size_needed)
16812 tmp =
16813 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
16814 GEN_INT (size_needed - 1), count_exp, 1,
16815 OPTAB_DIRECT);
16816 if (tmp != count_exp)
16817 emit_move_insn (count_exp, tmp);
16819 emit_label (label);
16820 LABEL_NUSES (label) = 1;
16823 if (count_exp != const0_rtx && epilogue_size_needed > 1)
16824 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
16825 epilogue_size_needed);
16826 if (jump_around_label)
16827 emit_label (jump_around_label);
16828 return 1;
16831 /* Helper function for memset. For the QImode value 0xXY produce
16832 0xXYXYXYXY of the width specified by MODE. This is essentially
16833 a multiplication by 0x01010101, but we can do slightly better than
16834 synth_mult by unwinding the sequence by hand on CPUs with
16835 slow multiply. */
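/* For instance, VAL = 0xAB is replicated by
     v |= v << 8;     0xAB   -> 0xABAB
     v |= v << 16;    0xABAB -> 0xABABABAB
     v |= v << 32;    DImode only
   mirroring the shift-and-or path below (an insv move may stand in for
   the first step when partial register stalls are not a concern).  */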
16836 static rtx
16837 promote_duplicated_reg (enum machine_mode mode, rtx val)
16839 enum machine_mode valmode = GET_MODE (val);
16840 rtx tmp;
16841 int nops = mode == DImode ? 3 : 2;
16843 gcc_assert (mode == SImode || mode == DImode);
16844 if (val == const0_rtx)
16845 return copy_to_mode_reg (mode, const0_rtx);
16846 if (CONST_INT_P (val))
16848 HOST_WIDE_INT v = INTVAL (val) & 255;
16850 v |= v << 8;
16851 v |= v << 16;
16852 if (mode == DImode)
16853 v |= (v << 16) << 16;
16854 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
16857 if (valmode == VOIDmode)
16858 valmode = QImode;
16859 if (valmode != QImode)
16860 val = gen_lowpart (QImode, val);
16861 if (mode == QImode)
16862 return val;
16863 if (!TARGET_PARTIAL_REG_STALL)
16864 nops--;
16865 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
16866 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
16867 <= (ix86_cost->shift_const + ix86_cost->add) * nops
16868 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
16870 rtx reg = convert_modes (mode, QImode, val, true);
16871 tmp = promote_duplicated_reg (mode, const1_rtx);
16872 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
16873 OPTAB_DIRECT);
16875 else
16877 rtx reg = convert_modes (mode, QImode, val, true);
16879 if (!TARGET_PARTIAL_REG_STALL)
16880 if (mode == SImode)
16881 emit_insn (gen_movsi_insv_1 (reg, reg));
16882 else
16883 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
16884 else
16886 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
16887 NULL, 1, OPTAB_DIRECT);
16888 reg =
16889 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
16891 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
16892 NULL, 1, OPTAB_DIRECT);
16893 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
16894 if (mode == SImode)
16895 return reg;
16896 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
16897 NULL, 1, OPTAB_DIRECT);
16898 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
16899 return reg;
16903 /* Duplicate value VAL using promote_duplicated_reg into the maximal size that
16904 will be needed by the main loop copying SIZE_NEEDED chunks and by the prologue
16905 raising the alignment from ALIGN to DESIRED_ALIGN. */
16906 static rtx
16907 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
16909 rtx promoted_val;
16911 if (TARGET_64BIT
16912 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
16913 promoted_val = promote_duplicated_reg (DImode, val);
16914 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
16915 promoted_val = promote_duplicated_reg (SImode, val);
16916 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
16917 promoted_val = promote_duplicated_reg (HImode, val);
16918 else
16919 promoted_val = val;
16921 return promoted_val;
16924 /* Expand string set operation (memset). Use i386 string operations
16925 when profitable. See the expand_movmem comment for an explanation of
16926 the individual steps performed. */
16928 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
16929 rtx expected_align_exp, rtx expected_size_exp)
16931 rtx destreg;
16932 rtx label = NULL;
16933 rtx tmp;
16934 rtx jump_around_label = NULL;
16935 HOST_WIDE_INT align = 1;
16936 unsigned HOST_WIDE_INT count = 0;
16937 HOST_WIDE_INT expected_size = -1;
16938 int size_needed = 0, epilogue_size_needed;
16939 int desired_align = 0;
16940 enum stringop_alg alg;
16941 rtx promoted_val = NULL;
16942 bool force_loopy_epilogue = false;
16943 int dynamic_check;
16945 if (CONST_INT_P (align_exp))
16946 align = INTVAL (align_exp);
16947 /* i386 can do misaligned access at a reasonably increased cost. */
16948 if (CONST_INT_P (expected_align_exp)
16949 && INTVAL (expected_align_exp) > align)
16950 align = INTVAL (expected_align_exp);
16951 if (CONST_INT_P (count_exp))
16952 count = expected_size = INTVAL (count_exp);
16953 if (CONST_INT_P (expected_size_exp) && count == 0)
16954 expected_size = INTVAL (expected_size_exp);
16956 /* Make sure we don't need to care about overflow later on. */
16957 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
16958 return 0;
16960 /* Step 0: Decide on preferred algorithm, desired alignment and
16961 size of chunks to be copied by main loop. */
16963 alg = decide_alg (count, expected_size, true, &dynamic_check);
16964 desired_align = decide_alignment (align, alg, expected_size);
16966 if (!TARGET_ALIGN_STRINGOPS)
16967 align = desired_align;
16969 if (alg == libcall)
16970 return 0;
16971 gcc_assert (alg != no_stringop);
16972 if (!count)
16973 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
16974 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
16975 switch (alg)
16977 case libcall:
16978 case no_stringop:
16979 gcc_unreachable ();
16980 case loop:
16981 size_needed = GET_MODE_SIZE (Pmode);
16982 break;
16983 case unrolled_loop:
16984 size_needed = GET_MODE_SIZE (Pmode) * 4;
16985 break;
16986 case rep_prefix_8_byte:
16987 size_needed = 8;
16988 break;
16989 case rep_prefix_4_byte:
16990 size_needed = 4;
16991 break;
16992 case rep_prefix_1_byte:
16993 case loop_1_byte:
16994 size_needed = 1;
16995 break;
16997 epilogue_size_needed = size_needed;
16999 /* Step 1: Prologue guard. */
17001 /* Alignment code needs count to be in a register. */
17002 if (CONST_INT_P (count_exp) && desired_align > align)
17004 enum machine_mode mode = SImode;
17005 if (TARGET_64BIT && (count & ~0xffffffff))
17006 mode = DImode;
17007 count_exp = force_reg (mode, count_exp);
17009 /* Do the cheap promotion to allow better CSE across the
17010 main loop and epilogue (i.e. one load of the big constant in
17011 front of all the code). */
17012 if (CONST_INT_P (val_exp))
17013 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
17014 desired_align, align);
17015 /* Ensure that alignment prologue won't copy past end of block. */
17016 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17018 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
17019 /* The epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
17020 Make sure EPILOGUE_SIZE_NEEDED is a power of 2. */
17021 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
17023 /* To improve performance of small blocks, we jump around the VAL
17024 promoting code. This means that if the promoted VAL is not constant,
17025 we might not use it in the epilogue and have to use the byte
17026 loop variant. */
17027 if (epilogue_size_needed > 2 && !promoted_val)
17028 force_loopy_epilogue = true;
17029 label = gen_label_rtx ();
17030 emit_cmp_and_jump_insns (count_exp,
17031 GEN_INT (epilogue_size_needed),
17032 LTU, 0, counter_mode (count_exp), 1, label);
17033 if (GET_CODE (count_exp) == CONST_INT)
17035 else if (expected_size == -1 || expected_size <= epilogue_size_needed)
17036 predict_jump (REG_BR_PROB_BASE * 60 / 100);
17037 else
17038 predict_jump (REG_BR_PROB_BASE * 20 / 100);
17040 if (dynamic_check != -1)
17042 rtx hot_label = gen_label_rtx ();
17043 jump_around_label = gen_label_rtx ();
17044 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
17045 LEU, 0, counter_mode (count_exp), 1, hot_label);
17046 predict_jump (REG_BR_PROB_BASE * 90 / 100);
17047 set_storage_via_libcall (dst, count_exp, val_exp, false);
17048 emit_jump (jump_around_label);
17049 emit_label (hot_label);
17052 /* Step 2: Alignment prologue. */
17054 /* Do the expensive promotion once we branched off the small blocks. */
17055 if (!promoted_val)
17056 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
17057 desired_align, align);
17058 gcc_assert (desired_align >= 1 && align >= 1);
17060 if (desired_align > align)
17062 /* Except for the first move in the epilogue, we no longer know
17063 the constant offset in the aliasing info. It doesn't seem worth
17064 the pain to maintain it for the first move, so throw away
17065 the info early. */
17066 dst = change_address (dst, BLKmode, destreg);
17067 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
17068 desired_align);
17070 if (label && size_needed == 1)
17072 emit_label (label);
17073 LABEL_NUSES (label) = 1;
17074 label = NULL;
17077 /* Step 3: Main loop. */
17079 switch (alg)
17081 case libcall:
17082 case no_stringop:
17083 gcc_unreachable ();
17084 case loop_1_byte:
17085 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17086 count_exp, QImode, 1, expected_size);
17087 break;
17088 case loop:
17089 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17090 count_exp, Pmode, 1, expected_size);
17091 break;
17092 case unrolled_loop:
17093 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
17094 count_exp, Pmode, 4, expected_size);
17095 break;
17096 case rep_prefix_8_byte:
17097 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17098 DImode);
17099 break;
17100 case rep_prefix_4_byte:
17101 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17102 SImode);
17103 break;
17104 case rep_prefix_1_byte:
17105 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
17106 QImode);
17107 break;
17109 /* Properly adjust the offset of the dest memory for aliasing. */
17110 if (CONST_INT_P (count_exp))
17111 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
17112 (count / size_needed) * size_needed);
17113 else
17114 dst = change_address (dst, BLKmode, destreg);
17116 /* Step 4: Epilogue to copy the remaining bytes. */
17118 if (label)
17120 /* When the main loop is done, COUNT_EXP might hold the original count,
17121 while we want to set only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
17122 Epilogue code will actually set COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
17123 bytes. Compensate if needed. */
17125 if (size_needed < desired_align - align)
17127 tmp =
17128 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
17129 GEN_INT (size_needed - 1), count_exp, 1,
17130 OPTAB_DIRECT);
17131 size_needed = desired_align - align + 1;
17132 if (tmp != count_exp)
17133 emit_move_insn (count_exp, tmp);
17135 emit_label (label);
17136 LABEL_NUSES (label) = 1;
17138 if (count_exp != const0_rtx && epilogue_size_needed > 1)
17140 if (force_loopy_epilogue)
17141 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
17142 size_needed);
17143 else
17144 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
17145 size_needed);
17147 if (jump_around_label)
17148 emit_label (jump_around_label);
17149 return 1;
17152 /* Expand the appropriate insns for doing strlen if not just doing
17153 repnz; scasb
17155 out = result, initialized with the start address
17156 align_rtx = alignment of the address.
17157 scratch = scratch register, initialized with the start address when
17158 not aligned, otherwise undefined
17160 This is just the body. It needs the initializations mentioned above and
17161 some address computing at the end. These things are done in i386.md. */
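/* In outline, the emitted body is:
   1. compare up to three leading bytes one at a time until OUT is
      4-byte aligned, jumping to the end on a zero byte;
   2. loop fetching 4 bytes at a time, testing
      (word - 0x01010101) & ~word & 0x80808080, which is nonzero iff
      some byte of the word is zero;
   3. after the loop, locate the zero byte within the final word and
      adjust OUT, using cmov when available.  */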
17163 static void
17164 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
17166 int align;
17167 rtx tmp;
17168 rtx align_2_label = NULL_RTX;
17169 rtx align_3_label = NULL_RTX;
17170 rtx align_4_label = gen_label_rtx ();
17171 rtx end_0_label = gen_label_rtx ();
17172 rtx mem;
17173 rtx tmpreg = gen_reg_rtx (SImode);
17174 rtx scratch = gen_reg_rtx (SImode);
17175 rtx cmp;
17177 align = 0;
17178 if (CONST_INT_P (align_rtx))
17179 align = INTVAL (align_rtx);
17181 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
17183 /* Is there a known alignment and is it less than 4? */
17184 if (align < 4)
17186 rtx scratch1 = gen_reg_rtx (Pmode);
17187 emit_move_insn (scratch1, out);
17188 /* Is there a known alignment and is it not 2? */
17189 if (align != 2)
17191 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
17192 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
17194 /* Leave just the 3 lower bits. */
17195 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
17196 NULL_RTX, 0, OPTAB_WIDEN);
17198 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
17199 Pmode, 1, align_4_label);
17200 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
17201 Pmode, 1, align_2_label);
17202 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
17203 Pmode, 1, align_3_label);
17205 else
17207 /* Since the alignment is 2, we have to check 2 or 0 bytes;
17208 check whether the pointer is aligned to a 4-byte boundary. */
17210 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
17211 NULL_RTX, 0, OPTAB_WIDEN);
17213 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
17214 Pmode, 1, align_4_label);
17217 mem = change_address (src, QImode, out);
17219 /* Now compare the bytes. */
17221 /* Compare the first few unaligned bytes on a byte-by-byte basis. */
17222 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
17223 QImode, 1, end_0_label);
17225 /* Increment the address. */
17226 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17228 /* Not needed with an alignment of 2 */
17229 if (align != 2)
17231 emit_label (align_2_label);
17233 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
17234 end_0_label);
17236 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17238 emit_label (align_3_label);
17241 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
17242 end_0_label);
17244 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
17247 /* Generate a loop to check 4 bytes at a time. It is not a good idea
17248 to align this loop; that only makes programs larger and does not
17249 make them any faster. */
17250 emit_label (align_4_label);
17252 mem = change_address (src, SImode, out);
17253 emit_move_insn (scratch, mem);
17254 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
17256 /* This formula yields a nonzero result iff one of the bytes is zero.
17257 This saves three branches inside the loop and many cycles. */
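/* Worked example: for scratch == 0x12003456,
     scratch + (-0x01010101)      = 0x10FF3355
     & ~scratch (~ = 0xEDFFCBA9)  = 0x00FF0301
     & 0x80808080                 = 0x00800000
   and the surviving 0x80 sits in the byte position that was zero.  */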
17259 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
17260 emit_insn (gen_one_cmplsi2 (scratch, scratch));
17261 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
17262 emit_insn (gen_andsi3 (tmpreg, tmpreg,
17263 gen_int_mode (0x80808080, SImode)));
17264 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
17265 align_4_label);
17267 if (TARGET_CMOVE)
17269 rtx reg = gen_reg_rtx (SImode);
17270 rtx reg2 = gen_reg_rtx (Pmode);
17271 emit_move_insn (reg, tmpreg);
17272 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
17274 /* If zero is not in the first two bytes, move two bytes forward. */
17275 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
17276 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17277 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
17278 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
17279 gen_rtx_IF_THEN_ELSE (SImode, tmp,
17280 reg,
17281 tmpreg)));
17282 /* Emit lea manually to avoid clobbering of flags. */
17283 emit_insn (gen_rtx_SET (SImode, reg2,
17284 gen_rtx_PLUS (Pmode, out, const2_rtx)));
17286 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17287 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
17288 emit_insn (gen_rtx_SET (VOIDmode, out,
17289 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
17290 reg2,
17291 out)));
17294 else
17296 rtx end_2_label = gen_label_rtx ();
17297 /* Is zero in the first two bytes? */
17299 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
17300 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
17301 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
17302 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
17303 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
17304 pc_rtx);
17305 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
17306 JUMP_LABEL (tmp) = end_2_label;
17308 /* Not in the first two. Move two bytes forward. */
17309 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
17310 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
17312 emit_label (end_2_label);
17316 /* Avoid branch in fixing the byte. */
17317 tmpreg = gen_lowpart (QImode, tmpreg);
17318 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
17319 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
17320 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), cmp));
17322 emit_label (end_0_label);
17325 /* Expand strlen. */
17328 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
17330 rtx addr, scratch1, scratch2, scratch3, scratch4;
17332 /* The generic case of the strlen expander is long. Avoid expanding it
17333 unless TARGET_INLINE_ALL_STRINGOPS. */
17335 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
17336 && !TARGET_INLINE_ALL_STRINGOPS
17337 && !optimize_size
17338 && (!CONST_INT_P (align) || INTVAL (align) < 4))
17339 return 0;
17341 addr = force_reg (Pmode, XEXP (src, 0));
17342 scratch1 = gen_reg_rtx (Pmode);
17344 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
17345 && !optimize_size)
17347 /* Well, it seems that some optimizer does not combine a call like
17348 foo (strlen (bar), strlen (bar));
17349 when the move and the subtraction are done here. It does calculate
17350 the length just once when these instructions are done inside
17351 output_strlen_unroll(). But I think that since &bar[strlen (bar)] is
17352 often used and I use one fewer register for the lifetime of
17353 output_strlen_unroll(), this is better. */
17355 emit_move_insn (out, addr);
17357 ix86_expand_strlensi_unroll_1 (out, src, align);
17359 /* strlensi_unroll_1 returns the address of the zero at the end of
17360 the string, like memchr(), so compute the length by subtracting
17361 the start address. */
17362 emit_insn ((*ix86_gen_sub3) (out, out, addr));
17364 else
17366 rtx unspec;
17368 /* Can't use this if the user has appropriated eax, ecx, or edi. */
17369 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
17370 return false;
17372 scratch2 = gen_reg_rtx (Pmode);
17373 scratch3 = gen_reg_rtx (Pmode);
17374 scratch4 = force_reg (Pmode, constm1_rtx);
17376 emit_move_insn (scratch3, addr);
17377 eoschar = force_reg (QImode, eoschar);
17379 src = replace_equiv_address_nv (src, scratch3);
17381 /* If .md starts supporting :P, this can be done in .md. */
17382 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
17383 scratch4), UNSPEC_SCAS);
17384 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
17385 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
17386 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
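/* Explanatory sketch (added note): this is the classic "repnz scasb"
   idiom.  With the count register preloaded to -1, scasb scans len+1
   bytes (including the terminator), leaving count = -(len + 2); the
   one's complement gives len + 1, and adding -1 yields len:

     movl  $-1, %ecx
     repnz scasb           # ecx = -(len + 2)
     notl  %ecx            # ecx = len + 1
     leal  -1(%ecx), %eax  # eax = len

   The concrete registers are illustrative; the expander works on fresh
   pseudos constrained by the strlenqi pattern.  */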
17388 return 1;
17391 /* For a given symbol (function), construct code to compute the address of
17392 its PLT entry in the large x86-64 PIC model. */
17394 construct_plt_address (rtx symbol)
17396 rtx tmp = gen_reg_rtx (Pmode);
17397 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
17399 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
17400 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
17402 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
17403 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
17404 return tmp;
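/* Sketch of the resulting sequence (added note; register names are
   illustrative, and the GOT base lives in pic_offset_table_rtx):

     movabs $sym@PLTOFF, %r11   # the UNSPEC_PLTOFF constant
     addq   %gotbase, %r11      # the gen_adddi3 above

   The caller then uses the sum as an indirect call target.  */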
17407 void
17408 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
17409 rtx callarg2 ATTRIBUTE_UNUSED,
17410 rtx pop, int sibcall)
17412 rtx use = NULL, call;
17414 if (pop == const0_rtx)
17415 pop = NULL;
17416 gcc_assert (!TARGET_64BIT || !pop);
17418 if (TARGET_MACHO && !TARGET_64BIT)
17420 #if TARGET_MACHO
17421 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
17422 fnaddr = machopic_indirect_call_target (fnaddr);
17423 #endif
17425 else
17427 /* Static functions and indirect calls don't need the pic register. */
17428 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
17429 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
17430 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
17431 use_reg (&use, pic_offset_table_rtx);
17434 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
17436 rtx al = gen_rtx_REG (QImode, AX_REG);
17437 emit_move_insn (al, callarg2);
17438 use_reg (&use, al);
17441 if (ix86_cmodel == CM_LARGE_PIC
17442 && GET_CODE (fnaddr) == MEM
17443 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
17444 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
17445 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
17446 else if (! call_insn_operand (XEXP (fnaddr, 0), Pmode))
17448 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
17449 fnaddr = gen_rtx_MEM (QImode, fnaddr);
17451 if (sibcall && TARGET_64BIT
17452 && !constant_call_address_operand (XEXP (fnaddr, 0), Pmode))
17454 rtx addr;
17455 addr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
17456 fnaddr = gen_rtx_REG (Pmode, R11_REG);
17457 emit_move_insn (fnaddr, addr);
17458 fnaddr = gen_rtx_MEM (QImode, fnaddr);
17461 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
17462 if (retval)
17463 call = gen_rtx_SET (VOIDmode, retval, call);
17464 if (pop)
17466 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
17467 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
17468 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
17471 call = emit_call_insn (call);
17472 if (use)
17473 CALL_INSN_FUNCTION_USAGE (call) = use;
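/* For reference (added note): when POP is set -- e.g. a 32-bit stdcall
   callee that pops 8 bytes of arguments -- the emitted insn is a
   two-element PARALLEL along the lines of

     (parallel [(set (reg:SI ax)
                     (call (mem:QI (symbol_ref "f")) (const_int 8)))
                (set (reg:SI sp)
                     (plus:SI (reg:SI sp) (const_int 8)))])

   with the operand values here purely illustrative.  */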
17477 /* Clear stack slot assignments remembered from previous functions.
17478 This is called from INIT_EXPANDERS once before RTL is emitted for each
17479 function. */
17481 static struct machine_function *
17482 ix86_init_machine_status (void)
17484 struct machine_function *f;
17486 f = GGC_CNEW (struct machine_function);
17487 f->use_fast_prologue_epilogue_nregs = -1;
17488 f->tls_descriptor_call_expanded_p = 0;
17489 f->call_abi = DEFAULT_ABI;
17491 return f;
17494 /* Return a MEM corresponding to a stack slot with mode MODE.
17495 Allocate a new slot if necessary.
17497 The RTL for a function can have several slots available: N is
17498 which slot to use. */
17501 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
17503 struct stack_local_entry *s;
17505 gcc_assert (n < MAX_386_STACK_LOCALS);
17507 /* Virtual slot is valid only before vregs are instantiated. */
17508 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
17510 for (s = ix86_stack_locals; s; s = s->next)
17511 if (s->mode == mode && s->n == n)
17512 return copy_rtx (s->rtl);
17514 s = (struct stack_local_entry *)
17515 ggc_alloc (sizeof (struct stack_local_entry));
17516 s->n = n;
17517 s->mode = mode;
17518 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
17520 s->next = ix86_stack_locals;
17521 ix86_stack_locals = s;
17522 return s->rtl;
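/* Usage sketch (added note; illustrative): an expander needing a scratch
   stack word can do

     rtx slot = assign_386_stack_local (SImode, SLOT_TEMP);
     emit_move_insn (slot, operands[0]);

   and repeated requests for the same (mode, slot) pair hand back copies
   of one shared MEM.  */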
17525 /* Construct the SYMBOL_REF for the tls_get_addr function. */
17527 static GTY(()) rtx ix86_tls_symbol;
17529 ix86_tls_get_addr (void)
17532 if (!ix86_tls_symbol)
17534 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
17535 (TARGET_ANY_GNU_TLS
17536 && !TARGET_64BIT)
17537 ? "___tls_get_addr"
17538 : "__tls_get_addr");
17541 return ix86_tls_symbol;
17544 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
17546 static GTY(()) rtx ix86_tls_module_base_symbol;
17548 ix86_tls_module_base (void)
17551 if (!ix86_tls_module_base_symbol)
17553 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
17554 "_TLS_MODULE_BASE_");
17555 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
17556 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
17559 return ix86_tls_module_base_symbol;
17562 /* Calculate the length of the memory address in the instruction
17563 encoding. Does not include the one-byte modrm, opcode, or prefix. */
17566 memory_address_length (rtx addr)
17568 struct ix86_address parts;
17569 rtx base, index, disp;
17570 int len;
17571 int ok;
17573 if (GET_CODE (addr) == PRE_DEC
17574 || GET_CODE (addr) == POST_INC
17575 || GET_CODE (addr) == PRE_MODIFY
17576 || GET_CODE (addr) == POST_MODIFY)
17577 return 0;
17579 ok = ix86_decompose_address (addr, &parts);
17580 gcc_assert (ok);
17582 if (parts.base && GET_CODE (parts.base) == SUBREG)
17583 parts.base = SUBREG_REG (parts.base);
17584 if (parts.index && GET_CODE (parts.index) == SUBREG)
17585 parts.index = SUBREG_REG (parts.index);
17587 base = parts.base;
17588 index = parts.index;
17589 disp = parts.disp;
17590 len = 0;
17592 /* Rule of thumb:
17593 - esp as the base always wants an index,
17594 - ebp as the base always wants a displacement. */
17596 /* Register Indirect. */
17597 if (base && !index && !disp)
17599 /* esp (for its index) and ebp (for its displacement) need
17600 the two-byte modrm form. */
17601 if (addr == stack_pointer_rtx
17602 || addr == arg_pointer_rtx
17603 || addr == frame_pointer_rtx
17604 || addr == hard_frame_pointer_rtx)
17605 len = 1;
17608 /* Direct Addressing. */
17609 else if (disp && !base && !index)
17610 len = 4;
17612 else
17614 /* Find the length of the displacement constant. */
17615 if (disp)
17617 if (base && satisfies_constraint_K (disp))
17618 len = 1;
17619 else
17620 len = 4;
17622 /* ebp always wants a displacement. */
17623 else if (base == hard_frame_pointer_rtx)
17624 len = 1;
17626 /* An index requires the two-byte modrm form.... */
17627 if (index
17628 /* ...like esp, which always wants an index. */
17629 || base == stack_pointer_rtx
17630 || base == arg_pointer_rtx
17631 || base == frame_pointer_rtx)
17632 len += 1;
17635 return len;
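/* Worked example (added note): for the 32-bit address 8(%esp,%eax,4) the
   result is 2 -- one byte of 8-bit displacement plus one byte for the SIB
   form the index requires -- while 12345(,%eax,4) gives 5 (a 4-byte
   displacement plus the SIB byte).  The modrm byte itself is counted
   elsewhere.  */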
17638 /* Compute the default value for the "length_immediate" attribute. When
17639 SHORTFORM is set, expect that the insn has an 8-bit immediate alternative. */
17641 ix86_attr_length_immediate_default (rtx insn, int shortform)
17643 int len = 0;
17644 int i;
17645 extract_insn_cached (insn);
17646 for (i = recog_data.n_operands - 1; i >= 0; --i)
17647 if (CONSTANT_P (recog_data.operand[i]))
17649 gcc_assert (!len);
17650 if (shortform && satisfies_constraint_K (recog_data.operand[i]))
17651 len = 1;
17652 else
17654 switch (get_attr_mode (insn))
17656 case MODE_QI:
17657 len += 1;
17658 break;
17659 case MODE_HI:
17660 len += 2;
17661 break;
17662 case MODE_SI:
17663 len += 4;
17664 break;
17665 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
17666 case MODE_DI:
17667 len += 4;
17668 break;
17669 default:
17670 fatal_insn ("unknown insn mode", insn);
17674 return len;
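/* Example (added note): "addl $3, %eax" fits the sign-extended 8-bit
   form, so with SHORTFORM the result is 1; "addl $300, %eax" needs the
   full MODE_SI immediate and yields 4.  */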
17676 /* Compute default value for "length_address" attribute. */
17678 ix86_attr_length_address_default (rtx insn)
17680 int i;
17682 if (get_attr_type (insn) == TYPE_LEA)
17684 rtx set = PATTERN (insn);
17686 if (GET_CODE (set) == PARALLEL)
17687 set = XVECEXP (set, 0, 0);
17689 gcc_assert (GET_CODE (set) == SET);
17691 return memory_address_length (SET_SRC (set));
17694 extract_insn_cached (insn);
17695 for (i = recog_data.n_operands - 1; i >= 0; --i)
17696 if (MEM_P (recog_data.operand[i]))
17698 return memory_address_length (XEXP (recog_data.operand[i], 0));
17699 break;
17701 return 0;
17704 /* Return the maximum number of instructions a cpu can issue. */
17706 static int
17707 ix86_issue_rate (void)
17709 switch (ix86_tune)
17711 case PROCESSOR_PENTIUM:
17712 case PROCESSOR_K6:
17713 return 2;
17715 case PROCESSOR_PENTIUMPRO:
17716 case PROCESSOR_PENTIUM4:
17717 case PROCESSOR_ATHLON:
17718 case PROCESSOR_K8:
17719 case PROCESSOR_AMDFAM10:
17720 case PROCESSOR_NOCONA:
17721 case PROCESSOR_GENERIC32:
17722 case PROCESSOR_GENERIC64:
17723 return 3;
17725 case PROCESSOR_CORE2:
17726 return 4;
17728 default:
17729 return 1;
17733 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads the flags
17734 set by DEP_INSN and nothing else set by DEP_INSN. */
17736 static int
17737 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
17739 rtx set, set2;
17741 /* Simplify the test for uninteresting insns. */
17742 if (insn_type != TYPE_SETCC
17743 && insn_type != TYPE_ICMOV
17744 && insn_type != TYPE_FCMOV
17745 && insn_type != TYPE_IBR)
17746 return 0;
17748 if ((set = single_set (dep_insn)) != 0)
17750 set = SET_DEST (set);
17751 set2 = NULL_RTX;
17753 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
17754 && XVECLEN (PATTERN (dep_insn), 0) == 2
17755 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
17756 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
17758 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
17759 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
17761 else
17762 return 0;
17764 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
17765 return 0;
17767 /* This test is true if the dependent insn reads the flags but
17768 not any other potentially set register. */
17769 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
17770 return 0;
17772 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
17773 return 0;
17775 return 1;
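/* Example (added note): for "cmpl %eax, %ebx" followed by "sete %cl",
   the sete reads only the flags written by the compare, so this returns
   true and the Pentium case below treats the pair as free.  */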
17778 /* A subroutine of ix86_adjust_cost -- return true iff INSN has a memory
17779 address with operands set by DEP_INSN. */
17781 static int
17782 ix86_agi_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
17784 rtx addr;
17786 if (insn_type == TYPE_LEA
17787 && TARGET_PENTIUM)
17789 addr = PATTERN (insn);
17791 if (GET_CODE (addr) == PARALLEL)
17792 addr = XVECEXP (addr, 0, 0);
17794 gcc_assert (GET_CODE (addr) == SET);
17796 addr = SET_SRC (addr);
17798 else
17800 int i;
17801 extract_insn_cached (insn);
17802 for (i = recog_data.n_operands - 1; i >= 0; --i)
17803 if (MEM_P (recog_data.operand[i]))
17805 addr = XEXP (recog_data.operand[i], 0);
17806 goto found;
17808 return 0;
17809 found:;
17812 return modified_in_p (addr, dep_insn);
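/* Example (added note): on the original Pentium,

     addl $4, %ebx
     movl (%ebx), %eax

   triggers an address-generation interlock: the load's address uses a
   register written by the immediately preceding insn, which is what
   modified_in_p detects here.  */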
17815 static int
17816 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
17818 enum attr_type insn_type, dep_insn_type;
17819 enum attr_memory memory;
17820 rtx set, set2;
17821 int dep_insn_code_number;
17823 /* Anti and output dependencies have zero cost on all CPUs. */
17824 if (REG_NOTE_KIND (link) != 0)
17825 return 0;
17827 dep_insn_code_number = recog_memoized (dep_insn);
17829 /* If we can't recognize the insns, we can't really do anything. */
17830 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
17831 return cost;
17833 insn_type = get_attr_type (insn);
17834 dep_insn_type = get_attr_type (dep_insn);
17836 switch (ix86_tune)
17838 case PROCESSOR_PENTIUM:
17839 /* Address Generation Interlock adds a cycle of latency. */
17840 if (ix86_agi_dependent (insn, dep_insn, insn_type))
17841 cost += 1;
17843 /* ??? Compares pair with jump/setcc. */
17844 if (ix86_flags_dependent (insn, dep_insn, insn_type))
17845 cost = 0;
17847 /* Floating point stores require the value to be ready one cycle earlier. */
17848 if (insn_type == TYPE_FMOV
17849 && get_attr_memory (insn) == MEMORY_STORE
17850 && !ix86_agi_dependent (insn, dep_insn, insn_type))
17851 cost += 1;
17852 break;
17854 case PROCESSOR_PENTIUMPRO:
17855 memory = get_attr_memory (insn);
17857 /* INT->FP conversion is expensive. */
17858 if (get_attr_fp_int_src (dep_insn))
17859 cost += 5;
17861 /* There is one extra cycle of latency between an FP op and a store. */
17862 if (insn_type == TYPE_FMOV
17863 && (set = single_set (dep_insn)) != NULL_RTX
17864 && (set2 = single_set (insn)) != NULL_RTX
17865 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
17866 && MEM_P (SET_DEST (set2)))
17867 cost += 1;
17869 /* Show the ability of the reorder buffer to hide the latency of a load
17870 by executing it in parallel with the previous instruction, in case the
17871 previous instruction is not needed to compute the address. */
17872 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
17873 && !ix86_agi_dependent (insn, dep_insn, insn_type))
17875 /* Claim moves to take one cycle, as the core can issue one load
17876 at a time and the next load can start a cycle later. */
17877 if (dep_insn_type == TYPE_IMOV
17878 || dep_insn_type == TYPE_FMOV)
17879 cost = 1;
17880 else if (cost > 1)
17881 cost--;
17883 break;
17885 case PROCESSOR_K6:
17886 memory = get_attr_memory (insn);
17888 /* The esp dependency is resolved before the instruction is really
17889 finished. */
17890 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
17891 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
17892 return 1;
17894 /* INT->FP conversion is expensive. */
17895 if (get_attr_fp_int_src (dep_insn))
17896 cost += 5;
17898 /* Show the ability of the reorder buffer to hide the latency of a load
17899 by executing it in parallel with the previous instruction, in case the
17900 previous instruction is not needed to compute the address. */
17901 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
17902 && !ix86_agi_dependent (insn, dep_insn, insn_type))
17904 /* Claim moves to take one cycle, as the core can issue one load
17905 at a time and the next load can start a cycle later. */
17906 if (dep_insn_type == TYPE_IMOV
17907 || dep_insn_type == TYPE_FMOV)
17908 cost = 1;
17909 else if (cost > 2)
17910 cost -= 2;
17911 else
17912 cost = 1;
17914 break;
17916 case PROCESSOR_ATHLON:
17917 case PROCESSOR_K8:
17918 case PROCESSOR_AMDFAM10:
17919 case PROCESSOR_GENERIC32:
17920 case PROCESSOR_GENERIC64:
17921 memory = get_attr_memory (insn);
17923 /* Show the ability of the reorder buffer to hide the latency of a load
17924 by executing it in parallel with the previous instruction, in case the
17925 previous instruction is not needed to compute the address. */
17926 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
17927 && !ix86_agi_dependent (insn, dep_insn, insn_type))
17929 enum attr_unit unit = get_attr_unit (insn);
17930 int loadcost = 3;
17932 /* Because of the difference between the lengths of the integer and
17933 floating-point unit pipeline preparation stages, memory operands
17934 for floating point are cheaper.
17936 ??? For Athlon the difference is most probably 2. */
17937 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
17938 loadcost = 3;
17939 else
17940 loadcost = TARGET_ATHLON ? 2 : 0;
17942 if (cost >= loadcost)
17943 cost -= loadcost;
17944 else
17945 cost = 0;
17948 default:
17949 break;
17952 return cost;
17955 /* How many alternative schedules to try. This should be as wide as the
17956 scheduling freedom in the DFA, but no wider. Making this value too
17957 large results in extra work for the scheduler. */
17959 static int
17960 ia32_multipass_dfa_lookahead (void)
17962 switch (ix86_tune)
17964 case PROCESSOR_PENTIUM:
17965 return 2;
17967 case PROCESSOR_PENTIUMPRO:
17968 case PROCESSOR_K6:
17969 return 1;
17971 default:
17972 return 0;
17977 /* Compute the alignment given to a constant that is being placed in memory.
17978 EXP is the constant and ALIGN is the alignment that the object would
17979 ordinarily have.
17980 The value of this function is used instead of that alignment to align
17981 the object. */
17984 ix86_constant_alignment (tree exp, int align)
17986 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
17987 || TREE_CODE (exp) == INTEGER_CST)
17989 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
17990 return 64;
17991 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
17992 return 128;
17994 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
17995 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
17996 return BITS_PER_WORD;
17998 return align;
18001 /* Compute the alignment for a static variable.
18002 TYPE is the data type, and ALIGN is the alignment that
18003 the object would ordinarily have. The value of this function is used
18004 instead of that alignment to align the object. */
18007 ix86_data_alignment (tree type, int align)
18009 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
18011 if (AGGREGATE_TYPE_P (type)
18012 && TYPE_SIZE (type)
18013 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18014 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
18015 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
18016 && align < max_align)
18017 align = max_align;
18019 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
18020 to a 16-byte boundary. */
18021 if (TARGET_64BIT)
18023 if (AGGREGATE_TYPE_P (type)
18024 && TYPE_SIZE (type)
18025 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18026 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
18027 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
18028 return 128;
18031 if (TREE_CODE (type) == ARRAY_TYPE)
18033 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
18034 return 64;
18035 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
18036 return 128;
18038 else if (TREE_CODE (type) == COMPLEX_TYPE)
18041 if (TYPE_MODE (type) == DCmode && align < 64)
18042 return 64;
18043 if ((TYPE_MODE (type) == XCmode
18044 || TYPE_MODE (type) == TCmode) && align < 128)
18045 return 128;
18047 else if ((TREE_CODE (type) == RECORD_TYPE
18048 || TREE_CODE (type) == UNION_TYPE
18049 || TREE_CODE (type) == QUAL_UNION_TYPE)
18050 && TYPE_FIELDS (type))
18052 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
18053 return 64;
18054 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
18055 return 128;
18057 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
18058 || TREE_CODE (type) == INTEGER_TYPE)
18060 if (TYPE_MODE (type) == DFmode && align < 64)
18061 return 64;
18062 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
18063 return 128;
18066 return align;
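/* Example (added note): under these rules a static "double d[4]" on
   x86-64 is raised to 128-bit alignment (32 bytes is greater than 16),
   while a lone "double d" is only raised to the 64-bit alignment that
   DFmode asks for.  */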
18069 /* Compute the alignment for a local variable or a stack slot. TYPE is
18070 the data type, MODE is the widest mode available and ALIGN is the
18071 alignment that the object would ordinarily have. The value of this
18072 macro is used instead of that alignment to align the object. */
18074 unsigned int
18075 ix86_local_alignment (tree type, enum machine_mode mode,
18076 unsigned int align)
18078 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
18079 register in MODE. We will return the largest alignment of XF
18080 and DF. */
18081 if (!type)
18083 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
18084 align = GET_MODE_ALIGNMENT (DFmode);
18085 return align;
18088 /* The x86-64 ABI requires arrays greater than 16 bytes to be aligned
18089 to a 16-byte boundary. */
18090 if (TARGET_64BIT)
18092 if (AGGREGATE_TYPE_P (type)
18093 && TYPE_SIZE (type)
18094 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
18095 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
18096 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
18097 return 128;
18099 if (TREE_CODE (type) == ARRAY_TYPE)
18101 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
18102 return 64;
18103 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
18104 return 128;
18106 else if (TREE_CODE (type) == COMPLEX_TYPE)
18108 if (TYPE_MODE (type) == DCmode && align < 64)
18109 return 64;
18110 if ((TYPE_MODE (type) == XCmode
18111 || TYPE_MODE (type) == TCmode) && align < 128)
18112 return 128;
18114 else if ((TREE_CODE (type) == RECORD_TYPE
18115 || TREE_CODE (type) == UNION_TYPE
18116 || TREE_CODE (type) == QUAL_UNION_TYPE)
18117 && TYPE_FIELDS (type))
18119 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
18120 return 64;
18121 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
18122 return 128;
18124 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
18125 || TREE_CODE (type) == INTEGER_TYPE)
18128 if (TYPE_MODE (type) == DFmode && align < 64)
18129 return 64;
18130 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
18131 return 128;
18133 return align;
18136 /* Emit RTL insns to initialize the variable parts of a trampoline.
18137 FNADDR is an RTX for the address of the function's pure code.
18138 CXT is an RTX for the static chain value for the function. */
18139 void
18140 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
18142 if (!TARGET_64BIT)
18144 /* Compute offset from the end of the jmp to the target function. */
18145 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
18146 plus_constant (tramp, 10),
18147 NULL_RTX, 1, OPTAB_DIRECT);
18148 emit_move_insn (gen_rtx_MEM (QImode, tramp),
18149 gen_int_mode (0xb9, QImode));
18150 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
18151 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
18152 gen_int_mode (0xe9, QImode));
18153 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
18155 else
18157 int offset = 0;
18158 /* Try to load the address using the shorter movl instead of movabs.
18159 We may want to support movq for kernel mode, but the kernel does not
18160 use trampolines at the moment. */
18161 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
18163 fnaddr = copy_to_mode_reg (DImode, fnaddr);
18164 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18165 gen_int_mode (0xbb41, HImode));
18166 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
18167 gen_lowpart (SImode, fnaddr));
18168 offset += 6;
18170 else
18172 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18173 gen_int_mode (0xbb49, HImode));
18174 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
18175 fnaddr);
18176 offset += 10;
18178 /* Load the static chain using movabs to r10. */
18179 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18180 gen_int_mode (0xba49, HImode));
18181 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
18182 cxt);
18183 offset += 10;
18184 /* Jump through r11. */
18185 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
18186 gen_int_mode (0xff49, HImode));
18187 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
18188 gen_int_mode (0xe3, QImode));
18189 offset += 3;
18190 gcc_assert (offset <= TRAMPOLINE_SIZE);
18193 #ifdef ENABLE_EXECUTE_STACK
18194 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
18195 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
18196 #endif
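/* For reference (added note; byte values taken from the moves above):
   the 32-bit trampoline laid out by this function is

     b9 <cxt:4>         movl   $CXT, %ecx
     e9 <disp:4>        jmp    FNADDR     # disp = fnaddr - (tramp + 10)

   and the 64-bit variant, when FNADDR fits zero-extended in 32 bits, is

     41 bb <fnaddr:4>   movl   $FNADDR, %r11d
     49 ba <cxt:8>      movabs $CXT, %r10
     49 ff e3           jmp    *%r11  */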
18199 /* Codes for all the SSE/MMX builtins. */
18200 enum ix86_builtins
18202 IX86_BUILTIN_ADDPS,
18203 IX86_BUILTIN_ADDSS,
18204 IX86_BUILTIN_DIVPS,
18205 IX86_BUILTIN_DIVSS,
18206 IX86_BUILTIN_MULPS,
18207 IX86_BUILTIN_MULSS,
18208 IX86_BUILTIN_SUBPS,
18209 IX86_BUILTIN_SUBSS,
18211 IX86_BUILTIN_CMPEQPS,
18212 IX86_BUILTIN_CMPLTPS,
18213 IX86_BUILTIN_CMPLEPS,
18214 IX86_BUILTIN_CMPGTPS,
18215 IX86_BUILTIN_CMPGEPS,
18216 IX86_BUILTIN_CMPNEQPS,
18217 IX86_BUILTIN_CMPNLTPS,
18218 IX86_BUILTIN_CMPNLEPS,
18219 IX86_BUILTIN_CMPNGTPS,
18220 IX86_BUILTIN_CMPNGEPS,
18221 IX86_BUILTIN_CMPORDPS,
18222 IX86_BUILTIN_CMPUNORDPS,
18223 IX86_BUILTIN_CMPEQSS,
18224 IX86_BUILTIN_CMPLTSS,
18225 IX86_BUILTIN_CMPLESS,
18226 IX86_BUILTIN_CMPNEQSS,
18227 IX86_BUILTIN_CMPNLTSS,
18228 IX86_BUILTIN_CMPNLESS,
18229 IX86_BUILTIN_CMPNGTSS,
18230 IX86_BUILTIN_CMPNGESS,
18231 IX86_BUILTIN_CMPORDSS,
18232 IX86_BUILTIN_CMPUNORDSS,
18234 IX86_BUILTIN_COMIEQSS,
18235 IX86_BUILTIN_COMILTSS,
18236 IX86_BUILTIN_COMILESS,
18237 IX86_BUILTIN_COMIGTSS,
18238 IX86_BUILTIN_COMIGESS,
18239 IX86_BUILTIN_COMINEQSS,
18240 IX86_BUILTIN_UCOMIEQSS,
18241 IX86_BUILTIN_UCOMILTSS,
18242 IX86_BUILTIN_UCOMILESS,
18243 IX86_BUILTIN_UCOMIGTSS,
18244 IX86_BUILTIN_UCOMIGESS,
18245 IX86_BUILTIN_UCOMINEQSS,
18247 IX86_BUILTIN_CVTPI2PS,
18248 IX86_BUILTIN_CVTPS2PI,
18249 IX86_BUILTIN_CVTSI2SS,
18250 IX86_BUILTIN_CVTSI642SS,
18251 IX86_BUILTIN_CVTSS2SI,
18252 IX86_BUILTIN_CVTSS2SI64,
18253 IX86_BUILTIN_CVTTPS2PI,
18254 IX86_BUILTIN_CVTTSS2SI,
18255 IX86_BUILTIN_CVTTSS2SI64,
18257 IX86_BUILTIN_MAXPS,
18258 IX86_BUILTIN_MAXSS,
18259 IX86_BUILTIN_MINPS,
18260 IX86_BUILTIN_MINSS,
18262 IX86_BUILTIN_LOADUPS,
18263 IX86_BUILTIN_STOREUPS,
18264 IX86_BUILTIN_MOVSS,
18266 IX86_BUILTIN_MOVHLPS,
18267 IX86_BUILTIN_MOVLHPS,
18268 IX86_BUILTIN_LOADHPS,
18269 IX86_BUILTIN_LOADLPS,
18270 IX86_BUILTIN_STOREHPS,
18271 IX86_BUILTIN_STORELPS,
18273 IX86_BUILTIN_MASKMOVQ,
18274 IX86_BUILTIN_MOVMSKPS,
18275 IX86_BUILTIN_PMOVMSKB,
18277 IX86_BUILTIN_MOVNTPS,
18278 IX86_BUILTIN_MOVNTQ,
18280 IX86_BUILTIN_LOADDQU,
18281 IX86_BUILTIN_STOREDQU,
18283 IX86_BUILTIN_PACKSSWB,
18284 IX86_BUILTIN_PACKSSDW,
18285 IX86_BUILTIN_PACKUSWB,
18287 IX86_BUILTIN_PADDB,
18288 IX86_BUILTIN_PADDW,
18289 IX86_BUILTIN_PADDD,
18290 IX86_BUILTIN_PADDQ,
18291 IX86_BUILTIN_PADDSB,
18292 IX86_BUILTIN_PADDSW,
18293 IX86_BUILTIN_PADDUSB,
18294 IX86_BUILTIN_PADDUSW,
18295 IX86_BUILTIN_PSUBB,
18296 IX86_BUILTIN_PSUBW,
18297 IX86_BUILTIN_PSUBD,
18298 IX86_BUILTIN_PSUBQ,
18299 IX86_BUILTIN_PSUBSB,
18300 IX86_BUILTIN_PSUBSW,
18301 IX86_BUILTIN_PSUBUSB,
18302 IX86_BUILTIN_PSUBUSW,
18304 IX86_BUILTIN_PAND,
18305 IX86_BUILTIN_PANDN,
18306 IX86_BUILTIN_POR,
18307 IX86_BUILTIN_PXOR,
18309 IX86_BUILTIN_PAVGB,
18310 IX86_BUILTIN_PAVGW,
18312 IX86_BUILTIN_PCMPEQB,
18313 IX86_BUILTIN_PCMPEQW,
18314 IX86_BUILTIN_PCMPEQD,
18315 IX86_BUILTIN_PCMPGTB,
18316 IX86_BUILTIN_PCMPGTW,
18317 IX86_BUILTIN_PCMPGTD,
18319 IX86_BUILTIN_PMADDWD,
18321 IX86_BUILTIN_PMAXSW,
18322 IX86_BUILTIN_PMAXUB,
18323 IX86_BUILTIN_PMINSW,
18324 IX86_BUILTIN_PMINUB,
18326 IX86_BUILTIN_PMULHUW,
18327 IX86_BUILTIN_PMULHW,
18328 IX86_BUILTIN_PMULLW,
18330 IX86_BUILTIN_PSADBW,
18331 IX86_BUILTIN_PSHUFW,
18333 IX86_BUILTIN_PSLLW,
18334 IX86_BUILTIN_PSLLD,
18335 IX86_BUILTIN_PSLLQ,
18336 IX86_BUILTIN_PSRAW,
18337 IX86_BUILTIN_PSRAD,
18338 IX86_BUILTIN_PSRLW,
18339 IX86_BUILTIN_PSRLD,
18340 IX86_BUILTIN_PSRLQ,
18341 IX86_BUILTIN_PSLLWI,
18342 IX86_BUILTIN_PSLLDI,
18343 IX86_BUILTIN_PSLLQI,
18344 IX86_BUILTIN_PSRAWI,
18345 IX86_BUILTIN_PSRADI,
18346 IX86_BUILTIN_PSRLWI,
18347 IX86_BUILTIN_PSRLDI,
18348 IX86_BUILTIN_PSRLQI,
18350 IX86_BUILTIN_PUNPCKHBW,
18351 IX86_BUILTIN_PUNPCKHWD,
18352 IX86_BUILTIN_PUNPCKHDQ,
18353 IX86_BUILTIN_PUNPCKLBW,
18354 IX86_BUILTIN_PUNPCKLWD,
18355 IX86_BUILTIN_PUNPCKLDQ,
18357 IX86_BUILTIN_SHUFPS,
18359 IX86_BUILTIN_RCPPS,
18360 IX86_BUILTIN_RCPSS,
18361 IX86_BUILTIN_RSQRTPS,
18362 IX86_BUILTIN_RSQRTPS_NR,
18363 IX86_BUILTIN_RSQRTSS,
18364 IX86_BUILTIN_RSQRTF,
18365 IX86_BUILTIN_SQRTPS,
18366 IX86_BUILTIN_SQRTPS_NR,
18367 IX86_BUILTIN_SQRTSS,
18369 IX86_BUILTIN_UNPCKHPS,
18370 IX86_BUILTIN_UNPCKLPS,
18372 IX86_BUILTIN_ANDPS,
18373 IX86_BUILTIN_ANDNPS,
18374 IX86_BUILTIN_ORPS,
18375 IX86_BUILTIN_XORPS,
18377 IX86_BUILTIN_EMMS,
18378 IX86_BUILTIN_LDMXCSR,
18379 IX86_BUILTIN_STMXCSR,
18380 IX86_BUILTIN_SFENCE,
18382 /* 3DNow! Original */
18383 IX86_BUILTIN_FEMMS,
18384 IX86_BUILTIN_PAVGUSB,
18385 IX86_BUILTIN_PF2ID,
18386 IX86_BUILTIN_PFACC,
18387 IX86_BUILTIN_PFADD,
18388 IX86_BUILTIN_PFCMPEQ,
18389 IX86_BUILTIN_PFCMPGE,
18390 IX86_BUILTIN_PFCMPGT,
18391 IX86_BUILTIN_PFMAX,
18392 IX86_BUILTIN_PFMIN,
18393 IX86_BUILTIN_PFMUL,
18394 IX86_BUILTIN_PFRCP,
18395 IX86_BUILTIN_PFRCPIT1,
18396 IX86_BUILTIN_PFRCPIT2,
18397 IX86_BUILTIN_PFRSQIT1,
18398 IX86_BUILTIN_PFRSQRT,
18399 IX86_BUILTIN_PFSUB,
18400 IX86_BUILTIN_PFSUBR,
18401 IX86_BUILTIN_PI2FD,
18402 IX86_BUILTIN_PMULHRW,
18404 /* 3DNow! Athlon Extensions */
18405 IX86_BUILTIN_PF2IW,
18406 IX86_BUILTIN_PFNACC,
18407 IX86_BUILTIN_PFPNACC,
18408 IX86_BUILTIN_PI2FW,
18409 IX86_BUILTIN_PSWAPDSI,
18410 IX86_BUILTIN_PSWAPDSF,
18412 /* SSE2 */
18413 IX86_BUILTIN_ADDPD,
18414 IX86_BUILTIN_ADDSD,
18415 IX86_BUILTIN_DIVPD,
18416 IX86_BUILTIN_DIVSD,
18417 IX86_BUILTIN_MULPD,
18418 IX86_BUILTIN_MULSD,
18419 IX86_BUILTIN_SUBPD,
18420 IX86_BUILTIN_SUBSD,
18422 IX86_BUILTIN_CMPEQPD,
18423 IX86_BUILTIN_CMPLTPD,
18424 IX86_BUILTIN_CMPLEPD,
18425 IX86_BUILTIN_CMPGTPD,
18426 IX86_BUILTIN_CMPGEPD,
18427 IX86_BUILTIN_CMPNEQPD,
18428 IX86_BUILTIN_CMPNLTPD,
18429 IX86_BUILTIN_CMPNLEPD,
18430 IX86_BUILTIN_CMPNGTPD,
18431 IX86_BUILTIN_CMPNGEPD,
18432 IX86_BUILTIN_CMPORDPD,
18433 IX86_BUILTIN_CMPUNORDPD,
18434 IX86_BUILTIN_CMPEQSD,
18435 IX86_BUILTIN_CMPLTSD,
18436 IX86_BUILTIN_CMPLESD,
18437 IX86_BUILTIN_CMPNEQSD,
18438 IX86_BUILTIN_CMPNLTSD,
18439 IX86_BUILTIN_CMPNLESD,
18440 IX86_BUILTIN_CMPORDSD,
18441 IX86_BUILTIN_CMPUNORDSD,
18443 IX86_BUILTIN_COMIEQSD,
18444 IX86_BUILTIN_COMILTSD,
18445 IX86_BUILTIN_COMILESD,
18446 IX86_BUILTIN_COMIGTSD,
18447 IX86_BUILTIN_COMIGESD,
18448 IX86_BUILTIN_COMINEQSD,
18449 IX86_BUILTIN_UCOMIEQSD,
18450 IX86_BUILTIN_UCOMILTSD,
18451 IX86_BUILTIN_UCOMILESD,
18452 IX86_BUILTIN_UCOMIGTSD,
18453 IX86_BUILTIN_UCOMIGESD,
18454 IX86_BUILTIN_UCOMINEQSD,
18456 IX86_BUILTIN_MAXPD,
18457 IX86_BUILTIN_MAXSD,
18458 IX86_BUILTIN_MINPD,
18459 IX86_BUILTIN_MINSD,
18461 IX86_BUILTIN_ANDPD,
18462 IX86_BUILTIN_ANDNPD,
18463 IX86_BUILTIN_ORPD,
18464 IX86_BUILTIN_XORPD,
18466 IX86_BUILTIN_SQRTPD,
18467 IX86_BUILTIN_SQRTSD,
18469 IX86_BUILTIN_UNPCKHPD,
18470 IX86_BUILTIN_UNPCKLPD,
18472 IX86_BUILTIN_SHUFPD,
18474 IX86_BUILTIN_LOADUPD,
18475 IX86_BUILTIN_STOREUPD,
18476 IX86_BUILTIN_MOVSD,
18478 IX86_BUILTIN_LOADHPD,
18479 IX86_BUILTIN_LOADLPD,
18481 IX86_BUILTIN_CVTDQ2PD,
18482 IX86_BUILTIN_CVTDQ2PS,
18484 IX86_BUILTIN_CVTPD2DQ,
18485 IX86_BUILTIN_CVTPD2PI,
18486 IX86_BUILTIN_CVTPD2PS,
18487 IX86_BUILTIN_CVTTPD2DQ,
18488 IX86_BUILTIN_CVTTPD2PI,
18490 IX86_BUILTIN_CVTPI2PD,
18491 IX86_BUILTIN_CVTSI2SD,
18492 IX86_BUILTIN_CVTSI642SD,
18494 IX86_BUILTIN_CVTSD2SI,
18495 IX86_BUILTIN_CVTSD2SI64,
18496 IX86_BUILTIN_CVTSD2SS,
18497 IX86_BUILTIN_CVTSS2SD,
18498 IX86_BUILTIN_CVTTSD2SI,
18499 IX86_BUILTIN_CVTTSD2SI64,
18501 IX86_BUILTIN_CVTPS2DQ,
18502 IX86_BUILTIN_CVTPS2PD,
18503 IX86_BUILTIN_CVTTPS2DQ,
18505 IX86_BUILTIN_MOVNTI,
18506 IX86_BUILTIN_MOVNTPD,
18507 IX86_BUILTIN_MOVNTDQ,
18509 /* SSE2 MMX */
18510 IX86_BUILTIN_MASKMOVDQU,
18511 IX86_BUILTIN_MOVMSKPD,
18512 IX86_BUILTIN_PMOVMSKB128,
18514 IX86_BUILTIN_PACKSSWB128,
18515 IX86_BUILTIN_PACKSSDW128,
18516 IX86_BUILTIN_PACKUSWB128,
18518 IX86_BUILTIN_PADDB128,
18519 IX86_BUILTIN_PADDW128,
18520 IX86_BUILTIN_PADDD128,
18521 IX86_BUILTIN_PADDQ128,
18522 IX86_BUILTIN_PADDSB128,
18523 IX86_BUILTIN_PADDSW128,
18524 IX86_BUILTIN_PADDUSB128,
18525 IX86_BUILTIN_PADDUSW128,
18526 IX86_BUILTIN_PSUBB128,
18527 IX86_BUILTIN_PSUBW128,
18528 IX86_BUILTIN_PSUBD128,
18529 IX86_BUILTIN_PSUBQ128,
18530 IX86_BUILTIN_PSUBSB128,
18531 IX86_BUILTIN_PSUBSW128,
18532 IX86_BUILTIN_PSUBUSB128,
18533 IX86_BUILTIN_PSUBUSW128,
18535 IX86_BUILTIN_PAND128,
18536 IX86_BUILTIN_PANDN128,
18537 IX86_BUILTIN_POR128,
18538 IX86_BUILTIN_PXOR128,
18540 IX86_BUILTIN_PAVGB128,
18541 IX86_BUILTIN_PAVGW128,
18543 IX86_BUILTIN_PCMPEQB128,
18544 IX86_BUILTIN_PCMPEQW128,
18545 IX86_BUILTIN_PCMPEQD128,
18546 IX86_BUILTIN_PCMPGTB128,
18547 IX86_BUILTIN_PCMPGTW128,
18548 IX86_BUILTIN_PCMPGTD128,
18550 IX86_BUILTIN_PMADDWD128,
18552 IX86_BUILTIN_PMAXSW128,
18553 IX86_BUILTIN_PMAXUB128,
18554 IX86_BUILTIN_PMINSW128,
18555 IX86_BUILTIN_PMINUB128,
18557 IX86_BUILTIN_PMULUDQ,
18558 IX86_BUILTIN_PMULUDQ128,
18559 IX86_BUILTIN_PMULHUW128,
18560 IX86_BUILTIN_PMULHW128,
18561 IX86_BUILTIN_PMULLW128,
18563 IX86_BUILTIN_PSADBW128,
18564 IX86_BUILTIN_PSHUFHW,
18565 IX86_BUILTIN_PSHUFLW,
18566 IX86_BUILTIN_PSHUFD,
18568 IX86_BUILTIN_PSLLDQI128,
18569 IX86_BUILTIN_PSLLWI128,
18570 IX86_BUILTIN_PSLLDI128,
18571 IX86_BUILTIN_PSLLQI128,
18572 IX86_BUILTIN_PSRAWI128,
18573 IX86_BUILTIN_PSRADI128,
18574 IX86_BUILTIN_PSRLDQI128,
18575 IX86_BUILTIN_PSRLWI128,
18576 IX86_BUILTIN_PSRLDI128,
18577 IX86_BUILTIN_PSRLQI128,
18579 IX86_BUILTIN_PSLLDQ128,
18580 IX86_BUILTIN_PSLLW128,
18581 IX86_BUILTIN_PSLLD128,
18582 IX86_BUILTIN_PSLLQ128,
18583 IX86_BUILTIN_PSRAW128,
18584 IX86_BUILTIN_PSRAD128,
18585 IX86_BUILTIN_PSRLW128,
18586 IX86_BUILTIN_PSRLD128,
18587 IX86_BUILTIN_PSRLQ128,
18589 IX86_BUILTIN_PUNPCKHBW128,
18590 IX86_BUILTIN_PUNPCKHWD128,
18591 IX86_BUILTIN_PUNPCKHDQ128,
18592 IX86_BUILTIN_PUNPCKHQDQ128,
18593 IX86_BUILTIN_PUNPCKLBW128,
18594 IX86_BUILTIN_PUNPCKLWD128,
18595 IX86_BUILTIN_PUNPCKLDQ128,
18596 IX86_BUILTIN_PUNPCKLQDQ128,
18598 IX86_BUILTIN_CLFLUSH,
18599 IX86_BUILTIN_MFENCE,
18600 IX86_BUILTIN_LFENCE,
18602 /* SSE3. */
18603 IX86_BUILTIN_ADDSUBPS,
18604 IX86_BUILTIN_HADDPS,
18605 IX86_BUILTIN_HSUBPS,
18606 IX86_BUILTIN_MOVSHDUP,
18607 IX86_BUILTIN_MOVSLDUP,
18608 IX86_BUILTIN_ADDSUBPD,
18609 IX86_BUILTIN_HADDPD,
18610 IX86_BUILTIN_HSUBPD,
18611 IX86_BUILTIN_LDDQU,
18613 IX86_BUILTIN_MONITOR,
18614 IX86_BUILTIN_MWAIT,
18616 /* SSSE3. */
18617 IX86_BUILTIN_PHADDW,
18618 IX86_BUILTIN_PHADDD,
18619 IX86_BUILTIN_PHADDSW,
18620 IX86_BUILTIN_PHSUBW,
18621 IX86_BUILTIN_PHSUBD,
18622 IX86_BUILTIN_PHSUBSW,
18623 IX86_BUILTIN_PMADDUBSW,
18624 IX86_BUILTIN_PMULHRSW,
18625 IX86_BUILTIN_PSHUFB,
18626 IX86_BUILTIN_PSIGNB,
18627 IX86_BUILTIN_PSIGNW,
18628 IX86_BUILTIN_PSIGND,
18629 IX86_BUILTIN_PALIGNR,
18630 IX86_BUILTIN_PABSB,
18631 IX86_BUILTIN_PABSW,
18632 IX86_BUILTIN_PABSD,
18634 IX86_BUILTIN_PHADDW128,
18635 IX86_BUILTIN_PHADDD128,
18636 IX86_BUILTIN_PHADDSW128,
18637 IX86_BUILTIN_PHSUBW128,
18638 IX86_BUILTIN_PHSUBD128,
18639 IX86_BUILTIN_PHSUBSW128,
18640 IX86_BUILTIN_PMADDUBSW128,
18641 IX86_BUILTIN_PMULHRSW128,
18642 IX86_BUILTIN_PSHUFB128,
18643 IX86_BUILTIN_PSIGNB128,
18644 IX86_BUILTIN_PSIGNW128,
18645 IX86_BUILTIN_PSIGND128,
18646 IX86_BUILTIN_PALIGNR128,
18647 IX86_BUILTIN_PABSB128,
18648 IX86_BUILTIN_PABSW128,
18649 IX86_BUILTIN_PABSD128,
18651 /* AMDFAM10 - SSE4A New Instructions. */
18652 IX86_BUILTIN_MOVNTSD,
18653 IX86_BUILTIN_MOVNTSS,
18654 IX86_BUILTIN_EXTRQI,
18655 IX86_BUILTIN_EXTRQ,
18656 IX86_BUILTIN_INSERTQI,
18657 IX86_BUILTIN_INSERTQ,
18659 /* SSE4.1. */
18660 IX86_BUILTIN_BLENDPD,
18661 IX86_BUILTIN_BLENDPS,
18662 IX86_BUILTIN_BLENDVPD,
18663 IX86_BUILTIN_BLENDVPS,
18664 IX86_BUILTIN_PBLENDVB128,
18665 IX86_BUILTIN_PBLENDW128,
18667 IX86_BUILTIN_DPPD,
18668 IX86_BUILTIN_DPPS,
18670 IX86_BUILTIN_INSERTPS128,
18672 IX86_BUILTIN_MOVNTDQA,
18673 IX86_BUILTIN_MPSADBW128,
18674 IX86_BUILTIN_PACKUSDW128,
18675 IX86_BUILTIN_PCMPEQQ,
18676 IX86_BUILTIN_PHMINPOSUW128,
18678 IX86_BUILTIN_PMAXSB128,
18679 IX86_BUILTIN_PMAXSD128,
18680 IX86_BUILTIN_PMAXUD128,
18681 IX86_BUILTIN_PMAXUW128,
18683 IX86_BUILTIN_PMINSB128,
18684 IX86_BUILTIN_PMINSD128,
18685 IX86_BUILTIN_PMINUD128,
18686 IX86_BUILTIN_PMINUW128,
18688 IX86_BUILTIN_PMOVSXBW128,
18689 IX86_BUILTIN_PMOVSXBD128,
18690 IX86_BUILTIN_PMOVSXBQ128,
18691 IX86_BUILTIN_PMOVSXWD128,
18692 IX86_BUILTIN_PMOVSXWQ128,
18693 IX86_BUILTIN_PMOVSXDQ128,
18695 IX86_BUILTIN_PMOVZXBW128,
18696 IX86_BUILTIN_PMOVZXBD128,
18697 IX86_BUILTIN_PMOVZXBQ128,
18698 IX86_BUILTIN_PMOVZXWD128,
18699 IX86_BUILTIN_PMOVZXWQ128,
18700 IX86_BUILTIN_PMOVZXDQ128,
18702 IX86_BUILTIN_PMULDQ128,
18703 IX86_BUILTIN_PMULLD128,
18705 IX86_BUILTIN_ROUNDPD,
18706 IX86_BUILTIN_ROUNDPS,
18707 IX86_BUILTIN_ROUNDSD,
18708 IX86_BUILTIN_ROUNDSS,
18710 IX86_BUILTIN_PTESTZ,
18711 IX86_BUILTIN_PTESTC,
18712 IX86_BUILTIN_PTESTNZC,
18714 IX86_BUILTIN_VEC_INIT_V2SI,
18715 IX86_BUILTIN_VEC_INIT_V4HI,
18716 IX86_BUILTIN_VEC_INIT_V8QI,
18717 IX86_BUILTIN_VEC_EXT_V2DF,
18718 IX86_BUILTIN_VEC_EXT_V2DI,
18719 IX86_BUILTIN_VEC_EXT_V4SF,
18720 IX86_BUILTIN_VEC_EXT_V4SI,
18721 IX86_BUILTIN_VEC_EXT_V8HI,
18722 IX86_BUILTIN_VEC_EXT_V2SI,
18723 IX86_BUILTIN_VEC_EXT_V4HI,
18724 IX86_BUILTIN_VEC_EXT_V16QI,
18725 IX86_BUILTIN_VEC_SET_V2DI,
18726 IX86_BUILTIN_VEC_SET_V4SF,
18727 IX86_BUILTIN_VEC_SET_V4SI,
18728 IX86_BUILTIN_VEC_SET_V8HI,
18729 IX86_BUILTIN_VEC_SET_V4HI,
18730 IX86_BUILTIN_VEC_SET_V16QI,
18732 IX86_BUILTIN_VEC_PACK_SFIX,
18734 /* SSE4.2. */
18735 IX86_BUILTIN_CRC32QI,
18736 IX86_BUILTIN_CRC32HI,
18737 IX86_BUILTIN_CRC32SI,
18738 IX86_BUILTIN_CRC32DI,
18740 IX86_BUILTIN_PCMPESTRI128,
18741 IX86_BUILTIN_PCMPESTRM128,
18742 IX86_BUILTIN_PCMPESTRA128,
18743 IX86_BUILTIN_PCMPESTRC128,
18744 IX86_BUILTIN_PCMPESTRO128,
18745 IX86_BUILTIN_PCMPESTRS128,
18746 IX86_BUILTIN_PCMPESTRZ128,
18747 IX86_BUILTIN_PCMPISTRI128,
18748 IX86_BUILTIN_PCMPISTRM128,
18749 IX86_BUILTIN_PCMPISTRA128,
18750 IX86_BUILTIN_PCMPISTRC128,
18751 IX86_BUILTIN_PCMPISTRO128,
18752 IX86_BUILTIN_PCMPISTRS128,
18753 IX86_BUILTIN_PCMPISTRZ128,
18755 IX86_BUILTIN_PCMPGTQ,
18757 /* AES instructions */
18758 IX86_BUILTIN_AESENC128,
18759 IX86_BUILTIN_AESENCLAST128,
18760 IX86_BUILTIN_AESDEC128,
18761 IX86_BUILTIN_AESDECLAST128,
18762 IX86_BUILTIN_AESIMC128,
18763 IX86_BUILTIN_AESKEYGENASSIST128,
18765 /* PCLMUL instruction */
18766 IX86_BUILTIN_PCLMULQDQ128,
18768 /* TFmode support builtins. */
18769 IX86_BUILTIN_INFQ,
18770 IX86_BUILTIN_FABSQ,
18771 IX86_BUILTIN_COPYSIGNQ,
18773 /* SSE5 instructions */
18774 IX86_BUILTIN_FMADDSS,
18775 IX86_BUILTIN_FMADDSD,
18776 IX86_BUILTIN_FMADDPS,
18777 IX86_BUILTIN_FMADDPD,
18778 IX86_BUILTIN_FMSUBSS,
18779 IX86_BUILTIN_FMSUBSD,
18780 IX86_BUILTIN_FMSUBPS,
18781 IX86_BUILTIN_FMSUBPD,
18782 IX86_BUILTIN_FNMADDSS,
18783 IX86_BUILTIN_FNMADDSD,
18784 IX86_BUILTIN_FNMADDPS,
18785 IX86_BUILTIN_FNMADDPD,
18786 IX86_BUILTIN_FNMSUBSS,
18787 IX86_BUILTIN_FNMSUBSD,
18788 IX86_BUILTIN_FNMSUBPS,
18789 IX86_BUILTIN_FNMSUBPD,
18790 IX86_BUILTIN_PCMOV_V2DI,
18791 IX86_BUILTIN_PCMOV_V4SI,
18792 IX86_BUILTIN_PCMOV_V8HI,
18793 IX86_BUILTIN_PCMOV_V16QI,
18794 IX86_BUILTIN_PCMOV_V4SF,
18795 IX86_BUILTIN_PCMOV_V2DF,
18796 IX86_BUILTIN_PPERM,
18797 IX86_BUILTIN_PERMPS,
18798 IX86_BUILTIN_PERMPD,
18799 IX86_BUILTIN_PMACSSWW,
18800 IX86_BUILTIN_PMACSWW,
18801 IX86_BUILTIN_PMACSSWD,
18802 IX86_BUILTIN_PMACSWD,
18803 IX86_BUILTIN_PMACSSDD,
18804 IX86_BUILTIN_PMACSDD,
18805 IX86_BUILTIN_PMACSSDQL,
18806 IX86_BUILTIN_PMACSSDQH,
18807 IX86_BUILTIN_PMACSDQL,
18808 IX86_BUILTIN_PMACSDQH,
18809 IX86_BUILTIN_PMADCSSWD,
18810 IX86_BUILTIN_PMADCSWD,
18811 IX86_BUILTIN_PHADDBW,
18812 IX86_BUILTIN_PHADDBD,
18813 IX86_BUILTIN_PHADDBQ,
18814 IX86_BUILTIN_PHADDWD,
18815 IX86_BUILTIN_PHADDWQ,
18816 IX86_BUILTIN_PHADDDQ,
18817 IX86_BUILTIN_PHADDUBW,
18818 IX86_BUILTIN_PHADDUBD,
18819 IX86_BUILTIN_PHADDUBQ,
18820 IX86_BUILTIN_PHADDUWD,
18821 IX86_BUILTIN_PHADDUWQ,
18822 IX86_BUILTIN_PHADDUDQ,
18823 IX86_BUILTIN_PHSUBBW,
18824 IX86_BUILTIN_PHSUBWD,
18825 IX86_BUILTIN_PHSUBDQ,
18826 IX86_BUILTIN_PROTB,
18827 IX86_BUILTIN_PROTW,
18828 IX86_BUILTIN_PROTD,
18829 IX86_BUILTIN_PROTQ,
18830 IX86_BUILTIN_PROTB_IMM,
18831 IX86_BUILTIN_PROTW_IMM,
18832 IX86_BUILTIN_PROTD_IMM,
18833 IX86_BUILTIN_PROTQ_IMM,
18834 IX86_BUILTIN_PSHLB,
18835 IX86_BUILTIN_PSHLW,
18836 IX86_BUILTIN_PSHLD,
18837 IX86_BUILTIN_PSHLQ,
18838 IX86_BUILTIN_PSHAB,
18839 IX86_BUILTIN_PSHAW,
18840 IX86_BUILTIN_PSHAD,
18841 IX86_BUILTIN_PSHAQ,
18842 IX86_BUILTIN_FRCZSS,
18843 IX86_BUILTIN_FRCZSD,
18844 IX86_BUILTIN_FRCZPS,
18845 IX86_BUILTIN_FRCZPD,
18846 IX86_BUILTIN_CVTPH2PS,
18847 IX86_BUILTIN_CVTPS2PH,
18849 IX86_BUILTIN_COMEQSS,
18850 IX86_BUILTIN_COMNESS,
18851 IX86_BUILTIN_COMLTSS,
18852 IX86_BUILTIN_COMLESS,
18853 IX86_BUILTIN_COMGTSS,
18854 IX86_BUILTIN_COMGESS,
18855 IX86_BUILTIN_COMUEQSS,
18856 IX86_BUILTIN_COMUNESS,
18857 IX86_BUILTIN_COMULTSS,
18858 IX86_BUILTIN_COMULESS,
18859 IX86_BUILTIN_COMUGTSS,
18860 IX86_BUILTIN_COMUGESS,
18861 IX86_BUILTIN_COMORDSS,
18862 IX86_BUILTIN_COMUNORDSS,
18863 IX86_BUILTIN_COMFALSESS,
18864 IX86_BUILTIN_COMTRUESS,
18866 IX86_BUILTIN_COMEQSD,
18867 IX86_BUILTIN_COMNESD,
18868 IX86_BUILTIN_COMLTSD,
18869 IX86_BUILTIN_COMLESD,
18870 IX86_BUILTIN_COMGTSD,
18871 IX86_BUILTIN_COMGESD,
18872 IX86_BUILTIN_COMUEQSD,
18873 IX86_BUILTIN_COMUNESD,
18874 IX86_BUILTIN_COMULTSD,
18875 IX86_BUILTIN_COMULESD,
18876 IX86_BUILTIN_COMUGTSD,
18877 IX86_BUILTIN_COMUGESD,
18878 IX86_BUILTIN_COMORDSD,
18879 IX86_BUILTIN_COMUNORDSD,
18880 IX86_BUILTIN_COMFALSESD,
18881 IX86_BUILTIN_COMTRUESD,
18883 IX86_BUILTIN_COMEQPS,
18884 IX86_BUILTIN_COMNEPS,
18885 IX86_BUILTIN_COMLTPS,
18886 IX86_BUILTIN_COMLEPS,
18887 IX86_BUILTIN_COMGTPS,
18888 IX86_BUILTIN_COMGEPS,
18889 IX86_BUILTIN_COMUEQPS,
18890 IX86_BUILTIN_COMUNEPS,
18891 IX86_BUILTIN_COMULTPS,
18892 IX86_BUILTIN_COMULEPS,
18893 IX86_BUILTIN_COMUGTPS,
18894 IX86_BUILTIN_COMUGEPS,
18895 IX86_BUILTIN_COMORDPS,
18896 IX86_BUILTIN_COMUNORDPS,
18897 IX86_BUILTIN_COMFALSEPS,
18898 IX86_BUILTIN_COMTRUEPS,
18900 IX86_BUILTIN_COMEQPD,
18901 IX86_BUILTIN_COMNEPD,
18902 IX86_BUILTIN_COMLTPD,
18903 IX86_BUILTIN_COMLEPD,
18904 IX86_BUILTIN_COMGTPD,
18905 IX86_BUILTIN_COMGEPD,
18906 IX86_BUILTIN_COMUEQPD,
18907 IX86_BUILTIN_COMUNEPD,
18908 IX86_BUILTIN_COMULTPD,
18909 IX86_BUILTIN_COMULEPD,
18910 IX86_BUILTIN_COMUGTPD,
18911 IX86_BUILTIN_COMUGEPD,
18912 IX86_BUILTIN_COMORDPD,
18913 IX86_BUILTIN_COMUNORDPD,
18914 IX86_BUILTIN_COMFALSEPD,
18915 IX86_BUILTIN_COMTRUEPD,
18917 IX86_BUILTIN_PCOMEQUB,
18918 IX86_BUILTIN_PCOMNEUB,
18919 IX86_BUILTIN_PCOMLTUB,
18920 IX86_BUILTIN_PCOMLEUB,
18921 IX86_BUILTIN_PCOMGTUB,
18922 IX86_BUILTIN_PCOMGEUB,
18923 IX86_BUILTIN_PCOMFALSEUB,
18924 IX86_BUILTIN_PCOMTRUEUB,
18925 IX86_BUILTIN_PCOMEQUW,
18926 IX86_BUILTIN_PCOMNEUW,
18927 IX86_BUILTIN_PCOMLTUW,
18928 IX86_BUILTIN_PCOMLEUW,
18929 IX86_BUILTIN_PCOMGTUW,
18930 IX86_BUILTIN_PCOMGEUW,
18931 IX86_BUILTIN_PCOMFALSEUW,
18932 IX86_BUILTIN_PCOMTRUEUW,
18933 IX86_BUILTIN_PCOMEQUD,
18934 IX86_BUILTIN_PCOMNEUD,
18935 IX86_BUILTIN_PCOMLTUD,
18936 IX86_BUILTIN_PCOMLEUD,
18937 IX86_BUILTIN_PCOMGTUD,
18938 IX86_BUILTIN_PCOMGEUD,
18939 IX86_BUILTIN_PCOMFALSEUD,
18940 IX86_BUILTIN_PCOMTRUEUD,
18941 IX86_BUILTIN_PCOMEQUQ,
18942 IX86_BUILTIN_PCOMNEUQ,
18943 IX86_BUILTIN_PCOMLTUQ,
18944 IX86_BUILTIN_PCOMLEUQ,
18945 IX86_BUILTIN_PCOMGTUQ,
18946 IX86_BUILTIN_PCOMGEUQ,
18947 IX86_BUILTIN_PCOMFALSEUQ,
18948 IX86_BUILTIN_PCOMTRUEUQ,
18950 IX86_BUILTIN_PCOMEQB,
18951 IX86_BUILTIN_PCOMNEB,
18952 IX86_BUILTIN_PCOMLTB,
18953 IX86_BUILTIN_PCOMLEB,
18954 IX86_BUILTIN_PCOMGTB,
18955 IX86_BUILTIN_PCOMGEB,
18956 IX86_BUILTIN_PCOMFALSEB,
18957 IX86_BUILTIN_PCOMTRUEB,
18958 IX86_BUILTIN_PCOMEQW,
18959 IX86_BUILTIN_PCOMNEW,
18960 IX86_BUILTIN_PCOMLTW,
18961 IX86_BUILTIN_PCOMLEW,
18962 IX86_BUILTIN_PCOMGTW,
18963 IX86_BUILTIN_PCOMGEW,
18964 IX86_BUILTIN_PCOMFALSEW,
18965 IX86_BUILTIN_PCOMTRUEW,
18966 IX86_BUILTIN_PCOMEQD,
18967 IX86_BUILTIN_PCOMNED,
18968 IX86_BUILTIN_PCOMLTD,
18969 IX86_BUILTIN_PCOMLED,
18970 IX86_BUILTIN_PCOMGTD,
18971 IX86_BUILTIN_PCOMGED,
18972 IX86_BUILTIN_PCOMFALSED,
18973 IX86_BUILTIN_PCOMTRUED,
18974 IX86_BUILTIN_PCOMEQQ,
18975 IX86_BUILTIN_PCOMNEQ,
18976 IX86_BUILTIN_PCOMLTQ,
18977 IX86_BUILTIN_PCOMLEQ,
18978 IX86_BUILTIN_PCOMGTQ,
18979 IX86_BUILTIN_PCOMGEQ,
18980 IX86_BUILTIN_PCOMFALSEQ,
18981 IX86_BUILTIN_PCOMTRUEQ,
18983 IX86_BUILTIN_MAX
18986 /* Table for the ix86 builtin decls. */
18987 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
18989 /* Table to record which ISA options the builtin needs. */
18990 static int ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
18992 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
18993 * of which isa_flags to use in the ix86_builtins_isa array. Stores the
18994 * function decl in the ix86_builtins array. Returns the function decl, or
18995 * NULL_TREE if the builtin was not added.
18997 * Record all builtins, even those whose instruction set is not in the current
18998 * ISA, in case the user uses function-specific options for a different ISA.
18999 * When the builtin is expanded, check at that time whether it is valid. */
19001 static inline tree
19002 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
19004 tree decl = NULL_TREE;
19006 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
19008 decl = add_builtin_function (name, type, code, BUILT_IN_MD,
19009 NULL, NULL_TREE);
19010 ix86_builtins[(int) code] = decl;
19011 ix86_builtins_isa[(int) code] = mask;
19014 return decl;
19017 /* Like def_builtin, but also marks the function decl "const". */
19019 static inline tree
19020 def_builtin_const (int mask, const char *name, tree type,
19021 enum ix86_builtins code)
19023 tree decl = def_builtin (mask, name, type, code);
19024 if (decl)
19025 TREE_READONLY (decl) = 1;
19026 return decl;
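/* Usage sketch (added note; the tables below register builtins in bulk):
   a single builtin could be added as

     def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_addps",
                        v4sf_ftype_v4sf_v4sf, IX86_BUILTIN_ADDPS);

   where v4sf_ftype_v4sf_v4sf stands for a previously built function type
   node (the name is hypothetical here).  */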
19029 /* Bits for builtin_description.flag. */
19031 /* Set when we don't support the comparison natively, and should
19032 swap the comparison operands in order to support it. */
19033 #define BUILTIN_DESC_SWAP_OPERANDS 1
19035 struct builtin_description
19037 const unsigned int mask;
19038 const enum insn_code icode;
19039 const char *const name;
19040 const enum ix86_builtins code;
19041 const enum rtx_code comparison;
19042 const int flag;
19045 static const struct builtin_description bdesc_comi[] =
19047 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
19048 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
19049 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
19050 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
19051 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
19052 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
19053 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
19054 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
19055 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
19056 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
19057 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
19058 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
19059 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
19060 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
19061 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
19062 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
19063 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
19064 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
19065 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
19066 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
19067 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
19068 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
19069 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
19070 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
19073 static const struct builtin_description bdesc_pcmpestr[] =
19075 /* SSE4.2 */
19076 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
19077 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
19078 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
19079 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
19080 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
19081 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
19082 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
19085 static const struct builtin_description bdesc_pcmpistr[] =
19087 /* SSE4.2 */
19088 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
19089 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
19090 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
19091 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
19092 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
19093 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
19094 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
19097 /* Special builtin types */
19098 enum ix86_special_builtin_type
19100 SPECIAL_FTYPE_UNKNOWN,
19101 VOID_FTYPE_VOID,
19102 V16QI_FTYPE_PCCHAR,
19103 V4SF_FTYPE_PCFLOAT,
19104 V2DF_FTYPE_PCDOUBLE,
19105 V4SF_FTYPE_V4SF_PCV2SF,
19106 V2DF_FTYPE_V2DF_PCDOUBLE,
19107 V2DI_FTYPE_PV2DI,
19108 VOID_FTYPE_PV2SF_V4SF,
19109 VOID_FTYPE_PV2DI_V2DI,
19110 VOID_FTYPE_PCHAR_V16QI,
19111 VOID_FTYPE_PFLOAT_V4SF,
19112 VOID_FTYPE_PDOUBLE_V2DF,
19113 VOID_FTYPE_PDI_DI,
19114 VOID_FTYPE_PINT_INT
19117 /* Builtin types */
19118 enum ix86_builtin_type
19120 FTYPE_UNKNOWN,
19121 FLOAT128_FTYPE_FLOAT128,
19122 FLOAT_FTYPE_FLOAT,
19123 FLOAT128_FTYPE_FLOAT128_FLOAT128,
19124 INT_FTYPE_V2DI_V2DI_PTEST,
19125 INT64_FTYPE_V4SF,
19126 INT64_FTYPE_V2DF,
19127 INT_FTYPE_V16QI,
19128 INT_FTYPE_V8QI,
19129 INT_FTYPE_V4SF,
19130 INT_FTYPE_V2DF,
19131 V16QI_FTYPE_V16QI,
19132 V8HI_FTYPE_V8HI,
19133 V8HI_FTYPE_V16QI,
19134 V8QI_FTYPE_V8QI,
19135 V4SI_FTYPE_V4SI,
19136 V4SI_FTYPE_V16QI,
19137 V4SI_FTYPE_V8HI,
19138 V4SI_FTYPE_V4SF,
19139 V4SI_FTYPE_V2DF,
19140 V4HI_FTYPE_V4HI,
19141 V4SF_FTYPE_V4SF,
19142 V4SF_FTYPE_V4SF_VEC_MERGE,
19143 V4SF_FTYPE_V4SI,
19144 V4SF_FTYPE_V2DF,
19145 V2DI_FTYPE_V2DI,
19146 V2DI_FTYPE_V16QI,
19147 V2DI_FTYPE_V8HI,
19148 V2DI_FTYPE_V4SI,
19149 V2DF_FTYPE_V2DF,
19150 V2DF_FTYPE_V2DF_VEC_MERGE,
19151 V2DF_FTYPE_V4SI,
19152 V2DF_FTYPE_V4SF,
19153 V2DF_FTYPE_V2SI,
19154 V2SI_FTYPE_V2SI,
19155 V2SI_FTYPE_V4SF,
19156 V2SI_FTYPE_V2SF,
19157 V2SI_FTYPE_V2DF,
19158 V2SF_FTYPE_V2SF,
19159 V2SF_FTYPE_V2SI,
19160 V16QI_FTYPE_V16QI_V16QI,
19161 V16QI_FTYPE_V8HI_V8HI,
19162 V8QI_FTYPE_V8QI_V8QI,
19163 V8QI_FTYPE_V4HI_V4HI,
19164 V8HI_FTYPE_V8HI_V8HI,
19165 V8HI_FTYPE_V8HI_V8HI_COUNT,
19166 V8HI_FTYPE_V16QI_V16QI,
19167 V8HI_FTYPE_V4SI_V4SI,
19168 V8HI_FTYPE_V8HI_SI_COUNT,
19169 V4SI_FTYPE_V4SI_V4SI,
19170 V4SI_FTYPE_V4SI_V4SI_COUNT,
19171 V4SI_FTYPE_V8HI_V8HI,
19172 V4SI_FTYPE_V4SF_V4SF,
19173 V4SI_FTYPE_V2DF_V2DF,
19174 V4SI_FTYPE_V4SI_SI_COUNT,
19175 V4HI_FTYPE_V4HI_V4HI,
19176 V4HI_FTYPE_V4HI_V4HI_COUNT,
19177 V4HI_FTYPE_V8QI_V8QI,
19178 V4HI_FTYPE_V2SI_V2SI,
19179 V4HI_FTYPE_V4HI_SI_COUNT,
19180 V4SF_FTYPE_V4SF_V4SF,
19181 V4SF_FTYPE_V4SF_V4SF_SWAP,
19182 V4SF_FTYPE_V4SF_V2SI,
19183 V4SF_FTYPE_V4SF_V2DF,
19184 V4SF_FTYPE_V4SF_DI,
19185 V4SF_FTYPE_V4SF_SI,
19186 V2DI_FTYPE_V2DI_V2DI,
19187 V2DI_FTYPE_V2DI_V2DI_COUNT,
19188 V2DI_FTYPE_V16QI_V16QI,
19189 V2DI_FTYPE_V4SI_V4SI,
19190 V2DI_FTYPE_V2DI_V16QI,
19191 V2DI_FTYPE_V2DF_V2DF,
19192 V2DI_FTYPE_V2DI_SI_COUNT,
19193 V2SI_FTYPE_V2SI_V2SI,
19194 V2SI_FTYPE_V2SI_V2SI_COUNT,
19195 V2SI_FTYPE_V4HI_V4HI,
19196 V2SI_FTYPE_V2SF_V2SF,
19197 V2SI_FTYPE_V2SI_SI_COUNT,
19198 V2DF_FTYPE_V2DF_V2DF,
19199 V2DF_FTYPE_V2DF_V2DF_SWAP,
19200 V2DF_FTYPE_V2DF_V4SF,
19201 V2DF_FTYPE_V2DF_DI,
19202 V2DF_FTYPE_V2DF_SI,
19203 V2SF_FTYPE_V2SF_V2SF,
19204 V1DI_FTYPE_V1DI_V1DI,
19205 V1DI_FTYPE_V1DI_V1DI_COUNT,
19206 V1DI_FTYPE_V8QI_V8QI,
19207 V1DI_FTYPE_V2SI_V2SI,
19208 V1DI_FTYPE_V1DI_SI_COUNT,
19209 UINT64_FTYPE_UINT64_UINT64,
19210 UINT_FTYPE_UINT_UINT,
19211 UINT_FTYPE_UINT_USHORT,
19212 UINT_FTYPE_UINT_UCHAR,
19213 V8HI_FTYPE_V8HI_INT,
19214 V4SI_FTYPE_V4SI_INT,
19215 V4HI_FTYPE_V4HI_INT,
19216 V4SF_FTYPE_V4SF_INT,
19217 V2DI_FTYPE_V2DI_INT,
19218 V2DI2TI_FTYPE_V2DI_INT,
19219 V2DF_FTYPE_V2DF_INT,
19220 V16QI_FTYPE_V16QI_V16QI_V16QI,
19221 V4SF_FTYPE_V4SF_V4SF_V4SF,
19222 V2DF_FTYPE_V2DF_V2DF_V2DF,
19223 V16QI_FTYPE_V16QI_V16QI_INT,
19224 V8HI_FTYPE_V8HI_V8HI_INT,
19225 V4SI_FTYPE_V4SI_V4SI_INT,
19226 V4SF_FTYPE_V4SF_V4SF_INT,
19227 V2DI_FTYPE_V2DI_V2DI_INT,
19228 V2DI2TI_FTYPE_V2DI_V2DI_INT,
19229 V1DI2DI_FTYPE_V1DI_V1DI_INT,
19230 V2DF_FTYPE_V2DF_V2DF_INT,
19231 V2DI_FTYPE_V2DI_UINT_UINT,
19232 V2DI_FTYPE_V2DI_V2DI_UINT_UINT
19235 /* Special builtins with variable number of arguments. */
19236 static const struct builtin_description bdesc_special_args[] =
19238 /* MMX */
19239 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
19241 /* 3DNow! */
19242 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
19244 /* SSE */
19245 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
19246 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
19247 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
19249 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
19250 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
19251 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
19252 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
19254 /* SSE or 3DNow!A */
19255 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
19256 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PDI_DI },
19258 /* SSE2 */
19259 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
19260 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
19261 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
19262 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
19263 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
19264 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
19265 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
19266 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
19267 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
19269 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
19270 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
19272 /* SSE3 */
19273 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
19275 /* SSE4.1 */
19276 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
19278 /* SSE4A */
19279 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
19280   { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
19281 };
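/* The "special" builtins above take pointer operands or have side
   effects (loads, stores, fences), so they are expanded through the
   special-args path (ix86_expand_special_args_builtin) rather than the
   const-function path used for the arithmetic table that follows.  */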
19283 /* Builtins with variable number of arguments. */
19284 static const struct builtin_description bdesc_args[] =
19285 {
19286 /* MMX */
19287 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19288 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19289 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19290 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19291 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19292 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19294 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19295 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19296 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19297 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19298 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19299 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19300 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19301 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19303 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19304 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19306 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19307 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_nandv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19308 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19309 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19311 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19312 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19313 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19314 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19315 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19316 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19318 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19319 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19320 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19321 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19322   { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19323   { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19325 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
19326 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
19327 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
19329 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
19331 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
19332 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
19333 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
19334 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
19335 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
19336 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
19338 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
19339 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
19340 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
19341 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
19342 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
19343 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
19345 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
19346 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
19347 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
19348 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
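/* Each MMX shift comes in two flavors sharing one insn pattern: the
   "...i" builtin (e.g. __builtin_ia32_psllwi) takes an int count and is
   tagged _SI_COUNT, while the plain form (e.g. __builtin_ia32_psllw)
   takes the count in an MMX register and is tagged _V4HI_COUNT or
   similar; the _COUNT suffix tells the expander to treat the last
   operand as a shift count rather than an ordinary vector argument.  */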
19350 /* 3DNow! */
19351 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
19352 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
19353 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
19354 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
19356 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19357 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19358 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19359 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
19360 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
19361 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
19362 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19363 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19364 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19365 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19366 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19367 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19368 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19369 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19370 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19372 /* 3DNow!A */
19373 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
19374 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
19375 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
19376 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
19377 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19378 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
19380 /* SSE */
19381 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
19382 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
19383 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
19384 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
19385 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
19386 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
19387 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
19388 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
19389 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
19390 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
19391 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
19392 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
19394 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
19396 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19397 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19398 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19399 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19400 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19401 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19402 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19403 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19405 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
19406 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
19407 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
19408 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
19409 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
19410 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
19411 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
19412 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
19413 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
19414 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
19415   { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
19416 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
19417 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
19418 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
19419 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
19420 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
19421 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
19422 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
19423 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
19424 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
19425 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
19426 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
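/* For the compare builtins the comparison field carries the rtx code to
   plug into the maskcmp pattern instead of UNKNOWN.  Predicates without
   a direct encoding are synthesized: cmpgt/cmpge reuse LT/LE with the
   _SWAP tag (operands exchanged), and the negated predicates map to the
   unordered codes (cmpnlt -> UNGE, cmpnle -> UNGT), matching the cmpps
   predicate encodings.  */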
19428 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19429 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19430 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19431 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19433 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19434 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_nandv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19435 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19436 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19438 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19439 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19440 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19441 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19442 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19444 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
19445 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
19446   { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
19448 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
19450 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
19451 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
19452 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
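/* _VEC_MERGE marks the unary scalar ops (sqrtss, rsqrtss, rcpss): the
   vec_merge insn patterns need a second V4SF input to supply elements
   1-3, so the expander can reuse the single builtin operand for both
   inputs; only element 0 is actually computed, as in the hardware
   scalar instructions.  */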
19454   /* SSE MMX or 3DNow!A */
19455 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19456 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19457 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19459 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19460 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19461 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19462 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19464 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
19465 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
19467 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
19469 /* SSE2 */
19470 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
19472 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
19473 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
19474 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
19475 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
19476 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
19478 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
19479 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
19480 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
19481 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
19482 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
19484 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
19486 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
19487 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
19488 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
19489 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
19491 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
19492 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
19493 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
19495 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19496 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19497 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19498 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19499 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19500 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19501 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19502 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19504 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
19505 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
19506 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
19507 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
19508   { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
19509 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
19510 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
19511 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
19512 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
19513 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
19514 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
19515 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
19516 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
19517 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
19518 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
19519 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
19520 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
19521 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
19522 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
19523 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
19525 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19526 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19527 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19528 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19530 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19531 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19532 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19533 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19535 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19536 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd_exp, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19537 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd_exp, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19539 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
19541 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19542 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19543 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19544 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19545 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19546 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19547 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19548 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19550 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19551 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19552 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19553 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19554 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19555 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19556 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19557 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19559 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19560   { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19562 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19563 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_nandv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19564 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19565 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19567 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19568 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19570 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19571 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19572 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19573 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19574 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19575 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19577 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19578 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19579 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19580 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19582 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19583 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19584 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19585 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19586 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19587 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19588 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19589 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19591 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
19592 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
19593 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
19595 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19596 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
19598 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
19599 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
19601 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
19603 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
19604 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
19605 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
19606 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
19608 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
19609 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
19610 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
19611 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
19612 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
19613 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
19614 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
19616 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
19617 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
19618 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
19619 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
19620 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
19621 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
19622 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
19624 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
19625 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
19626 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
19627 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
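/* pslldq/psrldq are the odd ones out: the V2DI2TI tag means the V2DI
   operand is reinterpreted as a single TImode value so the ashlti3/
   lshrti3 patterns can shift bytes across the whole register.  The
   immediate is a bit count; the _mm_slli_si128/_mm_srli_si128
   intrinsics multiply their byte count by 8 before calling these.  */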
19629 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
19630 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
19631 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
19633 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
19635 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
19636 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
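/* Entries with a null name (these two, and the AES/PCLMUL ones below)
   are not registered by name from this table; the builtin init code
   defines their user-visible names separately (e.g. __builtin_fabsq),
   keyed by the IX86_BUILTIN_* code, so the table only supplies the insn
   code and signature.  */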
19638 /* SSE2 MMX */
19639 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
19640 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
19642 /* SSE3 */
19643   { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
19644 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
19646 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19647 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19648 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19649 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19650 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
19651 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
19653 /* SSSE3 */
19654 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
19655 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
19656 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
19657 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
19658 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
19659 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
19661 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19662 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19663 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19664 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19665 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19666 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19667 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19668 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19669 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19670 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19671 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19672 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19673 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
19674 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
19675 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19676 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19677 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19678 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19679 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19680 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
19681 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19682 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
19683 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19684 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
19686 /* SSSE3. */
19687 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_V2DI_INT },
19688 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI2DI_FTYPE_V1DI_V1DI_INT },
19690 /* SSE4.1 */
19691 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
19692 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
19693 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
19694 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
19695 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
19696 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
19697 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
19698 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
19699 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
19700 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
19702 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
19703 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
19704 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
19705 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
19706 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
19707 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
19708 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
19709 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
19710 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
19711 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
19712 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
19713 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
19714 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
19716 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
19717 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19718 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19719 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19720 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19721 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19722 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
19723 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19724 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19725 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
19726 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
19727 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
19729 /* SSE4.1 and SSE5 */
19730 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
19731 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
19732 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
19733 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
19735 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
19736 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
19737 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
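/* The three ptest builtins share one insn; the rtx code selects which
   flag the _PTEST expansion reads: EQ tests ZF (ptestz), LTU tests CF
   (ptestc), and GTU tests that neither ZF nor CF is set (ptestnzc).  */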
19739 /* SSE4.2 */
19740 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19741 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
19742 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
19743 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
19744 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
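/* A minimal usage sketch for the crc32 builtins above (hypothetical
   user code, not part of this file; requires -msse4.2):

     unsigned int crc = ~0u;
     for (size_t i = 0; i < len; i++)
       crc = __builtin_ia32_crc32qi (crc, buf[i]);

   Each call folds one byte into the running CRC-32C value, matching the
   UINT_FTYPE_UINT_UCHAR signature recorded here.  */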
19746 /* SSE4A */
19747 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
19748 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
19749 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
19750 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19752 /* AES */
19753 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
19754 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
19756 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19757 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19758 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19759 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
19761 /* PCLMUL */
19762   { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
19763 };
19765 /* SSE5 */
19766 enum multi_arg_type {
19767 MULTI_ARG_UNKNOWN,
19768 MULTI_ARG_3_SF,
19769 MULTI_ARG_3_DF,
19770 MULTI_ARG_3_DI,
19771 MULTI_ARG_3_SI,
19772 MULTI_ARG_3_SI_DI,
19773 MULTI_ARG_3_HI,
19774 MULTI_ARG_3_HI_SI,
19775 MULTI_ARG_3_QI,
19776 MULTI_ARG_3_PERMPS,
19777 MULTI_ARG_3_PERMPD,
19778 MULTI_ARG_2_SF,
19779 MULTI_ARG_2_DF,
19780 MULTI_ARG_2_DI,
19781 MULTI_ARG_2_SI,
19782 MULTI_ARG_2_HI,
19783 MULTI_ARG_2_QI,
19784 MULTI_ARG_2_DI_IMM,
19785 MULTI_ARG_2_SI_IMM,
19786 MULTI_ARG_2_HI_IMM,
19787 MULTI_ARG_2_QI_IMM,
19788 MULTI_ARG_2_SF_CMP,
19789 MULTI_ARG_2_DF_CMP,
19790 MULTI_ARG_2_DI_CMP,
19791 MULTI_ARG_2_SI_CMP,
19792 MULTI_ARG_2_HI_CMP,
19793 MULTI_ARG_2_QI_CMP,
19794 MULTI_ARG_2_DI_TF,
19795 MULTI_ARG_2_SI_TF,
19796 MULTI_ARG_2_HI_TF,
19797 MULTI_ARG_2_QI_TF,
19798 MULTI_ARG_2_SF_TF,
19799 MULTI_ARG_2_DF_TF,
19800 MULTI_ARG_1_SF,
19801 MULTI_ARG_1_DF,
19802 MULTI_ARG_1_DI,
19803 MULTI_ARG_1_SI,
19804 MULTI_ARG_1_HI,
19805 MULTI_ARG_1_QI,
19806 MULTI_ARG_1_SI_DI,
19807 MULTI_ARG_1_HI_DI,
19808 MULTI_ARG_1_HI_SI,
19809 MULTI_ARG_1_QI_DI,
19810 MULTI_ARG_1_QI_SI,
19811 MULTI_ARG_1_QI_HI,
19812 MULTI_ARG_1_PH2PS,
19813 MULTI_ARG_1_PS2PH
19814 };
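/* Naming scheme (as inferred from the table below): MULTI_ARG_<N>_<MODE>
   gives the operand count and element mode of an entry in bdesc_multi_arg;
   a second mode names a widening result (e.g. MULTI_ARG_3_SI_DI), _IMM
   marks an immediate shift/rotate count, and the _CMP and _TF variants are
   paired with a comparison or PCOM/COM_TRUE/FALSE code in the table.  */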
19816 static const struct builtin_description bdesc_multi_arg[] =
19817 {
19818 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv4sf4, "__builtin_ia32_fmaddss", IX86_BUILTIN_FMADDSS, 0, (int)MULTI_ARG_3_SF },
19819 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmaddv2df4, "__builtin_ia32_fmaddsd", IX86_BUILTIN_FMADDSD, 0, (int)MULTI_ARG_3_DF },
19820 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv4sf4, "__builtin_ia32_fmaddps", IX86_BUILTIN_FMADDPS, 0, (int)MULTI_ARG_3_SF },
19821 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmaddv2df4, "__builtin_ia32_fmaddpd", IX86_BUILTIN_FMADDPD, 0, (int)MULTI_ARG_3_DF },
19822 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv4sf4, "__builtin_ia32_fmsubss", IX86_BUILTIN_FMSUBSS, 0, (int)MULTI_ARG_3_SF },
19823 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfmsubv2df4, "__builtin_ia32_fmsubsd", IX86_BUILTIN_FMSUBSD, 0, (int)MULTI_ARG_3_DF },
19824 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv4sf4, "__builtin_ia32_fmsubps", IX86_BUILTIN_FMSUBPS, 0, (int)MULTI_ARG_3_SF },
19825 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fmsubv2df4, "__builtin_ia32_fmsubpd", IX86_BUILTIN_FMSUBPD, 0, (int)MULTI_ARG_3_DF },
19826 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv4sf4, "__builtin_ia32_fnmaddss", IX86_BUILTIN_FNMADDSS, 0, (int)MULTI_ARG_3_SF },
19827 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmaddv2df4, "__builtin_ia32_fnmaddsd", IX86_BUILTIN_FNMADDSD, 0, (int)MULTI_ARG_3_DF },
19828 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv4sf4, "__builtin_ia32_fnmaddps", IX86_BUILTIN_FNMADDPS, 0, (int)MULTI_ARG_3_SF },
19829 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmaddv2df4, "__builtin_ia32_fnmaddpd", IX86_BUILTIN_FNMADDPD, 0, (int)MULTI_ARG_3_DF },
19830 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv4sf4, "__builtin_ia32_fnmsubss", IX86_BUILTIN_FNMSUBSS, 0, (int)MULTI_ARG_3_SF },
19831 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_vmfnmsubv2df4, "__builtin_ia32_fnmsubsd", IX86_BUILTIN_FNMSUBSD, 0, (int)MULTI_ARG_3_DF },
19832 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv4sf4, "__builtin_ia32_fnmsubps", IX86_BUILTIN_FNMSUBPS, 0, (int)MULTI_ARG_3_SF },
19833 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5i_fnmsubv2df4, "__builtin_ia32_fnmsubpd", IX86_BUILTIN_FNMSUBPD, 0, (int)MULTI_ARG_3_DF },
19834 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
19835 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2di, "__builtin_ia32_pcmov_v2di", IX86_BUILTIN_PCMOV_V2DI, 0, (int)MULTI_ARG_3_DI },
19836 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4si, "__builtin_ia32_pcmov_v4si", IX86_BUILTIN_PCMOV_V4SI, 0, (int)MULTI_ARG_3_SI },
19837 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v8hi, "__builtin_ia32_pcmov_v8hi", IX86_BUILTIN_PCMOV_V8HI, 0, (int)MULTI_ARG_3_HI },
19838 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v16qi, "__builtin_ia32_pcmov_v16qi",IX86_BUILTIN_PCMOV_V16QI,0, (int)MULTI_ARG_3_QI },
19839 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v2df, "__builtin_ia32_pcmov_v2df", IX86_BUILTIN_PCMOV_V2DF, 0, (int)MULTI_ARG_3_DF },
19840 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcmov_v4sf, "__builtin_ia32_pcmov_v4sf", IX86_BUILTIN_PCMOV_V4SF, 0, (int)MULTI_ARG_3_SF },
19841 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pperm, "__builtin_ia32_pperm", IX86_BUILTIN_PPERM, 0, (int)MULTI_ARG_3_QI },
19842 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv4sf, "__builtin_ia32_permps", IX86_BUILTIN_PERMPS, 0, (int)MULTI_ARG_3_PERMPS },
19843 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_permv2df, "__builtin_ia32_permpd", IX86_BUILTIN_PERMPD, 0, (int)MULTI_ARG_3_PERMPD },
19844 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssww, "__builtin_ia32_pmacssww", IX86_BUILTIN_PMACSSWW, 0, (int)MULTI_ARG_3_HI },
19845 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsww, "__builtin_ia32_pmacsww", IX86_BUILTIN_PMACSWW, 0, (int)MULTI_ARG_3_HI },
19846 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsswd, "__builtin_ia32_pmacsswd", IX86_BUILTIN_PMACSSWD, 0, (int)MULTI_ARG_3_HI_SI },
19847 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacswd, "__builtin_ia32_pmacswd", IX86_BUILTIN_PMACSWD, 0, (int)MULTI_ARG_3_HI_SI },
19848 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdd, "__builtin_ia32_pmacssdd", IX86_BUILTIN_PMACSSDD, 0, (int)MULTI_ARG_3_SI },
19849 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdd, "__builtin_ia32_pmacsdd", IX86_BUILTIN_PMACSDD, 0, (int)MULTI_ARG_3_SI },
19850 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdql, "__builtin_ia32_pmacssdql", IX86_BUILTIN_PMACSSDQL, 0, (int)MULTI_ARG_3_SI_DI },
19851 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacssdqh, "__builtin_ia32_pmacssdqh", IX86_BUILTIN_PMACSSDQH, 0, (int)MULTI_ARG_3_SI_DI },
19852 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdql, "__builtin_ia32_pmacsdql", IX86_BUILTIN_PMACSDQL, 0, (int)MULTI_ARG_3_SI_DI },
19853 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmacsdqh, "__builtin_ia32_pmacsdqh", IX86_BUILTIN_PMACSDQH, 0, (int)MULTI_ARG_3_SI_DI },
19854 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcsswd, "__builtin_ia32_pmadcsswd", IX86_BUILTIN_PMADCSSWD, 0, (int)MULTI_ARG_3_HI_SI },
19855 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pmadcswd, "__builtin_ia32_pmadcswd", IX86_BUILTIN_PMADCSWD, 0, (int)MULTI_ARG_3_HI_SI },
19856 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv2di3, "__builtin_ia32_protq", IX86_BUILTIN_PROTQ, 0, (int)MULTI_ARG_2_DI },
19857 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv4si3, "__builtin_ia32_protd", IX86_BUILTIN_PROTD, 0, (int)MULTI_ARG_2_SI },
19858 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv8hi3, "__builtin_ia32_protw", IX86_BUILTIN_PROTW, 0, (int)MULTI_ARG_2_HI },
19859 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vrotlv16qi3, "__builtin_ia32_protb", IX86_BUILTIN_PROTB, 0, (int)MULTI_ARG_2_QI },
19860 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv2di3, "__builtin_ia32_protqi", IX86_BUILTIN_PROTQ_IMM, 0, (int)MULTI_ARG_2_DI_IMM },
19861 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv4si3, "__builtin_ia32_protdi", IX86_BUILTIN_PROTD_IMM, 0, (int)MULTI_ARG_2_SI_IMM },
19862 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv8hi3, "__builtin_ia32_protwi", IX86_BUILTIN_PROTW_IMM, 0, (int)MULTI_ARG_2_HI_IMM },
19863 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_rotlv16qi3, "__builtin_ia32_protbi", IX86_BUILTIN_PROTB_IMM, 0, (int)MULTI_ARG_2_QI_IMM },
19864 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv2di3, "__builtin_ia32_pshaq", IX86_BUILTIN_PSHAQ, 0, (int)MULTI_ARG_2_DI },
19865 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv4si3, "__builtin_ia32_pshad", IX86_BUILTIN_PSHAD, 0, (int)MULTI_ARG_2_SI },
19866 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv8hi3, "__builtin_ia32_pshaw", IX86_BUILTIN_PSHAW, 0, (int)MULTI_ARG_2_HI },
19867 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_ashlv16qi3, "__builtin_ia32_pshab", IX86_BUILTIN_PSHAB, 0, (int)MULTI_ARG_2_QI },
19868 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv2di3, "__builtin_ia32_pshlq", IX86_BUILTIN_PSHLQ, 0, (int)MULTI_ARG_2_DI },
19869 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv4si3, "__builtin_ia32_pshld", IX86_BUILTIN_PSHLD, 0, (int)MULTI_ARG_2_SI },
19870 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv8hi3, "__builtin_ia32_pshlw", IX86_BUILTIN_PSHLW, 0, (int)MULTI_ARG_2_HI },
19871 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_lshlv16qi3, "__builtin_ia32_pshlb", IX86_BUILTIN_PSHLB, 0, (int)MULTI_ARG_2_QI },
19872 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv4sf2, "__builtin_ia32_frczss", IX86_BUILTIN_FRCZSS, 0, (int)MULTI_ARG_2_SF },
19873 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmfrczv2df2, "__builtin_ia32_frczsd", IX86_BUILTIN_FRCZSD, 0, (int)MULTI_ARG_2_DF },
19874 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv4sf2, "__builtin_ia32_frczps", IX86_BUILTIN_FRCZPS, 0, (int)MULTI_ARG_1_SF },
19875 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_frczv2df2, "__builtin_ia32_frczpd", IX86_BUILTIN_FRCZPD, 0, (int)MULTI_ARG_1_DF },
19876 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtph2ps, "__builtin_ia32_cvtph2ps", IX86_BUILTIN_CVTPH2PS, 0, (int)MULTI_ARG_1_PH2PS },
19877 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_cvtps2ph, "__builtin_ia32_cvtps2ph", IX86_BUILTIN_CVTPS2PH, 0, (int)MULTI_ARG_1_PS2PH },
19878 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbw, "__builtin_ia32_phaddbw", IX86_BUILTIN_PHADDBW, 0, (int)MULTI_ARG_1_QI_HI },
19879 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbd, "__builtin_ia32_phaddbd", IX86_BUILTIN_PHADDBD, 0, (int)MULTI_ARG_1_QI_SI },
19880 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddbq, "__builtin_ia32_phaddbq", IX86_BUILTIN_PHADDBQ, 0, (int)MULTI_ARG_1_QI_DI },
19881 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwd, "__builtin_ia32_phaddwd", IX86_BUILTIN_PHADDWD, 0, (int)MULTI_ARG_1_HI_SI },
19882 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddwq, "__builtin_ia32_phaddwq", IX86_BUILTIN_PHADDWQ, 0, (int)MULTI_ARG_1_HI_DI },
19883 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadddq, "__builtin_ia32_phadddq", IX86_BUILTIN_PHADDDQ, 0, (int)MULTI_ARG_1_SI_DI },
19884 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubw, "__builtin_ia32_phaddubw", IX86_BUILTIN_PHADDUBW, 0, (int)MULTI_ARG_1_QI_HI },
19885 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubd, "__builtin_ia32_phaddubd", IX86_BUILTIN_PHADDUBD, 0, (int)MULTI_ARG_1_QI_SI },
19886 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddubq, "__builtin_ia32_phaddubq", IX86_BUILTIN_PHADDUBQ, 0, (int)MULTI_ARG_1_QI_DI },
19887 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwd, "__builtin_ia32_phadduwd", IX86_BUILTIN_PHADDUWD, 0, (int)MULTI_ARG_1_HI_SI },
19888 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phadduwq, "__builtin_ia32_phadduwq", IX86_BUILTIN_PHADDUWQ, 0, (int)MULTI_ARG_1_HI_DI },
19889 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phaddudq, "__builtin_ia32_phaddudq", IX86_BUILTIN_PHADDUDQ, 0, (int)MULTI_ARG_1_SI_DI },
19890 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubbw, "__builtin_ia32_phsubbw", IX86_BUILTIN_PHSUBBW, 0, (int)MULTI_ARG_1_QI_HI },
19891 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubwd, "__builtin_ia32_phsubwd", IX86_BUILTIN_PHSUBWD, 0, (int)MULTI_ARG_1_HI_SI },
19892 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_phsubdq, "__builtin_ia32_phsubdq", IX86_BUILTIN_PHSUBDQ, 0, (int)MULTI_ARG_1_SI_DI },
19894 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comeqss", IX86_BUILTIN_COMEQSS, EQ, (int)MULTI_ARG_2_SF_CMP },
19895 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comness", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
19896 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comneqss", IX86_BUILTIN_COMNESS, NE, (int)MULTI_ARG_2_SF_CMP },
19897 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comltss", IX86_BUILTIN_COMLTSS, LT, (int)MULTI_ARG_2_SF_CMP },
19898 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comless", IX86_BUILTIN_COMLESS, LE, (int)MULTI_ARG_2_SF_CMP },
19899 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgtss", IX86_BUILTIN_COMGTSS, GT, (int)MULTI_ARG_2_SF_CMP },
19900 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comgess", IX86_BUILTIN_COMGESS, GE, (int)MULTI_ARG_2_SF_CMP },
19901 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comueqss", IX86_BUILTIN_COMUEQSS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
19902 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuness", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
19903 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comuneqss", IX86_BUILTIN_COMUNESS, LTGT, (int)MULTI_ARG_2_SF_CMP },
19904 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunltss", IX86_BUILTIN_COMULTSS, UNLT, (int)MULTI_ARG_2_SF_CMP },
19905 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunless", IX86_BUILTIN_COMULESS, UNLE, (int)MULTI_ARG_2_SF_CMP },
19906 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungtss", IX86_BUILTIN_COMUGTSS, UNGT, (int)MULTI_ARG_2_SF_CMP },
19907 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comungess", IX86_BUILTIN_COMUGESS, UNGE, (int)MULTI_ARG_2_SF_CMP },
19908 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comordss", IX86_BUILTIN_COMORDSS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
19909 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv4sf3, "__builtin_ia32_comunordss", IX86_BUILTIN_COMUNORDSS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
19911 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comeqsd", IX86_BUILTIN_COMEQSD, EQ, (int)MULTI_ARG_2_DF_CMP },
19912 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comnesd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
19913 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comneqsd", IX86_BUILTIN_COMNESD, NE, (int)MULTI_ARG_2_DF_CMP },
19914 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comltsd", IX86_BUILTIN_COMLTSD, LT, (int)MULTI_ARG_2_DF_CMP },
19915 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comlesd", IX86_BUILTIN_COMLESD, LE, (int)MULTI_ARG_2_DF_CMP },
19916 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgtsd", IX86_BUILTIN_COMGTSD, GT, (int)MULTI_ARG_2_DF_CMP },
19917 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comgesd", IX86_BUILTIN_COMGESD, GE, (int)MULTI_ARG_2_DF_CMP },
19918 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comueqsd", IX86_BUILTIN_COMUEQSD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
19919 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunesd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
19920 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comuneqsd", IX86_BUILTIN_COMUNESD, LTGT, (int)MULTI_ARG_2_DF_CMP },
19921 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunltsd", IX86_BUILTIN_COMULTSD, UNLT, (int)MULTI_ARG_2_DF_CMP },
19922 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunlesd", IX86_BUILTIN_COMULESD, UNLE, (int)MULTI_ARG_2_DF_CMP },
19923 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungtsd", IX86_BUILTIN_COMUGTSD, UNGT, (int)MULTI_ARG_2_DF_CMP },
19924 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comungesd", IX86_BUILTIN_COMUGESD, UNGE, (int)MULTI_ARG_2_DF_CMP },
19925 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comordsd", IX86_BUILTIN_COMORDSD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
19926 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_vmmaskcmpv2df3, "__builtin_ia32_comunordsd", IX86_BUILTIN_COMUNORDSD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
19928 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comeqps", IX86_BUILTIN_COMEQPS, EQ, (int)MULTI_ARG_2_SF_CMP },
19929 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
19930 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comneqps", IX86_BUILTIN_COMNEPS, NE, (int)MULTI_ARG_2_SF_CMP },
19931 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comltps", IX86_BUILTIN_COMLTPS, LT, (int)MULTI_ARG_2_SF_CMP },
19932 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comleps", IX86_BUILTIN_COMLEPS, LE, (int)MULTI_ARG_2_SF_CMP },
19933 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgtps", IX86_BUILTIN_COMGTPS, GT, (int)MULTI_ARG_2_SF_CMP },
19934 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comgeps", IX86_BUILTIN_COMGEPS, GE, (int)MULTI_ARG_2_SF_CMP },
19935 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comueqps", IX86_BUILTIN_COMUEQPS, UNEQ, (int)MULTI_ARG_2_SF_CMP },
19936 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
19937 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comuneqps", IX86_BUILTIN_COMUNEPS, LTGT, (int)MULTI_ARG_2_SF_CMP },
19938 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunltps", IX86_BUILTIN_COMULTPS, UNLT, (int)MULTI_ARG_2_SF_CMP },
19939 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunleps", IX86_BUILTIN_COMULEPS, UNLE, (int)MULTI_ARG_2_SF_CMP },
19940 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungtps", IX86_BUILTIN_COMUGTPS, UNGT, (int)MULTI_ARG_2_SF_CMP },
19941 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comungeps", IX86_BUILTIN_COMUGEPS, UNGE, (int)MULTI_ARG_2_SF_CMP },
19942 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comordps", IX86_BUILTIN_COMORDPS, ORDERED, (int)MULTI_ARG_2_SF_CMP },
19943 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4sf3, "__builtin_ia32_comunordps", IX86_BUILTIN_COMUNORDPS, UNORDERED, (int)MULTI_ARG_2_SF_CMP },
19945 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comeqpd", IX86_BUILTIN_COMEQPD, EQ, (int)MULTI_ARG_2_DF_CMP },
19946 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comnepd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
19947 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comneqpd", IX86_BUILTIN_COMNEPD, NE, (int)MULTI_ARG_2_DF_CMP },
19948 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comltpd", IX86_BUILTIN_COMLTPD, LT, (int)MULTI_ARG_2_DF_CMP },
19949 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comlepd", IX86_BUILTIN_COMLEPD, LE, (int)MULTI_ARG_2_DF_CMP },
19950 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgtpd", IX86_BUILTIN_COMGTPD, GT, (int)MULTI_ARG_2_DF_CMP },
19951 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comgepd", IX86_BUILTIN_COMGEPD, GE, (int)MULTI_ARG_2_DF_CMP },
19952 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comueqpd", IX86_BUILTIN_COMUEQPD, UNEQ, (int)MULTI_ARG_2_DF_CMP },
19953 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunepd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
19954 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comuneqpd", IX86_BUILTIN_COMUNEPD, LTGT, (int)MULTI_ARG_2_DF_CMP },
19955 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunltpd", IX86_BUILTIN_COMULTPD, UNLT, (int)MULTI_ARG_2_DF_CMP },
19956 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunlepd", IX86_BUILTIN_COMULEPD, UNLE, (int)MULTI_ARG_2_DF_CMP },
19957 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungtpd", IX86_BUILTIN_COMUGTPD, UNGT, (int)MULTI_ARG_2_DF_CMP },
19958 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comungepd", IX86_BUILTIN_COMUGEPD, UNGE, (int)MULTI_ARG_2_DF_CMP },
19959 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comordpd", IX86_BUILTIN_COMORDPD, ORDERED, (int)MULTI_ARG_2_DF_CMP },
19960 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2df3, "__builtin_ia32_comunordpd", IX86_BUILTIN_COMUNORDPD, UNORDERED, (int)MULTI_ARG_2_DF_CMP },
19962 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomeqb", IX86_BUILTIN_PCOMEQB, EQ, (int)MULTI_ARG_2_QI_CMP },
19963 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
19964 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomneqb", IX86_BUILTIN_PCOMNEB, NE, (int)MULTI_ARG_2_QI_CMP },
19965 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomltb", IX86_BUILTIN_PCOMLTB, LT, (int)MULTI_ARG_2_QI_CMP },
19966 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomleb", IX86_BUILTIN_PCOMLEB, LE, (int)MULTI_ARG_2_QI_CMP },
19967 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgtb", IX86_BUILTIN_PCOMGTB, GT, (int)MULTI_ARG_2_QI_CMP },
19968 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv16qi3, "__builtin_ia32_pcomgeb", IX86_BUILTIN_PCOMGEB, GE, (int)MULTI_ARG_2_QI_CMP },
19970 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomeqw", IX86_BUILTIN_PCOMEQW, EQ, (int)MULTI_ARG_2_HI_CMP },
19971 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomnew", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
19972 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomneqw", IX86_BUILTIN_PCOMNEW, NE, (int)MULTI_ARG_2_HI_CMP },
19973 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomltw", IX86_BUILTIN_PCOMLTW, LT, (int)MULTI_ARG_2_HI_CMP },
19974 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomlew", IX86_BUILTIN_PCOMLEW, LE, (int)MULTI_ARG_2_HI_CMP },
19975 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgtw", IX86_BUILTIN_PCOMGTW, GT, (int)MULTI_ARG_2_HI_CMP },
19976 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv8hi3, "__builtin_ia32_pcomgew", IX86_BUILTIN_PCOMGEW, GE, (int)MULTI_ARG_2_HI_CMP },
19978 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomeqd", IX86_BUILTIN_PCOMEQD, EQ, (int)MULTI_ARG_2_SI_CMP },
19979 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomned", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
19980 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomneqd", IX86_BUILTIN_PCOMNED, NE, (int)MULTI_ARG_2_SI_CMP },
19981 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomltd", IX86_BUILTIN_PCOMLTD, LT, (int)MULTI_ARG_2_SI_CMP },
19982 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomled", IX86_BUILTIN_PCOMLED, LE, (int)MULTI_ARG_2_SI_CMP },
19983 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomgtd", IX86_BUILTIN_PCOMGTD, GT, (int)MULTI_ARG_2_SI_CMP },
19984 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv4si3, "__builtin_ia32_pcomged", IX86_BUILTIN_PCOMGED, GE, (int)MULTI_ARG_2_SI_CMP },
19986 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomeqq", IX86_BUILTIN_PCOMEQQ, EQ, (int)MULTI_ARG_2_DI_CMP },
19987 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
19988 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomneqq", IX86_BUILTIN_PCOMNEQ, NE, (int)MULTI_ARG_2_DI_CMP },
19989 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomltq", IX86_BUILTIN_PCOMLTQ, LT, (int)MULTI_ARG_2_DI_CMP },
19990 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomleq", IX86_BUILTIN_PCOMLEQ, LE, (int)MULTI_ARG_2_DI_CMP },
19991 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgtq", IX86_BUILTIN_PCOMGTQ, GT, (int)MULTI_ARG_2_DI_CMP },
19992 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmpv2di3, "__builtin_ia32_pcomgeq", IX86_BUILTIN_PCOMGEQ, GE, (int)MULTI_ARG_2_DI_CMP },
19994 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomequb", IX86_BUILTIN_PCOMEQUB, EQ, (int)MULTI_ARG_2_QI_CMP },
19995 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomneub", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
19996 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v16qi3,"__builtin_ia32_pcomnequb", IX86_BUILTIN_PCOMNEUB, NE, (int)MULTI_ARG_2_QI_CMP },
19997 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomltub", IX86_BUILTIN_PCOMLTUB, LTU, (int)MULTI_ARG_2_QI_CMP },
19998 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomleub", IX86_BUILTIN_PCOMLEUB, LEU, (int)MULTI_ARG_2_QI_CMP },
19999 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgtub", IX86_BUILTIN_PCOMGTUB, GTU, (int)MULTI_ARG_2_QI_CMP },
20000 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv16qi3, "__builtin_ia32_pcomgeub", IX86_BUILTIN_PCOMGEUB, GEU, (int)MULTI_ARG_2_QI_CMP },
20002 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomequw", IX86_BUILTIN_PCOMEQUW, EQ, (int)MULTI_ARG_2_HI_CMP },
20003 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomneuw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
20004 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v8hi3, "__builtin_ia32_pcomnequw", IX86_BUILTIN_PCOMNEUW, NE, (int)MULTI_ARG_2_HI_CMP },
20005 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomltuw", IX86_BUILTIN_PCOMLTUW, LTU, (int)MULTI_ARG_2_HI_CMP },
20006 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomleuw", IX86_BUILTIN_PCOMLEUW, LEU, (int)MULTI_ARG_2_HI_CMP },
20007 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgtuw", IX86_BUILTIN_PCOMGTUW, GTU, (int)MULTI_ARG_2_HI_CMP },
20008 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv8hi3, "__builtin_ia32_pcomgeuw", IX86_BUILTIN_PCOMGEUW, GEU, (int)MULTI_ARG_2_HI_CMP },
20010 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomequd", IX86_BUILTIN_PCOMEQUD, EQ, (int)MULTI_ARG_2_SI_CMP },
20011 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomneud", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
20012 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v4si3, "__builtin_ia32_pcomnequd", IX86_BUILTIN_PCOMNEUD, NE, (int)MULTI_ARG_2_SI_CMP },
20013 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomltud", IX86_BUILTIN_PCOMLTUD, LTU, (int)MULTI_ARG_2_SI_CMP },
20014 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomleud", IX86_BUILTIN_PCOMLEUD, LEU, (int)MULTI_ARG_2_SI_CMP },
20015 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgtud", IX86_BUILTIN_PCOMGTUD, GTU, (int)MULTI_ARG_2_SI_CMP },
20016 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv4si3, "__builtin_ia32_pcomgeud", IX86_BUILTIN_PCOMGEUD, GEU, (int)MULTI_ARG_2_SI_CMP },
20018 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomequq", IX86_BUILTIN_PCOMEQUQ, EQ, (int)MULTI_ARG_2_DI_CMP },
20019 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomneuq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
20020 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_uns2v2di3, "__builtin_ia32_pcomnequq", IX86_BUILTIN_PCOMNEUQ, NE, (int)MULTI_ARG_2_DI_CMP },
20021 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomltuq", IX86_BUILTIN_PCOMLTUQ, LTU, (int)MULTI_ARG_2_DI_CMP },
20022 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomleuq", IX86_BUILTIN_PCOMLEUQ, LEU, (int)MULTI_ARG_2_DI_CMP },
20023 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgtuq", IX86_BUILTIN_PCOMGTUQ, GTU, (int)MULTI_ARG_2_DI_CMP },
20024 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_maskcmp_unsv2di3, "__builtin_ia32_pcomgeuq", IX86_BUILTIN_PCOMGEUQ, GEU, (int)MULTI_ARG_2_DI_CMP },
20026 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalsess", IX86_BUILTIN_COMFALSESS, COM_FALSE_S, (int)MULTI_ARG_2_SF_TF },
20027 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtruess", IX86_BUILTIN_COMTRUESS, COM_TRUE_S, (int)MULTI_ARG_2_SF_TF },
20028 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comfalseps", IX86_BUILTIN_COMFALSEPS, COM_FALSE_P, (int)MULTI_ARG_2_SF_TF },
20029 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv4sf3, "__builtin_ia32_comtrueps", IX86_BUILTIN_COMTRUEPS, COM_TRUE_P, (int)MULTI_ARG_2_SF_TF },
20030 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsesd", IX86_BUILTIN_COMFALSESD, COM_FALSE_S, (int)MULTI_ARG_2_DF_TF },
20031 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruesd", IX86_BUILTIN_COMTRUESD, COM_TRUE_S, (int)MULTI_ARG_2_DF_TF },
20032 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comfalsepd", IX86_BUILTIN_COMFALSEPD, COM_FALSE_P, (int)MULTI_ARG_2_DF_TF },
20033 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_com_tfv2df3, "__builtin_ia32_comtruepd", IX86_BUILTIN_COMTRUEPD, COM_TRUE_P, (int)MULTI_ARG_2_DF_TF },
20035 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseb", IX86_BUILTIN_PCOMFALSEB, PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
20036 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalsew", IX86_BUILTIN_PCOMFALSEW, PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
20037 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalsed", IX86_BUILTIN_PCOMFALSED, PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
20038 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseq", IX86_BUILTIN_PCOMFALSEQ, PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
20039 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomfalseub",IX86_BUILTIN_PCOMFALSEUB,PCOM_FALSE, (int)MULTI_ARG_2_QI_TF },
20040 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomfalseuw",IX86_BUILTIN_PCOMFALSEUW,PCOM_FALSE, (int)MULTI_ARG_2_HI_TF },
20041 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomfalseud",IX86_BUILTIN_PCOMFALSEUD,PCOM_FALSE, (int)MULTI_ARG_2_SI_TF },
20042 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomfalseuq",IX86_BUILTIN_PCOMFALSEUQ,PCOM_FALSE, (int)MULTI_ARG_2_DI_TF },
20044 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueb", IX86_BUILTIN_PCOMTRUEB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
20045 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtruew", IX86_BUILTIN_PCOMTRUEW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
20046 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrued", IX86_BUILTIN_PCOMTRUED, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
20047 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueq", IX86_BUILTIN_PCOMTRUEQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
20048 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv16qi3, "__builtin_ia32_pcomtrueub", IX86_BUILTIN_PCOMTRUEUB, PCOM_TRUE, (int)MULTI_ARG_2_QI_TF },
20049 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv8hi3, "__builtin_ia32_pcomtrueuw", IX86_BUILTIN_PCOMTRUEUW, PCOM_TRUE, (int)MULTI_ARG_2_HI_TF },
20050 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv4si3, "__builtin_ia32_pcomtrueud", IX86_BUILTIN_PCOMTRUEUD, PCOM_TRUE, (int)MULTI_ARG_2_SI_TF },
20051 { OPTION_MASK_ISA_SSE5, CODE_FOR_sse5_pcom_tfv2di3, "__builtin_ia32_pcomtrueuq", IX86_BUILTIN_PCOMTRUEUQ, PCOM_TRUE, (int)MULTI_ARG_2_DI_TF },
20052 };
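/* Each row above is { ISA mask, insn code, builtin name, builtin enum,
   comparison/sub-code, argument signature }.  Note that several spellings
   can share one IX86_BUILTIN_* code: "__builtin_ia32_comneqss" is simply
   an alias for the COMNESS entry, and likewise for the other *neq* names.  */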
20054 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
20055 not in the current target ISA, so that the user can compile particular
20056 modules with target-specific options that differ from the command-line
20057 options. */
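/* A sketch of the scenario this enables (file names are hypothetical):

     one.c, compiled with -msse4.1:
       #include <smmintrin.h>
       __m128i f (__m128i x, __m128i y) { return _mm_mullo_epi32 (x, y); }

     two.c, compiled with plain -msse2, never referencing SSE4.1 names.

   Both units are handled by the same compiler, so every builtin tree must
   exist up front; the per-entry ISA mask then gates whether a given use
   is accepted.  */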
20058 static void
20059 ix86_init_mmx_sse_builtins (void)
20060 {
20061 const struct builtin_description * d;
20062 size_t i;
20064 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
20065 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
20066 tree V1DI_type_node
20067 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
20068 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
20069 tree V2DI_type_node
20070 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
20071 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
20072 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
20073 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
20074 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
20075 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
20076 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
20078 tree pchar_type_node = build_pointer_type (char_type_node);
20079 tree pcchar_type_node
20080 = build_pointer_type (build_type_variant (char_type_node, 1, 0));
20081 tree pfloat_type_node = build_pointer_type (float_type_node);
20082 tree pcfloat_type_node
20083 = build_pointer_type (build_type_variant (float_type_node, 1, 0));
20084 tree pv2sf_type_node = build_pointer_type (V2SF_type_node);
20085 tree pcv2sf_type_node
20086 = build_pointer_type (build_type_variant (V2SF_type_node, 1, 0));
20087 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
20088 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
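/* Convention for the type nodes built below: a tree named
   RET_ftype_ARG1_ARG2 is the function type "RET (ARG1, ARG2)", created
   once here and shared by every builtin with that signature.  */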
20090 /* Comparisons. */
20091 tree int_ftype_v4sf_v4sf
20092 = build_function_type_list (integer_type_node,
20093 V4SF_type_node, V4SF_type_node, NULL_TREE);
20094 tree v4si_ftype_v4sf_v4sf
20095 = build_function_type_list (V4SI_type_node,
20096 V4SF_type_node, V4SF_type_node, NULL_TREE);
20097 /* MMX/SSE/integer conversions. */
20098 tree int_ftype_v4sf
20099 = build_function_type_list (integer_type_node,
20100 V4SF_type_node, NULL_TREE);
20101 tree int64_ftype_v4sf
20102 = build_function_type_list (long_long_integer_type_node,
20103 V4SF_type_node, NULL_TREE);
20104 tree int_ftype_v8qi
20105 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
20106 tree v4sf_ftype_v4sf_int
20107 = build_function_type_list (V4SF_type_node,
20108 V4SF_type_node, integer_type_node, NULL_TREE);
20109 tree v4sf_ftype_v4sf_int64
20110 = build_function_type_list (V4SF_type_node,
20111 V4SF_type_node, long_long_integer_type_node,
20112 NULL_TREE);
20113 tree v4sf_ftype_v4sf_v2si
20114 = build_function_type_list (V4SF_type_node,
20115 V4SF_type_node, V2SI_type_node, NULL_TREE);
20117 /* Miscellaneous. */
20118 tree v8qi_ftype_v4hi_v4hi
20119 = build_function_type_list (V8QI_type_node,
20120 V4HI_type_node, V4HI_type_node, NULL_TREE);
20121 tree v4hi_ftype_v2si_v2si
20122 = build_function_type_list (V4HI_type_node,
20123 V2SI_type_node, V2SI_type_node, NULL_TREE);
20124 tree v4sf_ftype_v4sf_v4sf_int
20125 = build_function_type_list (V4SF_type_node,
20126 V4SF_type_node, V4SF_type_node,
20127 integer_type_node, NULL_TREE);
20128 tree v2si_ftype_v4hi_v4hi
20129 = build_function_type_list (V2SI_type_node,
20130 V4HI_type_node, V4HI_type_node, NULL_TREE);
20131 tree v4hi_ftype_v4hi_int
20132 = build_function_type_list (V4HI_type_node,
20133 V4HI_type_node, integer_type_node, NULL_TREE);
20134 tree v2si_ftype_v2si_int
20135 = build_function_type_list (V2SI_type_node,
20136 V2SI_type_node, integer_type_node, NULL_TREE);
20137 tree v1di_ftype_v1di_int
20138 = build_function_type_list (V1DI_type_node,
20139 V1DI_type_node, integer_type_node, NULL_TREE);
20141 tree void_ftype_void
20142 = build_function_type (void_type_node, void_list_node);
20143 tree void_ftype_unsigned
20144 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
20145 tree void_ftype_unsigned_unsigned
20146 = build_function_type_list (void_type_node, unsigned_type_node,
20147 unsigned_type_node, NULL_TREE);
20148 tree void_ftype_pcvoid_unsigned_unsigned
20149 = build_function_type_list (void_type_node, const_ptr_type_node,
20150 unsigned_type_node, unsigned_type_node,
20151 NULL_TREE);
20152 tree unsigned_ftype_void
20153 = build_function_type (unsigned_type_node, void_list_node);
20154 tree v2si_ftype_v4sf
20155 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
20156 /* Loads/stores. */
20157 tree void_ftype_v8qi_v8qi_pchar
20158 = build_function_type_list (void_type_node,
20159 V8QI_type_node, V8QI_type_node,
20160 pchar_type_node, NULL_TREE);
20161 tree v4sf_ftype_pcfloat
20162 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
20163 tree v4sf_ftype_v4sf_pcv2sf
20164 = build_function_type_list (V4SF_type_node,
20165 V4SF_type_node, pcv2sf_type_node, NULL_TREE);
20166 tree void_ftype_pv2sf_v4sf
20167 = build_function_type_list (void_type_node,
20168 pv2sf_type_node, V4SF_type_node, NULL_TREE);
20169 tree void_ftype_pfloat_v4sf
20170 = build_function_type_list (void_type_node,
20171 pfloat_type_node, V4SF_type_node, NULL_TREE);
20172 tree void_ftype_pdi_di
20173 = build_function_type_list (void_type_node,
20174 pdi_type_node, long_long_unsigned_type_node,
20175 NULL_TREE);
20176 tree void_ftype_pv2di_v2di
20177 = build_function_type_list (void_type_node,
20178 pv2di_type_node, V2DI_type_node, NULL_TREE);
20179 /* Normal vector unops. */
20180 tree v4sf_ftype_v4sf
20181 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
20182 tree v16qi_ftype_v16qi
20183 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
20184 tree v8hi_ftype_v8hi
20185 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
20186 tree v4si_ftype_v4si
20187 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
20188 tree v8qi_ftype_v8qi
20189 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
20190 tree v4hi_ftype_v4hi
20191 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
20193 /* Normal vector binops. */
20194 tree v4sf_ftype_v4sf_v4sf
20195 = build_function_type_list (V4SF_type_node,
20196 V4SF_type_node, V4SF_type_node, NULL_TREE);
20197 tree v8qi_ftype_v8qi_v8qi
20198 = build_function_type_list (V8QI_type_node,
20199 V8QI_type_node, V8QI_type_node, NULL_TREE);
20200 tree v4hi_ftype_v4hi_v4hi
20201 = build_function_type_list (V4HI_type_node,
20202 V4HI_type_node, V4HI_type_node, NULL_TREE);
20203 tree v2si_ftype_v2si_v2si
20204 = build_function_type_list (V2SI_type_node,
20205 V2SI_type_node, V2SI_type_node, NULL_TREE);
20206 tree v1di_ftype_v1di_v1di
20207 = build_function_type_list (V1DI_type_node,
20208 V1DI_type_node, V1DI_type_node, NULL_TREE);
20209 tree v1di_ftype_v1di_v1di_int
20210 = build_function_type_list (V1DI_type_node,
20211 V1DI_type_node, V1DI_type_node,
20212 integer_type_node, NULL_TREE);
20213 tree v2si_ftype_v2sf
20214 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
20215 tree v2sf_ftype_v2si
20216 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
20217 tree v2si_ftype_v2si
20218 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
20219 tree v2sf_ftype_v2sf
20220 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
20221 tree v2sf_ftype_v2sf_v2sf
20222 = build_function_type_list (V2SF_type_node,
20223 V2SF_type_node, V2SF_type_node, NULL_TREE);
20224 tree v2si_ftype_v2sf_v2sf
20225 = build_function_type_list (V2SI_type_node,
20226 V2SF_type_node, V2SF_type_node, NULL_TREE);
20227 tree pint_type_node = build_pointer_type (integer_type_node);
20228 tree pdouble_type_node = build_pointer_type (double_type_node);
20229 tree pcdouble_type_node = build_pointer_type (
20230 build_type_variant (double_type_node, 1, 0));
20231 tree int_ftype_v2df_v2df
20232 = build_function_type_list (integer_type_node,
20233 V2DF_type_node, V2DF_type_node, NULL_TREE);
20235 tree void_ftype_pcvoid
20236 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
20237 tree v4sf_ftype_v4si
20238 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
20239 tree v4si_ftype_v4sf
20240 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
20241 tree v2df_ftype_v4si
20242 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
20243 tree v4si_ftype_v2df
20244 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
20245 tree v4si_ftype_v2df_v2df
20246 = build_function_type_list (V4SI_type_node,
20247 V2DF_type_node, V2DF_type_node, NULL_TREE);
20248 tree v2si_ftype_v2df
20249 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
20250 tree v4sf_ftype_v2df
20251 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
20252 tree v2df_ftype_v2si
20253 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
20254 tree v2df_ftype_v4sf
20255 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
20256 tree int_ftype_v2df
20257 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
20258 tree int64_ftype_v2df
20259 = build_function_type_list (long_long_integer_type_node,
20260 V2DF_type_node, NULL_TREE);
20261 tree v2df_ftype_v2df_int
20262 = build_function_type_list (V2DF_type_node,
20263 V2DF_type_node, integer_type_node, NULL_TREE);
20264 tree v2df_ftype_v2df_int64
20265 = build_function_type_list (V2DF_type_node,
20266 V2DF_type_node, long_long_integer_type_node,
20267 NULL_TREE);
20268 tree v4sf_ftype_v4sf_v2df
20269 = build_function_type_list (V4SF_type_node,
20270 V4SF_type_node, V2DF_type_node, NULL_TREE);
20271 tree v2df_ftype_v2df_v4sf
20272 = build_function_type_list (V2DF_type_node,
20273 V2DF_type_node, V4SF_type_node, NULL_TREE);
20274 tree v2df_ftype_v2df_v2df_int
20275 = build_function_type_list (V2DF_type_node,
20276 V2DF_type_node, V2DF_type_node,
20277 integer_type_node,
20278 NULL_TREE);
20279 tree v2df_ftype_v2df_pcdouble
20280 = build_function_type_list (V2DF_type_node,
20281 V2DF_type_node, pcdouble_type_node, NULL_TREE);
20282 tree void_ftype_pdouble_v2df
20283 = build_function_type_list (void_type_node,
20284 pdouble_type_node, V2DF_type_node, NULL_TREE);
20285 tree void_ftype_pint_int
20286 = build_function_type_list (void_type_node,
20287 pint_type_node, integer_type_node, NULL_TREE);
20288 tree void_ftype_v16qi_v16qi_pchar
20289 = build_function_type_list (void_type_node,
20290 V16QI_type_node, V16QI_type_node,
20291 pchar_type_node, NULL_TREE);
20292 tree v2df_ftype_pcdouble
20293 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
20294 tree v2df_ftype_v2df_v2df
20295 = build_function_type_list (V2DF_type_node,
20296 V2DF_type_node, V2DF_type_node, NULL_TREE);
20297 tree v16qi_ftype_v16qi_v16qi
20298 = build_function_type_list (V16QI_type_node,
20299 V16QI_type_node, V16QI_type_node, NULL_TREE);
20300 tree v8hi_ftype_v8hi_v8hi
20301 = build_function_type_list (V8HI_type_node,
20302 V8HI_type_node, V8HI_type_node, NULL_TREE);
20303 tree v4si_ftype_v4si_v4si
20304 = build_function_type_list (V4SI_type_node,
20305 V4SI_type_node, V4SI_type_node, NULL_TREE);
20306 tree v2di_ftype_v2di_v2di
20307 = build_function_type_list (V2DI_type_node,
20308 V2DI_type_node, V2DI_type_node, NULL_TREE);
20309 tree v2di_ftype_v2df_v2df
20310 = build_function_type_list (V2DI_type_node,
20311 V2DF_type_node, V2DF_type_node, NULL_TREE);
20312 tree v2df_ftype_v2df
20313 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
20314 tree v2di_ftype_v2di_int
20315 = build_function_type_list (V2DI_type_node,
20316 V2DI_type_node, integer_type_node, NULL_TREE);
20317 tree v2di_ftype_v2di_v2di_int
20318 = build_function_type_list (V2DI_type_node, V2DI_type_node,
20319 V2DI_type_node, integer_type_node, NULL_TREE);
20320 tree v4si_ftype_v4si_int
20321 = build_function_type_list (V4SI_type_node,
20322 V4SI_type_node, integer_type_node, NULL_TREE);
20323 tree v8hi_ftype_v8hi_int
20324 = build_function_type_list (V8HI_type_node,
20325 V8HI_type_node, integer_type_node, NULL_TREE);
20326 tree v4si_ftype_v8hi_v8hi
20327 = build_function_type_list (V4SI_type_node,
20328 V8HI_type_node, V8HI_type_node, NULL_TREE);
20329 tree v1di_ftype_v8qi_v8qi
20330 = build_function_type_list (V1DI_type_node,
20331 V8QI_type_node, V8QI_type_node, NULL_TREE);
20332 tree v1di_ftype_v2si_v2si
20333 = build_function_type_list (V1DI_type_node,
20334 V2SI_type_node, V2SI_type_node, NULL_TREE);
20335 tree v2di_ftype_v16qi_v16qi
20336 = build_function_type_list (V2DI_type_node,
20337 V16QI_type_node, V16QI_type_node, NULL_TREE);
20338 tree v2di_ftype_v4si_v4si
20339 = build_function_type_list (V2DI_type_node,
20340 V4SI_type_node, V4SI_type_node, NULL_TREE);
20341 tree int_ftype_v16qi
20342 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
20343 tree v16qi_ftype_pcchar
20344 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
20345 tree void_ftype_pchar_v16qi
20346 = build_function_type_list (void_type_node,
20347 pchar_type_node, V16QI_type_node, NULL_TREE);
20349 tree v2di_ftype_v2di_unsigned_unsigned
20350 = build_function_type_list (V2DI_type_node, V2DI_type_node,
20351 unsigned_type_node, unsigned_type_node,
20352 NULL_TREE);
20353 tree v2di_ftype_v2di_v2di_unsigned_unsigned
20354 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
20355 unsigned_type_node, unsigned_type_node,
20356 NULL_TREE);
20357 tree v2di_ftype_v2di_v16qi
20358 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
20359 NULL_TREE);
20360 tree v2df_ftype_v2df_v2df_v2df
20361 = build_function_type_list (V2DF_type_node,
20362 V2DF_type_node, V2DF_type_node,
20363 V2DF_type_node, NULL_TREE);
20364 tree v4sf_ftype_v4sf_v4sf_v4sf
20365 = build_function_type_list (V4SF_type_node,
20366 V4SF_type_node, V4SF_type_node,
20367 V4SF_type_node, NULL_TREE);
20368 tree v8hi_ftype_v16qi
20369 = build_function_type_list (V8HI_type_node, V16QI_type_node,
20370 NULL_TREE);
20371 tree v4si_ftype_v16qi
20372 = build_function_type_list (V4SI_type_node, V16QI_type_node,
20373 NULL_TREE);
20374 tree v2di_ftype_v16qi
20375 = build_function_type_list (V2DI_type_node, V16QI_type_node,
20376 NULL_TREE);
20377 tree v4si_ftype_v8hi
20378 = build_function_type_list (V4SI_type_node, V8HI_type_node,
20379 NULL_TREE);
20380 tree v2di_ftype_v8hi
20381 = build_function_type_list (V2DI_type_node, V8HI_type_node,
20382 NULL_TREE);
20383 tree v2di_ftype_v4si
20384 = build_function_type_list (V2DI_type_node, V4SI_type_node,
20385 NULL_TREE);
20386 tree v2di_ftype_pv2di
20387 = build_function_type_list (V2DI_type_node, pv2di_type_node,
20388 NULL_TREE);
20389 tree v16qi_ftype_v16qi_v16qi_int
20390 = build_function_type_list (V16QI_type_node, V16QI_type_node,
20391 V16QI_type_node, integer_type_node,
20392 NULL_TREE);
20393 tree v16qi_ftype_v16qi_v16qi_v16qi
20394 = build_function_type_list (V16QI_type_node, V16QI_type_node,
20395 V16QI_type_node, V16QI_type_node,
20396 NULL_TREE);
20397 tree v8hi_ftype_v8hi_v8hi_int
20398 = build_function_type_list (V8HI_type_node, V8HI_type_node,
20399 V8HI_type_node, integer_type_node,
20400 NULL_TREE);
20401 tree v4si_ftype_v4si_v4si_int
20402 = build_function_type_list (V4SI_type_node, V4SI_type_node,
20403 V4SI_type_node, integer_type_node,
20404 NULL_TREE);
20405 tree int_ftype_v2di_v2di
20406 = build_function_type_list (integer_type_node,
20407 V2DI_type_node, V2DI_type_node,
20408 NULL_TREE);
20409 tree int_ftype_v16qi_int_v16qi_int_int
20410 = build_function_type_list (integer_type_node,
20411 V16QI_type_node,
20412 integer_type_node,
20413 V16QI_type_node,
20414 integer_type_node,
20415 integer_type_node,
20416 NULL_TREE);
20417 tree v16qi_ftype_v16qi_int_v16qi_int_int
20418 = build_function_type_list (V16QI_type_node,
20419 V16QI_type_node,
20420 integer_type_node,
20421 V16QI_type_node,
20422 integer_type_node,
20423 integer_type_node,
20424 NULL_TREE);
20425 tree int_ftype_v16qi_v16qi_int
20426 = build_function_type_list (integer_type_node,
20427 V16QI_type_node,
20428 V16QI_type_node,
20429 integer_type_node,
20430 NULL_TREE);
20432 /* SSE5 instructions */
20433 tree v2di_ftype_v2di_v2di_v2di
20434 = build_function_type_list (V2DI_type_node,
20435 V2DI_type_node,
20436 V2DI_type_node,
20437 V2DI_type_node,
20438 NULL_TREE);
20440 tree v4si_ftype_v4si_v4si_v4si
20441 = build_function_type_list (V4SI_type_node,
20442 V4SI_type_node,
20443 V4SI_type_node,
20444 V4SI_type_node,
20445 NULL_TREE);
20447 tree v4si_ftype_v4si_v4si_v2di
20448 = build_function_type_list (V4SI_type_node,
20449 V4SI_type_node,
20450 V4SI_type_node,
20451 V2DI_type_node,
20452 NULL_TREE);
20454 tree v8hi_ftype_v8hi_v8hi_v8hi
20455 = build_function_type_list (V8HI_type_node,
20456 V8HI_type_node,
20457 V8HI_type_node,
20458 V8HI_type_node,
20459 NULL_TREE);
20461 tree v8hi_ftype_v8hi_v8hi_v4si
20462 = build_function_type_list (V8HI_type_node,
20463 V8HI_type_node,
20464 V8HI_type_node,
20465 V4SI_type_node,
20466 NULL_TREE);
20468 tree v2df_ftype_v2df_v2df_v16qi
20469 = build_function_type_list (V2DF_type_node,
20470 V2DF_type_node,
20471 V2DF_type_node,
20472 V16QI_type_node,
20473 NULL_TREE);
20475 tree v4sf_ftype_v4sf_v4sf_v16qi
20476 = build_function_type_list (V4SF_type_node,
20477 V4SF_type_node,
20478 V4SF_type_node,
20479 V16QI_type_node,
20480 NULL_TREE);
20482 tree v2di_ftype_v2di_si
20483 = build_function_type_list (V2DI_type_node,
20484 V2DI_type_node,
20485 integer_type_node,
20486 NULL_TREE);
20488 tree v4si_ftype_v4si_si
20489 = build_function_type_list (V4SI_type_node,
20490 V4SI_type_node,
20491 integer_type_node,
20492 NULL_TREE);
20494 tree v8hi_ftype_v8hi_si
20495 = build_function_type_list (V8HI_type_node,
20496 V8HI_type_node,
20497 integer_type_node,
20498 NULL_TREE);
20500 tree v16qi_ftype_v16qi_si
20501 = build_function_type_list (V16QI_type_node,
20502 V16QI_type_node,
20503 integer_type_node,
20504 NULL_TREE);
20505 tree v4sf_ftype_v4hi
20506 = build_function_type_list (V4SF_type_node,
20507 V4HI_type_node,
20508 NULL_TREE);
20510 tree v4hi_ftype_v4sf
20511 = build_function_type_list (V4HI_type_node,
20512 V4SF_type_node,
20513 NULL_TREE);
20515 tree v2di_ftype_v2di
20516 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
20518 tree v16qi_ftype_v8hi_v8hi
20519 = build_function_type_list (V16QI_type_node,
20520 V8HI_type_node, V8HI_type_node,
20521 NULL_TREE);
20522 tree v8hi_ftype_v4si_v4si
20523 = build_function_type_list (V8HI_type_node,
20524 V4SI_type_node, V4SI_type_node,
20525 NULL_TREE);
20526 tree v8hi_ftype_v16qi_v16qi
20527 = build_function_type_list (V8HI_type_node,
20528 V16QI_type_node, V16QI_type_node,
20529 NULL_TREE);
20530 tree v4hi_ftype_v8qi_v8qi
20531 = build_function_type_list (V4HI_type_node,
20532 V8QI_type_node, V8QI_type_node,
20533 NULL_TREE);
20534 tree unsigned_ftype_unsigned_uchar
20535 = build_function_type_list (unsigned_type_node,
20536 unsigned_type_node,
20537 unsigned_char_type_node,
20538 NULL_TREE);
20539 tree unsigned_ftype_unsigned_ushort
20540 = build_function_type_list (unsigned_type_node,
20541 unsigned_type_node,
20542 short_unsigned_type_node,
20543 NULL_TREE);
20544 tree unsigned_ftype_unsigned_unsigned
20545 = build_function_type_list (unsigned_type_node,
20546 unsigned_type_node,
20547 unsigned_type_node,
20548 NULL_TREE);
20549 tree uint64_ftype_uint64_uint64
20550 = build_function_type_list (long_long_unsigned_type_node,
20551 long_long_unsigned_type_node,
20552 long_long_unsigned_type_node,
20553 NULL_TREE);
20554 tree float_ftype_float
20555 = build_function_type_list (float_type_node,
20556 float_type_node,
20557 NULL_TREE);
20559 tree ftype;
20561 /* Add all special builtins with variable number of operands. */
20562 for (i = 0, d = bdesc_special_args;
20563 i < ARRAY_SIZE (bdesc_special_args);
20564 i++, d++)
20565 {
20566 tree type;
20568 if (d->name == 0)
20569 continue;
20571 switch ((enum ix86_special_builtin_type) d->flag)
20572 {
20573 case VOID_FTYPE_VOID:
20574 type = void_ftype_void;
20575 break;
20576 case V16QI_FTYPE_PCCHAR:
20577 type = v16qi_ftype_pcchar;
20578 break;
20579 case V4SF_FTYPE_PCFLOAT:
20580 type = v4sf_ftype_pcfloat;
20581 break;
20582 case V2DI_FTYPE_PV2DI:
20583 type = v2di_ftype_pv2di;
20584 break;
20585 case V2DF_FTYPE_PCDOUBLE:
20586 type = v2df_ftype_pcdouble;
20587 break;
20588 case V4SF_FTYPE_V4SF_PCV2SF:
20589 type = v4sf_ftype_v4sf_pcv2sf;
20590 break;
20591 case V2DF_FTYPE_V2DF_PCDOUBLE:
20592 type = v2df_ftype_v2df_pcdouble;
20593 break;
20594 case VOID_FTYPE_PV2SF_V4SF:
20595 type = void_ftype_pv2sf_v4sf;
20596 break;
20597 case VOID_FTYPE_PV2DI_V2DI:
20598 type = void_ftype_pv2di_v2di;
20599 break;
20600 case VOID_FTYPE_PCHAR_V16QI:
20601 type = void_ftype_pchar_v16qi;
20602 break;
20603 case VOID_FTYPE_PFLOAT_V4SF:
20604 type = void_ftype_pfloat_v4sf;
20605 break;
20606 case VOID_FTYPE_PDOUBLE_V2DF:
20607 type = void_ftype_pdouble_v2df;
20608 break;
20609 case VOID_FTYPE_PDI_DI:
20610 type = void_ftype_pdi_di;
20611 break;
20612 case VOID_FTYPE_PINT_INT:
20613 type = void_ftype_pint_int;
20614 break;
20615 default:
20616 gcc_unreachable ();
20617 }
20619 def_builtin (d->mask, d->name, type, d->code);
20620 }
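/* The same d->flag field is decoded against a different enum for each
   table: ix86_special_builtin_type above, ix86_builtin_type in the loop
   that follows, and multi_arg_type for bdesc_multi_arg.  */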
20622 /* Add all builtins with variable number of operands. */
20623 for (i = 0, d = bdesc_args;
20624 i < ARRAY_SIZE (bdesc_args);
20625 i++, d++)
20626 {
20627 tree type;
20629 if (d->name == 0)
20630 continue;
20632 switch ((enum ix86_builtin_type) d->flag)
20633 {
20634 case FLOAT_FTYPE_FLOAT:
20635 type = float_ftype_float;
20636 break;
20637 case INT_FTYPE_V2DI_V2DI_PTEST:
20638 type = int_ftype_v2di_v2di;
20639 break;
20640 case INT64_FTYPE_V4SF:
20641 type = int64_ftype_v4sf;
20642 break;
20643 case INT64_FTYPE_V2DF:
20644 type = int64_ftype_v2df;
20645 break;
20646 case INT_FTYPE_V16QI:
20647 type = int_ftype_v16qi;
20648 break;
20649 case INT_FTYPE_V8QI:
20650 type = int_ftype_v8qi;
20651 break;
20652 case INT_FTYPE_V4SF:
20653 type = int_ftype_v4sf;
20654 break;
20655 case INT_FTYPE_V2DF:
20656 type = int_ftype_v2df;
20657 break;
20658 case V16QI_FTYPE_V16QI:
20659 type = v16qi_ftype_v16qi;
20660 break;
20661 case V8HI_FTYPE_V8HI:
20662 type = v8hi_ftype_v8hi;
20663 break;
20664 case V8HI_FTYPE_V16QI:
20665 type = v8hi_ftype_v16qi;
20666 break;
20667 case V8QI_FTYPE_V8QI:
20668 type = v8qi_ftype_v8qi;
20669 break;
20670 case V4SI_FTYPE_V4SI:
20671 type = v4si_ftype_v4si;
20672 break;
20673 case V4SI_FTYPE_V16QI:
20674 type = v4si_ftype_v16qi;
20675 break;
20676 case V4SI_FTYPE_V8HI:
20677 type = v4si_ftype_v8hi;
20678 break;
20679 case V4SI_FTYPE_V4SF:
20680 type = v4si_ftype_v4sf;
20681 break;
20682 case V4SI_FTYPE_V2DF:
20683 type = v4si_ftype_v2df;
20684 break;
20685 case V4HI_FTYPE_V4HI:
20686 type = v4hi_ftype_v4hi;
20687 break;
20688 case V4SF_FTYPE_V4SF:
20689 case V4SF_FTYPE_V4SF_VEC_MERGE:
20690 type = v4sf_ftype_v4sf;
20691 break;
20692 case V4SF_FTYPE_V4SI:
20693 type = v4sf_ftype_v4si;
20694 break;
20695 case V4SF_FTYPE_V2DF:
20696 type = v4sf_ftype_v2df;
20697 break;
20698 case V2DI_FTYPE_V2DI:
20699 type = v2di_ftype_v2di;
20700 break;
20701 case V2DI_FTYPE_V16QI:
20702 type = v2di_ftype_v16qi;
20703 break;
20704 case V2DI_FTYPE_V8HI:
20705 type = v2di_ftype_v8hi;
20706 break;
20707 case V2DI_FTYPE_V4SI:
20708 type = v2di_ftype_v4si;
20709 break;
20710 case V2SI_FTYPE_V2SI:
20711 type = v2si_ftype_v2si;
20712 break;
20713 case V2SI_FTYPE_V4SF:
20714 type = v2si_ftype_v4sf;
20715 break;
20716 case V2SI_FTYPE_V2DF:
20717 type = v2si_ftype_v2df;
20718 break;
20719 case V2SI_FTYPE_V2SF:
20720 type = v2si_ftype_v2sf;
20721 break;
20722 case V2DF_FTYPE_V4SF:
20723 type = v2df_ftype_v4sf;
20724 break;
20725 case V2DF_FTYPE_V2DF:
20726 case V2DF_FTYPE_V2DF_VEC_MERGE:
20727 type = v2df_ftype_v2df;
20728 break;
20729 case V2DF_FTYPE_V2SI:
20730 type = v2df_ftype_v2si;
20731 break;
20732 case V2DF_FTYPE_V4SI:
20733 type = v2df_ftype_v4si;
20734 break;
20735 case V2SF_FTYPE_V2SF:
20736 type = v2sf_ftype_v2sf;
20737 break;
20738 case V2SF_FTYPE_V2SI:
20739 type = v2sf_ftype_v2si;
20740 break;
20741 case V16QI_FTYPE_V16QI_V16QI:
20742 type = v16qi_ftype_v16qi_v16qi;
20743 break;
20744 case V16QI_FTYPE_V8HI_V8HI:
20745 type = v16qi_ftype_v8hi_v8hi;
20746 break;
20747 case V8QI_FTYPE_V8QI_V8QI:
20748 type = v8qi_ftype_v8qi_v8qi;
20749 break;
20750 case V8QI_FTYPE_V4HI_V4HI:
20751 type = v8qi_ftype_v4hi_v4hi;
20752 break;
20753 case V8HI_FTYPE_V8HI_V8HI:
20754 case V8HI_FTYPE_V8HI_V8HI_COUNT:
20755 type = v8hi_ftype_v8hi_v8hi;
20756 break;
20757 case V8HI_FTYPE_V16QI_V16QI:
20758 type = v8hi_ftype_v16qi_v16qi;
20759 break;
20760 case V8HI_FTYPE_V4SI_V4SI:
20761 type = v8hi_ftype_v4si_v4si;
20762 break;
20763 case V8HI_FTYPE_V8HI_SI_COUNT:
20764 type = v8hi_ftype_v8hi_int;
20765 break;
20766 case V4SI_FTYPE_V4SI_V4SI:
20767 case V4SI_FTYPE_V4SI_V4SI_COUNT:
20768 type = v4si_ftype_v4si_v4si;
20769 break;
20770 case V4SI_FTYPE_V8HI_V8HI:
20771 type = v4si_ftype_v8hi_v8hi;
20772 break;
20773 case V4SI_FTYPE_V4SF_V4SF:
20774 type = v4si_ftype_v4sf_v4sf;
20775 break;
20776 case V4SI_FTYPE_V2DF_V2DF:
20777 type = v4si_ftype_v2df_v2df;
20778 break;
20779 case V4SI_FTYPE_V4SI_SI_COUNT:
20780 type = v4si_ftype_v4si_int;
20781 break;
20782 case V4HI_FTYPE_V4HI_V4HI:
20783 case V4HI_FTYPE_V4HI_V4HI_COUNT:
20784 type = v4hi_ftype_v4hi_v4hi;
20785 break;
20786 case V4HI_FTYPE_V8QI_V8QI:
20787 type = v4hi_ftype_v8qi_v8qi;
20788 break;
20789 case V4HI_FTYPE_V2SI_V2SI:
20790 type = v4hi_ftype_v2si_v2si;
20791 break;
20792 case V4HI_FTYPE_V4HI_SI_COUNT:
20793 type = v4hi_ftype_v4hi_int;
20794 break;
20795 case V4SF_FTYPE_V4SF_V4SF:
20796 case V4SF_FTYPE_V4SF_V4SF_SWAP:
20797 type = v4sf_ftype_v4sf_v4sf;
20798 break;
20799 case V4SF_FTYPE_V4SF_V2SI:
20800 type = v4sf_ftype_v4sf_v2si;
20801 break;
20802 case V4SF_FTYPE_V4SF_V2DF:
20803 type = v4sf_ftype_v4sf_v2df;
20804 break;
20805 case V4SF_FTYPE_V4SF_DI:
20806 type = v4sf_ftype_v4sf_int64;
20807 break;
20808 case V4SF_FTYPE_V4SF_SI:
20809 type = v4sf_ftype_v4sf_int;
20810 break;
20811 case V2DI_FTYPE_V2DI_V2DI:
20812 case V2DI_FTYPE_V2DI_V2DI_COUNT:
20813 type = v2di_ftype_v2di_v2di;
20814 break;
20815 case V2DI_FTYPE_V16QI_V16QI:
20816 type = v2di_ftype_v16qi_v16qi;
20817 break;
20818 case V2DI_FTYPE_V4SI_V4SI:
20819 type = v2di_ftype_v4si_v4si;
20820 break;
20821 case V2DI_FTYPE_V2DI_V16QI:
20822 type = v2di_ftype_v2di_v16qi;
20823 break;
20824 case V2DI_FTYPE_V2DF_V2DF:
20825 type = v2di_ftype_v2df_v2df;
20826 break;
20827 case V2DI_FTYPE_V2DI_SI_COUNT:
20828 type = v2di_ftype_v2di_int;
20829 break;
20830 case V2SI_FTYPE_V2SI_V2SI:
20831 case V2SI_FTYPE_V2SI_V2SI_COUNT:
20832 type = v2si_ftype_v2si_v2si;
20833 break;
20834 case V2SI_FTYPE_V4HI_V4HI:
20835 type = v2si_ftype_v4hi_v4hi;
20836 break;
20837 case V2SI_FTYPE_V2SF_V2SF:
20838 type = v2si_ftype_v2sf_v2sf;
20839 break;
20840 case V2SI_FTYPE_V2SI_SI_COUNT:
20841 type = v2si_ftype_v2si_int;
20842 break;
20843 case V2DF_FTYPE_V2DF_V2DF:
20844 case V2DF_FTYPE_V2DF_V2DF_SWAP:
20845 type = v2df_ftype_v2df_v2df;
20846 break;
20847 case V2DF_FTYPE_V2DF_V4SF:
20848 type = v2df_ftype_v2df_v4sf;
20849 break;
20850 case V2DF_FTYPE_V2DF_DI:
20851 type = v2df_ftype_v2df_int64;
20852 break;
20853 case V2DF_FTYPE_V2DF_SI:
20854 type = v2df_ftype_v2df_int;
20855 break;
20856 case V2SF_FTYPE_V2SF_V2SF:
20857 type = v2sf_ftype_v2sf_v2sf;
20858 break;
20859 case V1DI_FTYPE_V1DI_V1DI:
20860 case V1DI_FTYPE_V1DI_V1DI_COUNT:
20861 type = v1di_ftype_v1di_v1di;
20862 break;
20863 case V1DI_FTYPE_V8QI_V8QI:
20864 type = v1di_ftype_v8qi_v8qi;
20865 break;
20866 case V1DI_FTYPE_V2SI_V2SI:
20867 type = v1di_ftype_v2si_v2si;
20868 break;
20869 case V1DI_FTYPE_V1DI_SI_COUNT:
20870 type = v1di_ftype_v1di_int;
20871 break;
20872 case UINT64_FTYPE_UINT64_UINT64:
20873 type = uint64_ftype_uint64_uint64;
20874 break;
20875 case UINT_FTYPE_UINT_UINT:
20876 type = unsigned_ftype_unsigned_unsigned;
20877 break;
20878 case UINT_FTYPE_UINT_USHORT:
20879 type = unsigned_ftype_unsigned_ushort;
20880 break;
20881 case UINT_FTYPE_UINT_UCHAR:
20882 type = unsigned_ftype_unsigned_uchar;
20883 break;
20884 case V8HI_FTYPE_V8HI_INT:
20885 type = v8hi_ftype_v8hi_int;
20886 break;
20887 case V4SI_FTYPE_V4SI_INT:
20888 type = v4si_ftype_v4si_int;
20889 break;
20890 case V4HI_FTYPE_V4HI_INT:
20891 type = v4hi_ftype_v4hi_int;
20892 break;
20893 case V4SF_FTYPE_V4SF_INT:
20894 type = v4sf_ftype_v4sf_int;
20895 break;
20896 case V2DI_FTYPE_V2DI_INT:
20897 case V2DI2TI_FTYPE_V2DI_INT:
20898 type = v2di_ftype_v2di_int;
20899 break;
20900 case V2DF_FTYPE_V2DF_INT:
20901 type = v2df_ftype_v2df_int;
20902 break;
20903 case V16QI_FTYPE_V16QI_V16QI_V16QI:
20904 type = v16qi_ftype_v16qi_v16qi_v16qi;
20905 break;
20906 case V4SF_FTYPE_V4SF_V4SF_V4SF:
20907 type = v4sf_ftype_v4sf_v4sf_v4sf;
20908 break;
20909 case V2DF_FTYPE_V2DF_V2DF_V2DF:
20910 type = v2df_ftype_v2df_v2df_v2df;
20911 break;
20912 case V16QI_FTYPE_V16QI_V16QI_INT:
20913 type = v16qi_ftype_v16qi_v16qi_int;
20914 break;
20915 case V8HI_FTYPE_V8HI_V8HI_INT:
20916 type = v8hi_ftype_v8hi_v8hi_int;
20917 break;
20918 case V4SI_FTYPE_V4SI_V4SI_INT:
20919 type = v4si_ftype_v4si_v4si_int;
20920 break;
20921 case V4SF_FTYPE_V4SF_V4SF_INT:
20922 type = v4sf_ftype_v4sf_v4sf_int;
20923 break;
20924 case V2DI_FTYPE_V2DI_V2DI_INT:
20925 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
20926 type = v2di_ftype_v2di_v2di_int;
20927 break;
20928 case V2DF_FTYPE_V2DF_V2DF_INT:
20929 type = v2df_ftype_v2df_v2df_int;
20930 break;
20931 case V2DI_FTYPE_V2DI_UINT_UINT:
20932 type = v2di_ftype_v2di_unsigned_unsigned;
20933 break;
20934 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
20935 type = v2di_ftype_v2di_v2di_unsigned_unsigned;
20936 break;
20937 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
20938 type = v1di_ftype_v1di_v1di_int;
20939 break;
20940 default:
20941 gcc_unreachable ();
20944 def_builtin_const (d->mask, d->name, type, d->code);
20947 /* pcmpestr[im] insns. */
20948 for (i = 0, d = bdesc_pcmpestr;
20949 i < ARRAY_SIZE (bdesc_pcmpestr);
20950 i++, d++)
20952 if (d->code == IX86_BUILTIN_PCMPESTRM128)
20953 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
20954 else
20955 ftype = int_ftype_v16qi_int_v16qi_int_int;
20956 def_builtin_const (d->mask, d->name, ftype, d->code);
20959 /* pcmpistr[im] insns. */
20960 for (i = 0, d = bdesc_pcmpistr;
20961 i < ARRAY_SIZE (bdesc_pcmpistr);
20962 i++, d++)
20964 if (d->code == IX86_BUILTIN_PCMPISTRM128)
20965 ftype = v16qi_ftype_v16qi_v16qi_int;
20966 else
20967 ftype = int_ftype_v16qi_v16qi_int;
20968 def_builtin_const (d->mask, d->name, ftype, d->code);
20971 /* comi/ucomi insns. */
20972 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
20973 if (d->mask == OPTION_MASK_ISA_SSE2)
20974 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
20975 else
20976 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
20978 /* SSE */
20979 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
20980 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
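/* A user-level sketch (user code, not part of this file) of what the two
   builtins just registered back, via their <xmmintrin.h> wrappers:

     unsigned int csr = _mm_getcsr ();   <- IX86_BUILTIN_STMXCSR
     _mm_setcsr (csr | 0x8000);          <- IX86_BUILTIN_LDMXCSR

   0x8000 is the MXCSR flush-to-zero bit (_MM_FLUSH_ZERO_ON).  */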
20982 /* SSE or 3DNow!A */
20983 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
20985 /* SSE2 */
20986 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
20988 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
20989 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
20991 /* SSE3. */
20992 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
20993 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
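/* Illustrative use through <pmmintrin.h> (kernel-mode code in practice,
   since MONITOR/MWAIT fault below ring 0):

     _mm_monitor (addr, 0, 0);   <- IX86_BUILTIN_MONITOR
     _mm_mwait (0, 0);           <- IX86_BUILTIN_MWAIT

   The zero extension/hint arguments select the plain instruction forms.  */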
20995 /* AES */
20996 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
20997 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
20998 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
20999 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
21000 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
21001 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
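/* Sketch of one AES encryption round through the <wmmintrin.h> wrapper
   (user code compiled with -maes -msse2):

     __m128i next = _mm_aesenc_si128 (state, round_key);

   which is defined in terms of IX86_BUILTIN_AESENC128 above.  */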
21003 /* PCLMUL */
21004 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
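/* The PCLMUL builtin backs _mm_clmulepi64_si128 in <wmmintrin.h>; the
   immediate selects which 64-bit halves are multiplied carry-less, e.g.

     __m128i r = _mm_clmulepi64_si128 (a, b, 0x00);   low half x low half

   as used in GCM and CRC kernels.  */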
21006 /* Access to the vec_init patterns. */
21007 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
21008 integer_type_node, NULL_TREE);
21009 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
21011 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
21012 short_integer_type_node,
21013 short_integer_type_node,
21014 short_integer_type_node, NULL_TREE);
21015 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
21017 ftype = build_function_type_list (V8QI_type_node, char_type_node,
21018 char_type_node, char_type_node,
21019 char_type_node, char_type_node,
21020 char_type_node, char_type_node,
21021 char_type_node, NULL_TREE);
21022 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
21024 /* Access to the vec_extract patterns. */
21025 ftype = build_function_type_list (double_type_node, V2DF_type_node,
21026 integer_type_node, NULL_TREE);
21027 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
21029 ftype = build_function_type_list (long_long_integer_type_node,
21030 V2DI_type_node, integer_type_node,
21031 NULL_TREE);
21032 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
21034 ftype = build_function_type_list (float_type_node, V4SF_type_node,
21035 integer_type_node, NULL_TREE);
21036 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
21038 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
21039 integer_type_node, NULL_TREE);
21040 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
21042 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
21043 integer_type_node, NULL_TREE);
21044 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
21046 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
21047 integer_type_node, NULL_TREE);
21048 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
21050 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
21051 integer_type_node, NULL_TREE);
21052 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
21054 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
21055 integer_type_node, NULL_TREE);
21056 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
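/* User-level sketch of a vec_extract builtin: with -msse2,
   _mm_extract_epi16 from <emmintrin.h> reaches IX86_BUILTIN_VEC_EXT_V8HI
   registered above; the selector must be a compile-time constant:

     int x = _mm_extract_epi16 (v, 2);   element 2 of a V8HImode vector  */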
21058 /* Access to the vec_set patterns. */
21059 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
21060 intDI_type_node,
21061 integer_type_node, NULL_TREE);
21062 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
21064 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
21065 float_type_node,
21066 integer_type_node, NULL_TREE);
21067 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
21069 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
21070 intSI_type_node,
21071 integer_type_node, NULL_TREE);
21072 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
21074 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
21075 intHI_type_node,
21076 integer_type_node, NULL_TREE);
21077 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
21079 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
21080 intHI_type_node,
21081 integer_type_node, NULL_TREE);
21082 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
21084 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
21085 intQI_type_node,
21086 integer_type_node, NULL_TREE);
21087 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
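/* Matching vec_set sketch: _mm_insert_epi16 from <emmintrin.h> reaches
   IX86_BUILTIN_VEC_SET_V8HI; note that the expander copies the source
   vector rather than modifying it in place:

     __m128i w = _mm_insert_epi16 (v, x, 2);  */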
21089 /* Add the SSE5 multi-argument instructions. */
21090 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
21092 tree mtype = NULL_TREE;
21094 if (d->name == 0)
21095 continue;
21097 switch ((enum multi_arg_type)d->flag)
21099 case MULTI_ARG_3_SF: mtype = v4sf_ftype_v4sf_v4sf_v4sf; break;
21100 case MULTI_ARG_3_DF: mtype = v2df_ftype_v2df_v2df_v2df; break;
21101 case MULTI_ARG_3_DI: mtype = v2di_ftype_v2di_v2di_v2di; break;
21102 case MULTI_ARG_3_SI: mtype = v4si_ftype_v4si_v4si_v4si; break;
21103 case MULTI_ARG_3_SI_DI: mtype = v4si_ftype_v4si_v4si_v2di; break;
21104 case MULTI_ARG_3_HI: mtype = v8hi_ftype_v8hi_v8hi_v8hi; break;
21105 case MULTI_ARG_3_HI_SI: mtype = v8hi_ftype_v8hi_v8hi_v4si; break;
21106 case MULTI_ARG_3_QI: mtype = v16qi_ftype_v16qi_v16qi_v16qi; break;
21107 case MULTI_ARG_3_PERMPS: mtype = v4sf_ftype_v4sf_v4sf_v16qi; break;
21108 case MULTI_ARG_3_PERMPD: mtype = v2df_ftype_v2df_v2df_v16qi; break;
21109 case MULTI_ARG_2_SF: mtype = v4sf_ftype_v4sf_v4sf; break;
21110 case MULTI_ARG_2_DF: mtype = v2df_ftype_v2df_v2df; break;
21111 case MULTI_ARG_2_DI: mtype = v2di_ftype_v2di_v2di; break;
21112 case MULTI_ARG_2_SI: mtype = v4si_ftype_v4si_v4si; break;
21113 case MULTI_ARG_2_HI: mtype = v8hi_ftype_v8hi_v8hi; break;
21114 case MULTI_ARG_2_QI: mtype = v16qi_ftype_v16qi_v16qi; break;
21115 case MULTI_ARG_2_DI_IMM: mtype = v2di_ftype_v2di_si; break;
21116 case MULTI_ARG_2_SI_IMM: mtype = v4si_ftype_v4si_si; break;
21117 case MULTI_ARG_2_HI_IMM: mtype = v8hi_ftype_v8hi_si; break;
21118 case MULTI_ARG_2_QI_IMM: mtype = v16qi_ftype_v16qi_si; break;
21119 case MULTI_ARG_2_SF_CMP: mtype = v4sf_ftype_v4sf_v4sf; break;
21120 case MULTI_ARG_2_DF_CMP: mtype = v2df_ftype_v2df_v2df; break;
21121 case MULTI_ARG_2_DI_CMP: mtype = v2di_ftype_v2di_v2di; break;
21122 case MULTI_ARG_2_SI_CMP: mtype = v4si_ftype_v4si_v4si; break;
21123 case MULTI_ARG_2_HI_CMP: mtype = v8hi_ftype_v8hi_v8hi; break;
21124 case MULTI_ARG_2_QI_CMP: mtype = v16qi_ftype_v16qi_v16qi; break;
21125 case MULTI_ARG_2_SF_TF: mtype = v4sf_ftype_v4sf_v4sf; break;
21126 case MULTI_ARG_2_DF_TF: mtype = v2df_ftype_v2df_v2df; break;
21127 case MULTI_ARG_2_DI_TF: mtype = v2di_ftype_v2di_v2di; break;
21128 case MULTI_ARG_2_SI_TF: mtype = v4si_ftype_v4si_v4si; break;
21129 case MULTI_ARG_2_HI_TF: mtype = v8hi_ftype_v8hi_v8hi; break;
21130 case MULTI_ARG_2_QI_TF: mtype = v16qi_ftype_v16qi_v16qi; break;
21131 case MULTI_ARG_1_SF: mtype = v4sf_ftype_v4sf; break;
21132 case MULTI_ARG_1_DF: mtype = v2df_ftype_v2df; break;
21133 case MULTI_ARG_1_DI: mtype = v2di_ftype_v2di; break;
21134 case MULTI_ARG_1_SI: mtype = v4si_ftype_v4si; break;
21135 case MULTI_ARG_1_HI: mtype = v8hi_ftype_v8hi; break;
21136 case MULTI_ARG_1_QI: mtype = v16qi_ftype_v16qi; break;
21137 case MULTI_ARG_1_SI_DI: mtype = v2di_ftype_v4si; break;
21138 case MULTI_ARG_1_HI_DI: mtype = v2di_ftype_v8hi; break;
21139 case MULTI_ARG_1_HI_SI: mtype = v4si_ftype_v8hi; break;
21140 case MULTI_ARG_1_QI_DI: mtype = v2di_ftype_v16qi; break;
21141 case MULTI_ARG_1_QI_SI: mtype = v4si_ftype_v16qi; break;
21142 case MULTI_ARG_1_QI_HI: mtype = v8hi_ftype_v16qi; break;
21143 case MULTI_ARG_1_PH2PS: mtype = v4sf_ftype_v4hi; break;
21144 case MULTI_ARG_1_PS2PH: mtype = v4hi_ftype_v4sf; break;
21145 case MULTI_ARG_UNKNOWN:
21146 default:
21147 gcc_unreachable ();
21150 if (mtype)
21151 def_builtin_const (d->mask, d->name, mtype, d->code);
21155 /* Internal method for ix86_init_builtins. */
21157 static void
21158 ix86_init_builtins_va_builtins_abi (void)
21160 tree ms_va_ref, sysv_va_ref;
21161 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
21162 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
21163 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
21164 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
21166 if (!TARGET_64BIT)
21167 return;
21168 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
21169 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
21170 ms_va_ref = build_reference_type (ms_va_list_type_node);
21171 sysv_va_ref =
21172 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
21174 fnvoid_va_end_ms =
21175 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
21176 fnvoid_va_start_ms =
21177 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
21178 fnvoid_va_end_sysv =
21179 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
21180 fnvoid_va_start_sysv =
21181 build_varargs_function_type_list (void_type_node, sysv_va_ref,
21182 NULL_TREE);
21183 fnvoid_va_copy_ms =
21184 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
21185 NULL_TREE);
21186 fnvoid_va_copy_sysv =
21187 build_function_type_list (void_type_node, sysv_va_ref,
21188 sysv_va_ref, NULL_TREE);
21190 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
21191 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
21192 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
21193 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
21194 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
21195 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
21196 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
21197 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
21198 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
21199 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
21200 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
21201 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
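/* A hedged sketch of user code exercising the cross-ABI varargs support
   registered above: on x86-64, an ms_abi function walks its arguments
   with the __builtin_ms_va_* family instead of the native SysV va_list.  */

static int __attribute__ ((ms_abi))
ms_sum (int n, ...)
{
  __builtin_ms_va_list ap;
  int i, s = 0;

  __builtin_ms_va_start (ap, n);
  for (i = 0; i < n; i++)
    s += __builtin_va_arg (ap, int);
  __builtin_ms_va_end (ap);
  return s;
}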
21204 static void
21205 ix86_init_builtins (void)
21207 tree float128_type_node = make_node (REAL_TYPE);
21208 tree ftype, decl;
21210 /* The __float80 type. */
21211 if (TYPE_MODE (long_double_type_node) == XFmode)
21212 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
21213 "__float80");
21214 else
21216 /* The __float80 type. */
21217 tree float80_type_node = make_node (REAL_TYPE);
21219 TYPE_PRECISION (float80_type_node) = 80;
21220 layout_type (float80_type_node);
21221 (*lang_hooks.types.register_builtin_type) (float80_type_node,
21222 "__float80");
21225 /* The __float128 type. */
21226 TYPE_PRECISION (float128_type_node) = 128;
21227 layout_type (float128_type_node);
21228 (*lang_hooks.types.register_builtin_type) (float128_type_node,
21229 "__float128");
21231 /* TFmode support builtins. */
21232 ftype = build_function_type (float128_type_node, void_list_node);
21233 decl = add_builtin_function ("__builtin_infq", ftype,
21234 IX86_BUILTIN_INFQ, BUILT_IN_MD,
21235 NULL, NULL_TREE);
21236 ix86_builtins[(int) IX86_BUILTIN_INFQ] = decl;
21238 /* We will expand these to normal calls if SSE2 isn't available, since
21239 they are used by libgcc. */
21240 ftype = build_function_type_list (float128_type_node,
21241 float128_type_node,
21242 NULL_TREE);
21243 decl = add_builtin_function ("__builtin_fabsq", ftype,
21244 IX86_BUILTIN_FABSQ, BUILT_IN_MD,
21245 "__fabstf2", NULL_TREE);
21246 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = decl;
21247 TREE_READONLY (decl) = 1;
21249 ftype = build_function_type_list (float128_type_node,
21250 float128_type_node,
21251 float128_type_node,
21252 NULL_TREE);
21253 decl = add_builtin_function ("__builtin_copysignq", ftype,
21254 IX86_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
21255 "__copysigntf3", NULL_TREE);
21256 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = decl;
21257 TREE_READONLY (decl) = 1;
21259 ix86_init_mmx_sse_builtins ();
21260 if (TARGET_64BIT)
21261 ix86_init_builtins_va_builtins_abi ();
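/* Sketch of the TFmode builtins registered above; without SSE2 the
   fabs/copysign forms expand to the libgcc functions __fabstf2 and
   __copysigntf3 named in their definitions.  */

static __float128
flip_magnitude (__float128 x, __float128 sign)
{
  return __builtin_copysignq (__builtin_fabsq (x), sign);
}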
21264 /* Errors in the source file can cause expand_expr to return const0_rtx
21265 where we expect a vector. To avoid crashing, use one of the vector
21266 clear instructions. */
21267 static rtx
21268 safe_vector_operand (rtx x, enum machine_mode mode)
21270 if (x == const0_rtx)
21271 x = CONST0_RTX (mode);
21272 return x;
21275 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
21277 static rtx
21278 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
21280 rtx pat;
21281 tree arg0 = CALL_EXPR_ARG (exp, 0);
21282 tree arg1 = CALL_EXPR_ARG (exp, 1);
21283 rtx op0 = expand_normal (arg0);
21284 rtx op1 = expand_normal (arg1);
21285 enum machine_mode tmode = insn_data[icode].operand[0].mode;
21286 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
21287 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
21289 if (VECTOR_MODE_P (mode0))
21290 op0 = safe_vector_operand (op0, mode0);
21291 if (VECTOR_MODE_P (mode1))
21292 op1 = safe_vector_operand (op1, mode1);
21294 if (optimize || !target
21295 || GET_MODE (target) != tmode
21296 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
21297 target = gen_reg_rtx (tmode);
21299 if (GET_MODE (op1) == SImode && mode1 == TImode)
21301 rtx x = gen_reg_rtx (V4SImode);
21302 emit_insn (gen_sse2_loadd (x, op1));
21303 op1 = gen_lowpart (TImode, x);
21306 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
21307 op0 = copy_to_mode_reg (mode0, op0);
21308 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
21309 op1 = copy_to_mode_reg (mode1, op1);
21311 pat = GEN_FCN (icode) (target, op0, op1);
21312 if (! pat)
21313 return 0;
21315 emit_insn (pat);
21317 return target;
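/* User-level view of a binop builtin handled by the expander above:
   with -msse2, <emmintrin.h> defines _mm_add_epi16 in terms of
   __builtin_ia32_paddw128, which this routine turns into a single
   paddw pattern.  */
#include <emmintrin.h>

static __m128i
add_halfwords (__m128i a, __m128i b)
{
  return _mm_add_epi16 (a, b);	/* __builtin_ia32_paddw128 */
}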
21320 /* Subroutine of ix86_expand_builtin to take care of 2-4 argument insns. */
21322 static rtx
21323 ix86_expand_multi_arg_builtin (enum insn_code icode, tree exp, rtx target,
21324 enum multi_arg_type m_type,
21325 enum insn_code sub_code)
21327 rtx pat;
21328 int i;
21329 int nargs;
21330 bool comparison_p = false;
21331 bool tf_p = false;
21332 bool last_arg_constant = false;
21333 int num_memory = 0;
21334 struct {
21335 rtx op;
21336 enum machine_mode mode;
21337 } args[4];
21339 enum machine_mode tmode = insn_data[icode].operand[0].mode;
21341 switch (m_type)
21343 case MULTI_ARG_3_SF:
21344 case MULTI_ARG_3_DF:
21345 case MULTI_ARG_3_DI:
21346 case MULTI_ARG_3_SI:
21347 case MULTI_ARG_3_SI_DI:
21348 case MULTI_ARG_3_HI:
21349 case MULTI_ARG_3_HI_SI:
21350 case MULTI_ARG_3_QI:
21351 case MULTI_ARG_3_PERMPS:
21352 case MULTI_ARG_3_PERMPD:
21353 nargs = 3;
21354 break;
21356 case MULTI_ARG_2_SF:
21357 case MULTI_ARG_2_DF:
21358 case MULTI_ARG_2_DI:
21359 case MULTI_ARG_2_SI:
21360 case MULTI_ARG_2_HI:
21361 case MULTI_ARG_2_QI:
21362 nargs = 2;
21363 break;
21365 case MULTI_ARG_2_DI_IMM:
21366 case MULTI_ARG_2_SI_IMM:
21367 case MULTI_ARG_2_HI_IMM:
21368 case MULTI_ARG_2_QI_IMM:
21369 nargs = 2;
21370 last_arg_constant = true;
21371 break;
21373 case MULTI_ARG_1_SF:
21374 case MULTI_ARG_1_DF:
21375 case MULTI_ARG_1_DI:
21376 case MULTI_ARG_1_SI:
21377 case MULTI_ARG_1_HI:
21378 case MULTI_ARG_1_QI:
21379 case MULTI_ARG_1_SI_DI:
21380 case MULTI_ARG_1_HI_DI:
21381 case MULTI_ARG_1_HI_SI:
21382 case MULTI_ARG_1_QI_DI:
21383 case MULTI_ARG_1_QI_SI:
21384 case MULTI_ARG_1_QI_HI:
21385 case MULTI_ARG_1_PH2PS:
21386 case MULTI_ARG_1_PS2PH:
21387 nargs = 1;
21388 break;
21390 case MULTI_ARG_2_SF_CMP:
21391 case MULTI_ARG_2_DF_CMP:
21392 case MULTI_ARG_2_DI_CMP:
21393 case MULTI_ARG_2_SI_CMP:
21394 case MULTI_ARG_2_HI_CMP:
21395 case MULTI_ARG_2_QI_CMP:
21396 nargs = 2;
21397 comparison_p = true;
21398 break;
21400 case MULTI_ARG_2_SF_TF:
21401 case MULTI_ARG_2_DF_TF:
21402 case MULTI_ARG_2_DI_TF:
21403 case MULTI_ARG_2_SI_TF:
21404 case MULTI_ARG_2_HI_TF:
21405 case MULTI_ARG_2_QI_TF:
21406 nargs = 2;
21407 tf_p = true;
21408 break;
21410 case MULTI_ARG_UNKNOWN:
21411 default:
21412 gcc_unreachable ();
21415 if (optimize || !target
21416 || GET_MODE (target) != tmode
21417 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
21418 target = gen_reg_rtx (tmode);
21420 gcc_assert (nargs <= 4);
21422 for (i = 0; i < nargs; i++)
21424 tree arg = CALL_EXPR_ARG (exp, i);
21425 rtx op = expand_normal (arg);
21426 int adjust = (comparison_p) ? 1 : 0;
21427 enum machine_mode mode = insn_data[icode].operand[i+adjust+1].mode;
21429 if (last_arg_constant && i == nargs-1)
21431 if (GET_CODE (op) != CONST_INT)
21433 error ("last argument must be an immediate");
21434 return gen_reg_rtx (tmode);
21437 else
21439 if (VECTOR_MODE_P (mode))
21440 op = safe_vector_operand (op, mode);
21442 /* If we aren't optimizing, only allow one memory operand to be
21443 generated. */
21444 if (memory_operand (op, mode))
21445 num_memory++;
21447 gcc_assert (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode);
21449 if (optimize
21450 || ! (*insn_data[icode].operand[i+adjust+1].predicate) (op, mode)
21451 || num_memory > 1)
21452 op = force_reg (mode, op);
21455 args[i].op = op;
21456 args[i].mode = mode;
21459 switch (nargs)
21461 case 1:
21462 pat = GEN_FCN (icode) (target, args[0].op);
21463 break;
21465 case 2:
21466 if (tf_p)
21467 pat = GEN_FCN (icode) (target, args[0].op, args[1].op,
21468 GEN_INT ((int)sub_code));
21469 else if (! comparison_p)
21470 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
21471 else
21473 rtx cmp_op = gen_rtx_fmt_ee (sub_code, GET_MODE (target),
21474 args[0].op,
21475 args[1].op);
21477 pat = GEN_FCN (icode) (target, cmp_op, args[0].op, args[1].op);
21479 break;
21481 case 3:
21482 pat = GEN_FCN (icode) (target, args[0].op, args[1].op, args[2].op);
21483 break;
21485 default:
21486 gcc_unreachable ();
21489 if (! pat)
21490 return 0;
21492 emit_insn (pat);
21493 return target;
21496 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
21497 insns with vec_merge. */
21499 static rtx
21500 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
21501 rtx target)
21503 rtx pat;
21504 tree arg0 = CALL_EXPR_ARG (exp, 0);
21505 rtx op1, op0 = expand_normal (arg0);
21506 enum machine_mode tmode = insn_data[icode].operand[0].mode;
21507 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
21509 if (optimize || !target
21510 || GET_MODE (target) != tmode
21511 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
21512 target = gen_reg_rtx (tmode);
21514 if (VECTOR_MODE_P (mode0))
21515 op0 = safe_vector_operand (op0, mode0);
21517 if ((optimize && !register_operand (op0, mode0))
21518 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
21519 op0 = copy_to_mode_reg (mode0, op0);
21521 op1 = op0;
21522 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
21523 op1 = copy_to_mode_reg (mode0, op1);
21525 pat = GEN_FCN (icode) (target, op0, op1);
21526 if (! pat)
21527 return 0;
21528 emit_insn (pat);
21529 return target;
21532 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
21534 static rtx
21535 ix86_expand_sse_compare (const struct builtin_description *d,
21536 tree exp, rtx target, bool swap)
21538 rtx pat;
21539 tree arg0 = CALL_EXPR_ARG (exp, 0);
21540 tree arg1 = CALL_EXPR_ARG (exp, 1);
21541 rtx op0 = expand_normal (arg0);
21542 rtx op1 = expand_normal (arg1);
21543 rtx op2;
21544 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
21545 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
21546 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
21547 enum rtx_code comparison = d->comparison;
21549 if (VECTOR_MODE_P (mode0))
21550 op0 = safe_vector_operand (op0, mode0);
21551 if (VECTOR_MODE_P (mode1))
21552 op1 = safe_vector_operand (op1, mode1);
21554 /* Swap operands if we have a comparison that isn't available in
21555 hardware. */
21556 if (swap)
21558 rtx tmp = gen_reg_rtx (mode1);
21559 emit_move_insn (tmp, op1);
21560 op1 = op0;
21561 op0 = tmp;
21564 if (optimize || !target
21565 || GET_MODE (target) != tmode
21566 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
21567 target = gen_reg_rtx (tmode);
21569 if ((optimize && !register_operand (op0, mode0))
21570 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
21571 op0 = copy_to_mode_reg (mode0, op0);
21572 if ((optimize && !register_operand (op1, mode1))
21573 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
21574 op1 = copy_to_mode_reg (mode1, op1);
21576 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
21577 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
21578 if (! pat)
21579 return 0;
21580 emit_insn (pat);
21581 return target;
21584 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
21586 static rtx
21587 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
21588 rtx target)
21590 rtx pat;
21591 tree arg0 = CALL_EXPR_ARG (exp, 0);
21592 tree arg1 = CALL_EXPR_ARG (exp, 1);
21593 rtx op0 = expand_normal (arg0);
21594 rtx op1 = expand_normal (arg1);
21595 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
21596 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
21597 enum rtx_code comparison = d->comparison;
21599 if (VECTOR_MODE_P (mode0))
21600 op0 = safe_vector_operand (op0, mode0);
21601 if (VECTOR_MODE_P (mode1))
21602 op1 = safe_vector_operand (op1, mode1);
21604 /* Swap operands if we have a comparison that isn't available in
21605 hardware. */
21606 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
21608 rtx tmp = op1;
21609 op1 = op0;
21610 op0 = tmp;
21613 target = gen_reg_rtx (SImode);
21614 emit_move_insn (target, const0_rtx);
21615 target = gen_rtx_SUBREG (QImode, target, 0);
21617 if ((optimize && !register_operand (op0, mode0))
21618 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
21619 op0 = copy_to_mode_reg (mode0, op0);
21620 if ((optimize && !register_operand (op1, mode1))
21621 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
21622 op1 = copy_to_mode_reg (mode1, op1);
21624 pat = GEN_FCN (d->icode) (op0, op1);
21625 if (! pat)
21626 return 0;
21627 emit_insn (pat);
21628 emit_insn (gen_rtx_SET (VOIDmode,
21629 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
21630 gen_rtx_fmt_ee (comparison, QImode,
21631 SET_DEST (pat),
21632 const0_rtx)));
21634 return SUBREG_REG (target);
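/* Sketch of a comi builtin expanded by the routine above: _mm_comieq_sd
   compares the low double of each operand and yields 0 or 1 through the
   flags-to-QImode sequence emitted here (roughly comisd plus a setcc).  */
#include <emmintrin.h>

static int
low_doubles_equal (__m128d a, __m128d b)
{
  return _mm_comieq_sd (a, b);
}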
21637 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
21639 static rtx
21640 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
21641 rtx target)
21643 rtx pat;
21644 tree arg0 = CALL_EXPR_ARG (exp, 0);
21645 tree arg1 = CALL_EXPR_ARG (exp, 1);
21646 rtx op0 = expand_normal (arg0);
21647 rtx op1 = expand_normal (arg1);
21648 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
21649 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
21650 enum rtx_code comparison = d->comparison;
21652 if (VECTOR_MODE_P (mode0))
21653 op0 = safe_vector_operand (op0, mode0);
21654 if (VECTOR_MODE_P (mode1))
21655 op1 = safe_vector_operand (op1, mode1);
21657 target = gen_reg_rtx (SImode);
21658 emit_move_insn (target, const0_rtx);
21659 target = gen_rtx_SUBREG (QImode, target, 0);
21661 if ((optimize && !register_operand (op0, mode0))
21662 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
21663 op0 = copy_to_mode_reg (mode0, op0);
21664 if ((optimize && !register_operand (op1, mode1))
21665 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
21666 op1 = copy_to_mode_reg (mode1, op1);
21668 pat = GEN_FCN (d->icode) (op0, op1);
21669 if (! pat)
21670 return 0;
21671 emit_insn (pat);
21672 emit_insn (gen_rtx_SET (VOIDmode,
21673 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
21674 gen_rtx_fmt_ee (comparison, QImode,
21675 SET_DEST (pat),
21676 const0_rtx)));
21678 return SUBREG_REG (target);
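/* Sketch of a ptest builtin (SSE4.1) expanded by the routine above:
   _mm_testz_si128 sets ZF from the AND of its operands, and the
   expander materializes that flag as an int just as in the comi case.  */
#include <smmintrin.h>

static int
is_all_zero (__m128i v)
{
  return _mm_testz_si128 (v, v);	/* ptest; sete */
}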
21681 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
21683 static rtx
21684 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
21685 tree exp, rtx target)
21687 rtx pat;
21688 tree arg0 = CALL_EXPR_ARG (exp, 0);
21689 tree arg1 = CALL_EXPR_ARG (exp, 1);
21690 tree arg2 = CALL_EXPR_ARG (exp, 2);
21691 tree arg3 = CALL_EXPR_ARG (exp, 3);
21692 tree arg4 = CALL_EXPR_ARG (exp, 4);
21693 rtx scratch0, scratch1;
21694 rtx op0 = expand_normal (arg0);
21695 rtx op1 = expand_normal (arg1);
21696 rtx op2 = expand_normal (arg2);
21697 rtx op3 = expand_normal (arg3);
21698 rtx op4 = expand_normal (arg4);
21699 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
21701 tmode0 = insn_data[d->icode].operand[0].mode;
21702 tmode1 = insn_data[d->icode].operand[1].mode;
21703 modev2 = insn_data[d->icode].operand[2].mode;
21704 modei3 = insn_data[d->icode].operand[3].mode;
21705 modev4 = insn_data[d->icode].operand[4].mode;
21706 modei5 = insn_data[d->icode].operand[5].mode;
21707 modeimm = insn_data[d->icode].operand[6].mode;
21709 if (VECTOR_MODE_P (modev2))
21710 op0 = safe_vector_operand (op0, modev2);
21711 if (VECTOR_MODE_P (modev4))
21712 op2 = safe_vector_operand (op2, modev4);
21714 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
21715 op0 = copy_to_mode_reg (modev2, op0);
21716 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
21717 op1 = copy_to_mode_reg (modei3, op1);
21718 if ((optimize && !register_operand (op2, modev4))
21719 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
21720 op2 = copy_to_mode_reg (modev4, op2);
21721 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
21722 op3 = copy_to_mode_reg (modei5, op3);
21724 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
21726 error ("the fifth argument must be a 8-bit immediate");
21727 return const0_rtx;
21730 if (d->code == IX86_BUILTIN_PCMPESTRI128)
21732 if (optimize || !target
21733 || GET_MODE (target) != tmode0
21734 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
21735 target = gen_reg_rtx (tmode0);
21737 scratch1 = gen_reg_rtx (tmode1);
21739 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
21741 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
21743 if (optimize || !target
21744 || GET_MODE (target) != tmode1
21745 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
21746 target = gen_reg_rtx (tmode1);
21748 scratch0 = gen_reg_rtx (tmode0);
21750 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
21752 else
21754 gcc_assert (d->flag);
21756 scratch0 = gen_reg_rtx (tmode0);
21757 scratch1 = gen_reg_rtx (tmode1);
21759 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
21762 if (! pat)
21763 return 0;
21765 emit_insn (pat);
21767 if (d->flag)
21769 target = gen_reg_rtx (SImode);
21770 emit_move_insn (target, const0_rtx);
21771 target = gen_rtx_SUBREG (QImode, target, 0);
21773 emit_insn
21774 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
21775 gen_rtx_fmt_ee (EQ, QImode,
21776 gen_rtx_REG ((enum machine_mode) d->flag,
21777 FLAGS_REG),
21778 const0_rtx)));
21779 return SUBREG_REG (target);
21781 else
21782 return target;
21786 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
21788 static rtx
21789 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
21790 tree exp, rtx target)
21792 rtx pat;
21793 tree arg0 = CALL_EXPR_ARG (exp, 0);
21794 tree arg1 = CALL_EXPR_ARG (exp, 1);
21795 tree arg2 = CALL_EXPR_ARG (exp, 2);
21796 rtx scratch0, scratch1;
21797 rtx op0 = expand_normal (arg0);
21798 rtx op1 = expand_normal (arg1);
21799 rtx op2 = expand_normal (arg2);
21800 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
21802 tmode0 = insn_data[d->icode].operand[0].mode;
21803 tmode1 = insn_data[d->icode].operand[1].mode;
21804 modev2 = insn_data[d->icode].operand[2].mode;
21805 modev3 = insn_data[d->icode].operand[3].mode;
21806 modeimm = insn_data[d->icode].operand[4].mode;
21808 if (VECTOR_MODE_P (modev2))
21809 op0 = safe_vector_operand (op0, modev2);
21810 if (VECTOR_MODE_P (modev3))
21811 op1 = safe_vector_operand (op1, modev3);
21813 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
21814 op0 = copy_to_mode_reg (modev2, op0);
21815 if ((optimize && !register_operand (op1, modev3))
21816 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
21817 op1 = copy_to_mode_reg (modev3, op1);
21819 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
21821 error ("the third argument must be a 8-bit immediate");
21822 return const0_rtx;
21825 if (d->code == IX86_BUILTIN_PCMPISTRI128)
21827 if (optimize || !target
21828 || GET_MODE (target) != tmode0
21829 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
21830 target = gen_reg_rtx (tmode0);
21832 scratch1 = gen_reg_rtx (tmode1);
21834 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
21836 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
21838 if (optimize || !target
21839 || GET_MODE (target) != tmode1
21840 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
21841 target = gen_reg_rtx (tmode1);
21843 scratch0 = gen_reg_rtx (tmode0);
21845 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
21847 else
21849 gcc_assert (d->flag);
21851 scratch0 = gen_reg_rtx (tmode0);
21852 scratch1 = gen_reg_rtx (tmode1);
21854 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
21857 if (! pat)
21858 return 0;
21860 emit_insn (pat);
21862 if (d->flag)
21864 target = gen_reg_rtx (SImode);
21865 emit_move_insn (target, const0_rtx);
21866 target = gen_rtx_SUBREG (QImode, target, 0);
21868 emit_insn
21869 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
21870 gen_rtx_fmt_ee (EQ, QImode,
21871 gen_rtx_REG ((enum machine_mode) d->flag,
21872 FLAGS_REG),
21873 const0_rtx)));
21874 return SUBREG_REG (target);
21876 else
21877 return target;
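/* Sketch of a pcmpistr builtin expanded above: _mm_cmpistri from
   <nmmintrin.h> (SSE4.2) returns the index of the first byte of B that
   matches any byte of A under the given mode immediate.  */
#include <nmmintrin.h>

static int
first_match_index (__m128i a, __m128i b)
{
  return _mm_cmpistri (a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY);
}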
21880 /* Subroutine of ix86_expand_builtin to take care of insns with
21881 a variable number of operands. */
21883 static rtx
21884 ix86_expand_args_builtin (const struct builtin_description *d,
21885 tree exp, rtx target)
21887 rtx pat, real_target;
21888 unsigned int i, nargs;
21889 unsigned int nargs_constant = 0;
21890 int num_memory = 0;
21891 struct
21893 rtx op;
21894 enum machine_mode mode;
21895 } args[4];
21896 bool last_arg_count = false;
21897 enum insn_code icode = d->icode;
21898 const struct insn_data *insn_p = &insn_data[icode];
21899 enum machine_mode tmode = insn_p->operand[0].mode;
21900 enum machine_mode rmode = VOIDmode;
21901 bool swap = false;
21902 enum rtx_code comparison = d->comparison;
21904 switch ((enum ix86_builtin_type) d->flag)
21906 case INT_FTYPE_V2DI_V2DI_PTEST:
21907 return ix86_expand_sse_ptest (d, exp, target);
21908 case FLOAT128_FTYPE_FLOAT128:
21909 case FLOAT_FTYPE_FLOAT:
21910 case INT64_FTYPE_V4SF:
21911 case INT64_FTYPE_V2DF:
21912 case INT_FTYPE_V16QI:
21913 case INT_FTYPE_V8QI:
21914 case INT_FTYPE_V4SF:
21915 case INT_FTYPE_V2DF:
21916 case V16QI_FTYPE_V16QI:
21917 case V8HI_FTYPE_V8HI:
21918 case V8HI_FTYPE_V16QI:
21919 case V8QI_FTYPE_V8QI:
21920 case V4SI_FTYPE_V4SI:
21921 case V4SI_FTYPE_V16QI:
21922 case V4SI_FTYPE_V4SF:
21923 case V4SI_FTYPE_V8HI:
21924 case V4SI_FTYPE_V2DF:
21925 case V4HI_FTYPE_V4HI:
21926 case V4SF_FTYPE_V4SF:
21927 case V4SF_FTYPE_V4SI:
21928 case V4SF_FTYPE_V2DF:
21929 case V2DI_FTYPE_V2DI:
21930 case V2DI_FTYPE_V16QI:
21931 case V2DI_FTYPE_V8HI:
21932 case V2DI_FTYPE_V4SI:
21933 case V2DF_FTYPE_V2DF:
21934 case V2DF_FTYPE_V4SI:
21935 case V2DF_FTYPE_V4SF:
21936 case V2DF_FTYPE_V2SI:
21937 case V2SI_FTYPE_V2SI:
21938 case V2SI_FTYPE_V4SF:
21939 case V2SI_FTYPE_V2SF:
21940 case V2SI_FTYPE_V2DF:
21941 case V2SF_FTYPE_V2SF:
21942 case V2SF_FTYPE_V2SI:
21943 nargs = 1;
21944 break;
21945 case V4SF_FTYPE_V4SF_VEC_MERGE:
21946 case V2DF_FTYPE_V2DF_VEC_MERGE:
21947 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
21948 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
21949 case V16QI_FTYPE_V16QI_V16QI:
21950 case V16QI_FTYPE_V8HI_V8HI:
21951 case V8QI_FTYPE_V8QI_V8QI:
21952 case V8QI_FTYPE_V4HI_V4HI:
21953 case V8HI_FTYPE_V8HI_V8HI:
21954 case V8HI_FTYPE_V16QI_V16QI:
21955 case V8HI_FTYPE_V4SI_V4SI:
21956 case V4SI_FTYPE_V4SI_V4SI:
21957 case V4SI_FTYPE_V8HI_V8HI:
21958 case V4SI_FTYPE_V4SF_V4SF:
21959 case V4SI_FTYPE_V2DF_V2DF:
21960 case V4HI_FTYPE_V4HI_V4HI:
21961 case V4HI_FTYPE_V8QI_V8QI:
21962 case V4HI_FTYPE_V2SI_V2SI:
21963 case V4SF_FTYPE_V4SF_V4SF:
21964 case V4SF_FTYPE_V4SF_V2SI:
21965 case V4SF_FTYPE_V4SF_V2DF:
21966 case V4SF_FTYPE_V4SF_DI:
21967 case V4SF_FTYPE_V4SF_SI:
21968 case V2DI_FTYPE_V2DI_V2DI:
21969 case V2DI_FTYPE_V16QI_V16QI:
21970 case V2DI_FTYPE_V4SI_V4SI:
21971 case V2DI_FTYPE_V2DI_V16QI:
21972 case V2DI_FTYPE_V2DF_V2DF:
21973 case V2SI_FTYPE_V2SI_V2SI:
21974 case V2SI_FTYPE_V4HI_V4HI:
21975 case V2SI_FTYPE_V2SF_V2SF:
21976 case V2DF_FTYPE_V2DF_V2DF:
21977 case V2DF_FTYPE_V2DF_V4SF:
21978 case V2DF_FTYPE_V2DF_DI:
21979 case V2DF_FTYPE_V2DF_SI:
21980 case V2SF_FTYPE_V2SF_V2SF:
21981 case V1DI_FTYPE_V1DI_V1DI:
21982 case V1DI_FTYPE_V8QI_V8QI:
21983 case V1DI_FTYPE_V2SI_V2SI:
21984 if (comparison == UNKNOWN)
21985 return ix86_expand_binop_builtin (icode, exp, target);
21986 nargs = 2;
21987 break;
21988 case V4SF_FTYPE_V4SF_V4SF_SWAP:
21989 case V2DF_FTYPE_V2DF_V2DF_SWAP:
21990 gcc_assert (comparison != UNKNOWN);
21991 nargs = 2;
21992 swap = true;
21993 break;
21994 case V8HI_FTYPE_V8HI_V8HI_COUNT:
21995 case V8HI_FTYPE_V8HI_SI_COUNT:
21996 case V4SI_FTYPE_V4SI_V4SI_COUNT:
21997 case V4SI_FTYPE_V4SI_SI_COUNT:
21998 case V4HI_FTYPE_V4HI_V4HI_COUNT:
21999 case V4HI_FTYPE_V4HI_SI_COUNT:
22000 case V2DI_FTYPE_V2DI_V2DI_COUNT:
22001 case V2DI_FTYPE_V2DI_SI_COUNT:
22002 case V2SI_FTYPE_V2SI_V2SI_COUNT:
22003 case V2SI_FTYPE_V2SI_SI_COUNT:
22004 case V1DI_FTYPE_V1DI_V1DI_COUNT:
22005 case V1DI_FTYPE_V1DI_SI_COUNT:
22006 nargs = 2;
22007 last_arg_count = true;
22008 break;
22009 case UINT64_FTYPE_UINT64_UINT64:
22010 case UINT_FTYPE_UINT_UINT:
22011 case UINT_FTYPE_UINT_USHORT:
22012 case UINT_FTYPE_UINT_UCHAR:
22013 nargs = 2;
22014 break;
22015 case V2DI2TI_FTYPE_V2DI_INT:
22016 nargs = 2;
22017 rmode = V2DImode;
22018 nargs_constant = 1;
22019 break;
22020 case V8HI_FTYPE_V8HI_INT:
22021 case V4SI_FTYPE_V4SI_INT:
22022 case V4HI_FTYPE_V4HI_INT:
22023 case V4SF_FTYPE_V4SF_INT:
22024 case V2DI_FTYPE_V2DI_INT:
22025 case V2DF_FTYPE_V2DF_INT:
22026 nargs = 2;
22027 nargs_constant = 1;
22028 break;
22029 case V16QI_FTYPE_V16QI_V16QI_V16QI:
22030 case V4SF_FTYPE_V4SF_V4SF_V4SF:
22031 case V2DF_FTYPE_V2DF_V2DF_V2DF:
22032 nargs = 3;
22033 break;
22034 case V16QI_FTYPE_V16QI_V16QI_INT:
22035 case V8HI_FTYPE_V8HI_V8HI_INT:
22036 case V4SI_FTYPE_V4SI_V4SI_INT:
22037 case V4SF_FTYPE_V4SF_V4SF_INT:
22038 case V2DI_FTYPE_V2DI_V2DI_INT:
22039 case V2DF_FTYPE_V2DF_V2DF_INT:
22040 nargs = 3;
22041 nargs_constant = 1;
22042 break;
22043 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
22044 nargs = 3;
22045 rmode = V2DImode;
22046 nargs_constant = 1;
22047 break;
22048 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
22049 nargs = 3;
22050 rmode = DImode;
22051 nargs_constant = 1;
22052 break;
22053 case V2DI_FTYPE_V2DI_UINT_UINT:
22054 nargs = 3;
22055 nargs_constant = 2;
22056 break;
22057 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
22058 nargs = 4;
22059 nargs_constant = 2;
22060 break;
22061 default:
22062 gcc_unreachable ();
22065 gcc_assert (nargs <= ARRAY_SIZE (args));
22067 if (comparison != UNKNOWN)
22069 gcc_assert (nargs == 2);
22070 return ix86_expand_sse_compare (d, exp, target, swap);
22073 if (rmode == VOIDmode || rmode == tmode)
22075 if (optimize
22076 || target == 0
22077 || GET_MODE (target) != tmode
22078 || ! (*insn_p->operand[0].predicate) (target, tmode))
22079 target = gen_reg_rtx (tmode);
22080 real_target = target;
22082 else
22084 target = gen_reg_rtx (rmode);
22085 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
22088 for (i = 0; i < nargs; i++)
22090 tree arg = CALL_EXPR_ARG (exp, i);
22091 rtx op = expand_normal (arg);
22092 enum machine_mode mode = insn_p->operand[i + 1].mode;
22093 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
22095 if (last_arg_count && (i + 1) == nargs)
22097 /* SIMD shift insns take either an 8-bit immediate or a
22098 register as the count, but the builtin functions take an int.
22099 If the count doesn't match, put it in a register. */
22100 if (!match)
22102 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
22103 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
22104 op = copy_to_reg (op);
22107 else if ((nargs - i) <= nargs_constant)
22109 if (!match)
22110 switch (icode)
22112 case CODE_FOR_sse4_1_roundpd:
22113 case CODE_FOR_sse4_1_roundps:
22114 case CODE_FOR_sse4_1_roundsd:
22115 case CODE_FOR_sse4_1_roundss:
22116 case CODE_FOR_sse4_1_blendps:
22117 error ("the last argument must be a 4-bit immediate");
22118 return const0_rtx;
22120 case CODE_FOR_sse4_1_blendpd:
22121 error ("the last argument must be a 2-bit immediate");
22122 return const0_rtx;
22124 default:
22125 switch (nargs_constant)
22127 case 2:
22128 if ((nargs - i) == nargs_constant)
22130 error ("the next to last argument must be an 8-bit immediate");
22131 break;
22133 case 1:
22134 error ("the last argument must be an 8-bit immediate");
22135 break;
22136 default:
22137 gcc_unreachable ();
22139 return const0_rtx;
22142 else
22144 if (VECTOR_MODE_P (mode))
22145 op = safe_vector_operand (op, mode);
22147 /* If we aren't optimizing, only allow one memory operand to
22148 be generated. */
22149 if (memory_operand (op, mode))
22150 num_memory++;
22152 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
22154 if (optimize || !match || num_memory > 1)
22155 op = copy_to_mode_reg (mode, op);
22157 else
22159 op = copy_to_reg (op);
22160 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
22164 args[i].op = op;
22165 args[i].mode = mode;
22168 switch (nargs)
22170 case 1:
22171 pat = GEN_FCN (icode) (real_target, args[0].op);
22172 break;
22173 case 2:
22174 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
22175 break;
22176 case 3:
22177 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
22178 args[2].op);
22179 break;
22180 case 4:
22181 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
22182 args[2].op, args[3].op);
22183 break;
22184 default:
22185 gcc_unreachable ();
22188 if (! pat)
22189 return 0;
22191 emit_insn (pat);
22192 return target;
22195 /* Subroutine of ix86_expand_builtin to take care of special insns
22196 with a variable number of operands. */
22198 static rtx
22199 ix86_expand_special_args_builtin (const struct builtin_description *d,
22200 tree exp, rtx target)
22202 tree arg;
22203 rtx pat, op;
22204 unsigned int i, nargs, arg_adjust, memory;
22205 struct
22207 rtx op;
22208 enum machine_mode mode;
22209 } args[2];
22210 enum insn_code icode = d->icode;
22211 bool last_arg_constant = false;
22212 const struct insn_data *insn_p = &insn_data[icode];
22213 enum machine_mode tmode = insn_p->operand[0].mode;
22214 enum { load, store } class;
22216 switch ((enum ix86_special_builtin_type) d->flag)
22218 case VOID_FTYPE_VOID:
22219 emit_insn (GEN_FCN (icode) (target));
22220 return 0;
22221 case V2DI_FTYPE_PV2DI:
22222 case V16QI_FTYPE_PCCHAR:
22223 case V4SF_FTYPE_PCFLOAT:
22224 case V2DF_FTYPE_PCDOUBLE:
22225 nargs = 1;
22226 class = load;
22227 memory = 0;
22228 break;
22229 case VOID_FTYPE_PV2SF_V4SF:
22230 case VOID_FTYPE_PV2DI_V2DI:
22231 case VOID_FTYPE_PCHAR_V16QI:
22232 case VOID_FTYPE_PFLOAT_V4SF:
22233 case VOID_FTYPE_PDOUBLE_V2DF:
22234 case VOID_FTYPE_PDI_DI:
22235 case VOID_FTYPE_PINT_INT:
22236 nargs = 1;
22237 class = store;
22238 /* Reserve memory operand for target. */
22239 memory = ARRAY_SIZE (args);
22240 break;
22241 case V4SF_FTYPE_V4SF_PCV2SF:
22242 case V2DF_FTYPE_V2DF_PCDOUBLE:
22243 nargs = 2;
22244 class = load;
22245 memory = 1;
22246 break;
22247 default:
22248 gcc_unreachable ();
22251 gcc_assert (nargs <= ARRAY_SIZE (args));
22253 if (class == store)
22255 arg = CALL_EXPR_ARG (exp, 0);
22256 op = expand_normal (arg);
22257 gcc_assert (target == 0);
22258 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
22259 arg_adjust = 1;
22261 else
22263 arg_adjust = 0;
22264 if (optimize
22265 || target == 0
22266 || GET_MODE (target) != tmode
22267 || ! (*insn_p->operand[0].predicate) (target, tmode))
22268 target = gen_reg_rtx (tmode);
22271 for (i = 0; i < nargs; i++)
22273 enum machine_mode mode = insn_p->operand[i + 1].mode;
22274 bool match;
22276 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
22277 op = expand_normal (arg);
22278 match = (*insn_p->operand[i + 1].predicate) (op, mode);
22280 if (last_arg_constant && (i + 1) == nargs)
22282 if (!match)
22283 switch (icode)
22285 default:
22286 error ("the last argument must be an 8-bit immediate");
22287 return const0_rtx;
22290 else
22292 if (i == memory)
22294 /* This must be the memory operand. */
22295 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
22296 gcc_assert (GET_MODE (op) == mode
22297 || GET_MODE (op) == VOIDmode);
22299 else
22301 /* This must be a register. */
22302 if (VECTOR_MODE_P (mode))
22303 op = safe_vector_operand (op, mode);
22305 gcc_assert (GET_MODE (op) == mode
22306 || GET_MODE (op) == VOIDmode);
22307 op = copy_to_mode_reg (mode, op);
22311 args[i].op = op;
22312 args[i].mode = mode;
22315 switch (nargs)
22317 case 1:
22318 pat = GEN_FCN (icode) (target, args[0].op);
22319 break;
22320 case 2:
22321 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
22322 break;
22323 default:
22324 gcc_unreachable ();
22327 if (! pat)
22328 return 0;
22329 emit_insn (pat);
22330 return class == store ? 0 : target;
22333 /* Return the integer constant in ARG. Constrain it to be in the range
22334 of the subparts of VEC_TYPE; issue an error if not. */
22336 static int
22337 get_element_number (tree vec_type, tree arg)
22339 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
22341 if (!host_integerp (arg, 1)
22342 || (elt = tree_low_cst (arg, 1), elt > max))
22344 error ("selector must be an integer constant in the range 0..%wi", max);
22345 return 0;
22348 return elt;
22351 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
22352 ix86_expand_vector_init. We DO have language-level syntax for this, in
22353 the form of (type){ init-list }. Except that since we can't place emms
22354 instructions from inside the compiler, we can't allow the use of MMX
22355 registers unless the user explicitly asks for it. So we do *not* define
22356 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
22357 we have builtins invoked by mmintrin.h that give us license to emit
22358 these sorts of instructions. */
22360 static rtx
22361 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
22363 enum machine_mode tmode = TYPE_MODE (type);
22364 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
22365 int i, n_elt = GET_MODE_NUNITS (tmode);
22366 rtvec v = rtvec_alloc (n_elt);
22368 gcc_assert (VECTOR_MODE_P (tmode));
22369 gcc_assert (call_expr_nargs (exp) == n_elt);
22371 for (i = 0; i < n_elt; ++i)
22373 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
22374 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
22377 if (!target || !register_operand (target, tmode))
22378 target = gen_reg_rtx (tmode);
22380 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
22381 return target;
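/* User-level sketch of the wrappers discussed above: with -mmmx,
   <mmintrin.h> defines _mm_set_pi32 in terms of
   __builtin_ia32_vec_init_v2si, so MMX register use stays explicitly
   opt-in.  */
#include <mmintrin.h>

static __m64
pack_pair (int hi, int lo)
{
  return _mm_set_pi32 (hi, lo);	/* IX86_BUILTIN_VEC_INIT_V2SI */
}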
22384 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
22385 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
22386 had a language-level syntax for referencing vector elements. */
22388 static rtx
22389 ix86_expand_vec_ext_builtin (tree exp, rtx target)
22391 enum machine_mode tmode, mode0;
22392 tree arg0, arg1;
22393 int elt;
22394 rtx op0;
22396 arg0 = CALL_EXPR_ARG (exp, 0);
22397 arg1 = CALL_EXPR_ARG (exp, 1);
22399 op0 = expand_normal (arg0);
22400 elt = get_element_number (TREE_TYPE (arg0), arg1);
22402 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
22403 mode0 = TYPE_MODE (TREE_TYPE (arg0));
22404 gcc_assert (VECTOR_MODE_P (mode0));
22406 op0 = force_reg (mode0, op0);
22408 if (optimize || !target || !register_operand (target, tmode))
22409 target = gen_reg_rtx (tmode);
22411 ix86_expand_vector_extract (true, target, op0, elt);
22413 return target;
22416 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
22417 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
22418 a language-level syntax for referencing vector elements. */
22420 static rtx
22421 ix86_expand_vec_set_builtin (tree exp)
22423 enum machine_mode tmode, mode1;
22424 tree arg0, arg1, arg2;
22425 int elt;
22426 rtx op0, op1, target;
22428 arg0 = CALL_EXPR_ARG (exp, 0);
22429 arg1 = CALL_EXPR_ARG (exp, 1);
22430 arg2 = CALL_EXPR_ARG (exp, 2);
22432 tmode = TYPE_MODE (TREE_TYPE (arg0));
22433 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
22434 gcc_assert (VECTOR_MODE_P (tmode));
22436 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
22437 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
22438 elt = get_element_number (TREE_TYPE (arg0), arg2);
22440 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
22441 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
22443 op0 = force_reg (tmode, op0);
22444 op1 = force_reg (mode1, op1);
22446 /* OP0 is the source of these builtin functions and shouldn't be
22447 modified. Create a copy, use it and return it as target. */
22448 target = gen_reg_rtx (tmode);
22449 emit_move_insn (target, op0);
22450 ix86_expand_vector_set (true, target, op1, elt);
22452 return target;
22455 /* Expand an expression EXP that calls a built-in function,
22456 with result going to TARGET if that's convenient
22457 (and in mode MODE if that's convenient).
22458 SUBTARGET may be used as the target for computing one of EXP's operands.
22459 IGNORE is nonzero if the value is to be ignored. */
22461 static rtx
22462 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
22463 enum machine_mode mode ATTRIBUTE_UNUSED,
22464 int ignore ATTRIBUTE_UNUSED)
22466 const struct builtin_description *d;
22467 size_t i;
22468 enum insn_code icode;
22469 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
22470 tree arg0, arg1, arg2;
22471 rtx op0, op1, op2, pat;
22472 enum machine_mode mode0, mode1, mode2;
22473 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
22475 /* Determine whether the builtin function is available under the current ISA.
22476 Originally the builtin was not created if it wasn't applicable to the
22477 current ISA based on the command-line switches. With function-specific
22478 options, we need to check in the context of the function making the call
22479 whether it is supported. */
22480 if (ix86_builtins_isa[fcode]
22481 && !(ix86_builtins_isa[fcode] & ix86_isa_flags))
22483 char *opts = ix86_target_string (ix86_builtins_isa[fcode], 0, NULL,
22484 NULL, NULL, false);
22486 if (!opts)
22487 error ("%qE needs unknown isa option", fndecl);
22488 else
22490 gcc_assert (opts != NULL);
22491 error ("%qE needs isa option %s", fndecl, opts);
22492 free (opts);
22494 return const0_rtx;
22497 switch (fcode)
22499 case IX86_BUILTIN_MASKMOVQ:
22500 case IX86_BUILTIN_MASKMOVDQU:
22501 icode = (fcode == IX86_BUILTIN_MASKMOVQ
22502 ? CODE_FOR_mmx_maskmovq
22503 : CODE_FOR_sse2_maskmovdqu);
22504 /* Note the arg order is different from the operand order. */
22505 arg1 = CALL_EXPR_ARG (exp, 0);
22506 arg2 = CALL_EXPR_ARG (exp, 1);
22507 arg0 = CALL_EXPR_ARG (exp, 2);
22508 op0 = expand_normal (arg0);
22509 op1 = expand_normal (arg1);
22510 op2 = expand_normal (arg2);
22511 mode0 = insn_data[icode].operand[0].mode;
22512 mode1 = insn_data[icode].operand[1].mode;
22513 mode2 = insn_data[icode].operand[2].mode;
22515 op0 = force_reg (Pmode, op0);
22516 op0 = gen_rtx_MEM (mode1, op0);
22518 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
22519 op0 = copy_to_mode_reg (mode0, op0);
22520 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
22521 op1 = copy_to_mode_reg (mode1, op1);
22522 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
22523 op2 = copy_to_mode_reg (mode2, op2);
22524 pat = GEN_FCN (icode) (op0, op1, op2);
22525 if (! pat)
22526 return 0;
22527 emit_insn (pat);
22528 return 0;
22530 case IX86_BUILTIN_LDMXCSR:
22531 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
22532 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
22533 emit_move_insn (target, op0);
22534 emit_insn (gen_sse_ldmxcsr (target));
22535 return 0;
22537 case IX86_BUILTIN_STMXCSR:
22538 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
22539 emit_insn (gen_sse_stmxcsr (target));
22540 return copy_to_mode_reg (SImode, target);
22542 case IX86_BUILTIN_CLFLUSH:
22543 arg0 = CALL_EXPR_ARG (exp, 0);
22544 op0 = expand_normal (arg0);
22545 icode = CODE_FOR_sse2_clflush;
22546 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
22547 op0 = copy_to_mode_reg (Pmode, op0);
22549 emit_insn (gen_sse2_clflush (op0));
22550 return 0;
22552 case IX86_BUILTIN_MONITOR:
22553 arg0 = CALL_EXPR_ARG (exp, 0);
22554 arg1 = CALL_EXPR_ARG (exp, 1);
22555 arg2 = CALL_EXPR_ARG (exp, 2);
22556 op0 = expand_normal (arg0);
22557 op1 = expand_normal (arg1);
22558 op2 = expand_normal (arg2);
22559 if (!REG_P (op0))
22560 op0 = copy_to_mode_reg (Pmode, op0);
22561 if (!REG_P (op1))
22562 op1 = copy_to_mode_reg (SImode, op1);
22563 if (!REG_P (op2))
22564 op2 = copy_to_mode_reg (SImode, op2);
22565 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
22566 return 0;
22568 case IX86_BUILTIN_MWAIT:
22569 arg0 = CALL_EXPR_ARG (exp, 0);
22570 arg1 = CALL_EXPR_ARG (exp, 1);
22571 op0 = expand_normal (arg0);
22572 op1 = expand_normal (arg1);
22573 if (!REG_P (op0))
22574 op0 = copy_to_mode_reg (SImode, op0);
22575 if (!REG_P (op1))
22576 op1 = copy_to_mode_reg (SImode, op1);
22577 emit_insn (gen_sse3_mwait (op0, op1));
22578 return 0;
22580 case IX86_BUILTIN_VEC_INIT_V2SI:
22581 case IX86_BUILTIN_VEC_INIT_V4HI:
22582 case IX86_BUILTIN_VEC_INIT_V8QI:
22583 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
22585 case IX86_BUILTIN_VEC_EXT_V2DF:
22586 case IX86_BUILTIN_VEC_EXT_V2DI:
22587 case IX86_BUILTIN_VEC_EXT_V4SF:
22588 case IX86_BUILTIN_VEC_EXT_V4SI:
22589 case IX86_BUILTIN_VEC_EXT_V8HI:
22590 case IX86_BUILTIN_VEC_EXT_V2SI:
22591 case IX86_BUILTIN_VEC_EXT_V4HI:
22592 case IX86_BUILTIN_VEC_EXT_V16QI:
22593 return ix86_expand_vec_ext_builtin (exp, target);
22595 case IX86_BUILTIN_VEC_SET_V2DI:
22596 case IX86_BUILTIN_VEC_SET_V4SF:
22597 case IX86_BUILTIN_VEC_SET_V4SI:
22598 case IX86_BUILTIN_VEC_SET_V8HI:
22599 case IX86_BUILTIN_VEC_SET_V4HI:
22600 case IX86_BUILTIN_VEC_SET_V16QI:
22601 return ix86_expand_vec_set_builtin (exp);
22603 case IX86_BUILTIN_INFQ:
22605 REAL_VALUE_TYPE inf;
22606 rtx tmp;
22608 real_inf (&inf);
22609 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
22611 tmp = validize_mem (force_const_mem (mode, tmp));
22613 if (target == 0)
22614 target = gen_reg_rtx (mode);
22616 emit_move_insn (target, tmp);
22617 return target;
22620 default:
22621 break;
22624 for (i = 0, d = bdesc_special_args;
22625 i < ARRAY_SIZE (bdesc_special_args);
22626 i++, d++)
22627 if (d->code == fcode)
22628 return ix86_expand_special_args_builtin (d, exp, target);
22630 for (i = 0, d = bdesc_args;
22631 i < ARRAY_SIZE (bdesc_args);
22632 i++, d++)
22633 if (d->code == fcode)
22634 switch (fcode)
22636 case IX86_BUILTIN_FABSQ:
22637 case IX86_BUILTIN_COPYSIGNQ:
22638 if (!TARGET_SSE2)
22639 /* Emit a normal call if SSE2 isn't available. */
22640 return expand_call (exp, target, ignore);
22641 default:
22642 return ix86_expand_args_builtin (d, exp, target);
22645 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22646 if (d->code == fcode)
22647 return ix86_expand_sse_comi (d, exp, target);
22649 for (i = 0, d = bdesc_pcmpestr;
22650 i < ARRAY_SIZE (bdesc_pcmpestr);
22651 i++, d++)
22652 if (d->code == fcode)
22653 return ix86_expand_sse_pcmpestr (d, exp, target);
22655 for (i = 0, d = bdesc_pcmpistr;
22656 i < ARRAY_SIZE (bdesc_pcmpistr);
22657 i++, d++)
22658 if (d->code == fcode)
22659 return ix86_expand_sse_pcmpistr (d, exp, target);
22661 for (i = 0, d = bdesc_multi_arg; i < ARRAY_SIZE (bdesc_multi_arg); i++, d++)
22662 if (d->code == fcode)
22663 return ix86_expand_multi_arg_builtin (d->icode, exp, target,
22664 (enum multi_arg_type)d->flag,
22665 d->comparison);
22667 gcc_unreachable ();
22670 /* Returns a function decl for a vectorized version of the builtin function
22671 with builtin function code FN and the result vector type TYPE, or NULL_TREE
22672 if it is not available. */
22674 static tree
22675 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
22676 tree type_in)
22678 enum machine_mode in_mode, out_mode;
22679 int in_n, out_n;
22681 if (TREE_CODE (type_out) != VECTOR_TYPE
22682 || TREE_CODE (type_in) != VECTOR_TYPE)
22683 return NULL_TREE;
22685 out_mode = TYPE_MODE (TREE_TYPE (type_out));
22686 out_n = TYPE_VECTOR_SUBPARTS (type_out);
22687 in_mode = TYPE_MODE (TREE_TYPE (type_in));
22688 in_n = TYPE_VECTOR_SUBPARTS (type_in);
22690 switch (fn)
22692 case BUILT_IN_SQRT:
22693 if (out_mode == DFmode && out_n == 2
22694 && in_mode == DFmode && in_n == 2)
22695 return ix86_builtins[IX86_BUILTIN_SQRTPD];
22696 break;
22698 case BUILT_IN_SQRTF:
22699 if (out_mode == SFmode && out_n == 4
22700 && in_mode == SFmode && in_n == 4)
22701 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
22702 break;
22704 case BUILT_IN_LRINT:
22705 if (out_mode == SImode && out_n == 4
22706 && in_mode == DFmode && in_n == 2)
22707 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
22708 break;
22710 case BUILT_IN_LRINTF:
22711 if (out_mode == SImode && out_n == 4
22712 && in_mode == SFmode && in_n == 4)
22713 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
22714 break;
22716 default:
22720 /* Dispatch to a handler for a vectorization library. */
22721 if (ix86_veclib_handler)
22722 return (*ix86_veclib_handler)(fn, type_out, type_in);
22724 return NULL_TREE;
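/* Illustrative sketch of how this hook is used: for a loop such as

     double a[256], b[256];
     for (i = 0; i < 256; i++)
       a[i] = __builtin_sqrt (b[i]);

   the vectorizer asks for BUILT_IN_SQRT with type_in and type_out
   both a two-element DFmode vector, receives the decl of
   IX86_BUILTIN_SQRTPD, and so emits one sqrtpd per two iterations.  */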
22727 /* Handler for an SVML-style interface to
22728 a library with vectorized intrinsics. */
22730 static tree
22731 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
22733 char name[20];
22734 tree fntype, new_fndecl, args;
22735 unsigned arity;
22736 const char *bname;
22737 enum machine_mode el_mode, in_mode;
22738 int n, in_n;
22740 /* SVML is suitable for unsafe math only. */
22741 if (!flag_unsafe_math_optimizations)
22742 return NULL_TREE;
22744 el_mode = TYPE_MODE (TREE_TYPE (type_out));
22745 n = TYPE_VECTOR_SUBPARTS (type_out);
22746 in_mode = TYPE_MODE (TREE_TYPE (type_in));
22747 in_n = TYPE_VECTOR_SUBPARTS (type_in);
22748 if (el_mode != in_mode
22749 || n != in_n)
22750 return NULL_TREE;
22752 switch (fn)
22754 case BUILT_IN_EXP:
22755 case BUILT_IN_LOG:
22756 case BUILT_IN_LOG10:
22757 case BUILT_IN_POW:
22758 case BUILT_IN_TANH:
22759 case BUILT_IN_TAN:
22760 case BUILT_IN_ATAN:
22761 case BUILT_IN_ATAN2:
22762 case BUILT_IN_ATANH:
22763 case BUILT_IN_CBRT:
22764 case BUILT_IN_SINH:
22765 case BUILT_IN_SIN:
22766 case BUILT_IN_ASINH:
22767 case BUILT_IN_ASIN:
22768 case BUILT_IN_COSH:
22769 case BUILT_IN_COS:
22770 case BUILT_IN_ACOSH:
22771 case BUILT_IN_ACOS:
22772 if (el_mode != DFmode || n != 2)
22773 return NULL_TREE;
22774 break;
22776 case BUILT_IN_EXPF:
22777 case BUILT_IN_LOGF:
22778 case BUILT_IN_LOG10F:
22779 case BUILT_IN_POWF:
22780 case BUILT_IN_TANHF:
22781 case BUILT_IN_TANF:
22782 case BUILT_IN_ATANF:
22783 case BUILT_IN_ATAN2F:
22784 case BUILT_IN_ATANHF:
22785 case BUILT_IN_CBRTF:
22786 case BUILT_IN_SINHF:
22787 case BUILT_IN_SINF:
22788 case BUILT_IN_ASINHF:
22789 case BUILT_IN_ASINF:
22790 case BUILT_IN_COSHF:
22791 case BUILT_IN_COSF:
22792 case BUILT_IN_ACOSHF:
22793 case BUILT_IN_ACOSF:
22794 if (el_mode != SFmode || n != 4)
22795 return NULL_TREE;
22796 break;
22798 default:
22799 return NULL_TREE;
22802 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
22804 if (fn == BUILT_IN_LOGF)
22805 strcpy (name, "vmlsLn4");
22806 else if (fn == BUILT_IN_LOG)
22807 strcpy (name, "vmldLn2");
22808 else if (n == 4)
22810 sprintf (name, "vmls%s", bname+10);
22811 name[strlen (name)-1] = '4';
22813 else
22814 sprintf (name, "vmld%s2", bname+10);
22816 /* Convert to uppercase. */
22817 name[4] &= ~0x20;
22819 arity = 0;
22820 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
22821 args = TREE_CHAIN (args))
22822 arity++;
22824 if (arity == 1)
22825 fntype = build_function_type_list (type_out, type_in, NULL);
22826 else
22827 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
22829 /* Build a function declaration for the vectorized function. */
22830 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
22831 TREE_PUBLIC (new_fndecl) = 1;
22832 DECL_EXTERNAL (new_fndecl) = 1;
22833 DECL_IS_NOVOPS (new_fndecl) = 1;
22834 TREE_READONLY (new_fndecl) = 1;
22836 return new_fndecl;
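/* Worked example of the mangling above: for BUILT_IN_SINF, bname is
   "__builtin_sinf" and bname+10 is "sinf", so the sprintf produces
   "vmlssinf"; overwriting the last character gives "vmlssin4", and
   uppercasing name[4] yields the SVML entry point "vmlsSin4".
   BUILT_IN_SIN maps to "vmldSin2" the same way.  */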
22839 /* Handler for an ACML-style interface to
22840 a library with vectorized intrinsics. */
22842 static tree
22843 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
22845 char name[20] = "__vr.._";
22846 tree fntype, new_fndecl, args;
22847 unsigned arity;
22848 const char *bname;
22849 enum machine_mode el_mode, in_mode;
22850 int n, in_n;
22852 /* ACML is 64-bit only and suitable for unsafe math only, as it
22853 does not correctly support the parts of IEEE arithmetic that
22854 require full precision, such as denormals. */
22855 if (!TARGET_64BIT
22856 || !flag_unsafe_math_optimizations)
22857 return NULL_TREE;
22859 el_mode = TYPE_MODE (TREE_TYPE (type_out));
22860 n = TYPE_VECTOR_SUBPARTS (type_out);
22861 in_mode = TYPE_MODE (TREE_TYPE (type_in));
22862 in_n = TYPE_VECTOR_SUBPARTS (type_in);
22863 if (el_mode != in_mode
22864 || n != in_n)
22865 return NULL_TREE;
22867 switch (fn)
22869 case BUILT_IN_SIN:
22870 case BUILT_IN_COS:
22871 case BUILT_IN_EXP:
22872 case BUILT_IN_LOG:
22873 case BUILT_IN_LOG2:
22874 case BUILT_IN_LOG10:
22875 name[4] = 'd';
22876 name[5] = '2';
22877 if (el_mode != DFmode
22878 || n != 2)
22879 return NULL_TREE;
22880 break;
22882 case BUILT_IN_SINF:
22883 case BUILT_IN_COSF:
22884 case BUILT_IN_EXPF:
22885 case BUILT_IN_POWF:
22886 case BUILT_IN_LOGF:
22887 case BUILT_IN_LOG2F:
22888 case BUILT_IN_LOG10F:
22889 name[4] = 's';
22890 name[5] = '4';
22891 if (el_mode != SFmode
22892 || n != 4)
22893 return NULL_TREE;
22894 break;
22896 default:
22897 return NULL_TREE;
22900 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
22901 sprintf (name + 7, "%s", bname+10);
22903 arity = 0;
22904 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
22905 args = TREE_CHAIN (args))
22906 arity++;
22908 if (arity == 1)
22909 fntype = build_function_type_list (type_out, type_in, NULL);
22910 else
22911 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
22913 /* Build a function declaration for the vectorized function. */
22914 new_fndecl = build_decl (FUNCTION_DECL, get_identifier (name), fntype);
22915 TREE_PUBLIC (new_fndecl) = 1;
22916 DECL_EXTERNAL (new_fndecl) = 1;
22917 DECL_IS_NOVOPS (new_fndecl) = 1;
22918 TREE_READONLY (new_fndecl) = 1;
22920 return new_fndecl;
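/* Worked example of the mangling above: for BUILT_IN_SINF the
   "__vr.._" template becomes "__vrs4_", and appending bname+10
   ("sinf") gives the ACML routine name "__vrs4_sinf".  BUILT_IN_LOG
   likewise yields "__vrd2_log".  */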
22924 /* Returns a decl of a function that implements conversion of the
22925 input vector of type TYPE, or NULL_TREE if it is not available. */
22927 static tree
22928 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
22930 if (TREE_CODE (type) != VECTOR_TYPE)
22931 return NULL_TREE;
22933 switch (code)
22935 case FLOAT_EXPR:
22936 switch (TYPE_MODE (type))
22938 case V4SImode:
22939 return ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
22940 default:
22941 return NULL_TREE;
22944 case FIX_TRUNC_EXPR:
22945 switch (TYPE_MODE (type))
22947 case V4SFmode:
22948 return ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
22949 default:
22950 return NULL_TREE;
22952 default:
22953 return NULL_TREE;
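/* Illustrative sketch: a vectorizable int-to-float conversion such as

     int src[256]; float dst[256];
     for (i = 0; i < 256; i++)
       dst[i] = (float) src[i];

   queries this hook with code == FLOAT_EXPR and a V4SImode input
   type, receives IX86_BUILTIN_CVTDQ2PS, and converts four elements
   at a time with cvtdq2ps.  */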
22958 /* Returns the decl of a target-specific builtin that implements the
22959 reciprocal of the function, or NULL_TREE if not available. */
22961 static tree
22962 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
22963 bool sqrt ATTRIBUTE_UNUSED)
22965 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
22966 && flag_finite_math_only && !flag_trapping_math
22967 && flag_unsafe_math_optimizations))
22968 return NULL_TREE;
22970 if (md_fn)
22971 /* Machine dependent builtins. */
22972 switch (fn)
22974 /* Vectorized version of sqrt to rsqrt conversion. */
22975 case IX86_BUILTIN_SQRTPS_NR:
22976 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
22978 default:
22979 return NULL_TREE;
22981 else
22982 /* Normal builtins. */
22983 switch (fn)
22985 /* Sqrt to rsqrt conversion. */
22986 case BUILT_IN_SQRTF:
22987 return ix86_builtins[IX86_BUILTIN_RSQRTF];
22989 default:
22990 return NULL_TREE;
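/* Illustrative sketch: with -mrecip plus the unsafe/finite math
   flags in effect, an expression like 1.0f / __builtin_sqrtf (x)
   can be expanded through IX86_BUILTIN_RSQRTF, i.e. as a rsqrtss
   estimate (refined in the vector case by the Newton-Raphson "_NR"
   variants) instead of a full sqrtss followed by divss.  */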
22994 /* Store OPERAND into memory after reload has completed. This means
22995 that we can't easily use assign_stack_local. */
22996 rtx
22997 ix86_force_to_memory (enum machine_mode mode, rtx operand)
22999 rtx result;
23001 gcc_assert (reload_completed);
23002 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
23004 result = gen_rtx_MEM (mode,
23005 gen_rtx_PLUS (Pmode,
23006 stack_pointer_rtx,
23007 GEN_INT (-RED_ZONE_SIZE)));
23008 emit_move_insn (result, operand);
23010 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
23012 switch (mode)
23014 case HImode:
23015 case SImode:
23016 operand = gen_lowpart (DImode, operand);
23017 /* FALLTHRU */
23018 case DImode:
23019 emit_insn (
23020 gen_rtx_SET (VOIDmode,
23021 gen_rtx_MEM (DImode,
23022 gen_rtx_PRE_DEC (DImode,
23023 stack_pointer_rtx)),
23024 operand));
23025 break;
23026 default:
23027 gcc_unreachable ();
23029 result = gen_rtx_MEM (mode, stack_pointer_rtx);
23031 else
23033 switch (mode)
23035 case DImode:
23037 rtx operands[2];
23038 split_di (&operand, 1, operands, operands + 1);
23039 emit_insn (
23040 gen_rtx_SET (VOIDmode,
23041 gen_rtx_MEM (SImode,
23042 gen_rtx_PRE_DEC (Pmode,
23043 stack_pointer_rtx)),
23044 operands[1]));
23045 emit_insn (
23046 gen_rtx_SET (VOIDmode,
23047 gen_rtx_MEM (SImode,
23048 gen_rtx_PRE_DEC (Pmode,
23049 stack_pointer_rtx)),
23050 operands[0]));
23052 break;
23053 case HImode:
23054 /* Store HImodes as SImodes. */
23055 operand = gen_lowpart (SImode, operand);
23056 /* FALLTHRU */
23057 case SImode:
23058 emit_insn (
23059 gen_rtx_SET (VOIDmode,
23060 gen_rtx_MEM (GET_MODE (operand),
23061 gen_rtx_PRE_DEC (SImode,
23062 stack_pointer_rtx)),
23063 operand));
23064 break;
23065 default:
23066 gcc_unreachable ();
23068 result = gen_rtx_MEM (mode, stack_pointer_rtx);
23070 return result;
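/* Illustrative example, assuming the usual 128 byte RED_ZONE_SIZE:
   on a 64-bit target with a red zone, forcing a DImode register to
   memory emits a plain store to -128(%rsp) and returns that MEM;
   no stack pointer adjustment is required, which is exactly why the
   red zone is used here.  */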
23073 /* Free operand from the memory. */
23074 void
23075 ix86_free_from_memory (enum machine_mode mode)
23077 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
23079 int size;
23081 if (mode == DImode || TARGET_64BIT)
23082 size = 8;
23083 else
23084 size = 4;
23085 /* Use LEA to deallocate stack space. In peephole2 it will be converted
23086 to a pop or add instruction if registers are available. */
23087 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
23088 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
23089 GEN_INT (size))));
23093 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
23094 QImode must go into class Q_REGS.
23095 Narrow ALL_REGS to GENERAL_REGS. This supports allowing movsf and
23096 movdf to do mem-to-mem moves through integer regs. */
23097 enum reg_class
23098 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
23100 enum machine_mode mode = GET_MODE (x);
23102 /* We're only allowed to return a subclass of CLASS. Many of the
23103 following checks fail for NO_REGS, so eliminate that early. */
23104 if (regclass == NO_REGS)
23105 return NO_REGS;
23107 /* All classes can load zeros. */
23108 if (x == CONST0_RTX (mode))
23109 return regclass;
23111 /* Force constants into memory if we are loading a (nonzero) constant into
23112 an MMX or SSE register. This is because there are no MMX/SSE instructions
23113 to load from a constant. */
23114 if (CONSTANT_P (x)
23115 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
23116 return NO_REGS;
23118 /* Prefer SSE regs only, if we can use them for math. */
23119 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
23120 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
23122 /* Floating-point constants need more complex checks. */
23123 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
23125 /* General regs can load everything. */
23126 if (reg_class_subset_p (regclass, GENERAL_REGS))
23127 return regclass;
23129 /* Floats can load 0 and 1 plus some others. Note that we eliminated
23130 zero above. We only want to wind up preferring 80387 registers if
23131 we plan on doing computation with them. */
23132 if (TARGET_80387
23133 && standard_80387_constant_p (x))
23135 /* Limit class to non-sse. */
23136 if (regclass == FLOAT_SSE_REGS)
23137 return FLOAT_REGS;
23138 if (regclass == FP_TOP_SSE_REGS)
23139 return FP_TOP_REG;
23140 if (regclass == FP_SECOND_SSE_REGS)
23141 return FP_SECOND_REG;
23142 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
23143 return regclass;
23146 return NO_REGS;
23149 /* Generally when we see PLUS here, it's the function invariant
23150 (plus soft-fp const_int), which can only be computed into general
23151 regs. */
23152 if (GET_CODE (x) == PLUS)
23153 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
23155 /* QImode constants are easy to load, but non-constant QImode data
23156 must go into Q_REGS. */
23157 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
23159 if (reg_class_subset_p (regclass, Q_REGS))
23160 return regclass;
23161 if (reg_class_subset_p (Q_REGS, regclass))
23162 return Q_REGS;
23163 return NO_REGS;
23166 return regclass;
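/* Illustrative example: reloading a nonzero DFmode CONST_DOUBLE such
   as 1.0 into an SSE class returns NO_REGS above, so the constant is
   forced into the constant pool and loaded with an ordinary memory
   move - there is no SSE instruction that loads an immediate.  */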
23169 /* Discourage putting floating-point values in SSE registers unless
23170 SSE math is being used, and likewise for the 387 registers. */
23171 enum reg_class
23172 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
23174 enum machine_mode mode = GET_MODE (x);
23176 /* Restrict the output reload class to the register bank that we are doing
23177 math on. If we would like not to return a subset of CLASS, reject this
23178 alternative: if reload cannot do this, it will still use its choice. */
23179 mode = GET_MODE (x);
23180 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
23181 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
23183 if (X87_FLOAT_MODE_P (mode))
23185 if (regclass == FP_TOP_SSE_REGS)
23186 return FP_TOP_REG;
23187 else if (regclass == FP_SECOND_SSE_REGS)
23188 return FP_SECOND_REG;
23189 else
23190 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
23193 return regclass;
23196 static enum reg_class
23197 ix86_secondary_reload (bool in_p, rtx x, enum reg_class class,
23198 enum machine_mode mode,
23199 secondary_reload_info *sri ATTRIBUTE_UNUSED)
23201 /* QImode spills from non-QI registers require an
23202 intermediate register on 32-bit targets. */
23203 if (!in_p && mode == QImode && !TARGET_64BIT
23204 && (class == GENERAL_REGS
23205 || class == LEGACY_REGS
23206 || class == INDEX_REGS))
23208 int regno;
23210 if (REG_P (x))
23211 regno = REGNO (x);
23212 else
23213 regno = -1;
23215 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
23216 regno = true_regnum (x);
23218 /* Return Q_REGS if the operand is in memory. */
23219 if (regno == -1)
23220 return Q_REGS;
23223 return NO_REGS;
23226 /* If we are copying between general and FP registers, we need a memory
23227 location. The same is true for SSE and MMX registers.
23229 To optimize register_move_cost performance, allow an inline variant.
23231 The macro can't work reliably when one of the CLASSES is a class containing
23232 registers from multiple units (SSE, MMX, integer). We avoid this by never
23233 combining those units in a single alternative in the machine description.
23234 Ensure that this constraint holds to avoid unexpected surprises.
23236 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
23237 enforce these sanity checks. */
23239 static inline int
23240 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
23241 enum machine_mode mode, int strict)
23243 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
23244 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
23245 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
23246 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
23247 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
23248 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
23250 gcc_assert (!strict);
23251 return true;
23254 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
23255 return true;
23257 /* ??? This is a lie. We do have moves between mmx/general, and between
23258 mmx/sse2. But by saying we need secondary memory we discourage the
23259 register allocator from using the mmx registers unless needed. */
23260 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
23261 return true;
23263 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
23265 /* SSE1 doesn't have any direct moves from other classes. */
23266 if (!TARGET_SSE2)
23267 return true;
23269 /* If the target says that inter-unit moves are more expensive
23270 than moving through memory, then don't generate them. */
23271 if (!TARGET_INTER_UNIT_MOVES)
23272 return true;
23274 /* Between SSE and general, we have moves no larger than word size. */
23275 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
23276 return true;
23279 return false;
23282 int
23283 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
23284 enum machine_mode mode, int strict)
23286 return inline_secondary_memory_needed (class1, class2, mode, strict);
23289 /* Return true if the registers in CLASS cannot represent the change from
23290 modes FROM to TO. */
23292 bool
23293 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
23294 enum reg_class regclass)
23296 if (from == to)
23297 return false;
23299 /* x87 registers can't do subreg at all, as all values are reformatted
23300 to extended precision. */
23301 if (MAYBE_FLOAT_CLASS_P (regclass))
23302 return true;
23304 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
23306 /* Vector registers do not support QI or HImode loads. If we don't
23307 disallow a change to these modes, reload will assume it's ok to
23308 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
23309 the vec_dupv4hi pattern. */
23310 if (GET_MODE_SIZE (from) < 4)
23311 return true;
23313 /* Vector registers do not support subreg with nonzero offsets, which
23314 are otherwise valid for integer registers. Since we can't see
23315 whether we have a nonzero offset from here, prohibit all
23316 nonparadoxical subregs changing size. */
23317 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
23318 return true;
23321 return false;
23324 /* Return the cost of moving data of mode M between a
23325 register and memory. A value of 2 is the default; this cost is
23326 relative to those in `REGISTER_MOVE_COST'.
23328 This function is used extensively by register_move_cost, which is used
23329 to build tables at startup, so make it inline in that case.
23330 When IN is 2, return the maximum of the in and out move costs.
23332 If moving between registers and memory is more expensive than
23333 between two registers, you should define this macro to express the
23334 relative cost.
23336 Also model the increased cost of moving QImode registers in
23337 non-Q_REGS classes. */
23339 static inline int
23340 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
23341 int in)
23343 int cost;
23344 if (FLOAT_CLASS_P (regclass))
23346 int index;
23347 switch (mode)
23349 case SFmode:
23350 index = 0;
23351 break;
23352 case DFmode:
23353 index = 1;
23354 break;
23355 case XFmode:
23356 index = 2;
23357 break;
23358 default:
23359 return 100;
23361 if (in == 2)
23362 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
23363 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
23365 if (SSE_CLASS_P (regclass))
23367 int index;
23368 switch (GET_MODE_SIZE (mode))
23370 case 4:
23371 index = 0;
23372 break;
23373 case 8:
23374 index = 1;
23375 break;
23376 case 16:
23377 index = 2;
23378 break;
23379 default:
23380 return 100;
23382 if (in == 2)
23383 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
23384 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
23386 if (MMX_CLASS_P (regclass))
23388 int index;
23389 switch (GET_MODE_SIZE (mode))
23391 case 4:
23392 index = 0;
23393 break;
23394 case 8:
23395 index = 1;
23396 break;
23397 default:
23398 return 100;
23400 if (in == 2)
23401 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
23402 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
23404 switch (GET_MODE_SIZE (mode))
23406 case 1:
23407 if (Q_CLASS_P (regclass) || TARGET_64BIT)
23409 if (!in)
23410 return ix86_cost->int_store[0];
23411 if (TARGET_PARTIAL_REG_DEPENDENCY && !optimize_size)
23412 cost = ix86_cost->movzbl_load;
23413 else
23414 cost = ix86_cost->int_load[0];
23415 if (in == 2)
23416 return MAX (cost, ix86_cost->int_store[0]);
23417 return cost;
23419 else
23421 if (in == 2)
23422 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
23423 if (in)
23424 return ix86_cost->movzbl_load;
23425 else
23426 return ix86_cost->int_store[0] + 4;
23428 break;
23429 case 2:
23430 if (in == 2)
23431 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
23432 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
23433 default:
23434 /* Compute number of 32bit moves needed. TFmode is moved as XFmode. */
23435 if (mode == TFmode)
23436 mode = XFmode;
23437 if (in == 2)
23438 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
23439 else if (in)
23440 cost = ix86_cost->int_load[2];
23441 else
23442 cost = ix86_cost->int_store[2];
23443 return (cost * (((int) GET_MODE_SIZE (mode)
23444 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
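/* Worked example (the table entries are per-CPU costs): for DImode in
   GENERAL_REGS on a 32-bit target with in == 1, we reach the default
   case with cost = int_load[2] and return
   cost * ((8 + 4 - 1) / 4), i.e. the price of two 32-bit loads.  */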
23448 int
23449 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
23451 return inline_memory_move_cost (mode, regclass, in);
23455 /* Return the cost of moving data from a register in class CLASS1 to
23456 one in class CLASS2.
23458 It is not required that the cost always equal 2 when FROM is the same as TO;
23459 on some machines it is expensive to move between registers if they are not
23460 general registers. */
23462 int
23463 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
23464 enum reg_class class2)
23466 /* In case we require secondary memory, compute the cost of the store
23467 followed by the load. In order to avoid bad register allocation choices,
23468 we need this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
23470 if (inline_secondary_memory_needed (class1, class2, mode, 0))
23472 int cost = 1;
23474 cost += inline_memory_move_cost (mode, class1, 2);
23475 cost += inline_memory_move_cost (mode, class2, 2);
23477 /* In the case of copying from a general purpose register we may emit
23478 multiple stores followed by a single load, causing a memory size
23479 mismatch stall. Count this as an arbitrarily high cost of 20. */
23480 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
23481 cost += 20;
23483 /* In the case of FP/MMX moves, the registers actually overlap, and we
23484 have to switch modes in order to treat them differently. */
23485 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
23486 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
23487 cost += 20;
23489 return cost;
23492 /* Moves between SSE/MMX and integer unit are expensive. */
23493 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
23494 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
23496 /* ??? By keeping the returned value relatively high, we limit the number
23497 of moves between integer and MMX/SSE registers for all targets.
23498 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
23499 where integer modes in MMX/SSE registers are not tieable
23500 because of missing QImode and HImode moves to, from or between
23501 MMX/SSE registers. */
23502 return MAX (8, ix86_cost->mmxsse_to_integer);
23504 if (MAYBE_FLOAT_CLASS_P (class1))
23505 return ix86_cost->fp_move;
23506 if (MAYBE_SSE_CLASS_P (class1))
23507 return ix86_cost->sse_move;
23508 if (MAYBE_MMX_CLASS_P (class1))
23509 return ix86_cost->mmx_move;
23510 return 2;
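/* Illustrative example: copying SImode between SSE_REGS and
   GENERAL_REGS on an SSE2 target with inter-unit moves enabled needs
   no secondary memory and costs MAX (8, ix86_cost->mmxsse_to_integer);
   without SSE2 the secondary-memory path above prices it as a store
   followed by a load.  */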
23513 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
23515 bool
23516 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
23518 /* Only the flags registers can hold CCmode values, and they can hold nothing else. */
23519 if (CC_REGNO_P (regno))
23520 return GET_MODE_CLASS (mode) == MODE_CC;
23521 if (GET_MODE_CLASS (mode) == MODE_CC
23522 || GET_MODE_CLASS (mode) == MODE_RANDOM
23523 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
23524 return 0;
23525 if (FP_REGNO_P (regno))
23526 return VALID_FP_MODE_P (mode);
23527 if (SSE_REGNO_P (regno))
23529 /* We implement the move patterns for all vector modes into and
23530 out of SSE registers, even when no operation instructions
23531 are available. */
23532 return (VALID_SSE_REG_MODE (mode)
23533 || VALID_SSE2_REG_MODE (mode)
23534 || VALID_MMX_REG_MODE (mode)
23535 || VALID_MMX_REG_MODE_3DNOW (mode));
23537 if (MMX_REGNO_P (regno))
23539 /* We implement the move patterns for 3DNOW modes even in MMX mode,
23540 so if the register is available at all, then we can move data of
23541 the given mode into or out of it. */
23542 return (VALID_MMX_REG_MODE (mode)
23543 || VALID_MMX_REG_MODE_3DNOW (mode));
23546 if (mode == QImode)
23548 /* Take care with QImode values - they can live in non-QI regs,
23549 but then they cause partial register stalls. */
23550 if (regno < 4 || TARGET_64BIT)
23551 return 1;
23552 if (!TARGET_PARTIAL_REG_STALL)
23553 return 1;
23554 return reload_in_progress || reload_completed;
23556 /* We handle both integer and floats in the general purpose registers. */
23557 else if (VALID_INT_MODE_P (mode))
23558 return 1;
23559 else if (VALID_FP_MODE_P (mode))
23560 return 1;
23561 else if (VALID_DFP_MODE_P (mode))
23562 return 1;
23563 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
23564 on to use that value in smaller contexts, this can easily force a
23565 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
23566 supporting DImode, allow it. */
23567 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
23568 return 1;
23570 return 0;
23573 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
23574 tieable integer mode. */
23576 static bool
23577 ix86_tieable_integer_mode_p (enum machine_mode mode)
23579 switch (mode)
23581 case HImode:
23582 case SImode:
23583 return true;
23585 case QImode:
23586 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
23588 case DImode:
23589 return TARGET_64BIT;
23591 default:
23592 return false;
23596 /* Return true if MODE1 is accessible in a register that can hold MODE2
23597 without copying. That is, all register classes that can hold MODE2
23598 can also hold MODE1. */
23600 bool
23601 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
23603 if (mode1 == mode2)
23604 return true;
23606 if (ix86_tieable_integer_mode_p (mode1)
23607 && ix86_tieable_integer_mode_p (mode2))
23608 return true;
23610 /* MODE2 being XFmode implies fp stack or general regs, which means we
23611 can tie any smaller floating point modes to it. Note that we do not
23612 tie this with TFmode. */
23613 if (mode2 == XFmode)
23614 return mode1 == SFmode || mode1 == DFmode;
23616 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
23617 that we can tie it with SFmode. */
23618 if (mode2 == DFmode)
23619 return mode1 == SFmode;
23621 /* If MODE2 is only appropriate for an SSE register, then tie with
23622 any other mode acceptable to SSE registers. */
23623 if (GET_MODE_SIZE (mode2) == 16
23624 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
23625 return (GET_MODE_SIZE (mode1) == 16
23626 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
23628 /* If MODE2 is appropriate for an MMX register, then tie
23629 with any other mode acceptable to MMX registers. */
23630 if (GET_MODE_SIZE (mode2) == 8
23631 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
23632 return (GET_MODE_SIZE (mode1) == 8
23633 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
23635 return false;
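/* Illustrative examples: V4SFmode and V2DImode tie (both are 16 byte
   SSE-only modes), and SFmode ties with DFmode and with XFmode; but
   SImode does not tie with V4SFmode, since general registers cannot
   hold the latter.  */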
23638 /* Compute a (partial) cost for rtx X. Return true if the complete
23639 cost has been computed, and false if subexpressions should be
23640 scanned. In either case, *TOTAL contains the cost result. */
23642 static bool
23643 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total)
23645 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
23646 enum machine_mode mode = GET_MODE (x);
23648 switch (code)
23650 case CONST_INT:
23651 case CONST:
23652 case LABEL_REF:
23653 case SYMBOL_REF:
23654 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
23655 *total = 3;
23656 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
23657 *total = 2;
23658 else if (flag_pic && SYMBOLIC_CONST (x)
23659 && (!TARGET_64BIT
23660 || (GET_CODE (x) != LABEL_REF
23661 && (GET_CODE (x) != SYMBOL_REF
23662 || !SYMBOL_REF_LOCAL_P (x)))))
23663 *total = 1;
23664 else
23665 *total = 0;
23666 return true;
23668 case CONST_DOUBLE:
23669 if (mode == VOIDmode)
23670 *total = 0;
23671 else
23672 switch (standard_80387_constant_p (x))
23674 case 1: /* 0.0 */
23675 *total = 1;
23676 break;
23677 default: /* Other constants */
23678 *total = 2;
23679 break;
23680 case 0:
23681 case -1:
23682 /* Start with (MEM (SYMBOL_REF)), since that's where
23683 it'll probably end up. Add a penalty for size. */
23684 *total = (COSTS_N_INSNS (1)
23685 + (flag_pic != 0 && !TARGET_64BIT)
23686 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
23687 break;
23689 return true;
23691 case ZERO_EXTEND:
23692 /* Zero extension is often completely free on x86_64, so make
23693 it as cheap as possible. */
23694 if (TARGET_64BIT && mode == DImode
23695 && GET_MODE (XEXP (x, 0)) == SImode)
23696 *total = 1;
23697 else if (TARGET_ZERO_EXTEND_WITH_AND)
23698 *total = ix86_cost->add;
23699 else
23700 *total = ix86_cost->movzx;
23701 return false;
23703 case SIGN_EXTEND:
23704 *total = ix86_cost->movsx;
23705 return false;
23707 case ASHIFT:
23708 if (CONST_INT_P (XEXP (x, 1))
23709 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
23711 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
23712 if (value == 1)
23714 *total = ix86_cost->add;
23715 return false;
23717 if ((value == 2 || value == 3)
23718 && ix86_cost->lea <= ix86_cost->shift_const)
23720 *total = ix86_cost->lea;
23721 return false;
23724 /* FALLTHRU */
23726 case ROTATE:
23727 case ASHIFTRT:
23728 case LSHIFTRT:
23729 case ROTATERT:
23730 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
23732 if (CONST_INT_P (XEXP (x, 1)))
23734 if (INTVAL (XEXP (x, 1)) > 32)
23735 *total = ix86_cost->shift_const + COSTS_N_INSNS (2);
23736 else
23737 *total = ix86_cost->shift_const * 2;
23739 else
23741 if (GET_CODE (XEXP (x, 1)) == AND)
23742 *total = ix86_cost->shift_var * 2;
23743 else
23744 *total = ix86_cost->shift_var * 6 + COSTS_N_INSNS (2);
23747 else
23749 if (CONST_INT_P (XEXP (x, 1)))
23750 *total = ix86_cost->shift_const;
23751 else
23752 *total = ix86_cost->shift_var;
23754 return false;
23756 case MULT:
23757 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
23759 /* ??? SSE scalar cost should be used here. */
23760 *total = ix86_cost->fmul;
23761 return false;
23763 else if (X87_FLOAT_MODE_P (mode))
23765 *total = ix86_cost->fmul;
23766 return false;
23768 else if (FLOAT_MODE_P (mode))
23770 /* ??? SSE vector cost should be used here. */
23771 *total = ix86_cost->fmul;
23772 return false;
23774 else
23776 rtx op0 = XEXP (x, 0);
23777 rtx op1 = XEXP (x, 1);
23778 int nbits;
23779 if (CONST_INT_P (XEXP (x, 1)))
23781 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
23782 for (nbits = 0; value != 0; value &= value - 1)
23783 nbits++;
23785 else
23786 /* This is arbitrary. */
23787 nbits = 7;
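/* The loop above is Kernighan's population count: value &= value - 1
   clears the lowest set bit on each iteration.  For example, a
   multiply by 20 (binary 10100) gives nbits == 2, so it is charged
   two mult_bit steps beyond mult_init.  */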
23789 /* Compute costs correctly for widening multiplication. */
23790 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
23791 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
23792 == GET_MODE_SIZE (mode))
23794 int is_mulwiden = 0;
23795 enum machine_mode inner_mode = GET_MODE (op0);
23797 if (GET_CODE (op0) == GET_CODE (op1))
23798 is_mulwiden = 1, op1 = XEXP (op1, 0);
23799 else if (CONST_INT_P (op1))
23801 if (GET_CODE (op0) == SIGN_EXTEND)
23802 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
23803 == INTVAL (op1);
23804 else
23805 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
23808 if (is_mulwiden)
23809 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
23812 *total = (ix86_cost->mult_init[MODE_INDEX (mode)]
23813 + nbits * ix86_cost->mult_bit
23814 + rtx_cost (op0, outer_code) + rtx_cost (op1, outer_code));
23816 return true;
23819 case DIV:
23820 case UDIV:
23821 case MOD:
23822 case UMOD:
23823 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
23824 /* ??? SSE cost should be used here. */
23825 *total = ix86_cost->fdiv;
23826 else if (X87_FLOAT_MODE_P (mode))
23827 *total = ix86_cost->fdiv;
23828 else if (FLOAT_MODE_P (mode))
23829 /* ??? SSE vector cost should be used here. */
23830 *total = ix86_cost->fdiv;
23831 else
23832 *total = ix86_cost->divide[MODE_INDEX (mode)];
23833 return false;
23835 case PLUS:
23836 if (GET_MODE_CLASS (mode) == MODE_INT
23837 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
23839 if (GET_CODE (XEXP (x, 0)) == PLUS
23840 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
23841 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
23842 && CONSTANT_P (XEXP (x, 1)))
23844 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
23845 if (val == 2 || val == 4 || val == 8)
23847 *total = ix86_cost->lea;
23848 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
23849 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
23850 outer_code);
23851 *total += rtx_cost (XEXP (x, 1), outer_code);
23852 return true;
23855 else if (GET_CODE (XEXP (x, 0)) == MULT
23856 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
23858 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
23859 if (val == 2 || val == 4 || val == 8)
23861 *total = ix86_cost->lea;
23862 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
23863 *total += rtx_cost (XEXP (x, 1), outer_code);
23864 return true;
23867 else if (GET_CODE (XEXP (x, 0)) == PLUS)
23869 *total = ix86_cost->lea;
23870 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code);
23871 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code);
23872 *total += rtx_cost (XEXP (x, 1), outer_code);
23873 return true;
23876 /* FALLTHRU */
23878 case MINUS:
23879 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
23881 /* ??? SSE cost should be used here. */
23882 *total = ix86_cost->fadd;
23883 return false;
23885 else if (X87_FLOAT_MODE_P (mode))
23887 *total = ix86_cost->fadd;
23888 return false;
23890 else if (FLOAT_MODE_P (mode))
23892 /* ??? SSE vector cost should be used here. */
23893 *total = ix86_cost->fadd;
23894 return false;
23896 /* FALLTHRU */
23898 case AND:
23899 case IOR:
23900 case XOR:
23901 if (!TARGET_64BIT && mode == DImode)
23903 *total = (ix86_cost->add * 2
23904 + (rtx_cost (XEXP (x, 0), outer_code)
23905 << (GET_MODE (XEXP (x, 0)) != DImode))
23906 + (rtx_cost (XEXP (x, 1), outer_code)
23907 << (GET_MODE (XEXP (x, 1)) != DImode)));
23908 return true;
23910 /* FALLTHRU */
23912 case NEG:
23913 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
23915 /* ??? SSE cost should be used here. */
23916 *total = ix86_cost->fchs;
23917 return false;
23919 else if (X87_FLOAT_MODE_P (mode))
23921 *total = ix86_cost->fchs;
23922 return false;
23924 else if (FLOAT_MODE_P (mode))
23926 /* ??? SSE vector cost should be used here. */
23927 *total = ix86_cost->fchs;
23928 return false;
23930 /* FALLTHRU */
23932 case NOT:
23933 if (!TARGET_64BIT && mode == DImode)
23934 *total = ix86_cost->add * 2;
23935 else
23936 *total = ix86_cost->add;
23937 return false;
23939 case COMPARE:
23940 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
23941 && XEXP (XEXP (x, 0), 1) == const1_rtx
23942 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
23943 && XEXP (x, 1) == const0_rtx)
23945 /* This kind of construct is implemented using test[bwl].
23946 Treat it as if we had an AND. */
23947 *total = (ix86_cost->add
23948 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code)
23949 + rtx_cost (const1_rtx, outer_code));
23950 return true;
23952 return false;
23954 case FLOAT_EXTEND:
23955 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
23956 *total = 0;
23957 return false;
23959 case ABS:
23960 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
23961 /* ??? SSE cost should be used here. */
23962 *total = ix86_cost->fabs;
23963 else if (X87_FLOAT_MODE_P (mode))
23964 *total = ix86_cost->fabs;
23965 else if (FLOAT_MODE_P (mode))
23966 /* ??? SSE vector cost should be used here. */
23967 *total = ix86_cost->fabs;
23968 return false;
23970 case SQRT:
23971 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
23972 /* ??? SSE cost should be used here. */
23973 *total = ix86_cost->fsqrt;
23974 else if (X87_FLOAT_MODE_P (mode))
23975 *total = ix86_cost->fsqrt;
23976 else if (FLOAT_MODE_P (mode))
23977 /* ??? SSE vector cost should be used here. */
23978 *total = ix86_cost->fsqrt;
23979 return false;
23981 case UNSPEC:
23982 if (XINT (x, 1) == UNSPEC_TP)
23983 *total = 0;
23984 return false;
23986 default:
23987 return false;
23991 #if TARGET_MACHO
23993 static int current_machopic_label_num;
23995 /* Given a symbol name and its associated stub, write out the
23996 definition of the stub. */
23998 void
23999 machopic_output_stub (FILE *file, const char *symb, const char *stub)
24001 unsigned int length;
24002 char *binder_name, *symbol_name, lazy_ptr_name[32];
24003 int label = ++current_machopic_label_num;
24005 /* For 64-bit we shouldn't get here. */
24006 gcc_assert (!TARGET_64BIT);
24008 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
24009 symb = (*targetm.strip_name_encoding) (symb);
24011 length = strlen (stub);
24012 binder_name = XALLOCAVEC (char, length + 32);
24013 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
24015 length = strlen (symb);
24016 symbol_name = XALLOCAVEC (char, length + 32);
24017 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
24019 sprintf (lazy_ptr_name, "L%d$lz", label);
24021 if (MACHOPIC_PURE)
24022 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
24023 else
24024 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
24026 fprintf (file, "%s:\n", stub);
24027 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24029 if (MACHOPIC_PURE)
24031 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
24032 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
24033 fprintf (file, "\tjmp\t*%%edx\n");
24035 else
24036 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
24038 fprintf (file, "%s:\n", binder_name);
24040 if (MACHOPIC_PURE)
24042 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
24043 fprintf (file, "\tpushl\t%%eax\n");
24045 else
24046 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
24048 fprintf (file, "\tjmp\tdyld_stub_binding_helper\n");
24050 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
24051 fprintf (file, "%s:\n", lazy_ptr_name);
24052 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
24053 fprintf (file, "\t.long %s\n", binder_name);
24056 void
24057 darwin_x86_file_end (void)
24059 darwin_file_end ();
24060 ix86_file_end ();
24062 #endif /* TARGET_MACHO */
24064 /* Order the registers for the register allocator. */
24066 void
24067 x86_order_regs_for_local_alloc (void)
24069 int pos = 0;
24070 int i;
24072 /* First allocate the local general purpose registers. */
24073 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
24074 if (GENERAL_REGNO_P (i) && call_used_regs[i])
24075 reg_alloc_order [pos++] = i;
24077 /* Global general purpose registers. */
24078 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
24079 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
24080 reg_alloc_order [pos++] = i;
24082 /* x87 registers come first in case we are doing FP math
24083 using them. */
24084 if (!TARGET_SSE_MATH)
24085 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
24086 reg_alloc_order [pos++] = i;
24088 /* SSE registers. */
24089 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
24090 reg_alloc_order [pos++] = i;
24091 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
24092 reg_alloc_order [pos++] = i;
24094 /* x87 registers. */
24095 if (TARGET_SSE_MATH)
24096 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
24097 reg_alloc_order [pos++] = i;
24099 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
24100 reg_alloc_order [pos++] = i;
24102 /* Initialize the rest of the array, as we do not allocate some
24103 registers at all. */
24104 while (pos < FIRST_PSEUDO_REGISTER)
24105 reg_alloc_order [pos++] = 0;
24108 /* Handle an "ms_abi" or "sysv_abi" attribute; arguments as in
24109 struct attribute_spec.handler. */
24110 static tree
24111 ix86_handle_abi_attribute (tree *node, tree name,
24112 tree args ATTRIBUTE_UNUSED,
24113 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24115 if (TREE_CODE (*node) != FUNCTION_TYPE
24116 && TREE_CODE (*node) != METHOD_TYPE
24117 && TREE_CODE (*node) != FIELD_DECL
24118 && TREE_CODE (*node) != TYPE_DECL)
24120 warning (OPT_Wattributes, "%qs attribute only applies to functions",
24121 IDENTIFIER_POINTER (name));
24122 *no_add_attrs = true;
24123 return NULL_TREE;
24125 if (!TARGET_64BIT)
24127 warning (OPT_Wattributes, "%qs attribute only available for 64-bit",
24128 IDENTIFIER_POINTER (name));
24129 *no_add_attrs = true;
24130 return NULL_TREE;
24133 /* The ms_abi and sysv_abi attributes are mutually exclusive; reject one in the presence of the other. */
24134 if (is_attribute_p ("ms_abi", name))
24136 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
24138 error ("ms_abi and sysv_abi attributes are not compatible");
24141 return NULL_TREE;
24143 else if (is_attribute_p ("sysv_abi", name))
24145 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
24147 error ("ms_abi and sysv_abi attributes are not compatible");
24150 return NULL_TREE;
24153 return NULL_TREE;
24156 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
24157 struct attribute_spec.handler. */
24158 static tree
24159 ix86_handle_struct_attribute (tree *node, tree name,
24160 tree args ATTRIBUTE_UNUSED,
24161 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
24163 tree *type = NULL;
24164 if (DECL_P (*node))
24166 if (TREE_CODE (*node) == TYPE_DECL)
24167 type = &TREE_TYPE (*node);
24169 else
24170 type = node;
24172 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
24173 || TREE_CODE (*type) == UNION_TYPE)))
24175 warning (OPT_Wattributes, "%qs attribute ignored",
24176 IDENTIFIER_POINTER (name));
24177 *no_add_attrs = true;
24180 else if ((is_attribute_p ("ms_struct", name)
24181 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
24182 || ((is_attribute_p ("gcc_struct", name)
24183 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
24185 warning (OPT_Wattributes, "%qs incompatible attribute ignored",
24186 IDENTIFIER_POINTER (name));
24187 *no_add_attrs = true;
24190 return NULL_TREE;
24193 static bool
24194 ix86_ms_bitfield_layout_p (const_tree record_type)
24196 return ((TARGET_MS_BITFIELD_LAYOUT
24197 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
24198 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
24201 /* Returns an expression indicating where the this parameter is
24202 located on entry to the FUNCTION. */
24204 static rtx
24205 x86_this_parameter (tree function)
24207 tree type = TREE_TYPE (function);
24208 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
24209 int nregs;
24211 if (TARGET_64BIT)
24213 const int *parm_regs;
24215 if (ix86_function_type_abi (type) == MS_ABI)
24216 parm_regs = x86_64_ms_abi_int_parameter_registers;
24217 else
24218 parm_regs = x86_64_int_parameter_registers;
24219 return gen_rtx_REG (DImode, parm_regs[aggr]);
24222 nregs = ix86_function_regparm (type, function);
24224 if (nregs > 0 && !stdarg_p (type))
24226 int regno;
24228 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
24229 regno = aggr ? DX_REG : CX_REG;
24230 else
24232 regno = AX_REG;
24233 if (aggr)
24235 regno = DX_REG;
24236 if (nregs == 1)
24237 return gen_rtx_MEM (SImode,
24238 plus_constant (stack_pointer_rtx, 4));
24241 return gen_rtx_REG (SImode, regno);
24244 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
24247 /* Determine whether x86_output_mi_thunk can succeed. */
24249 static bool
24250 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
24251 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
24252 HOST_WIDE_INT vcall_offset, const_tree function)
24254 /* 64-bit can handle anything. */
24255 if (TARGET_64BIT)
24256 return true;
24258 /* For 32-bit, everything's fine if we have one free register. */
24259 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
24260 return true;
24262 /* Need a free register for vcall_offset. */
24263 if (vcall_offset)
24264 return false;
24266 /* Need a free register for GOT references. */
24267 if (flag_pic && !(*targetm.binds_local_p) (function))
24268 return false;
24270 /* Otherwise ok. */
24271 return true;
24274 /* Output the assembler code for a thunk function. THUNK_DECL is the
24275 declaration for the thunk function itself, FUNCTION is the decl for
24276 the target function. DELTA is an immediate constant offset to be
24277 added to THIS. If VCALL_OFFSET is nonzero, the word at
24278 *(*this + vcall_offset) should be added to THIS. */
24280 static void
24281 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
24282 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
24283 HOST_WIDE_INT vcall_offset, tree function)
24285 rtx xops[3];
24286 rtx this_param = x86_this_parameter (function);
24287 rtx this_reg, tmp;
24289 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
24290 pull it in now and let DELTA benefit. */
24291 if (REG_P (this_param))
24292 this_reg = this_param;
24293 else if (vcall_offset)
24295 /* Put the this parameter into %eax. */
24296 xops[0] = this_param;
24297 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
24298 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
24300 else
24301 this_reg = NULL_RTX;
24303 /* Adjust the this parameter by a fixed constant. */
24304 if (delta)
24306 xops[0] = GEN_INT (delta);
24307 xops[1] = this_reg ? this_reg : this_param;
24308 if (TARGET_64BIT)
24310 if (!x86_64_general_operand (xops[0], DImode))
24312 tmp = gen_rtx_REG (DImode, R10_REG);
24313 xops[1] = tmp;
24314 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
24315 xops[0] = tmp;
24316 xops[1] = this_param;
24318 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
24320 else
24321 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
24324 /* Adjust the this parameter by a value stored in the vtable. */
24325 if (vcall_offset)
24327 if (TARGET_64BIT)
24328 tmp = gen_rtx_REG (DImode, R10_REG);
24329 else
24331 int tmp_regno = CX_REG;
24332 if (lookup_attribute ("fastcall",
24333 TYPE_ATTRIBUTES (TREE_TYPE (function))))
24334 tmp_regno = AX_REG;
24335 tmp = gen_rtx_REG (SImode, tmp_regno);
24338 xops[0] = gen_rtx_MEM (Pmode, this_reg);
24339 xops[1] = tmp;
24340 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
24342 /* Adjust the this parameter. */
24343 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
24344 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
24346 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
24347 xops[0] = GEN_INT (vcall_offset);
24348 xops[1] = tmp2;
24349 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
24350 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
24352 xops[1] = this_reg;
24353 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
24356 /* If necessary, drop THIS back to its stack slot. */
24357 if (this_reg && this_reg != this_param)
24359 xops[0] = this_reg;
24360 xops[1] = this_param;
24361 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
24364 xops[0] = XEXP (DECL_RTL (function), 0);
24365 if (TARGET_64BIT)
24367 if (!flag_pic || (*targetm.binds_local_p) (function))
24368 output_asm_insn ("jmp\t%P0", xops);
24369 /* All thunks should be in the same object as their target,
24370 and thus binds_local_p should be true. */
24371 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
24372 gcc_unreachable ();
24373 else
24375 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
24376 tmp = gen_rtx_CONST (Pmode, tmp);
24377 tmp = gen_rtx_MEM (QImode, tmp);
24378 xops[0] = tmp;
24379 output_asm_insn ("jmp\t%A0", xops);
24382 else
24384 if (!flag_pic || (*targetm.binds_local_p) (function))
24385 output_asm_insn ("jmp\t%P0", xops);
24386 else
24387 #if TARGET_MACHO
24388 if (TARGET_MACHO)
24390 rtx sym_ref = XEXP (DECL_RTL (function), 0);
24391 tmp = (gen_rtx_SYMBOL_REF
24392 (Pmode,
24393 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
24394 tmp = gen_rtx_MEM (QImode, tmp);
24395 xops[0] = tmp;
24396 output_asm_insn ("jmp\t%0", xops);
24398 else
24399 #endif /* TARGET_MACHO */
24401 tmp = gen_rtx_REG (SImode, CX_REG);
24402 output_set_got (tmp, NULL_RTX);
24404 xops[1] = tmp;
24405 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
24406 output_asm_insn ("jmp\t{*}%1", xops);
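/* Illustrative example of the output above: a 32-bit non-PIC thunk
   with delta == 8, no vcall offset and THIS left in its stack slot
   reduces to just

     addl $8, 4(%esp)
     jmp target

   since THIS never needs to be pulled into a register.  */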
24411 static void
24412 x86_file_start (void)
24414 default_file_start ();
24415 #if TARGET_MACHO
24416 darwin_file_start ();
24417 #endif
24418 if (X86_FILE_START_VERSION_DIRECTIVE)
24419 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
24420 if (X86_FILE_START_FLTUSED)
24421 fputs ("\t.global\t__fltused\n", asm_out_file);
24422 if (ix86_asm_dialect == ASM_INTEL)
24423 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
24426 int
24427 x86_field_alignment (tree field, int computed)
24429 enum machine_mode mode;
24430 tree type = TREE_TYPE (field);
24432 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
24433 return computed;
24434 mode = TYPE_MODE (strip_array_types (type));
24435 if (mode == DFmode || mode == DCmode
24436 || GET_MODE_CLASS (mode) == MODE_INT
24437 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
24438 return MIN (32, computed);
24439 return computed;
24442 /* Output assembler code to FILE to increment profiler label # LABELNO
24443 for profiling a function entry. */
24444 void
24445 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
24447 if (TARGET_64BIT)
24449 #ifndef NO_PROFILE_COUNTERS
24450 fprintf (file, "\tleaq\t%sP%d@(%%rip),%%r11\n", LPREFIX, labelno);
24451 #endif
24453 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
24454 fprintf (file, "\tcall\t*%s@GOTPCREL(%%rip)\n", MCOUNT_NAME);
24455 else
24456 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
24458 else if (flag_pic)
24460 #ifndef NO_PROFILE_COUNTERS
24461 fprintf (file, "\tleal\t%sP%d@GOTOFF(%%ebx),%%%s\n",
24462 LPREFIX, labelno, PROFILE_COUNT_REGISTER);
24463 #endif
24464 fprintf (file, "\tcall\t*%s@GOT(%%ebx)\n", MCOUNT_NAME);
24466 else
24468 #ifndef NO_PROFILE_COUNTERS
24469 fprintf (file, "\tmovl\t$%sP%d,%%%s\n", LPREFIX, labelno,
24470 PROFILE_COUNT_REGISTER);
24471 #endif
24472 fprintf (file, "\tcall\t%s\n", MCOUNT_NAME);
24476 /* We don't have exact information about the insn sizes, but we may assume
24477 quite safely that we are informed about all 1 byte insns and memory
24478 address sizes. This is enough to eliminate unnecessary padding in
24479 99% of cases. */
24481 static int
24482 min_insn_size (rtx insn)
24484 int l = 0;
24486 if (!INSN_P (insn) || !active_insn_p (insn))
24487 return 0;
24489 /* Discard alignments we've emitted, and jump tables. */
24490 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
24491 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
24492 return 0;
24493 if (JUMP_P (insn)
24494 && (GET_CODE (PATTERN (insn)) == ADDR_VEC
24495 || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC))
24496 return 0;
24498 /* Important case - calls are always 5 bytes.
24499 It is common to have many calls in a row. */
24500 if (CALL_P (insn)
24501 && symbolic_reference_mentioned_p (PATTERN (insn))
24502 && !SIBLING_CALL_P (insn))
24503 return 5;
24504 if (get_attr_length (insn) <= 1)
24505 return 1;
24507 /* For normal instructions we may rely on the sizes of addresses
24508 and the presence of a symbol to require 4 bytes of encoding.
24509 This is not the case for jumps, where references are PC-relative. */
24510 if (!JUMP_P (insn))
24512 l = get_attr_length_address (insn);
24513 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
24514 l = 4;
24516 if (l)
24517 return 1+l;
24518 else
24519 return 2;
24522 /* AMD K8 core mispredicts jumps when there are more than 3 jumps in a
24523 16 byte window. */
24525 static void
24526 ix86_avoid_jump_misspredicts (void)
24528 rtx insn, start = get_insns ();
24529 int nbytes = 0, njumps = 0;
24530 int isjump = 0;
24532 /* Look for all minimal intervals of instructions containing 4 jumps.
24533 The intervals are bounded by START and INSN. NBYTES is the total
24534 size of instructions in the interval including INSN and not including
24535 START. When NBYTES is smaller than 16, it is possible
24536 that the end of START and the end of INSN land in the same 16 byte window.
24538 The smallest offset in the window at which INSN can start is the case
24539 where START ends at offset 0. The offset of INSN is then NBYTES - sizeof (INSN).
24540 We add a p2align to the 16 byte window with maxskip 15 - NBYTES + sizeof (INSN). */
24542 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24545 nbytes += min_insn_size (insn);
24546 if (dump_file)
24547 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
24548 INSN_UID (insn), min_insn_size (insn));
24549 if ((JUMP_P (insn)
24550 && GET_CODE (PATTERN (insn)) != ADDR_VEC
24551 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
24552 || CALL_P (insn))
24553 njumps++;
24554 else
24555 continue;
24557 while (njumps > 3)
24559 start = NEXT_INSN (start);
24560 if ((JUMP_P (start)
24561 && GET_CODE (PATTERN (start)) != ADDR_VEC
24562 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
24563 || CALL_P (start))
24564 njumps--, isjump = 1;
24565 else
24566 isjump = 0;
24567 nbytes -= min_insn_size (start);
24569 gcc_assert (njumps >= 0);
24570 if (dump_file)
24571 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
24572 INSN_UID (start), INSN_UID (insn), nbytes);
24574 if (njumps == 3 && isjump && nbytes < 16)
24576 int padsize = 15 - nbytes + min_insn_size (insn);
24578 if (dump_file)
24579 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
24580 INSN_UID (insn), padsize);
24581 emit_insn_before (gen_align (GEN_INT (padsize)), insn);
24586 /* AMD Athlon works faster
24587 when RET is neither the destination of a conditional jump nor directly
24588 preceded by another jump instruction. We avoid the penalty by inserting a
24589 NOP just before the RET instructions in such cases. */
24590 static void
24591 ix86_pad_returns (void)
24593 edge e;
24594 edge_iterator ei;
24596 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
24598 basic_block bb = e->src;
24599 rtx ret = BB_END (bb);
24600 rtx prev;
24601 bool replace = false;
24603 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
24604 || !maybe_hot_bb_p (bb))
24605 continue;
24606 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
24607 if (active_insn_p (prev) || LABEL_P (prev))
24608 break;
24609 if (prev && LABEL_P (prev))
24611 edge e;
24612 edge_iterator ei;
24614 FOR_EACH_EDGE (e, ei, bb->preds)
24615 if (EDGE_FREQUENCY (e) && e->src->index >= 0
24616 && !(e->flags & EDGE_FALLTHRU))
24617 replace = true;
24619 if (!replace)
24621 prev = prev_active_insn (ret);
24622 if (prev
24623 && ((JUMP_P (prev) && any_condjump_p (prev))
24624 || CALL_P (prev)))
24625 replace = true;
24626 /* Empty functions get a branch mispredict even when the jump destination
24627 is not visible to us. */
24628 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
24629 replace = true;
24631 if (replace)
24633 emit_insn_before (gen_return_internal_long (), ret);
24634 delete_insn (ret);
24639 /* Implement machine-specific optimizations. We implement padding of returns
24640 for K8 CPUs and a pass to avoid 4 jumps in a single 16-byte window. */
24641 static void
24642 ix86_reorg (void)
24644 if (TARGET_PAD_RETURNS && optimize && !optimize_size)
24645 ix86_pad_returns ();
24646 if (TARGET_FOUR_JUMP_LIMIT && optimize && !optimize_size)
24647 ix86_avoid_jump_misspredicts ();
24650 /* Return nonzero when a QImode register that must be represented via a REX
24651 prefix is used. */
24652 bool
24653 x86_extended_QIreg_mentioned_p (rtx insn)
24655 int i;
24656 extract_insn_cached (insn);
24657 for (i = 0; i < recog_data.n_operands; i++)
24658 if (REG_P (recog_data.operand[i])
24659 && REGNO (recog_data.operand[i]) >= 4)
24660 return true;
24661 return false;
24664 /* Return nonzero when P points to a register encoded via a REX prefix.
24665 Called via for_each_rtx. */
24666 static int
24667 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
24669 unsigned int regno;
24670 if (!REG_P (*p))
24671 return 0;
24672 regno = REGNO (*p);
24673 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
24676 /* Return true when INSN mentions a register that must be encoded using a
24677 REX prefix. */
24678 bool
24679 x86_extended_reg_mentioned_p (rtx insn)
24681 return for_each_rtx (&PATTERN (insn), extended_reg_mentioned_1, NULL);
24684 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
24685 optabs would emit if we didn't have TFmode patterns. */
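/* A rough C model of the DImode case emitted below; u64_to_double is a
   hypothetical helper name, and the sketch assumes a signed 64-bit
   conversion is available, which is what the insn patterns provide:

     #include <stdint.h>

     double
     u64_to_double (uint64_t x)
     {
       if ((int64_t) x >= 0)
         return (double) (int64_t) x;      // fits the signed range
       uint64_t h = (x >> 1) | (x & 1);    // halve, keeping the sticky bit
       return (double) (int64_t) h * 2.0;  // convert the half, then double
     }

   Folding the low bit back in (the IOR below) is what keeps the final
   doubling correctly rounded. */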
24687 void
24688 x86_emit_floatuns (rtx operands[2])
24690 rtx neglab, donelab, i0, i1, f0, in, out;
24691 enum machine_mode mode, inmode;
24693 inmode = GET_MODE (operands[1]);
24694 gcc_assert (inmode == SImode || inmode == DImode);
24696 out = operands[0];
24697 in = force_reg (inmode, operands[1]);
24698 mode = GET_MODE (out);
24699 neglab = gen_label_rtx ();
24700 donelab = gen_label_rtx ();
24701 f0 = gen_reg_rtx (mode);
24703 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
24705 expand_float (out, in, 0);
24707 emit_jump_insn (gen_jump (donelab));
24708 emit_barrier ();
24710 emit_label (neglab);
24712 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
24713 1, OPTAB_DIRECT);
24714 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
24715 1, OPTAB_DIRECT);
24716 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
24718 expand_float (f0, i0, 0);
24720 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
24722 emit_label (donelab);
24725 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
24726 with all elements equal to VAR. Return true if successful. */
24728 static bool
24729 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
24730 rtx target, rtx val)
24732 enum machine_mode smode, wsmode, wvmode;
24733 rtx x;
24735 switch (mode)
24737 case V2SImode:
24738 case V2SFmode:
24739 if (!mmx_ok)
24740 return false;
24741 /* FALLTHRU */
24743 case V2DFmode:
24744 case V2DImode:
24745 case V4SFmode:
24746 case V4SImode:
24747 val = force_reg (GET_MODE_INNER (mode), val);
24748 x = gen_rtx_VEC_DUPLICATE (mode, val);
24749 emit_insn (gen_rtx_SET (VOIDmode, target, x));
24750 return true;
24752 case V4HImode:
24753 if (!mmx_ok)
24754 return false;
24755 if (TARGET_SSE || TARGET_3DNOW_A)
24757 val = gen_lowpart (SImode, val);
24758 x = gen_rtx_TRUNCATE (HImode, val);
24759 x = gen_rtx_VEC_DUPLICATE (mode, x);
24760 emit_insn (gen_rtx_SET (VOIDmode, target, x));
24761 return true;
24763 else
24765 smode = HImode;
24766 wsmode = SImode;
24767 wvmode = V2SImode;
24768 goto widen;
24771 case V8QImode:
24772 if (!mmx_ok)
24773 return false;
24774 smode = QImode;
24775 wsmode = HImode;
24776 wvmode = V4HImode;
24777 goto widen;
24778 case V8HImode:
24779 if (TARGET_SSE2)
24781 rtx tmp1, tmp2;
24782 /* Extend HImode to SImode using a paradoxical SUBREG. */
24783 tmp1 = gen_reg_rtx (SImode);
24784 emit_move_insn (tmp1, gen_lowpart (SImode, val));
24785 /* Insert the SImode value as low element of V4SImode vector. */
24786 tmp2 = gen_reg_rtx (V4SImode);
24787 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
24788 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
24789 CONST0_RTX (V4SImode),
24790 const1_rtx);
24791 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
24792 /* Cast the V4SImode vector back to a V8HImode vector. */
24793 tmp1 = gen_reg_rtx (V8HImode);
24794 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
24795 /* Duplicate the low short through the whole low SImode word. */
24796 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
24797 /* Cast the V8HImode vector back to a V4SImode vector. */
24798 tmp2 = gen_reg_rtx (V4SImode);
24799 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
24800 /* Replicate the low element of the V4SImode vector. */
24801 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
24802 /* Cast the V4SImode vector back to V8HImode, and store in target. */
24803 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
24804 return true;
24806 smode = HImode;
24807 wsmode = SImode;
24808 wvmode = V4SImode;
24809 goto widen;
24810 case V16QImode:
24811 if (TARGET_SSE2)
24813 rtx tmp1, tmp2;
24814 /* Extend QImode to SImode using a paradoxical SUBREG. */
24815 tmp1 = gen_reg_rtx (SImode);
24816 emit_move_insn (tmp1, gen_lowpart (SImode, val));
24817 /* Insert the SImode value as low element of V4SImode vector. */
24818 tmp2 = gen_reg_rtx (V4SImode);
24819 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
24820 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
24821 CONST0_RTX (V4SImode),
24822 const1_rtx);
24823 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
24824 /* Cast the V4SImode vector back to a V16QImode vector. */
24825 tmp1 = gen_reg_rtx (V16QImode);
24826 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
24827 /* Duplicate the low byte through the whole low SImode word. */
24828 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
24829 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
24830 /* Cast the V16QImode vector back to a V4SImode vector. */
24831 tmp2 = gen_reg_rtx (V4SImode);
24832 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
24833 /* Replicate the low element of the V4SImode vector. */
24834 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
24835 /* Cast the V4SImode vector back to V16QImode, and store in target. */
24836 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
24837 return true;
24839 smode = QImode;
24840 wsmode = HImode;
24841 wvmode = V8HImode;
24842 goto widen;
24843 widen:
24844 /* Replicate the value once into the next wider mode and recurse. */
24845 val = convert_modes (wsmode, smode, val, true);
24846 x = expand_simple_binop (wsmode, ASHIFT, val,
24847 GEN_INT (GET_MODE_BITSIZE (smode)),
24848 NULL_RTX, 1, OPTAB_LIB_WIDEN);
24849 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
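/* For example, widening the QImode value 0xAB gives the HImode value
   (0xAB << 8) | 0xAB == 0xABAB; each recursion level doubles the number
   of copies packed into one scalar. */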
24851 x = gen_reg_rtx (wvmode);
24852 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
24853 gcc_unreachable ();
24854 emit_move_insn (target, gen_lowpart (mode, x));
24855 return true;
24857 default:
24858 return false;
24862 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
24863 whose ONE_VAR element is VAR, and other elements are zero. Return true
24864 if successful. */
24866 static bool
24867 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
24868 rtx target, rtx var, int one_var)
24870 enum machine_mode vsimode;
24871 rtx new_target;
24872 rtx x, tmp;
24873 bool use_vector_set = false;
24875 switch (mode)
24877 case V2DImode:
24878 use_vector_set = TARGET_64BIT && TARGET_SSE4_1;
24879 break;
24880 case V16QImode:
24881 case V4SImode:
24882 case V4SFmode:
24883 use_vector_set = TARGET_SSE4_1;
24884 break;
24885 case V8HImode:
24886 use_vector_set = TARGET_SSE2;
24887 break;
24888 case V4HImode:
24889 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
24890 break;
24891 default:
24892 break;
24895 if (use_vector_set)
24897 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
24898 var = force_reg (GET_MODE_INNER (mode), var);
24899 ix86_expand_vector_set (mmx_ok, target, var, one_var);
24900 return true;
24903 switch (mode)
24905 case V2SFmode:
24906 case V2SImode:
24907 if (!mmx_ok)
24908 return false;
24909 /* FALLTHRU */
24911 case V2DFmode:
24912 case V2DImode:
24913 if (one_var != 0)
24914 return false;
24915 var = force_reg (GET_MODE_INNER (mode), var);
24916 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
24917 emit_insn (gen_rtx_SET (VOIDmode, target, x));
24918 return true;
24920 case V4SFmode:
24921 case V4SImode:
24922 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
24923 new_target = gen_reg_rtx (mode);
24924 else
24925 new_target = target;
24926 var = force_reg (GET_MODE_INNER (mode), var);
24927 x = gen_rtx_VEC_DUPLICATE (mode, var);
24928 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
24929 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
24930 if (one_var != 0)
24932 /* We need to shuffle the value to the correct position, so
24933 create a new pseudo to store the intermediate result. */
24935 /* With SSE2, we can use the integer shuffle insns. */
24936 if (mode != V4SFmode && TARGET_SSE2)
24938 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
24939 GEN_INT (1),
24940 GEN_INT (one_var == 1 ? 0 : 1),
24941 GEN_INT (one_var == 2 ? 0 : 1),
24942 GEN_INT (one_var == 3 ? 0 : 1)));
24943 if (target != new_target)
24944 emit_move_insn (target, new_target);
24945 return true;
24948 /* Otherwise convert the intermediate result to V4SFmode and
24949 use the SSE1 shuffle instructions. */
24950 if (mode != V4SFmode)
24952 tmp = gen_reg_rtx (V4SFmode);
24953 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
24955 else
24956 tmp = new_target;
24958 emit_insn (gen_sse_shufps_1 (tmp, tmp, tmp,
24959 GEN_INT (1),
24960 GEN_INT (one_var == 1 ? 0 : 1),
24961 GEN_INT (one_var == 2 ? 0+4 : 1+4),
24962 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
24964 if (mode != V4SFmode)
24965 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
24966 else if (tmp != target)
24967 emit_move_insn (target, tmp);
24969 else if (target != new_target)
24970 emit_move_insn (target, new_target);
24971 return true;
24973 case V8HImode:
24974 case V16QImode:
24975 vsimode = V4SImode;
24976 goto widen;
24977 case V4HImode:
24978 case V8QImode:
24979 if (!mmx_ok)
24980 return false;
24981 vsimode = V2SImode;
24982 goto widen;
24983 widen:
24984 if (one_var != 0)
24985 return false;
24987 /* Zero extend the variable element to SImode and recurse. */
24988 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
24990 x = gen_reg_rtx (vsimode);
24991 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
24992 var, one_var))
24993 gcc_unreachable ();
24995 emit_move_insn (target, gen_lowpart (mode, x));
24996 return true;
24998 default:
24999 return false;
25003 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
25004 consisting of the values in VALS. It is known that all elements
25005 except ONE_VAR are constants. Return true if successful. */
25007 static bool
25008 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
25009 rtx target, rtx vals, int one_var)
25011 rtx var = XVECEXP (vals, 0, one_var);
25012 enum machine_mode wmode;
25013 rtx const_vec, x;
25015 const_vec = copy_rtx (vals);
25016 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
25017 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
25019 switch (mode)
25021 case V2DFmode:
25022 case V2DImode:
25023 case V2SFmode:
25024 case V2SImode:
25025 /* For two-element vectors, it's just as easy to use
25026 the general case. */
25027 return false;
25029 case V4SFmode:
25030 case V4SImode:
25031 case V8HImode:
25032 case V4HImode:
25033 break;
25035 case V16QImode:
25036 if (TARGET_SSE4_1)
25037 break;
25038 wmode = V8HImode;
25039 goto widen;
25040 case V8QImode:
25041 wmode = V4HImode;
25042 goto widen;
25043 widen:
25044 /* There's no way to set one QImode entry easily. Combine
25045 the variable value with its adjacent constant value, and
25046 promote to an HImode set. */
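/* Worked example (hypothetical values): for ONE_VAR == 5 in V16QImode
   the variable byte pairs with constant byte 4; ONE_VAR being odd, VAR
   lands in the high half, giving the HImode word (var << 8) | (c & 0xff),
   which is then inserted as HImode element 5 >> 1 == 2. */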
25047 x = XVECEXP (vals, 0, one_var ^ 1);
25048 if (one_var & 1)
25050 var = convert_modes (HImode, QImode, var, true);
25051 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
25052 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25053 x = GEN_INT (INTVAL (x) & 0xff);
25055 else
25057 var = convert_modes (HImode, QImode, var, true);
25058 x = gen_int_mode (INTVAL (x) << 8, HImode);
25060 if (x != const0_rtx)
25061 var = expand_simple_binop (HImode, IOR, var, x, var,
25062 1, OPTAB_LIB_WIDEN);
25064 x = gen_reg_rtx (wmode);
25065 emit_move_insn (x, gen_lowpart (wmode, const_vec));
25066 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
25068 emit_move_insn (target, gen_lowpart (mode, x));
25069 return true;
25071 default:
25072 return false;
25075 emit_move_insn (target, const_vec);
25076 ix86_expand_vector_set (mmx_ok, target, var, one_var);
25077 return true;
25080 /* A subroutine of ix86_expand_vector_init_general. Use vector
25081 concatenate to handle the most general case: all values variable,
25082 and none identical. */
25084 static void
25085 ix86_expand_vector_init_concat (enum machine_mode mode,
25086 rtx target, rtx *ops, int n)
25088 enum machine_mode cmode, hmode = VOIDmode;
25089 rtx first[4], second[2];
25090 rtvec v;
25091 int i, j;
25093 switch (n)
25095 case 2:
25096 switch (mode)
25098 case V4SImode:
25099 cmode = V2SImode;
25100 break;
25101 case V4SFmode:
25102 cmode = V2SFmode;
25103 break;
25104 case V2DImode:
25105 cmode = DImode;
25106 break;
25107 case V2SImode:
25108 cmode = SImode;
25109 break;
25110 case V2DFmode:
25111 cmode = DFmode;
25112 break;
25113 case V2SFmode:
25114 cmode = SFmode;
25115 break;
25116 default:
25117 gcc_unreachable ();
25120 if (!register_operand (ops[1], cmode))
25121 ops[1] = force_reg (cmode, ops[1]);
25122 if (!register_operand (ops[0], cmode))
25123 ops[0] = force_reg (cmode, ops[0]);
25124 emit_insn (gen_rtx_SET (VOIDmode, target,
25125 gen_rtx_VEC_CONCAT (mode, ops[0],
25126 ops[1])));
25127 break;
25129 case 4:
25130 switch (mode)
25132 case V4SImode:
25133 cmode = V2SImode;
25134 break;
25135 case V4SFmode:
25136 cmode = V2SFmode;
25137 break;
25138 default:
25139 gcc_unreachable ();
25141 goto half;
25143 half:
25144 /* FIXME: We process inputs backward to help RA. PR 36222. */
25145 i = n - 1;
25146 j = (n >> 1) - 1;
25147 for (; i > 0; i -= 2, j--)
25149 first[j] = gen_reg_rtx (cmode);
25150 v = gen_rtvec (2, ops[i - 1], ops[i]);
25151 ix86_expand_vector_init (false, first[j],
25152 gen_rtx_PARALLEL (cmode, v));
25155 n >>= 1;
25156 if (n > 2)
25158 gcc_assert (hmode != VOIDmode);
25159 for (i = j = 0; i < n; i += 2, j++)
25161 second[j] = gen_reg_rtx (hmode);
25162 ix86_expand_vector_init_concat (hmode, second [j],
25163 &first [i], 2);
25165 n >>= 1;
25166 ix86_expand_vector_init_concat (mode, target, second, n);
25168 else
25169 ix86_expand_vector_init_concat (mode, target, first, n);
25170 break;
25172 default:
25173 gcc_unreachable ();
25177 /* A subroutine of ix86_expand_vector_init_general. Use vector
25178 interleave to handle the most general case: all values variable,
25179 and none identical. */
25181 static void
25182 ix86_expand_vector_init_interleave (enum machine_mode mode,
25183 rtx target, rtx *ops, int n)
25185 enum machine_mode first_imode, second_imode, third_imode;
25186 int i, j;
25187 rtx op0, op1;
25188 rtx (*gen_load_even) (rtx, rtx, rtx);
25189 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
25190 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
25192 switch (mode)
25194 case V8HImode:
25195 gen_load_even = gen_vec_setv8hi;
25196 gen_interleave_first_low = gen_vec_interleave_lowv4si;
25197 gen_interleave_second_low = gen_vec_interleave_lowv2di;
25198 first_imode = V4SImode;
25199 second_imode = V2DImode;
25200 third_imode = VOIDmode;
25201 break;
25202 case V16QImode:
25203 gen_load_even = gen_vec_setv16qi;
25204 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
25205 gen_interleave_second_low = gen_vec_interleave_lowv4si;
25206 first_imode = V8HImode;
25207 second_imode = V4SImode;
25208 third_imode = V2DImode;
25209 break;
25210 default:
25211 gcc_unreachable ();
25214 for (i = 0; i < n; i++)
25216 /* Extend the odd element to SImode using a paradoxical SUBREG. */
25217 op0 = gen_reg_rtx (SImode);
25218 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
25220 /* Insert the SImode value as low element of V4SImode vector. */
25221 op1 = gen_reg_rtx (V4SImode);
25222 op0 = gen_rtx_VEC_MERGE (V4SImode,
25223 gen_rtx_VEC_DUPLICATE (V4SImode,
25224 op0),
25225 CONST0_RTX (V4SImode),
25226 const1_rtx);
25227 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
25229 /* Cast the V4SImode vector back to a vector in the original mode. */
25230 op0 = gen_reg_rtx (mode);
25231 emit_move_insn (op0, gen_lowpart (mode, op1));
25233 /* Load even elements into the second position. */
25234 emit_insn ((*gen_load_even) (op0, ops [i + i + 1],
25235 const1_rtx));
25237 /* Cast vector to FIRST_IMODE vector. */
25238 ops[i] = gen_reg_rtx (first_imode);
25239 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
25242 /* Interleave low FIRST_IMODE vectors. */
25243 for (i = j = 0; i < n; i += 2, j++)
25245 op0 = gen_reg_rtx (first_imode);
25246 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
25248 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
25249 ops[j] = gen_reg_rtx (second_imode);
25250 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
25253 /* Interleave low SECOND_IMODE vectors. */
25254 switch (second_imode)
25256 case V4SImode:
25257 for (i = j = 0; i < n / 2; i += 2, j++)
25259 op0 = gen_reg_rtx (second_imode);
25260 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
25261 ops[i + 1]));
25263 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
25264 vector. */
25265 ops[j] = gen_reg_rtx (third_imode);
25266 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
25268 second_imode = V2DImode;
25269 gen_interleave_second_low = gen_vec_interleave_lowv2di;
25270 /* FALLTHRU */
25272 case V2DImode:
25273 op0 = gen_reg_rtx (second_imode);
25274 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
25275 ops[1]));
25277 /* Cast the SECOND_IMODE vector back to a vector in the original
25278 mode. */
25279 emit_insn (gen_rtx_SET (VOIDmode, target,
25280 gen_lowpart (mode, op0)));
25281 break;
25283 default:
25284 gcc_unreachable ();
25288 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
25289 all values variable, and none identical. */
25291 static void
25292 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
25293 rtx target, rtx vals)
25295 rtx ops[16];
25296 int n, i;
25298 switch (mode)
25300 case V2SFmode:
25301 case V2SImode:
25302 if (!mmx_ok && !TARGET_SSE)
25303 break;
25304 /* FALLTHRU */
25306 case V4SFmode:
25307 case V4SImode:
25308 case V2DFmode:
25309 case V2DImode:
25310 n = GET_MODE_NUNITS (mode);
25311 for (i = 0; i < n; i++)
25312 ops[i] = XVECEXP (vals, 0, i);
25313 ix86_expand_vector_init_concat (mode, target, ops, n);
25314 return;
25316 case V16QImode:
25317 if (!TARGET_SSE4_1)
25318 break;
25319 /* FALLTHRU */
25321 case V8HImode:
25322 if (!TARGET_SSE2)
25323 break;
25325 n = GET_MODE_NUNITS (mode);
25326 for (i = 0; i < n; i++)
25327 ops[i] = XVECEXP (vals, 0, i);
25328 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
25329 return;
25331 case V4HImode:
25332 case V8QImode:
25333 break;
25335 default:
25336 gcc_unreachable ();
25340 int i, j, n_elts, n_words, n_elt_per_word;
25341 enum machine_mode inner_mode;
25342 rtx words[4], shift;
25344 inner_mode = GET_MODE_INNER (mode);
25345 n_elts = GET_MODE_NUNITS (mode);
25346 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
25347 n_elt_per_word = n_elts / n_words;
25348 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
25350 for (i = 0; i < n_words; ++i)
25352 rtx word = NULL_RTX;
25354 for (j = 0; j < n_elt_per_word; ++j)
25356 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
25357 elt = convert_modes (word_mode, inner_mode, elt, true);
25359 if (j == 0)
25360 word = elt;
25361 else
25363 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
25364 word, 1, OPTAB_LIB_WIDEN);
25365 word = expand_simple_binop (word_mode, IOR, word, elt,
25366 word, 1, OPTAB_LIB_WIDEN);
25370 words[i] = word;
25373 if (n_words == 1)
25374 emit_move_insn (target, gen_lowpart (mode, words[0]));
25375 else if (n_words == 2)
25377 rtx tmp = gen_reg_rtx (mode);
25378 emit_clobber (tmp);
25379 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
25380 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
25381 emit_move_insn (target, tmp);
25383 else if (n_words == 4)
25385 rtx tmp = gen_reg_rtx (V4SImode);
25386 gcc_assert (word_mode == SImode);
25387 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
25388 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
25389 emit_move_insn (target, gen_lowpart (mode, tmp));
25391 else
25392 gcc_unreachable ();
25396 /* Initialize vector TARGET via VALS. Suppress the use of MMX
25397 instructions unless MMX_OK is true. */
25399 void
25400 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
25402 enum machine_mode mode = GET_MODE (target);
25403 enum machine_mode inner_mode = GET_MODE_INNER (mode);
25404 int n_elts = GET_MODE_NUNITS (mode);
25405 int n_var = 0, one_var = -1;
25406 bool all_same = true, all_const_zero = true;
25407 int i;
25408 rtx x;
25410 for (i = 0; i < n_elts; ++i)
25412 x = XVECEXP (vals, 0, i);
25413 if (!(CONST_INT_P (x)
25414 || GET_CODE (x) == CONST_DOUBLE
25415 || GET_CODE (x) == CONST_FIXED))
25416 n_var++, one_var = i;
25417 else if (x != CONST0_RTX (inner_mode))
25418 all_const_zero = false;
25419 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
25420 all_same = false;
25423 /* Constants are best loaded from the constant pool. */
25424 if (n_var == 0)
25426 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
25427 return;
25430 /* If all values are identical, broadcast the value. */
25431 if (all_same
25432 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
25433 XVECEXP (vals, 0, 0)))
25434 return;
25436 /* Values where only one field is non-constant are best loaded from
25437 the pool and overwritten via move later. */
25438 if (n_var == 1)
25440 if (all_const_zero
25441 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
25442 XVECEXP (vals, 0, one_var),
25443 one_var))
25444 return;
25446 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
25447 return;
25450 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
25453 void
25454 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
25456 enum machine_mode mode = GET_MODE (target);
25457 enum machine_mode inner_mode = GET_MODE_INNER (mode);
25458 bool use_vec_merge = false;
25459 rtx tmp;
25461 switch (mode)
25463 case V2SFmode:
25464 case V2SImode:
25465 if (mmx_ok)
25467 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
25468 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
25469 if (elt == 0)
25470 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
25471 else
25472 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
25473 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
25474 return;
25476 break;
25478 case V2DImode:
25479 use_vec_merge = TARGET_SSE4_1;
25480 if (use_vec_merge)
25481 break;
25483 case V2DFmode:
25485 rtx op0, op1;
25487 /* For two-element vectors, we implement a VEC_CONCAT with
25488 the extraction of the other element. */
25490 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
25491 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
25493 if (elt == 0)
25494 op0 = val, op1 = tmp;
25495 else
25496 op0 = tmp, op1 = val;
25498 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
25499 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
25501 return;
25503 case V4SFmode:
25504 use_vec_merge = TARGET_SSE4_1;
25505 if (use_vec_merge)
25506 break;
25508 switch (elt)
25510 case 0:
25511 use_vec_merge = true;
25512 break;
25514 case 1:
25515 /* tmp = target = A B C D */
25516 tmp = copy_to_reg (target);
25517 /* target = A A B B */
25518 emit_insn (gen_sse_unpcklps (target, target, target));
25519 /* target = X A B B */
25520 ix86_expand_vector_set (false, target, val, 0);
25521 /* target = A X C D */
25522 emit_insn (gen_sse_shufps_1 (target, target, tmp,
25523 GEN_INT (1), GEN_INT (0),
25524 GEN_INT (2+4), GEN_INT (3+4)));
25525 return;
25527 case 2:
25528 /* tmp = target = A B C D */
25529 tmp = copy_to_reg (target);
25530 /* tmp = X B C D */
25531 ix86_expand_vector_set (false, tmp, val, 0);
25532 /* target = A B X D */
25533 emit_insn (gen_sse_shufps_1 (target, target, tmp,
25534 GEN_INT (0), GEN_INT (1),
25535 GEN_INT (0+4), GEN_INT (3+4)));
25536 return;
25538 case 3:
25539 /* tmp = target = A B C D */
25540 tmp = copy_to_reg (target);
25541 /* tmp = X B C D */
25542 ix86_expand_vector_set (false, tmp, val, 0);
25543 /* target = A B C X */
25544 emit_insn (gen_sse_shufps_1 (target, target, tmp,
25545 GEN_INT (0), GEN_INT (1),
25546 GEN_INT (2+4), GEN_INT (0+4)));
25547 return;
25549 default:
25550 gcc_unreachable ();
25552 break;
25554 case V4SImode:
25555 use_vec_merge = TARGET_SSE4_1;
25556 if (use_vec_merge)
25557 break;
25559 /* Element 0 handled by vec_merge below. */
25560 if (elt == 0)
25562 use_vec_merge = true;
25563 break;
25566 if (TARGET_SSE2)
25568 /* With SSE2, use integer shuffles to swap element 0 and ELT,
25569 store into element 0, then shuffle them back. */
25571 rtx order[4];
25573 order[0] = GEN_INT (elt);
25574 order[1] = const1_rtx;
25575 order[2] = const2_rtx;
25576 order[3] = GEN_INT (3);
25577 order[elt] = const0_rtx;
25579 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
25580 order[1], order[2], order[3]));
25582 ix86_expand_vector_set (false, target, val, 0);
25584 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
25585 order[1], order[2], order[3]));
25587 else
25589 /* For SSE1, we have to reuse the V4SF code. */
25590 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
25591 gen_lowpart (SFmode, val), elt);
25593 return;
25595 case V8HImode:
25596 use_vec_merge = TARGET_SSE2;
25597 break;
25598 case V4HImode:
25599 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
25600 break;
25602 case V16QImode:
25603 use_vec_merge = TARGET_SSE4_1;
25604 break;
25606 case V8QImode:
25607 default:
25608 break;
25611 if (use_vec_merge)
25613 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
25614 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
25615 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
25617 else
25619 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
25621 emit_move_insn (mem, target);
25623 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
25624 emit_move_insn (tmp, val);
25626 emit_move_insn (target, mem);
25630 void
25631 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
25633 enum machine_mode mode = GET_MODE (vec);
25634 enum machine_mode inner_mode = GET_MODE_INNER (mode);
25635 bool use_vec_extr = false;
25636 rtx tmp;
25638 switch (mode)
25640 case V2SImode:
25641 case V2SFmode:
25642 if (!mmx_ok)
25643 break;
25644 /* FALLTHRU */
25646 case V2DFmode:
25647 case V2DImode:
25648 use_vec_extr = true;
25649 break;
25651 case V4SFmode:
25652 use_vec_extr = TARGET_SSE4_1;
25653 if (use_vec_extr)
25654 break;
25656 switch (elt)
25658 case 0:
25659 tmp = vec;
25660 break;
25662 case 1:
25663 case 3:
25664 tmp = gen_reg_rtx (mode);
25665 emit_insn (gen_sse_shufps_1 (tmp, vec, vec,
25666 GEN_INT (elt), GEN_INT (elt),
25667 GEN_INT (elt+4), GEN_INT (elt+4)));
25668 break;
25670 case 2:
25671 tmp = gen_reg_rtx (mode);
25672 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
25673 break;
25675 default:
25676 gcc_unreachable ();
25678 vec = tmp;
25679 use_vec_extr = true;
25680 elt = 0;
25681 break;
25683 case V4SImode:
25684 use_vec_extr = TARGET_SSE4_1;
25685 if (use_vec_extr)
25686 break;
25688 if (TARGET_SSE2)
25690 switch (elt)
25692 case 0:
25693 tmp = vec;
25694 break;
25696 case 1:
25697 case 3:
25698 tmp = gen_reg_rtx (mode);
25699 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
25700 GEN_INT (elt), GEN_INT (elt),
25701 GEN_INT (elt), GEN_INT (elt)));
25702 break;
25704 case 2:
25705 tmp = gen_reg_rtx (mode);
25706 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
25707 break;
25709 default:
25710 gcc_unreachable ();
25712 vec = tmp;
25713 use_vec_extr = true;
25714 elt = 0;
25716 else
25718 /* For SSE1, we have to reuse the V4SF code. */
25719 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
25720 gen_lowpart (V4SFmode, vec), elt);
25721 return;
25723 break;
25725 case V8HImode:
25726 use_vec_extr = TARGET_SSE2;
25727 break;
25728 case V4HImode:
25729 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
25730 break;
25732 case V16QImode:
25733 use_vec_extr = TARGET_SSE4_1;
25734 break;
25736 case V8QImode:
25737 /* ??? Could extract the appropriate HImode element and shift. */
25738 default:
25739 break;
25742 if (use_vec_extr)
25744 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
25745 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
25747 /* Let the rtl optimizers know about the zero extension performed. */
25748 if (inner_mode == QImode || inner_mode == HImode)
25750 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
25751 target = gen_lowpart (SImode, target);
25754 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
25756 else
25758 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
25760 emit_move_insn (mem, vec);
25762 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
25763 emit_move_insn (target, tmp);
25767 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
25768 pattern to reduce; DEST is the destination; IN is the input vector. */
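/* Data-flow sketch for IN == [a b c d], writing `op' for FN: movhlps
   gives [c d c d]; the first FN yields [a op c, b op d, ...]; the
   shuffle broadcasts element 1; the final FN leaves
   ((a op c) op (b op d)) in element 0 of DEST. */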
25770 void
25771 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
25773 rtx tmp1, tmp2, tmp3;
25775 tmp1 = gen_reg_rtx (V4SFmode);
25776 tmp2 = gen_reg_rtx (V4SFmode);
25777 tmp3 = gen_reg_rtx (V4SFmode);
25779 emit_insn (gen_sse_movhlps (tmp1, in, in));
25780 emit_insn (fn (tmp2, tmp1, in));
25782 emit_insn (gen_sse_shufps_1 (tmp3, tmp2, tmp2,
25783 GEN_INT (1), GEN_INT (1),
25784 GEN_INT (1+4), GEN_INT (1+4)));
25785 emit_insn (fn (dest, tmp2, tmp3));
25788 /* Target hook for scalar_mode_supported_p. */
25789 static bool
25790 ix86_scalar_mode_supported_p (enum machine_mode mode)
25792 if (DECIMAL_FLOAT_MODE_P (mode))
25793 return true;
25794 else if (mode == TFmode)
25795 return true;
25796 else
25797 return default_scalar_mode_supported_p (mode);
25800 /* Implements target hook vector_mode_supported_p. */
25801 static bool
25802 ix86_vector_mode_supported_p (enum machine_mode mode)
25804 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
25805 return true;
25806 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
25807 return true;
25808 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
25809 return true;
25810 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
25811 return true;
25812 return false;
25815 /* Target hook for c_mode_for_suffix. */
25816 static enum machine_mode
25817 ix86_c_mode_for_suffix (char suffix)
25819 if (suffix == 'q')
25820 return TFmode;
25821 if (suffix == 'w')
25822 return XFmode;
25824 return VOIDmode;
25827 /* Worker function for TARGET_MD_ASM_CLOBBERS.
25829 We do this in the new i386 backend to maintain source compatibility
25830 with the old cc0-based compiler. */
25832 static tree
25833 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
25834 tree inputs ATTRIBUTE_UNUSED,
25835 tree clobbers)
25837 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
25838 clobbers);
25839 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
25840 clobbers);
25841 return clobbers;
25844 /* Implements the target hook targetm.asm.encode_section_info. This
25845 is not used by NetWare. */
25847 static void ATTRIBUTE_UNUSED
25848 ix86_encode_section_info (tree decl, rtx rtl, int first)
25850 default_encode_section_info (decl, rtl, first);
25852 if (TREE_CODE (decl) == VAR_DECL
25853 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
25854 && ix86_in_large_data_p (decl))
25855 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
25858 /* Worker function for REVERSE_CONDITION. */
25860 enum rtx_code
25861 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
25863 return (mode != CCFPmode && mode != CCFPUmode
25864 ? reverse_condition (code)
25865 : reverse_condition_maybe_unordered (code));
25868 /* Output code to perform an x87 FP register move, from OPERANDS[1]
25869 to OPERANDS[0]. */
25871 const char *
25872 output_387_reg_move (rtx insn, rtx *operands)
25874 if (REG_P (operands[0]))
25876 if (REG_P (operands[1])
25877 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
25879 if (REGNO (operands[0]) == FIRST_STACK_REG)
25880 return output_387_ffreep (operands, 0);
25881 return "fstp\t%y0";
25883 if (STACK_TOP_P (operands[0]))
25884 return "fld%z1\t%y1";
25885 return "fst\t%y0";
25887 else if (MEM_P (operands[0]))
25889 gcc_assert (REG_P (operands[1]));
25890 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
25891 return "fstp%z0\t%y0";
25892 else
25894 /* There is no non-popping store to memory for XFmode.
25895 So if we need one, follow the store with a load. */
25896 if (GET_MODE (operands[0]) == XFmode)
25897 return "fstp%z0\t%y0\n\tfld%z0\t%y0";
25898 else
25899 return "fst%z0\t%y0";
25902 else
25903 gcc_unreachable();
25906 /* Output code to perform a conditional jump to LABEL, if the C2 flag in the
25907 FP status register is set. */
25909 void
25910 ix86_emit_fp_unordered_jump (rtx label)
25912 rtx reg = gen_reg_rtx (HImode);
25913 rtx temp;
25915 emit_insn (gen_x86_fnstsw_1 (reg));
25917 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_size))
25919 emit_insn (gen_x86_sahf_1 (reg));
25921 temp = gen_rtx_REG (CCmode, FLAGS_REG);
25922 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
25924 else
25926 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
25928 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
25929 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
25932 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
25933 gen_rtx_LABEL_REF (VOIDmode, label),
25934 pc_rtx);
25935 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
25937 emit_jump_insn (temp);
25938 predict_jump (REG_BR_PROB_BASE * 10 / 100);
25941 /* Output code to perform a log1p XFmode calculation. */
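/* The comparison threshold used below is 1 - sqrt (2) / 2 (~0.2929),
   the documented operand bound of the x87 fyl2xp1 instruction; larger
   magnitudes fall back to fyl2x on 1.0 + op1. Both paths scale by the
   fldln2 constant, since log1p (x) == ln (2) * log2 (1 + x). */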
25943 void ix86_emit_i387_log1p (rtx op0, rtx op1)
25945 rtx label1 = gen_label_rtx ();
25946 rtx label2 = gen_label_rtx ();
25948 rtx tmp = gen_reg_rtx (XFmode);
25949 rtx tmp2 = gen_reg_rtx (XFmode);
25951 emit_insn (gen_absxf2 (tmp, op1));
25952 emit_insn (gen_cmpxf (tmp,
25953 CONST_DOUBLE_FROM_REAL_VALUE (
25954 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
25955 XFmode)));
25956 emit_jump_insn (gen_bge (label1));
25958 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
25959 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
25960 emit_jump (label2);
25962 emit_label (label1);
25963 emit_move_insn (tmp, CONST1_RTX (XFmode));
25964 emit_insn (gen_addxf3 (tmp, op1, tmp));
25965 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
25966 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
25968 emit_label (label2);
25971 /* Output code to perform a Newton-Raphson approximation of a single precision
25972 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
25974 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
25976 rtx x0, x1, e0, e1, two;
25978 x0 = gen_reg_rtx (mode);
25979 e0 = gen_reg_rtx (mode);
25980 e1 = gen_reg_rtx (mode);
25981 x1 = gen_reg_rtx (mode);
25983 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
25985 if (VECTOR_MODE_P (mode))
25986 two = ix86_build_const_vector (SFmode, true, two);
25988 two = force_reg (mode, two);
25990 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
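/* Worked example with round numbers (illustrative only): for b == 3.0
   and an rcp estimate x0 == 0.333 (good to roughly 12 bits), e0 == 0.999,
   e1 == 1.001 and x1 == 0.333333 -- one step about doubles the number
   of correct bits. */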
25992 /* x0 = rcp(b) estimate */
25993 emit_insn (gen_rtx_SET (VOIDmode, x0,
25994 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
25995 UNSPEC_RCP)));
25996 /* e0 = x0 * b */
25997 emit_insn (gen_rtx_SET (VOIDmode, e0,
25998 gen_rtx_MULT (mode, x0, b)));
25999 /* e1 = 2. - e0 */
26000 emit_insn (gen_rtx_SET (VOIDmode, e1,
26001 gen_rtx_MINUS (mode, two, e0)));
26002 /* x1 = x0 * e1 */
26003 emit_insn (gen_rtx_SET (VOIDmode, x1,
26004 gen_rtx_MULT (mode, x0, e1)));
26005 /* res = a * x1 */
26006 emit_insn (gen_rtx_SET (VOIDmode, res,
26007 gen_rtx_MULT (mode, a, x1)));
26010 /* Output code to perform a Newton-Raphson approximation of a
26011 single precision floating point [reciprocal] square root. */
26013 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
26014 bool recip)
26016 rtx x0, e0, e1, e2, e3, mthree, mhalf;
26017 REAL_VALUE_TYPE r;
26019 x0 = gen_reg_rtx (mode);
26020 e0 = gen_reg_rtx (mode);
26021 e1 = gen_reg_rtx (mode);
26022 e2 = gen_reg_rtx (mode);
26023 e3 = gen_reg_rtx (mode);
26025 real_from_integer (&r, VOIDmode, -3, -1, 0);
26026 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
26028 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
26029 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
26031 if (VECTOR_MODE_P (mode))
26033 mthree = ix86_build_const_vector (SFmode, true, mthree);
26034 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
26037 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
26038 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
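/* Sanity check with exact inputs: for a == 4.0 and a perfect estimate
   x0 == 0.5, e0 == 2.0, e1 == 1.0, e2 == 1.0 - 3.0 == -2.0 and
   e3 == -0.5 * e0 == -1.0, so res == e2 * e3 == 2.0 == sqrt (4.0);
   with RECIP, e3 == -0.5 * x0 == -0.25 and res == 0.5 == rsqrt (4.0). */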
26040 /* x0 = rsqrt(a) estimate */
26041 emit_insn (gen_rtx_SET (VOIDmode, x0,
26042 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
26043 UNSPEC_RSQRT)));
26045 /* If a == 0.0, filter out the infinite result to prevent a NaN for sqrt (0.0). */
26046 if (!recip)
26048 rtx zero, mask;
26050 zero = gen_reg_rtx (mode);
26051 mask = gen_reg_rtx (mode);
26053 zero = force_reg (mode, CONST0_RTX(mode));
26054 emit_insn (gen_rtx_SET (VOIDmode, mask,
26055 gen_rtx_NE (mode, zero, a)));
26057 emit_insn (gen_rtx_SET (VOIDmode, x0,
26058 gen_rtx_AND (mode, x0, mask)));
26061 /* e0 = x0 * a */
26062 emit_insn (gen_rtx_SET (VOIDmode, e0,
26063 gen_rtx_MULT (mode, x0, a)));
26064 /* e1 = e0 * x0 */
26065 emit_insn (gen_rtx_SET (VOIDmode, e1,
26066 gen_rtx_MULT (mode, e0, x0)));
26068 /* e2 = e1 - 3. */
26069 mthree = force_reg (mode, mthree);
26070 emit_insn (gen_rtx_SET (VOIDmode, e2,
26071 gen_rtx_PLUS (mode, e1, mthree)));
26073 mhalf = force_reg (mode, mhalf);
26074 if (recip)
26075 /* e3 = -.5 * x0 */
26076 emit_insn (gen_rtx_SET (VOIDmode, e3,
26077 gen_rtx_MULT (mode, x0, mhalf)));
26078 else
26079 /* e3 = -.5 * e0 */
26080 emit_insn (gen_rtx_SET (VOIDmode, e3,
26081 gen_rtx_MULT (mode, e0, mhalf)));
26082 /* ret = e2 * e3 */
26083 emit_insn (gen_rtx_SET (VOIDmode, res,
26084 gen_rtx_MULT (mode, e2, e3)));
26087 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
26089 static void ATTRIBUTE_UNUSED
26090 i386_solaris_elf_named_section (const char *name, unsigned int flags,
26091 tree decl)
26093 /* With Binutils 2.15, the "@unwind" marker must be specified on
26094 every occurrence of the ".eh_frame" section, not just the first
26095 one. */
26096 if (TARGET_64BIT
26097 && strcmp (name, ".eh_frame") == 0)
26099 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
26100 flags & SECTION_WRITE ? "aw" : "a");
26101 return;
26103 default_elf_asm_named_section (name, flags, decl);
26106 /* Return the mangling of TYPE if it is an extended fundamental type. */
26108 static const char *
26109 ix86_mangle_type (const_tree type)
26111 type = TYPE_MAIN_VARIANT (type);
26113 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
26114 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
26115 return NULL;
26117 switch (TYPE_MODE (type))
26119 case TFmode:
26120 /* __float128 is "g". */
26121 return "g";
26122 case XFmode:
26123 /* "long double" or __float80 is "e". */
26124 return "e";
26125 default:
26126 return NULL;
26130 /* For 32-bit code we can save PIC register setup by using
26131 __stack_chk_fail_local hidden function instead of calling
26132 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
26133 register, so it is better to call __stack_chk_fail directly. */
26135 static tree
26136 ix86_stack_protect_fail (void)
26138 return TARGET_64BIT
26139 ? default_external_stack_protect_fail ()
26140 : default_hidden_stack_protect_fail ();
26143 /* Select a format to encode pointers in exception handling data. CODE
26144 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
26145 true if the symbol may be affected by dynamic relocations.
26147 ??? All x86 object file formats are capable of representing this.
26148 After all, the relocation needed is the same as for the call insn.
26149 Whether or not a particular assembler allows us to enter such, I
26150 guess we'll have to see. */
26151 int
26152 asm_preferred_eh_data_format (int code, int global)
26154 if (flag_pic)
26156 int type = DW_EH_PE_sdata8;
26157 if (!TARGET_64BIT
26158 || ix86_cmodel == CM_SMALL_PIC
26159 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
26160 type = DW_EH_PE_sdata4;
26161 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
26163 if (ix86_cmodel == CM_SMALL
26164 || (ix86_cmodel == CM_MEDIUM && code))
26165 return DW_EH_PE_udata4;
26166 return DW_EH_PE_absptr;
26169 /* Expand copysign: apply the sign of SIGN to the positive value ABS_VALUE,
26170 storing the result in RESULT. If MASK is non-null, it shall be a mask to
26171 mask out the sign bit. */
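/* In bit terms this computes result = abs_value | (sign & SIGNBIT).
   Note the NOT below: when the caller passes the MASK produced by
   ix86_expand_sse_fabs, that mask holds ~SIGNBIT and so must be
   complemented before isolating the sign bit. */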
26172 static void
26173 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
26175 enum machine_mode mode = GET_MODE (sign);
26176 rtx sgn = gen_reg_rtx (mode);
26177 if (mask == NULL_RTX)
26179 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
26180 if (!VECTOR_MODE_P (mode))
26182 /* We need to generate a scalar mode mask in this case. */
26183 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
26184 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
26185 mask = gen_reg_rtx (mode);
26186 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
26189 else
26190 mask = gen_rtx_NOT (mode, mask);
26191 emit_insn (gen_rtx_SET (VOIDmode, sgn,
26192 gen_rtx_AND (mode, mask, sign)));
26193 emit_insn (gen_rtx_SET (VOIDmode, result,
26194 gen_rtx_IOR (mode, abs_value, sgn)));
26197 /* Expand fabs (OP0) and return a new rtx that holds the result. The
26198 mask for masking out the sign-bit is stored in *SMASK, if that is
26199 non-null. */
26200 static rtx
26201 ix86_expand_sse_fabs (rtx op0, rtx *smask)
26203 enum machine_mode mode = GET_MODE (op0);
26204 rtx xa, mask;
26206 xa = gen_reg_rtx (mode);
26207 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
26208 if (!VECTOR_MODE_P (mode))
26210 /* We need to generate a scalar mode mask in this case. */
26211 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
26212 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
26213 mask = gen_reg_rtx (mode);
26214 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
26216 emit_insn (gen_rtx_SET (VOIDmode, xa,
26217 gen_rtx_AND (mode, op0, mask)));
26219 if (smask)
26220 *smask = mask;
26222 return xa;
26225 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
26226 swapping the operands if SWAP_OPERANDS is true. The expanded
26227 code is a forward jump to a newly created label in case the
26228 comparison is true. The generated label rtx is returned. */
26229 static rtx
26230 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
26231 bool swap_operands)
26233 rtx label, tmp;
26235 if (swap_operands)
26237 tmp = op0;
26238 op0 = op1;
26239 op1 = tmp;
26242 label = gen_label_rtx ();
26243 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
26244 emit_insn (gen_rtx_SET (VOIDmode, tmp,
26245 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
26246 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
26247 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
26248 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
26249 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
26250 JUMP_LABEL (tmp) = label;
26252 return label;
26255 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
26256 using comparison code CODE. Operands are swapped for the comparison if
26257 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
26258 static rtx
26259 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
26260 bool swap_operands)
26262 enum machine_mode mode = GET_MODE (op0);
26263 rtx mask = gen_reg_rtx (mode);
26265 if (swap_operands)
26267 rtx tmp = op0;
26268 op0 = op1;
26269 op1 = tmp;
26272 if (mode == DFmode)
26273 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
26274 gen_rtx_fmt_ee (code, mode, op0, op1)));
26275 else
26276 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
26277 gen_rtx_fmt_ee (code, mode, op0, op1)));
26279 return mask;
26282 /* Generate and return a rtx of mode MODE for 2**n where n is the number
26283 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
26284 static rtx
26285 ix86_gen_TWO52 (enum machine_mode mode)
26287 REAL_VALUE_TYPE TWO52r;
26288 rtx TWO52;
26290 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
26291 TWO52 = const_double_from_real_value (TWO52r, mode);
26292 TWO52 = force_reg (mode, TWO52);
26294 return TWO52;
26297 /* Expand SSE sequence for computing lround from OP1 storing
26298 into OP0. */
26299 void
26300 ix86_expand_lround (rtx op0, rtx op1)
26302 /* C code for the stuff we're doing below:
26303 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
26304 return (long)tmp;
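Using the largest double below 0.5 instead of 0.5 itself matters at the
halfway points: for x equal to the largest double below 0.5, x + 0.5
rounds (ties-to-even) up to 1.0, so lround would wrongly return 1,
while x + nextafter (0.5, 0.0) is exact and truncates to the correct 0.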
26306 enum machine_mode mode = GET_MODE (op1);
26307 const struct real_format *fmt;
26308 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
26309 rtx adj;
26311 /* load nextafter (0.5, 0.0) */
26312 fmt = REAL_MODE_FORMAT (mode);
26313 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
26314 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
26316 /* adj = copysign (0.5, op1) */
26317 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
26318 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
26320 /* adj = op1 + adj */
26321 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
26323 /* op0 = (imode)adj */
26324 expand_fix (op0, adj, 0);
26327 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
26328 into OPERAND0. */
26329 void
26330 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
26332 /* C code for the stuff we're doing below (for do_floor):
26333 xi = (long)op1;
26334 xi -= (double)xi > op1 ? 1 : 0;
26335 return xi;
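E.g. lfloor (-2.3): xi == -2 after truncation, and since
(double) xi == -2.0 is greater than -2.3, the compensation step
subtracts 1, giving the correct -3.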
26337 enum machine_mode fmode = GET_MODE (op1);
26338 enum machine_mode imode = GET_MODE (op0);
26339 rtx ireg, freg, label, tmp;
26341 /* reg = (long)op1 */
26342 ireg = gen_reg_rtx (imode);
26343 expand_fix (ireg, op1, 0);
26345 /* freg = (double)reg */
26346 freg = gen_reg_rtx (fmode);
26347 expand_float (freg, ireg, 0);
26349 /* ireg = (freg > op1) ? ireg - 1 : ireg */
26350 label = ix86_expand_sse_compare_and_jump (UNLE,
26351 freg, op1, !do_floor);
26352 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
26353 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
26354 emit_move_insn (ireg, tmp);
26356 emit_label (label);
26357 LABEL_NUSES (label) = 1;
26359 emit_move_insn (op0, ireg);
26362 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
26363 result in OPERAND0. */
26364 void
26365 ix86_expand_rint (rtx operand0, rtx operand1)
26367 /* C code for the stuff we're doing below:
26368 xa = fabs (operand1);
26369 if (!isless (xa, 2**52))
26370 return operand1;
26371 xa = xa + 2**52 - 2**52;
26372 return copysign (xa, operand1);
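The 2**52 trick: for 0 <= xa < 2**52 the ulp of xa + 2**52 is exactly
1.0, so the addition rounds xa to an integer and the subtraction
recovers it; e.g. 3.7 + 2**52 rounds to 2**52 + 4.0, and subtracting
2**52 leaves 4.0.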
26374 enum machine_mode mode = GET_MODE (operand0);
26375 rtx res, xa, label, TWO52, mask;
26377 res = gen_reg_rtx (mode);
26378 emit_move_insn (res, operand1);
26380 /* xa = abs (operand1) */
26381 xa = ix86_expand_sse_fabs (res, &mask);
26383 /* if (!isless (xa, TWO52)) goto label; */
26384 TWO52 = ix86_gen_TWO52 (mode);
26385 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
26387 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
26388 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
26390 ix86_sse_copysign_to_positive (res, xa, res, mask);
26392 emit_label (label);
26393 LABEL_NUSES (label) = 1;
26395 emit_move_insn (operand0, res);
26398 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
26399 into OPERAND0. */
26400 void
26401 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
26403 /* C code for the stuff we expand below.
26404 double xa = fabs (x), x2;
26405 if (!isless (xa, TWO52))
26406 return x;
26407 xa = xa + TWO52 - TWO52;
26408 x2 = copysign (xa, x);
26409 Compensate. Floor:
26410 if (x2 > x)
26411 x2 -= 1;
26412 Compensate. Ceil:
26413 if (x2 < x)
26414 x2 -= -1;
26415 return x2;
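E.g. floor (3.5): the 2**52 trick rounds ties-to-even, so x2 == 4.0 is
greater than 3.5 and the compensation subtracts 1.0, giving the
correct 3.0.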
26417 enum machine_mode mode = GET_MODE (operand0);
26418 rtx xa, TWO52, tmp, label, one, res, mask;
26420 TWO52 = ix86_gen_TWO52 (mode);
26422 /* Temporary for holding the result, initialized to the input
26423 operand to ease control flow. */
26424 res = gen_reg_rtx (mode);
26425 emit_move_insn (res, operand1);
26427 /* xa = abs (operand1) */
26428 xa = ix86_expand_sse_fabs (res, &mask);
26430 /* if (!isless (xa, TWO52)) goto label; */
26431 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
26433 /* xa = xa + TWO52 - TWO52; */
26434 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
26435 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
26437 /* xa = copysign (xa, operand1) */
26438 ix86_sse_copysign_to_positive (xa, xa, res, mask);
26440 /* generate 1.0 or -1.0 */
26441 one = force_reg (mode,
26442 const_double_from_real_value (do_floor
26443 ? dconst1 : dconstm1, mode));
26445 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
26446 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
26447 emit_insn (gen_rtx_SET (VOIDmode, tmp,
26448 gen_rtx_AND (mode, one, tmp)));
26449 /* We always need to subtract here to preserve signed zero. */
26450 tmp = expand_simple_binop (mode, MINUS,
26451 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
26452 emit_move_insn (res, tmp);
26454 emit_label (label);
26455 LABEL_NUSES (label) = 1;
26457 emit_move_insn (operand0, res);
26460 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
26461 into OPERAND0. */
26462 void
26463 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
26465 /* C code for the stuff we expand below.
26466 double xa = fabs (x), x2;
26467 if (!isless (xa, TWO52))
26468 return x;
26469 x2 = (double)(long)x;
26470 Compensate. Floor:
26471 if (x2 > x)
26472 x2 -= 1;
26473 Compensate. Ceil:
26474 if (x2 < x)
26475 x2 += 1;
26476 if (HONOR_SIGNED_ZEROS (mode))
26477 return copysign (x2, x);
26478 return x2;
26480 enum machine_mode mode = GET_MODE (operand0);
26481 rtx xa, xi, TWO52, tmp, label, one, res, mask;
26483 TWO52 = ix86_gen_TWO52 (mode);
26485 /* Temporary for holding the result, initialized to the input
26486 operand to ease control flow. */
26487 res = gen_reg_rtx (mode);
26488 emit_move_insn (res, operand1);
26490 /* xa = abs (operand1) */
26491 xa = ix86_expand_sse_fabs (res, &mask);
26493 /* if (!isless (xa, TWO52)) goto label; */
26494 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
26496 /* xa = (double)(long)x */
26497 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
26498 expand_fix (xi, res, 0);
26499 expand_float (xa, xi, 0);
26501 /* generate 1.0 */
26502 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
26504 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
26505 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
26506 emit_insn (gen_rtx_SET (VOIDmode, tmp,
26507 gen_rtx_AND (mode, one, tmp)));
26508 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
26509 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
26510 emit_move_insn (res, tmp);
26512 if (HONOR_SIGNED_ZEROS (mode))
26513 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
26515 emit_label (label);
26516 LABEL_NUSES (label) = 1;
26518 emit_move_insn (operand0, res);
26521 /* Expand SSE sequence for computing round from OPERAND1 storing
26522 into OPERAND0. Sequence that works without relying on DImode truncation
26523 via cvttsd2siq, which is only available on 64-bit targets. */
26524 void
26525 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
26527 /* C code for the stuff we expand below.
26528 double xa = fabs (x), xa2, x2;
26529 if (!isless (xa, TWO52))
26530 return x;
26531 Using the absolute value and copying back sign makes
26532 -0.0 -> -0.0 correct.
26533 xa2 = xa + TWO52 - TWO52;
26534 Compensate.
26535 dxa = xa2 - xa;
26536 if (dxa <= -0.5)
26537 xa2 += 1;
26538 else if (dxa > 0.5)
26539 xa2 -= 1;
26540 x2 = copysign (xa2, x);
26541 return x2;
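E.g. round (2.5): the 2**52 trick gives xa2 == 2.0 (ties-to-even), so
dxa == -0.5 and the dxa <= -0.5 branch bumps xa2 to 3.0, implementing
the required round-half-away-from-zero.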
26543 enum machine_mode mode = GET_MODE (operand0);
26544 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
26546 TWO52 = ix86_gen_TWO52 (mode);
26548 /* Temporary for holding the result, initialized to the input
26549 operand to ease control flow. */
26550 res = gen_reg_rtx (mode);
26551 emit_move_insn (res, operand1);
26553 /* xa = abs (operand1) */
26554 xa = ix86_expand_sse_fabs (res, &mask);
26556 /* if (!isless (xa, TWO52)) goto label; */
26557 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
26559 /* xa2 = xa + TWO52 - TWO52; */
26560 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
26561 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
26563 /* dxa = xa2 - xa; */
26564 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
26566 /* generate 0.5, 1.0 and -0.5 */
26567 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
26568 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
26569 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
26570 0, OPTAB_DIRECT);
26572 /* Compensate. */
26573 tmp = gen_reg_rtx (mode);
26574 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
26575 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
26576 emit_insn (gen_rtx_SET (VOIDmode, tmp,
26577 gen_rtx_AND (mode, one, tmp)));
26578 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
26579 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
26580 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
26581 emit_insn (gen_rtx_SET (VOIDmode, tmp,
26582 gen_rtx_AND (mode, one, tmp)));
26583 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
26585 /* res = copysign (xa2, operand1) */
26586 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
26588 emit_label (label);
26589 LABEL_NUSES (label) = 1;
26591 emit_move_insn (operand0, res);
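
/* Editor's note: a self-contained C model of the expansion above, assuming
   DFmode and the default round-to-nearest-even FP mode.  A sketch only,
   kept out of the build with #if 0.  */
#if 0
#include <math.h>

static double
rounddf_32_model (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  double xa = fabs (x), xa2, dxa;

  if (!isless (xa, TWO52))
    return x;

  xa2 = xa + TWO52 - TWO52;	/* xa rounded to an integer */
  dxa = xa2 - xa;		/* rounding error, in [-0.5, 0.5] */
  if (dxa <= -0.5)
    xa2 += 1.0;			/* e.g. xa = 2.5 rounded to 2 -> 3 */
  else if (dxa > 0.5)
    xa2 -= 1.0;			/* cannot happen under ties-to-even,
				   but covers other rounding modes */
  return copysign (xa2, x);	/* -0.0 -> -0.0 */
}
#endif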
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_trunc (rtx operand0, rtx operand1)
{
  /* C code for the SSE variant we expand below.
	double xa = fabs (x), x2;
	if (!isless (xa, TWO52))
	  return x;
	x2 = (double)(long)x;
	if (HONOR_SIGNED_ZEROS (mode))
	  return copysign (x2, x);
	return x2;
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, xi, TWO52, label, res, mask;

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &mask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* x = (double)(long)x */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, res, 0);
  expand_float (res, xi, 0);

  if (HONOR_SIGNED_ZEROS (mode))
    ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
/* Expand SSE sequence for computing trunc from OPERAND1 storing
   into OPERAND0, without relying on DImode truncation via cvttsd2siq,
   which is only available on 64-bit targets.  */
void
ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
{
  enum machine_mode mode = GET_MODE (operand0);
  rtx xa, mask, TWO52, label, one, res, smask, tmp;

  /* C code for the SSE variant we expand below.
	double xa = fabs (x), xa2, x2;
	if (!isless (xa, TWO52))
	  return x;
	xa2 = xa + TWO52 - TWO52;
     Compensate:
	if (xa2 > xa)
	  xa2 -= 1.0;
	x2 = copysign (xa2, x);
	return x2;
   */

  TWO52 = ix86_gen_TWO52 (mode);

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  /* xa = abs (operand1) */
  xa = ix86_expand_sse_fabs (res, &smask);

  /* if (!isless (xa, TWO52)) goto label; */
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* res = xa + TWO52 - TWO52; */
  tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
  tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  /* generate 1.0 */
  one = force_reg (mode, const_double_from_real_value (dconst1, mode));

  /* Compensate: res = res - (res > xa ? 1 : 0) */
  mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
  emit_insn (gen_rtx_SET (VOIDmode, mask,
			  gen_rtx_AND (mode, mask, one)));
  tmp = expand_simple_binop (mode, MINUS,
			     res, mask, NULL_RTX, 0, OPTAB_DIRECT);
  emit_move_insn (res, tmp);

  /* res = copysign (res, operand1) */
  ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
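
/* Editor's note: the same algorithm in plain C, assuming DFmode and
   round-to-nearest; a sketch, kept out of the build with #if 0.  */
#if 0
#include <math.h>

static double
truncdf_32_model (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  double xa = fabs (x), xa2;

  if (!isless (xa, TWO52))
    return x;

  xa2 = xa + TWO52 - TWO52;	/* nearest integer to xa */
  if (xa2 > xa)
    xa2 -= 1.0;			/* rounded up: step back toward zero */
  return copysign (xa2, x);	/* truncation preserves the sign */
}
#endif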
/* Expand SSE sequence for computing round from OPERAND1 storing
   into OPERAND0.  */
void
ix86_expand_round (rtx operand0, rtx operand1)
{
  /* C code for the stuff we're doing below:
	double xa = fabs (x);
	if (!isless (xa, TWO52))
	  return x;
	xa = (double)(long)(xa + nextafter (0.5, 0.0));
	return copysign (xa, x);
   */
  enum machine_mode mode = GET_MODE (operand0);
  rtx res, TWO52, xa, label, xi, half, mask;
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;

  /* Temporary for holding the result, initialized to the input
     operand to ease control flow.  */
  res = gen_reg_rtx (mode);
  emit_move_insn (res, operand1);

  TWO52 = ix86_gen_TWO52 (mode);
  xa = ix86_expand_sse_fabs (res, &mask);
  label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);

  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
  REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);

  /* xa = xa + 0.5 */
  half = force_reg (mode, const_double_from_real_value (pred_half, mode));
  xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);

  /* xa = (double)(int64_t)xa */
  xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
  expand_fix (xi, xa, 0);
  expand_float (xa, xi, 0);

  /* res = copysign (xa, operand1) */
  ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);

  emit_label (label);
  LABEL_NUSES (label) = 1;

  emit_move_insn (operand0, res);
}
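
/* Editor's note: a self-contained C model of the expansion above for
   DFmode; nextafter (0.5, 0.0) is the value the pred_half computation
   materializes.  A sketch, kept out of the build with #if 0.  */
#if 0
#include <math.h>

static double
round_model (double x)
{
  const double TWO52 = 4503599627370496.0;	/* 2**52 */
  double xa = fabs (x);

  if (!isless (xa, TWO52))
    return x;

  /* Adding 0.5 exactly could round a value just below 0.5 up to 1.0
     before the truncation; the predecessor of 0.5 avoids that while
     still sending exact halves away from zero.  */
  xa = (double) (long long) (xa + nextafter (0.5, 0.0));
  return copysign (xa, x);
}
#endif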
/* Return true if an SSE5 instruction with the given operands is valid.
   OPERANDS is the array of operands.
   NUM is the number of operands.
   USES_OC0 is true if the instruction uses OC0 and provides 4 variants.
   NUM_MEMORY is the maximum number of memory operands to accept.
   When COMMUTATIVE is set, operands 1 and 2 can be swapped.  */

bool
ix86_sse5_valid_op_p (rtx operands[], rtx insn ATTRIBUTE_UNUSED, int num,
		      bool uses_oc0, int num_memory, bool commutative)
{
  int mem_mask;
  int mem_count;
  int i;

  /* Count the number of memory arguments.  */
  mem_mask = 0;
  mem_count = 0;
  for (i = 0; i < num; i++)
    {
      enum machine_mode mode = GET_MODE (operands[i]);
      if (register_operand (operands[i], mode))
	;

      else if (memory_operand (operands[i], mode))
	{
	  mem_mask |= (1 << i);
	  mem_count++;
	}

      else
	{
	  rtx pattern = PATTERN (insn);

	  /* allow 0 for pcmov */
	  if (GET_CODE (pattern) != SET
	      || GET_CODE (SET_SRC (pattern)) != IF_THEN_ELSE
	      || i < 2
	      || operands[i] != CONST0_RTX (mode))
	    return false;
	}
    }

  /* Special case pmacsdq{l,h} where we allow the 3rd argument to be
     a memory operation.  */
  if (num_memory < 0)
    {
      num_memory = -num_memory;
      if ((mem_mask & (1 << (num-1))) != 0)
	{
	  mem_mask &= ~(1 << (num-1));
	  mem_count--;
	}
    }

  /* If there were no memory operations, allow the insn.  */
  if (mem_mask == 0)
    return true;

  /* Do not allow the destination register to be a memory operand.  */
  else if (mem_mask & (1 << 0))
    return false;

  /* If there are too many memory operations, disallow the instruction.
     While the hardware only allows one memory reference, before register
     allocation we sometimes allow two memory operations for certain insns
     so that code like the following can be optimized:

	float fmadd (float *a, float *b, float *c) { return (*a * *b) + *c; }

     or similar cases that are vectorized to use the fmaddss
     instruction.  */
  else if (mem_count > num_memory)
    return false;

  /* Don't allow more than one memory operation if not optimizing.  */
  else if (mem_count > 1 && !optimize)
    return false;

  else if (num == 4 && mem_count == 1)
    {
      /* formats (destination is the first argument), example fmaddss:
	 xmm1, xmm1, xmm2, xmm3/mem
	 xmm1, xmm1, xmm2/mem, xmm3
	 xmm1, xmm2, xmm3/mem, xmm1
	 xmm1, xmm2/mem, xmm3, xmm1 */
      if (uses_oc0)
	return ((mem_mask == (1 << 1))
		|| (mem_mask == (1 << 2))
		|| (mem_mask == (1 << 3)));

      /* format, example pmacsdd:
	 xmm1, xmm2, xmm3/mem, xmm1 */
      if (commutative)
	return (mem_mask == (1 << 2) || mem_mask == (1 << 1));
      else
	return (mem_mask == (1 << 2));
    }

  else if (num == 4 && num_memory == 2)
    {
      /* If there are two memory operations, we can load one of the memory
	 ops into the destination register.  This is for optimizing the
	 multiply/add ops, for which the combiner has optimized both the
	 multiply and the add insns to have a memory operand.  We have to
	 be careful that the destination doesn't overlap with the inputs.  */
      rtx op0 = operands[0];

      if (reg_mentioned_p (op0, operands[1])
	  || reg_mentioned_p (op0, operands[2])
	  || reg_mentioned_p (op0, operands[3]))
	return false;

      /* formats (destination is the first argument), example fmaddss:
	 xmm1, xmm1, xmm2, xmm3/mem
	 xmm1, xmm1, xmm2/mem, xmm3
	 xmm1, xmm2, xmm3/mem, xmm1
	 xmm1, xmm2/mem, xmm3, xmm1

	 For the oc0 case, we will load either operands[1] or operands[3]
	 into operands[0], so any combination of 2 memory operands is ok.  */
      if (uses_oc0)
	return true;

      /* format, example pmacsdd:
	 xmm1, xmm2, xmm3/mem, xmm1

	 For the integer multiply/add instructions, be more restrictive and
	 require operands[2] and operands[3] to be the memory operands (or,
	 for commutative insns, operands[1] and operands[3], since operands
	 1 and 2 can be swapped).  */
      if (commutative)
	return (mem_mask == ((1 << 1) | (1 << 3))
		|| mem_mask == ((1 << 2) | (1 << 3)));
      else
	return (mem_mask == ((1 << 2) | (1 << 3)));
    }

  else if (num == 3 && num_memory == 1)
    {
      /* formats, example protb:
	 xmm1, xmm2, xmm3/mem
	 xmm1, xmm2/mem, xmm3 */
      if (uses_oc0)
	return ((mem_mask == (1 << 1)) || (mem_mask == (1 << 2)));

      /* format, example comeq:
	 xmm1, xmm2, xmm3/mem */
      else
	return (mem_mask == (1 << 2));
    }

  else
    gcc_unreachable ();

  return false;
}
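
/* Editor's note: a toy illustration of the mem_mask encoding used above --
   bit I is set when operands[I] is a memory reference.  A sketch, kept out
   of the build with #if 0.  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* fmaddss xmm1, xmm1, xmm2, xmm3/mem: only the last operand is a
     memory reference.  */
  int is_mem[4] = { 0, 0, 0, 1 };
  int mem_mask = 0, mem_count = 0, i;

  for (i = 0; i < 4; i++)
    if (is_mem[i])
      {
	mem_mask |= 1 << i;
	mem_count++;
      }

  /* Prints "mem_mask = 0x8, mem_count = 1", which ix86_sse5_valid_op_p
     accepts for the uses_oc0 four-operand forms.  */
  printf ("mem_mask = 0x%x, mem_count = %d\n", mem_mask, mem_count);
  return 0;
}
#endif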
/* Fix up an SSE5 instruction that has 2 memory input references into a form
   the hardware will allow by using the destination register to load one of
   the memory operations.  Presently this is used by the multiply/add
   routines to allow 2 memory references.  */

void
ix86_expand_sse5_multiple_memory (rtx operands[],
				  int num,
				  enum machine_mode mode)
{
  rtx op0 = operands[0];
  if (num != 4
      || memory_operand (op0, mode)
      || reg_mentioned_p (op0, operands[1])
      || reg_mentioned_p (op0, operands[2])
      || reg_mentioned_p (op0, operands[3]))
    gcc_unreachable ();

  /* For 2 memory operands, pick either operands[1] or operands[3] to move
     into the destination register.  */
  if (memory_operand (operands[1], mode))
    {
      emit_move_insn (op0, operands[1]);
      operands[1] = op0;
    }
  else if (memory_operand (operands[3], mode))
    {
      emit_move_insn (op0, operands[3]);
      operands[3] = op0;
    }
  else
    gcc_unreachable ();
}
/* Table of valid machine attributes.  */
static const struct attribute_spec ix86_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  /* Stdcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "stdcall",   0, 0, false, true,  true,  ix86_handle_cconv_attribute },
  /* Fastcall attribute says callee is responsible for popping arguments
     if they are not variable.  */
  { "fastcall",  0, 0, false, true,  true,  ix86_handle_cconv_attribute },
  /* Cdecl attribute says the callee is a normal C declaration.  */
  { "cdecl",     0, 0, false, true,  true,  ix86_handle_cconv_attribute },
  /* Regparm attribute specifies how many integer arguments are to be
     passed in registers.  */
  { "regparm",   1, 1, false, true,  true,  ix86_handle_cconv_attribute },
  /* Sseregparm attribute says we are using x86_64 calling conventions
     for FP arguments.  */
  { "sseregparm", 0, 0, false, true, true,  ix86_handle_cconv_attribute },
  /* force_align_arg_pointer says this function realigns the stack at entry.  */
  { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
    false, true, true, ix86_handle_cconv_attribute },
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
  { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
  { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
  { "shared",    0, 0, true,  false, false, ix86_handle_shared_attribute },
#endif
  { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
  { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  /* ms_abi and sysv_abi calling convention function attributes.  */
  { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
  { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
  /* End element.  */
  { NULL, 0, 0, false, false, false, NULL }
};
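
/* Editor's note: source-level examples of a few attributes from the table
   above; illustrative declarations only, kept out of the build with #if 0.  */
#if 0
int __attribute__ ((stdcall)) f1 (int a, int b);      /* callee pops args */
int __attribute__ ((fastcall)) f2 (int a, int b);     /* first args in ecx/edx */
int __attribute__ ((regparm (3))) f3 (int, int, int); /* 3 int args in regs */
struct __attribute__ ((ms_struct)) s { char c; int i; };
#endif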
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
x86_builtin_vectorization_cost (bool runtime_test)
{
  /* If the branch of the runtime test is taken - i.e. the vectorized
     version is skipped - this incurs a misprediction cost (because the
     vectorized version is expected to be the fall-through).  So we subtract
     the latency of a mispredicted branch from the costs that are incurred
     when the vectorized version is executed.

     TODO: The values in individual target tables have to be tuned or new
     fields may be needed.  For example, on K8 the default branch path is
     the not-taken path.  If the taken path is predicted correctly, the
     minimum penalty of going down the taken path is 1 cycle.  If the taken
     path is not predicted correctly, then the minimum penalty is 10
     cycles.  */

  if (runtime_test)
    return -(ix86_cost->cond_taken_branch_cost);
  else
    return 0;
}
/* Return the calling-ABI-specific va_list type node for FNDECL.  */

tree
ix86_fn_abi_va_list (tree fndecl)
{
  int abi;

  if (!TARGET_64BIT)
    return va_list_type_node;
  gcc_assert (fndecl != NULL_TREE);
  abi = ix86_function_abi ((const_tree) fndecl);

  if (abi == MS_ABI)
    return ms_va_list_type_node;
  else
    return sysv_va_list_type_node;
}
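
/* Editor's note: a source-level sketch of why the hook above matters --
   an ms_abi function on x86_64 must use the MS flavor of va_list, via the
   __builtin_ms_va_* builtins.  A sketch, kept out of the build with #if 0.  */
#if 0
int __attribute__ ((ms_abi))
sum_ms (int n, ...)
{
  __builtin_ms_va_list ap;
  int s = 0;

  __builtin_ms_va_start (ap, n);
  while (n-- > 0)
    s += __builtin_va_arg (ap, int);
  __builtin_ms_va_end (ap);
  return s;
}
#endif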
/* Return the canonical va_list type specified by TYPE.  If TYPE is not
   a valid va_list type, return NULL_TREE.  */

tree
ix86_canonical_va_list_type (tree type)
{
  tree wtype, htype;

  /* Resolve references and pointers to va_list type.  */
  if (INDIRECT_REF_P (type))
    type = TREE_TYPE (type);
  else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE (type)))
    type = TREE_TYPE (type);

  if (TARGET_64BIT)
    {
      wtype = va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* If va_list is an array type, the argument may have decayed
	     to a pointer type, e.g. by being passed to another function.
	     In that case, unwrap both types so that we can compare the
	     underlying records.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return va_list_type_node;

      wtype = sysv_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* Likewise, unwrap a possibly decayed sysv_va_list.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return sysv_va_list_type_node;

      wtype = ms_va_list_type_node;
      gcc_assert (wtype != NULL_TREE);
      htype = type;
      if (TREE_CODE (wtype) == ARRAY_TYPE)
	{
	  /* Likewise, unwrap a possibly decayed ms_va_list.  */
	  if (TREE_CODE (htype) == ARRAY_TYPE
	      || POINTER_TYPE_P (htype))
	    {
	      wtype = TREE_TYPE (wtype);
	      htype = TREE_TYPE (htype);
	    }
	}
      if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
	return ms_va_list_type_node;

      return NULL_TREE;
    }

  return std_canonical_va_list_type (type);
}
/* Iterate through the target-specific builtin types for va_list.
   IDX denotes the iterator, *PTREE is set to the type of the va_list
   builtin, and *PNAME to its internal name.
   Returns zero if there is no element for this index, otherwise
   IDX should be increased upon the next call.
   Note, do not iterate a base builtin's name like __builtin_va_list.
   Used from c_common_nodes_and_builtins.  */

int
ix86_enum_va_list (int idx, const char **pname, tree *ptree)
{
  if (!TARGET_64BIT)
    return 0;

  switch (idx)
    {
    case 0:
      *ptree = ms_va_list_type_node;
      *pname = "__builtin_ms_va_list";
      break;

    case 1:
      *ptree = sysv_va_list_type_node;
      *pname = "__builtin_sysv_va_list";
      break;

    default:
      return 0;
    }

  return 1;
}
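
/* Editor's note: a sketch of how a caller can walk this iterator;
   register_va_list_name is a hypothetical helper standing in for what
   c_common_nodes_and_builtins actually does with each pair.  Kept out
   of the build with #if 0.  */
#if 0
void
register_target_va_lists (void)
{
  const char *name;
  tree type;
  int idx;

  for (idx = 0; ix86_enum_va_list (idx, &name, &type); idx++)
    register_va_list_name (name, type);		/* hypothetical */
}
#endif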
/* Initialize the GCC target structure.  */
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ix86_return_in_memory

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#  undef TARGET_MERGE_DECL_ATTRIBUTES
#  define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ix86_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ix86_expand_builtin

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  ix86_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue

#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
#endif

#undef TARGET_ASM_OPEN_PAREN
#define TARGET_ASM_OPEN_PAREN ""
#undef TARGET_ASM_CLOSE_PAREN
#define TARGET_ASM_CLOSE_PAREN ""

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
#ifdef ASM_QUAD
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  ia32_multipass_dfa_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif
#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address

#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p

#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
#endif

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START x86_file_start

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT \
   | TARGET_SUBTARGET_DEFAULT \
   | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION ix86_handle_option

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ix86_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST ix86_address_cost

#undef TARGET_FIXED_CONDITION_CODE_REGS
#define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
#undef TARGET_CC_MODES_COMPATIBLE
#define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list

#undef TARGET_FN_ABI_VA_LIST
#define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list

#undef TARGET_CANONICAL_VA_LIST_TYPE
#define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC ix86_dwarf_handle_frame_unspec
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
#endif

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE ix86_mangle_type

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE ix86_function_value

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD ix86_secondary_reload

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_option_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE ix86_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE ix86_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT ix86_function_specific_print

#undef TARGET_OPTION_CAN_INLINE_P
#define TARGET_OPTION_CAN_INLINE_P ix86_can_inline_p

#undef TARGET_OPTION_COLD_ATTRIBUTE_SETS_OPTIMIZATION
#define TARGET_OPTION_COLD_ATTRIBUTE_SETS_OPTIMIZATION true

#undef TARGET_OPTION_HOT_ATTRIBUTE_SETS_OPTIMIZATION
#define TARGET_OPTION_HOT_ATTRIBUTE_SETS_OPTIMIZATION true
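
/* Editor's note: subtarget headers override entries in this table with the
   same #undef/#define idiom before TARGET_INITIALIZER is expanded; the hook
   name below is hypothetical.  Kept out of the build with #if 0.  */
#if 0
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START my_subtarget_file_start	/* hypothetical */
#endif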
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-i386.h"