1 /* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
11 any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "tm_p.h"
29 #include "regs.h"
30 #include "hard-reg-set.h"
31 #include "real.h"
32 #include "insn-config.h"
33 #include "conditions.h"
34 #include "output.h"
35 #include "insn-codes.h"
36 #include "insn-attr.h"
37 #include "flags.h"
38 #include "except.h"
39 #include "function.h"
40 #include "recog.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "toplev.h"
44 #include "basic-block.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "langhooks.h"
49 #include "cgraph.h"
50 #include "gimple.h"
51 #include "dwarf2.h"
52 #include "df.h"
53 #include "tm-constrs.h"
54 #include "params.h"
55 #include "cselib.h"
57 static int x86_builtin_vectorization_cost (bool);
58 static rtx legitimize_dllimport_symbol (rtx, bool);
60 #ifndef CHECK_STACK_LIMIT
61 #define CHECK_STACK_LIMIT (-1)
62 #endif
64 /* Return index of given mode in mult and division cost tables. */
65 #define MODE_INDEX(mode) \
66 ((mode) == QImode ? 0 \
67 : (mode) == HImode ? 1 \
68 : (mode) == SImode ? 2 \
69 : (mode) == DImode ? 3 \
70 : 4)
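/* A quick illustration of how this macro is used (a sketch; the real uses are
   in the rtx cost hooks further down in this file): the per-mode arrays in
   struct processor_costs are indexed as, e.g.,

       ix86_cost->mult_init[MODE_INDEX (SImode)]   -- SImode multiply start cost
       ix86_cost->divide[MODE_INDEX (DImode)]      -- DImode divide/mod cost

   MODE_INDEX (SImode) is 2, and any mode other than QI/HI/SI/DImode falls into
   the trailing "other" slot at index 4.  The field names mult_init and divide
   are assumed here from the processor_costs definition in i386.h.  */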
72 /* Processor costs (relative to an add) */
73 /* We assume COSTS_N_INSNS is defined as (N)*4 and an addition is 2 bytes. */
74 #define COSTS_N_BYTES(N) ((N) * 2)
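/* Worked example of the scaling above: with COSTS_N_INSNS (N) == (N) * 4, a
   one-insn operation costs 4, while the 2-byte add in ix86_size_cost below is
   COSTS_N_BYTES (2) == 4, so when optimizing for size a 2-byte add and one
   "typical" instruction land on the same scale.  */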
76 #define DUMMY_STRINGOP_ALGS {libcall, {{-1, libcall}}}
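/* How to read the stringop descriptors that end each cost table below (a
   sketch based on struct stringop_algs in i386.h): the first field is the
   algorithm used when the block size is unknown at compile time, followed by
   {max_size, algorithm} pairs terminated by a max_size of -1.  Each cost table
   carries one such pair of descriptors for memcpy and one for memset, the
   second descriptor of each pair being the 64-bit variant, so
   DUMMY_STRINGOP_ALGS marks tables that only tune the 32-bit case.  For
   example, pentium_cost's memcpy entry reads: use rep movsl for blocks up to
   256 bytes, and use a library call above that or when the size is unknown.  */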
78 const
79 struct processor_costs ix86_size_cost = {/* costs for tuning for size */
80 COSTS_N_BYTES (2), /* cost of an add instruction */
81 COSTS_N_BYTES (3), /* cost of a lea instruction */
82 COSTS_N_BYTES (2), /* variable shift costs */
83 COSTS_N_BYTES (3), /* constant shift costs */
84 {COSTS_N_BYTES (3), /* cost of starting multiply for QI */
85 COSTS_N_BYTES (3), /* HI */
86 COSTS_N_BYTES (3), /* SI */
87 COSTS_N_BYTES (3), /* DI */
88 COSTS_N_BYTES (5)}, /* other */
89 0, /* cost of multiply per each bit set */
90 {COSTS_N_BYTES (3), /* cost of a divide/mod for QI */
91 COSTS_N_BYTES (3), /* HI */
92 COSTS_N_BYTES (3), /* SI */
93 COSTS_N_BYTES (3), /* DI */
94 COSTS_N_BYTES (5)}, /* other */
95 COSTS_N_BYTES (3), /* cost of movsx */
96 COSTS_N_BYTES (3), /* cost of movzx */
97 0, /* "large" insn */
98 2, /* MOVE_RATIO */
99 2, /* cost for loading QImode using movzbl */
100 {2, 2, 2}, /* cost of loading integer registers
101 in QImode, HImode and SImode.
102 Relative to reg-reg move (2). */
103 {2, 2, 2}, /* cost of storing integer registers */
104 2, /* cost of reg,reg fld/fst */
105 {2, 2, 2}, /* cost of loading fp registers
106 in SFmode, DFmode and XFmode */
107 {2, 2, 2}, /* cost of storing fp registers
108 in SFmode, DFmode and XFmode */
109 3, /* cost of moving MMX register */
110 {3, 3}, /* cost of loading MMX registers
111 in SImode and DImode */
112 {3, 3}, /* cost of storing MMX registers
113 in SImode and DImode */
114 3, /* cost of moving SSE register */
115 {3, 3, 3}, /* cost of loading SSE registers
116 in SImode, DImode and TImode */
117 {3, 3, 3}, /* cost of storing SSE registers
118 in SImode, DImode and TImode */
119 3, /* MMX or SSE register to integer */
120 0, /* size of l1 cache */
121 0, /* size of l2 cache */
122 0, /* size of prefetch block */
123 0, /* number of parallel prefetches */
124 2, /* Branch cost */
125 COSTS_N_BYTES (2), /* cost of FADD and FSUB insns. */
126 COSTS_N_BYTES (2), /* cost of FMUL instruction. */
127 COSTS_N_BYTES (2), /* cost of FDIV instruction. */
128 COSTS_N_BYTES (2), /* cost of FABS instruction. */
129 COSTS_N_BYTES (2), /* cost of FCHS instruction. */
130 COSTS_N_BYTES (2), /* cost of FSQRT instruction. */
131 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
132 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
133 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
134 {rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}}},
135 1, /* scalar_stmt_cost. */
136 1, /* scalar load_cost. */
137 1, /* scalar_store_cost. */
138 1, /* vec_stmt_cost. */
139 1, /* vec_to_scalar_cost. */
140 1, /* scalar_to_vec_cost. */
141 1, /* vec_align_load_cost. */
142 1, /* vec_unalign_load_cost. */
143 1, /* vec_store_cost. */
144 1, /* cond_taken_branch_cost. */
145 1, /* cond_not_taken_branch_cost. */
146 };
148 /* Processor costs (relative to an add) */
149 static const
150 struct processor_costs i386_cost = { /* 386 specific costs */
151 COSTS_N_INSNS (1), /* cost of an add instruction */
152 COSTS_N_INSNS (1), /* cost of a lea instruction */
153 COSTS_N_INSNS (3), /* variable shift costs */
154 COSTS_N_INSNS (2), /* constant shift costs */
155 {COSTS_N_INSNS (6), /* cost of starting multiply for QI */
156 COSTS_N_INSNS (6), /* HI */
157 COSTS_N_INSNS (6), /* SI */
158 COSTS_N_INSNS (6), /* DI */
159 COSTS_N_INSNS (6)}, /* other */
160 COSTS_N_INSNS (1), /* cost of multiply per each bit set */
161 {COSTS_N_INSNS (23), /* cost of a divide/mod for QI */
162 COSTS_N_INSNS (23), /* HI */
163 COSTS_N_INSNS (23), /* SI */
164 COSTS_N_INSNS (23), /* DI */
165 COSTS_N_INSNS (23)}, /* other */
166 COSTS_N_INSNS (3), /* cost of movsx */
167 COSTS_N_INSNS (2), /* cost of movzx */
168 15, /* "large" insn */
169 3, /* MOVE_RATIO */
170 4, /* cost for loading QImode using movzbl */
171 {2, 4, 2}, /* cost of loading integer registers
172 in QImode, HImode and SImode.
173 Relative to reg-reg move (2). */
174 {2, 4, 2}, /* cost of storing integer registers */
175 2, /* cost of reg,reg fld/fst */
176 {8, 8, 8}, /* cost of loading fp registers
177 in SFmode, DFmode and XFmode */
178 {8, 8, 8}, /* cost of storing fp registers
179 in SFmode, DFmode and XFmode */
180 2, /* cost of moving MMX register */
181 {4, 8}, /* cost of loading MMX registers
182 in SImode and DImode */
183 {4, 8}, /* cost of storing MMX registers
184 in SImode and DImode */
185 2, /* cost of moving SSE register */
186 {4, 8, 16}, /* cost of loading SSE registers
187 in SImode, DImode and TImode */
188 {4, 8, 16}, /* cost of storing SSE registers
189 in SImode, DImode and TImode */
190 3, /* MMX or SSE register to integer */
191 0, /* size of l1 cache */
192 0, /* size of l2 cache */
193 0, /* size of prefetch block */
194 0, /* number of parallel prefetches */
195 1, /* Branch cost */
196 COSTS_N_INSNS (23), /* cost of FADD and FSUB insns. */
197 COSTS_N_INSNS (27), /* cost of FMUL instruction. */
198 COSTS_N_INSNS (88), /* cost of FDIV instruction. */
199 COSTS_N_INSNS (22), /* cost of FABS instruction. */
200 COSTS_N_INSNS (24), /* cost of FCHS instruction. */
201 COSTS_N_INSNS (122), /* cost of FSQRT instruction. */
202 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
203 DUMMY_STRINGOP_ALGS},
204 {{rep_prefix_1_byte, {{-1, rep_prefix_1_byte}}},
205 DUMMY_STRINGOP_ALGS},
206 1, /* scalar_stmt_cost. */
207 1, /* scalar load_cost. */
208 1, /* scalar_store_cost. */
209 1, /* vec_stmt_cost. */
210 1, /* vec_to_scalar_cost. */
211 1, /* scalar_to_vec_cost. */
212 1, /* vec_align_load_cost. */
213 2, /* vec_unalign_load_cost. */
214 1, /* vec_store_cost. */
215 3, /* cond_taken_branch_cost. */
216 1, /* cond_not_taken_branch_cost. */
217 };
219 static const
220 struct processor_costs i486_cost = { /* 486 specific costs */
221 COSTS_N_INSNS (1), /* cost of an add instruction */
222 COSTS_N_INSNS (1), /* cost of a lea instruction */
223 COSTS_N_INSNS (3), /* variable shift costs */
224 COSTS_N_INSNS (2), /* constant shift costs */
225 {COSTS_N_INSNS (12), /* cost of starting multiply for QI */
226 COSTS_N_INSNS (12), /* HI */
227 COSTS_N_INSNS (12), /* SI */
228 COSTS_N_INSNS (12), /* DI */
229 COSTS_N_INSNS (12)}, /* other */
230 1, /* cost of multiply per each bit set */
231 {COSTS_N_INSNS (40), /* cost of a divide/mod for QI */
232 COSTS_N_INSNS (40), /* HI */
233 COSTS_N_INSNS (40), /* SI */
234 COSTS_N_INSNS (40), /* DI */
235 COSTS_N_INSNS (40)}, /* other */
236 COSTS_N_INSNS (3), /* cost of movsx */
237 COSTS_N_INSNS (2), /* cost of movzx */
238 15, /* "large" insn */
239 3, /* MOVE_RATIO */
240 4, /* cost for loading QImode using movzbl */
241 {2, 4, 2}, /* cost of loading integer registers
242 in QImode, HImode and SImode.
243 Relative to reg-reg move (2). */
244 {2, 4, 2}, /* cost of storing integer registers */
245 2, /* cost of reg,reg fld/fst */
246 {8, 8, 8}, /* cost of loading fp registers
247 in SFmode, DFmode and XFmode */
248 {8, 8, 8}, /* cost of storing fp registers
249 in SFmode, DFmode and XFmode */
250 2, /* cost of moving MMX register */
251 {4, 8}, /* cost of loading MMX registers
252 in SImode and DImode */
253 {4, 8}, /* cost of storing MMX registers
254 in SImode and DImode */
255 2, /* cost of moving SSE register */
256 {4, 8, 16}, /* cost of loading SSE registers
257 in SImode, DImode and TImode */
258 {4, 8, 16}, /* cost of storing SSE registers
259 in SImode, DImode and TImode */
260 3, /* MMX or SSE register to integer */
261 4, /* size of l1 cache. 486 has 8kB cache
262 shared for code and data, so 4kB is
263 not really precise. */
264 4, /* size of l2 cache */
265 0, /* size of prefetch block */
266 0, /* number of parallel prefetches */
267 1, /* Branch cost */
268 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
269 COSTS_N_INSNS (16), /* cost of FMUL instruction. */
270 COSTS_N_INSNS (73), /* cost of FDIV instruction. */
271 COSTS_N_INSNS (3), /* cost of FABS instruction. */
272 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
273 COSTS_N_INSNS (83), /* cost of FSQRT instruction. */
274 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
275 DUMMY_STRINGOP_ALGS},
276 {{rep_prefix_4_byte, {{-1, rep_prefix_4_byte}}},
277 DUMMY_STRINGOP_ALGS},
278 1, /* scalar_stmt_cost. */
279 1, /* scalar load_cost. */
280 1, /* scalar_store_cost. */
281 1, /* vec_stmt_cost. */
282 1, /* vec_to_scalar_cost. */
283 1, /* scalar_to_vec_cost. */
284 1, /* vec_align_load_cost. */
285 2, /* vec_unalign_load_cost. */
286 1, /* vec_store_cost. */
287 3, /* cond_taken_branch_cost. */
288 1, /* cond_not_taken_branch_cost. */
289 };
291 static const
292 struct processor_costs pentium_cost = {
293 COSTS_N_INSNS (1), /* cost of an add instruction */
294 COSTS_N_INSNS (1), /* cost of a lea instruction */
295 COSTS_N_INSNS (4), /* variable shift costs */
296 COSTS_N_INSNS (1), /* constant shift costs */
297 {COSTS_N_INSNS (11), /* cost of starting multiply for QI */
298 COSTS_N_INSNS (11), /* HI */
299 COSTS_N_INSNS (11), /* SI */
300 COSTS_N_INSNS (11), /* DI */
301 COSTS_N_INSNS (11)}, /* other */
302 0, /* cost of multiply per each bit set */
303 {COSTS_N_INSNS (25), /* cost of a divide/mod for QI */
304 COSTS_N_INSNS (25), /* HI */
305 COSTS_N_INSNS (25), /* SI */
306 COSTS_N_INSNS (25), /* DI */
307 COSTS_N_INSNS (25)}, /* other */
308 COSTS_N_INSNS (3), /* cost of movsx */
309 COSTS_N_INSNS (2), /* cost of movzx */
310 8, /* "large" insn */
311 6, /* MOVE_RATIO */
312 6, /* cost for loading QImode using movzbl */
313 {2, 4, 2}, /* cost of loading integer registers
314 in QImode, HImode and SImode.
315 Relative to reg-reg move (2). */
316 {2, 4, 2}, /* cost of storing integer registers */
317 2, /* cost of reg,reg fld/fst */
318 {2, 2, 6}, /* cost of loading fp registers
319 in SFmode, DFmode and XFmode */
320 {4, 4, 6}, /* cost of storing fp registers
321 in SFmode, DFmode and XFmode */
322 8, /* cost of moving MMX register */
323 {8, 8}, /* cost of loading MMX registers
324 in SImode and DImode */
325 {8, 8}, /* cost of storing MMX registers
326 in SImode and DImode */
327 2, /* cost of moving SSE register */
328 {4, 8, 16}, /* cost of loading SSE registers
329 in SImode, DImode and TImode */
330 {4, 8, 16}, /* cost of storing SSE registers
331 in SImode, DImode and TImode */
332 3, /* MMX or SSE register to integer */
333 8, /* size of l1 cache. */
334 8, /* size of l2 cache */
335 0, /* size of prefetch block */
336 0, /* number of parallel prefetches */
337 2, /* Branch cost */
338 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
339 COSTS_N_INSNS (3), /* cost of FMUL instruction. */
340 COSTS_N_INSNS (39), /* cost of FDIV instruction. */
341 COSTS_N_INSNS (1), /* cost of FABS instruction. */
342 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
343 COSTS_N_INSNS (70), /* cost of FSQRT instruction. */
344 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
345 DUMMY_STRINGOP_ALGS},
346 {{libcall, {{-1, rep_prefix_4_byte}}},
347 DUMMY_STRINGOP_ALGS},
348 1, /* scalar_stmt_cost. */
349 1, /* scalar load_cost. */
350 1, /* scalar_store_cost. */
351 1, /* vec_stmt_cost. */
352 1, /* vec_to_scalar_cost. */
353 1, /* scalar_to_vec_cost. */
354 1, /* vec_align_load_cost. */
355 2, /* vec_unalign_load_cost. */
356 1, /* vec_store_cost. */
357 3, /* cond_taken_branch_cost. */
358 1, /* cond_not_taken_branch_cost. */
359 };
361 static const
362 struct processor_costs pentiumpro_cost = {
363 COSTS_N_INSNS (1), /* cost of an add instruction */
364 COSTS_N_INSNS (1), /* cost of a lea instruction */
365 COSTS_N_INSNS (1), /* variable shift costs */
366 COSTS_N_INSNS (1), /* constant shift costs */
367 {COSTS_N_INSNS (4), /* cost of starting multiply for QI */
368 COSTS_N_INSNS (4), /* HI */
369 COSTS_N_INSNS (4), /* SI */
370 COSTS_N_INSNS (4), /* DI */
371 COSTS_N_INSNS (4)}, /* other */
372 0, /* cost of multiply per each bit set */
373 {COSTS_N_INSNS (17), /* cost of a divide/mod for QI */
374 COSTS_N_INSNS (17), /* HI */
375 COSTS_N_INSNS (17), /* SI */
376 COSTS_N_INSNS (17), /* DI */
377 COSTS_N_INSNS (17)}, /* other */
378 COSTS_N_INSNS (1), /* cost of movsx */
379 COSTS_N_INSNS (1), /* cost of movzx */
380 8, /* "large" insn */
381 6, /* MOVE_RATIO */
382 2, /* cost for loading QImode using movzbl */
383 {4, 4, 4}, /* cost of loading integer registers
384 in QImode, HImode and SImode.
385 Relative to reg-reg move (2). */
386 {2, 2, 2}, /* cost of storing integer registers */
387 2, /* cost of reg,reg fld/fst */
388 {2, 2, 6}, /* cost of loading fp registers
389 in SFmode, DFmode and XFmode */
390 {4, 4, 6}, /* cost of storing fp registers
391 in SFmode, DFmode and XFmode */
392 2, /* cost of moving MMX register */
393 {2, 2}, /* cost of loading MMX registers
394 in SImode and DImode */
395 {2, 2}, /* cost of storing MMX registers
396 in SImode and DImode */
397 2, /* cost of moving SSE register */
398 {2, 2, 8}, /* cost of loading SSE registers
399 in SImode, DImode and TImode */
400 {2, 2, 8}, /* cost of storing SSE registers
401 in SImode, DImode and TImode */
402 3, /* MMX or SSE register to integer */
403 8, /* size of l1 cache. */
404 256, /* size of l2 cache */
405 32, /* size of prefetch block */
406 6, /* number of parallel prefetches */
407 2, /* Branch cost */
408 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
409 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
410 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
411 COSTS_N_INSNS (2), /* cost of FABS instruction. */
412 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
413 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
414 /* PentiumPro has optimized rep instructions for blocks aligned by 8 bytes (we
415 ensure the alignment). For small blocks an inline loop is still a noticeable
416 win; for bigger blocks either rep movsl or rep movsb is the way to go. Rep
417 movsb apparently has a more expensive startup time in the CPU, but after 4K
418 the difference is down in the noise. */
419 {{rep_prefix_4_byte, {{128, loop}, {1024, unrolled_loop},
420 {8192, rep_prefix_4_byte}, {-1, rep_prefix_1_byte}}},
421 DUMMY_STRINGOP_ALGS},
422 {{rep_prefix_4_byte, {{1024, unrolled_loop},
423 {8192, rep_prefix_4_byte}, {-1, libcall}}},
424 DUMMY_STRINGOP_ALGS},
425 1, /* scalar_stmt_cost. */
426 1, /* scalar load_cost. */
427 1, /* scalar_store_cost. */
428 1, /* vec_stmt_cost. */
429 1, /* vec_to_scalar_cost. */
430 1, /* scalar_to_vec_cost. */
431 1, /* vec_align_load_cost. */
432 2, /* vec_unalign_load_cost. */
433 1, /* vec_store_cost. */
434 3, /* cond_taken_branch_cost. */
435 1, /* cond_not_taken_branch_cost. */
436 };
438 static const
439 struct processor_costs geode_cost = {
440 COSTS_N_INSNS (1), /* cost of an add instruction */
441 COSTS_N_INSNS (1), /* cost of a lea instruction */
442 COSTS_N_INSNS (2), /* variable shift costs */
443 COSTS_N_INSNS (1), /* constant shift costs */
444 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
445 COSTS_N_INSNS (4), /* HI */
446 COSTS_N_INSNS (7), /* SI */
447 COSTS_N_INSNS (7), /* DI */
448 COSTS_N_INSNS (7)}, /* other */
449 0, /* cost of multiply per each bit set */
450 {COSTS_N_INSNS (15), /* cost of a divide/mod for QI */
451 COSTS_N_INSNS (23), /* HI */
452 COSTS_N_INSNS (39), /* SI */
453 COSTS_N_INSNS (39), /* DI */
454 COSTS_N_INSNS (39)}, /* other */
455 COSTS_N_INSNS (1), /* cost of movsx */
456 COSTS_N_INSNS (1), /* cost of movzx */
457 8, /* "large" insn */
458 4, /* MOVE_RATIO */
459 1, /* cost for loading QImode using movzbl */
460 {1, 1, 1}, /* cost of loading integer registers
461 in QImode, HImode and SImode.
462 Relative to reg-reg move (2). */
463 {1, 1, 1}, /* cost of storing integer registers */
464 1, /* cost of reg,reg fld/fst */
465 {1, 1, 1}, /* cost of loading fp registers
466 in SFmode, DFmode and XFmode */
467 {4, 6, 6}, /* cost of storing fp registers
468 in SFmode, DFmode and XFmode */
470 1, /* cost of moving MMX register */
471 {1, 1}, /* cost of loading MMX registers
472 in SImode and DImode */
473 {1, 1}, /* cost of storing MMX registers
474 in SImode and DImode */
475 1, /* cost of moving SSE register */
476 {1, 1, 1}, /* cost of loading SSE registers
477 in SImode, DImode and TImode */
478 {1, 1, 1}, /* cost of storing SSE registers
479 in SImode, DImode and TImode */
480 1, /* MMX or SSE register to integer */
481 64, /* size of l1 cache. */
482 128, /* size of l2 cache. */
483 32, /* size of prefetch block */
484 1, /* number of parallel prefetches */
485 1, /* Branch cost */
486 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
487 COSTS_N_INSNS (11), /* cost of FMUL instruction. */
488 COSTS_N_INSNS (47), /* cost of FDIV instruction. */
489 COSTS_N_INSNS (1), /* cost of FABS instruction. */
490 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
491 COSTS_N_INSNS (54), /* cost of FSQRT instruction. */
492 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
493 DUMMY_STRINGOP_ALGS},
494 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
495 DUMMY_STRINGOP_ALGS},
496 1, /* scalar_stmt_cost. */
497 1, /* scalar load_cost. */
498 1, /* scalar_store_cost. */
499 1, /* vec_stmt_cost. */
500 1, /* vec_to_scalar_cost. */
501 1, /* scalar_to_vec_cost. */
502 1, /* vec_align_load_cost. */
503 2, /* vec_unalign_load_cost. */
504 1, /* vec_store_cost. */
505 3, /* cond_taken_branch_cost. */
506 1, /* cond_not_taken_branch_cost. */
507 };
509 static const
510 struct processor_costs k6_cost = {
511 COSTS_N_INSNS (1), /* cost of an add instruction */
512 COSTS_N_INSNS (2), /* cost of a lea instruction */
513 COSTS_N_INSNS (1), /* variable shift costs */
514 COSTS_N_INSNS (1), /* constant shift costs */
515 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
516 COSTS_N_INSNS (3), /* HI */
517 COSTS_N_INSNS (3), /* SI */
518 COSTS_N_INSNS (3), /* DI */
519 COSTS_N_INSNS (3)}, /* other */
520 0, /* cost of multiply per each bit set */
521 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
522 COSTS_N_INSNS (18), /* HI */
523 COSTS_N_INSNS (18), /* SI */
524 COSTS_N_INSNS (18), /* DI */
525 COSTS_N_INSNS (18)}, /* other */
526 COSTS_N_INSNS (2), /* cost of movsx */
527 COSTS_N_INSNS (2), /* cost of movzx */
528 8, /* "large" insn */
529 4, /* MOVE_RATIO */
530 3, /* cost for loading QImode using movzbl */
531 {4, 5, 4}, /* cost of loading integer registers
532 in QImode, HImode and SImode.
533 Relative to reg-reg move (2). */
534 {2, 3, 2}, /* cost of storing integer registers */
535 4, /* cost of reg,reg fld/fst */
536 {6, 6, 6}, /* cost of loading fp registers
537 in SFmode, DFmode and XFmode */
538 {4, 4, 4}, /* cost of storing fp registers
539 in SFmode, DFmode and XFmode */
540 2, /* cost of moving MMX register */
541 {2, 2}, /* cost of loading MMX registers
542 in SImode and DImode */
543 {2, 2}, /* cost of storing MMX registers
544 in SImode and DImode */
545 2, /* cost of moving SSE register */
546 {2, 2, 8}, /* cost of loading SSE registers
547 in SImode, DImode and TImode */
548 {2, 2, 8}, /* cost of storing SSE registers
549 in SImode, DImode and TImode */
550 6, /* MMX or SSE register to integer */
551 32, /* size of l1 cache. */
552 32, /* size of l2 cache. Some models
553 have integrated l2 cache, but
554 optimizing for k6 is not important
555 enough to worry about that. */
556 32, /* size of prefetch block */
557 1, /* number of parallel prefetches */
558 1, /* Branch cost */
559 COSTS_N_INSNS (2), /* cost of FADD and FSUB insns. */
560 COSTS_N_INSNS (2), /* cost of FMUL instruction. */
561 COSTS_N_INSNS (56), /* cost of FDIV instruction. */
562 COSTS_N_INSNS (2), /* cost of FABS instruction. */
563 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
564 COSTS_N_INSNS (56), /* cost of FSQRT instruction. */
565 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
566 DUMMY_STRINGOP_ALGS},
567 {{libcall, {{256, rep_prefix_4_byte}, {-1, libcall}}},
568 DUMMY_STRINGOP_ALGS},
569 1, /* scalar_stmt_cost. */
570 1, /* scalar load_cost. */
571 1, /* scalar_store_cost. */
572 1, /* vec_stmt_cost. */
573 1, /* vec_to_scalar_cost. */
574 1, /* scalar_to_vec_cost. */
575 1, /* vec_align_load_cost. */
576 2, /* vec_unalign_load_cost. */
577 1, /* vec_store_cost. */
578 3, /* cond_taken_branch_cost. */
579 1, /* cond_not_taken_branch_cost. */
580 };
582 static const
583 struct processor_costs athlon_cost = {
584 COSTS_N_INSNS (1), /* cost of an add instruction */
585 COSTS_N_INSNS (2), /* cost of a lea instruction */
586 COSTS_N_INSNS (1), /* variable shift costs */
587 COSTS_N_INSNS (1), /* constant shift costs */
588 {COSTS_N_INSNS (5), /* cost of starting multiply for QI */
589 COSTS_N_INSNS (5), /* HI */
590 COSTS_N_INSNS (5), /* SI */
591 COSTS_N_INSNS (5), /* DI */
592 COSTS_N_INSNS (5)}, /* other */
593 0, /* cost of multiply per each bit set */
594 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
595 COSTS_N_INSNS (26), /* HI */
596 COSTS_N_INSNS (42), /* SI */
597 COSTS_N_INSNS (74), /* DI */
598 COSTS_N_INSNS (74)}, /* other */
599 COSTS_N_INSNS (1), /* cost of movsx */
600 COSTS_N_INSNS (1), /* cost of movzx */
601 8, /* "large" insn */
602 9, /* MOVE_RATIO */
603 4, /* cost for loading QImode using movzbl */
604 {3, 4, 3}, /* cost of loading integer registers
605 in QImode, HImode and SImode.
606 Relative to reg-reg move (2). */
607 {3, 4, 3}, /* cost of storing integer registers */
608 4, /* cost of reg,reg fld/fst */
609 {4, 4, 12}, /* cost of loading fp registers
610 in SFmode, DFmode and XFmode */
611 {6, 6, 8}, /* cost of storing fp registers
612 in SFmode, DFmode and XFmode */
613 2, /* cost of moving MMX register */
614 {4, 4}, /* cost of loading MMX registers
615 in SImode and DImode */
616 {4, 4}, /* cost of storing MMX registers
617 in SImode and DImode */
618 2, /* cost of moving SSE register */
619 {4, 4, 6}, /* cost of loading SSE registers
620 in SImode, DImode and TImode */
621 {4, 4, 5}, /* cost of storing SSE registers
622 in SImode, DImode and TImode */
623 5, /* MMX or SSE register to integer */
624 64, /* size of l1 cache. */
625 256, /* size of l2 cache. */
626 64, /* size of prefetch block */
627 6, /* number of parallel prefetches */
628 5, /* Branch cost */
629 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
630 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
631 COSTS_N_INSNS (24), /* cost of FDIV instruction. */
632 COSTS_N_INSNS (2), /* cost of FABS instruction. */
633 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
634 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
635 /* For some reason, Athlon deals better with REP prefix (relative to loops)
636 compared to K8. Alignment becomes important after 8 bytes for memcpy and
637 128 bytes for memset. */
638 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
639 DUMMY_STRINGOP_ALGS},
640 {{libcall, {{2048, rep_prefix_4_byte}, {-1, libcall}}},
641 DUMMY_STRINGOP_ALGS},
642 1, /* scalar_stmt_cost. */
643 1, /* scalar load_cost. */
644 1, /* scalar_store_cost. */
645 1, /* vec_stmt_cost. */
646 1, /* vec_to_scalar_cost. */
647 1, /* scalar_to_vec_cost. */
648 1, /* vec_align_load_cost. */
649 2, /* vec_unalign_load_cost. */
650 1, /* vec_store_cost. */
651 3, /* cond_taken_branch_cost. */
652 1, /* cond_not_taken_branch_cost. */
653 };
655 static const
656 struct processor_costs k8_cost = {
657 COSTS_N_INSNS (1), /* cost of an add instruction */
658 COSTS_N_INSNS (2), /* cost of a lea instruction */
659 COSTS_N_INSNS (1), /* variable shift costs */
660 COSTS_N_INSNS (1), /* constant shift costs */
661 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
662 COSTS_N_INSNS (4), /* HI */
663 COSTS_N_INSNS (3), /* SI */
664 COSTS_N_INSNS (4), /* DI */
665 COSTS_N_INSNS (5)}, /* other */
666 0, /* cost of multiply per each bit set */
667 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
668 COSTS_N_INSNS (26), /* HI */
669 COSTS_N_INSNS (42), /* SI */
670 COSTS_N_INSNS (74), /* DI */
671 COSTS_N_INSNS (74)}, /* other */
672 COSTS_N_INSNS (1), /* cost of movsx */
673 COSTS_N_INSNS (1), /* cost of movzx */
674 8, /* "large" insn */
675 9, /* MOVE_RATIO */
676 4, /* cost for loading QImode using movzbl */
677 {3, 4, 3}, /* cost of loading integer registers
678 in QImode, HImode and SImode.
679 Relative to reg-reg move (2). */
680 {3, 4, 3}, /* cost of storing integer registers */
681 4, /* cost of reg,reg fld/fst */
682 {4, 4, 12}, /* cost of loading fp registers
683 in SFmode, DFmode and XFmode */
684 {6, 6, 8}, /* cost of storing fp registers
685 in SFmode, DFmode and XFmode */
686 2, /* cost of moving MMX register */
687 {3, 3}, /* cost of loading MMX registers
688 in SImode and DImode */
689 {4, 4}, /* cost of storing MMX registers
690 in SImode and DImode */
691 2, /* cost of moving SSE register */
692 {4, 3, 6}, /* cost of loading SSE registers
693 in SImode, DImode and TImode */
694 {4, 4, 5}, /* cost of storing SSE registers
695 in SImode, DImode and TImode */
696 5, /* MMX or SSE register to integer */
697 64, /* size of l1 cache. */
698 512, /* size of l2 cache. */
699 64, /* size of prefetch block */
700 /* New AMD processors never drop prefetches; if they cannot be performed
701 immediately, they are queued. We set the number of simultaneous prefetches
702 to a large constant to reflect this (it is probably not a good idea to leave
703 the number of prefetches entirely unlimited, as their execution also takes
704 some time). */
705 100, /* number of parallel prefetches */
706 3, /* Branch cost */
707 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
708 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
709 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
710 COSTS_N_INSNS (2), /* cost of FABS instruction. */
711 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
712 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
713 /* K8 has an optimized REP instruction for medium-sized blocks, but for very
714 small blocks it is better to use a loop. For large blocks, a libcall can do
715 nontemporal accesses and beat the inline expansion considerably. */
716 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
717 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
718 {{libcall, {{8, loop}, {24, unrolled_loop},
719 {2048, rep_prefix_4_byte}, {-1, libcall}}},
720 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
721 4, /* scalar_stmt_cost. */
722 2, /* scalar load_cost. */
723 2, /* scalar_store_cost. */
724 5, /* vec_stmt_cost. */
725 0, /* vec_to_scalar_cost. */
726 2, /* scalar_to_vec_cost. */
727 2, /* vec_align_load_cost. */
728 3, /* vec_unalign_load_cost. */
729 3, /* vec_store_cost. */
730 3, /* cond_taken_branch_cost. */
731 2, /* cond_not_taken_branch_cost. */
732 };
734 struct processor_costs amdfam10_cost = {
735 COSTS_N_INSNS (1), /* cost of an add instruction */
736 COSTS_N_INSNS (2), /* cost of a lea instruction */
737 COSTS_N_INSNS (1), /* variable shift costs */
738 COSTS_N_INSNS (1), /* constant shift costs */
739 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
740 COSTS_N_INSNS (4), /* HI */
741 COSTS_N_INSNS (3), /* SI */
742 COSTS_N_INSNS (4), /* DI */
743 COSTS_N_INSNS (5)}, /* other */
744 0, /* cost of multiply per each bit set */
745 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
746 COSTS_N_INSNS (35), /* HI */
747 COSTS_N_INSNS (51), /* SI */
748 COSTS_N_INSNS (83), /* DI */
749 COSTS_N_INSNS (83)}, /* other */
750 COSTS_N_INSNS (1), /* cost of movsx */
751 COSTS_N_INSNS (1), /* cost of movzx */
752 8, /* "large" insn */
753 9, /* MOVE_RATIO */
754 4, /* cost for loading QImode using movzbl */
755 {3, 4, 3}, /* cost of loading integer registers
756 in QImode, HImode and SImode.
757 Relative to reg-reg move (2). */
758 {3, 4, 3}, /* cost of storing integer registers */
759 4, /* cost of reg,reg fld/fst */
760 {4, 4, 12}, /* cost of loading fp registers
761 in SFmode, DFmode and XFmode */
762 {6, 6, 8}, /* cost of storing fp registers
763 in SFmode, DFmode and XFmode */
764 2, /* cost of moving MMX register */
765 {3, 3}, /* cost of loading MMX registers
766 in SImode and DImode */
767 {4, 4}, /* cost of storing MMX registers
768 in SImode and DImode */
769 2, /* cost of moving SSE register */
770 {4, 4, 3}, /* cost of loading SSE registers
771 in SImode, DImode and TImode */
772 {4, 4, 5}, /* cost of storing SSE registers
773 in SImode, DImode and TImode */
774 3, /* MMX or SSE register to integer */
775 /* On K8
776 MOVD reg64, xmmreg Double FSTORE 4
777 MOVD reg32, xmmreg Double FSTORE 4
778 On AMDFAM10
779 MOVD reg64, xmmreg Double FADD 3
780 1/1 1/1
781 MOVD reg32, xmmreg Double FADD 3
782 1/1 1/1 */
783 64, /* size of l1 cache. */
784 512, /* size of l2 cache. */
785 64, /* size of prefetch block */
786 /* New AMD processors never drop prefetches; if they cannot be performed
787 immediately, they are queued. We set the number of simultaneous prefetches
788 to a large constant to reflect this (it is probably not a good idea to leave
789 the number of prefetches entirely unlimited, as their execution also takes
790 some time). */
791 100, /* number of parallel prefetches */
792 2, /* Branch cost */
793 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
794 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
795 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
796 COSTS_N_INSNS (2), /* cost of FABS instruction. */
797 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
798 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
800 /* AMDFAM10 has an optimized REP instruction for medium-sized blocks, but for
801 very small blocks it is better to use a loop. For large blocks, a libcall
802 can do nontemporal accesses and beat the inline expansion considerably. */
803 {{libcall, {{6, loop}, {14, unrolled_loop}, {-1, rep_prefix_4_byte}}},
804 {libcall, {{16, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
805 {{libcall, {{8, loop}, {24, unrolled_loop},
806 {2048, rep_prefix_4_byte}, {-1, libcall}}},
807 {libcall, {{48, unrolled_loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
808 4, /* scalar_stmt_cost. */
809 2, /* scalar load_cost. */
810 2, /* scalar_store_cost. */
811 6, /* vec_stmt_cost. */
812 0, /* vec_to_scalar_cost. */
813 2, /* scalar_to_vec_cost. */
814 2, /* vec_align_load_cost. */
815 2, /* vec_unalign_load_cost. */
816 2, /* vec_store_cost. */
817 2, /* cond_taken_branch_cost. */
818 1, /* cond_not_taken_branch_cost. */
819 };
821 static const
822 struct processor_costs pentium4_cost = {
823 COSTS_N_INSNS (1), /* cost of an add instruction */
824 COSTS_N_INSNS (3), /* cost of a lea instruction */
825 COSTS_N_INSNS (4), /* variable shift costs */
826 COSTS_N_INSNS (4), /* constant shift costs */
827 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
828 COSTS_N_INSNS (15), /* HI */
829 COSTS_N_INSNS (15), /* SI */
830 COSTS_N_INSNS (15), /* DI */
831 COSTS_N_INSNS (15)}, /* other */
832 0, /* cost of multiply per each bit set */
833 {COSTS_N_INSNS (56), /* cost of a divide/mod for QI */
834 COSTS_N_INSNS (56), /* HI */
835 COSTS_N_INSNS (56), /* SI */
836 COSTS_N_INSNS (56), /* DI */
837 COSTS_N_INSNS (56)}, /* other */
838 COSTS_N_INSNS (1), /* cost of movsx */
839 COSTS_N_INSNS (1), /* cost of movzx */
840 16, /* "large" insn */
841 6, /* MOVE_RATIO */
842 2, /* cost for loading QImode using movzbl */
843 {4, 5, 4}, /* cost of loading integer registers
844 in QImode, HImode and SImode.
845 Relative to reg-reg move (2). */
846 {2, 3, 2}, /* cost of storing integer registers */
847 2, /* cost of reg,reg fld/fst */
848 {2, 2, 6}, /* cost of loading fp registers
849 in SFmode, DFmode and XFmode */
850 {4, 4, 6}, /* cost of storing fp registers
851 in SFmode, DFmode and XFmode */
852 2, /* cost of moving MMX register */
853 {2, 2}, /* cost of loading MMX registers
854 in SImode and DImode */
855 {2, 2}, /* cost of storing MMX registers
856 in SImode and DImode */
857 12, /* cost of moving SSE register */
858 {12, 12, 12}, /* cost of loading SSE registers
859 in SImode, DImode and TImode */
860 {2, 2, 8}, /* cost of storing SSE registers
861 in SImode, DImode and TImode */
862 10, /* MMX or SSE register to integer */
863 8, /* size of l1 cache. */
864 256, /* size of l2 cache. */
865 64, /* size of prefetch block */
866 6, /* number of parallel prefetches */
867 2, /* Branch cost */
868 COSTS_N_INSNS (5), /* cost of FADD and FSUB insns. */
869 COSTS_N_INSNS (7), /* cost of FMUL instruction. */
870 COSTS_N_INSNS (43), /* cost of FDIV instruction. */
871 COSTS_N_INSNS (2), /* cost of FABS instruction. */
872 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
873 COSTS_N_INSNS (43), /* cost of FSQRT instruction. */
874 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
875 DUMMY_STRINGOP_ALGS},
876 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
877 {-1, libcall}}},
878 DUMMY_STRINGOP_ALGS},
879 1, /* scalar_stmt_cost. */
880 1, /* scalar load_cost. */
881 1, /* scalar_store_cost. */
882 1, /* vec_stmt_cost. */
883 1, /* vec_to_scalar_cost. */
884 1, /* scalar_to_vec_cost. */
885 1, /* vec_align_load_cost. */
886 2, /* vec_unalign_load_cost. */
887 1, /* vec_store_cost. */
888 3, /* cond_taken_branch_cost. */
889 1, /* cond_not_taken_branch_cost. */
890 };
892 static const
893 struct processor_costs nocona_cost = {
894 COSTS_N_INSNS (1), /* cost of an add instruction */
895 COSTS_N_INSNS (1), /* cost of a lea instruction */
896 COSTS_N_INSNS (1), /* variable shift costs */
897 COSTS_N_INSNS (1), /* constant shift costs */
898 {COSTS_N_INSNS (10), /* cost of starting multiply for QI */
899 COSTS_N_INSNS (10), /* HI */
900 COSTS_N_INSNS (10), /* SI */
901 COSTS_N_INSNS (10), /* DI */
902 COSTS_N_INSNS (10)}, /* other */
903 0, /* cost of multiply per each bit set */
904 {COSTS_N_INSNS (66), /* cost of a divide/mod for QI */
905 COSTS_N_INSNS (66), /* HI */
906 COSTS_N_INSNS (66), /* SI */
907 COSTS_N_INSNS (66), /* DI */
908 COSTS_N_INSNS (66)}, /* other */
909 COSTS_N_INSNS (1), /* cost of movsx */
910 COSTS_N_INSNS (1), /* cost of movzx */
911 16, /* "large" insn */
912 17, /* MOVE_RATIO */
913 4, /* cost for loading QImode using movzbl */
914 {4, 4, 4}, /* cost of loading integer registers
915 in QImode, HImode and SImode.
916 Relative to reg-reg move (2). */
917 {4, 4, 4}, /* cost of storing integer registers */
918 3, /* cost of reg,reg fld/fst */
919 {12, 12, 12}, /* cost of loading fp registers
920 in SFmode, DFmode and XFmode */
921 {4, 4, 4}, /* cost of storing fp registers
922 in SFmode, DFmode and XFmode */
923 6, /* cost of moving MMX register */
924 {12, 12}, /* cost of loading MMX registers
925 in SImode and DImode */
926 {12, 12}, /* cost of storing MMX registers
927 in SImode and DImode */
928 6, /* cost of moving SSE register */
929 {12, 12, 12}, /* cost of loading SSE registers
930 in SImode, DImode and TImode */
931 {12, 12, 12}, /* cost of storing SSE registers
932 in SImode, DImode and TImode */
933 8, /* MMX or SSE register to integer */
934 8, /* size of l1 cache. */
935 1024, /* size of l2 cache. */
936 128, /* size of prefetch block */
937 8, /* number of parallel prefetches */
938 1, /* Branch cost */
939 COSTS_N_INSNS (6), /* cost of FADD and FSUB insns. */
940 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
941 COSTS_N_INSNS (40), /* cost of FDIV instruction. */
942 COSTS_N_INSNS (3), /* cost of FABS instruction. */
943 COSTS_N_INSNS (3), /* cost of FCHS instruction. */
944 COSTS_N_INSNS (44), /* cost of FSQRT instruction. */
945 {{libcall, {{12, loop_1_byte}, {-1, rep_prefix_4_byte}}},
946 {libcall, {{32, loop}, {20000, rep_prefix_8_byte},
947 {100000, unrolled_loop}, {-1, libcall}}}},
948 {{libcall, {{6, loop_1_byte}, {48, loop}, {20480, rep_prefix_4_byte},
949 {-1, libcall}}},
950 {libcall, {{24, loop}, {64, unrolled_loop},
951 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
952 1, /* scalar_stmt_cost. */
953 1, /* scalar load_cost. */
954 1, /* scalar_store_cost. */
955 1, /* vec_stmt_cost. */
956 1, /* vec_to_scalar_cost. */
957 1, /* scalar_to_vec_cost. */
958 1, /* vec_align_load_cost. */
959 2, /* vec_unalign_load_cost. */
960 1, /* vec_store_cost. */
961 3, /* cond_taken_branch_cost. */
962 1, /* cond_not_taken_branch_cost. */
963 };
965 static const
966 struct processor_costs core2_cost = {
967 COSTS_N_INSNS (1), /* cost of an add instruction */
968 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
969 COSTS_N_INSNS (1), /* variable shift costs */
970 COSTS_N_INSNS (1), /* constant shift costs */
971 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
972 COSTS_N_INSNS (3), /* HI */
973 COSTS_N_INSNS (3), /* SI */
974 COSTS_N_INSNS (3), /* DI */
975 COSTS_N_INSNS (3)}, /* other */
976 0, /* cost of multiply per each bit set */
977 {COSTS_N_INSNS (22), /* cost of a divide/mod for QI */
978 COSTS_N_INSNS (22), /* HI */
979 COSTS_N_INSNS (22), /* SI */
980 COSTS_N_INSNS (22), /* DI */
981 COSTS_N_INSNS (22)}, /* other */
982 COSTS_N_INSNS (1), /* cost of movsx */
983 COSTS_N_INSNS (1), /* cost of movzx */
984 8, /* "large" insn */
985 16, /* MOVE_RATIO */
986 2, /* cost for loading QImode using movzbl */
987 {6, 6, 6}, /* cost of loading integer registers
988 in QImode, HImode and SImode.
989 Relative to reg-reg move (2). */
990 {4, 4, 4}, /* cost of storing integer registers */
991 2, /* cost of reg,reg fld/fst */
992 {6, 6, 6}, /* cost of loading fp registers
993 in SFmode, DFmode and XFmode */
994 {4, 4, 4}, /* cost of storing fp registers
995 in SFmode, DFmode and XFmode */
996 2, /* cost of moving MMX register */
997 {6, 6}, /* cost of loading MMX registers
998 in SImode and DImode */
999 {4, 4}, /* cost of storing MMX registers
1000 in SImode and DImode */
1001 2, /* cost of moving SSE register */
1002 {6, 6, 6}, /* cost of loading SSE registers
1003 in SImode, DImode and TImode */
1004 {4, 4, 4}, /* cost of storing SSE registers
1005 in SImode, DImode and TImode */
1006 2, /* MMX or SSE register to integer */
1007 32, /* size of l1 cache. */
1008 2048, /* size of l2 cache. */
1009 128, /* size of prefetch block */
1010 8, /* number of parallel prefetches */
1011 3, /* Branch cost */
1012 COSTS_N_INSNS (3), /* cost of FADD and FSUB insns. */
1013 COSTS_N_INSNS (5), /* cost of FMUL instruction. */
1014 COSTS_N_INSNS (32), /* cost of FDIV instruction. */
1015 COSTS_N_INSNS (1), /* cost of FABS instruction. */
1016 COSTS_N_INSNS (1), /* cost of FCHS instruction. */
1017 COSTS_N_INSNS (58), /* cost of FSQRT instruction. */
1018 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1019 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1020 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1021 {{libcall, {{8, loop}, {15, unrolled_loop},
1022 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1023 {libcall, {{24, loop}, {32, unrolled_loop},
1024 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1025 1, /* scalar_stmt_cost. */
1026 1, /* scalar load_cost. */
1027 1, /* scalar_store_cost. */
1028 1, /* vec_stmt_cost. */
1029 1, /* vec_to_scalar_cost. */
1030 1, /* scalar_to_vec_cost. */
1031 1, /* vec_align_load_cost. */
1032 2, /* vec_unalign_load_cost. */
1033 1, /* vec_store_cost. */
1034 3, /* cond_taken_branch_cost. */
1035 1, /* cond_not_taken_branch_cost. */
1036 };
1038 static const
1039 struct processor_costs atom_cost = {
1040 COSTS_N_INSNS (1), /* cost of an add instruction */
1041 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1042 COSTS_N_INSNS (1), /* variable shift costs */
1043 COSTS_N_INSNS (1), /* constant shift costs */
1044 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1045 COSTS_N_INSNS (4), /* HI */
1046 COSTS_N_INSNS (3), /* SI */
1047 COSTS_N_INSNS (4), /* DI */
1048 COSTS_N_INSNS (2)}, /* other */
1049 0, /* cost of multiply per each bit set */
1050 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1051 COSTS_N_INSNS (26), /* HI */
1052 COSTS_N_INSNS (42), /* SI */
1053 COSTS_N_INSNS (74), /* DI */
1054 COSTS_N_INSNS (74)}, /* other */
1055 COSTS_N_INSNS (1), /* cost of movsx */
1056 COSTS_N_INSNS (1), /* cost of movzx */
1057 8, /* "large" insn */
1058 17, /* MOVE_RATIO */
1059 2, /* cost for loading QImode using movzbl */
1060 {4, 4, 4}, /* cost of loading integer registers
1061 in QImode, HImode and SImode.
1062 Relative to reg-reg move (2). */
1063 {4, 4, 4}, /* cost of storing integer registers */
1064 4, /* cost of reg,reg fld/fst */
1065 {12, 12, 12}, /* cost of loading fp registers
1066 in SFmode, DFmode and XFmode */
1067 {6, 6, 8}, /* cost of storing fp registers
1068 in SFmode, DFmode and XFmode */
1069 2, /* cost of moving MMX register */
1070 {8, 8}, /* cost of loading MMX registers
1071 in SImode and DImode */
1072 {8, 8}, /* cost of storing MMX registers
1073 in SImode and DImode */
1074 2, /* cost of moving SSE register */
1075 {8, 8, 8}, /* cost of loading SSE registers
1076 in SImode, DImode and TImode */
1077 {8, 8, 8}, /* cost of storing SSE registers
1078 in SImode, DImode and TImode */
1079 5, /* MMX or SSE register to integer */
1080 32, /* size of l1 cache. */
1081 256, /* size of l2 cache. */
1082 64, /* size of prefetch block */
1083 6, /* number of parallel prefetches */
1084 3, /* Branch cost */
1085 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1086 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1087 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1088 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1089 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1090 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1091 {{libcall, {{11, loop}, {-1, rep_prefix_4_byte}}},
1092 {libcall, {{32, loop}, {64, rep_prefix_4_byte},
1093 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1094 {{libcall, {{8, loop}, {15, unrolled_loop},
1095 {2048, rep_prefix_4_byte}, {-1, libcall}}},
1096 {libcall, {{24, loop}, {32, unrolled_loop},
1097 {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1098 1, /* scalar_stmt_cost. */
1099 1, /* scalar load_cost. */
1100 1, /* scalar_store_cost. */
1101 1, /* vec_stmt_cost. */
1102 1, /* vec_to_scalar_cost. */
1103 1, /* scalar_to_vec_cost. */
1104 1, /* vec_align_load_cost. */
1105 2, /* vec_unalign_load_cost. */
1106 1, /* vec_store_cost. */
1107 3, /* cond_taken_branch_cost. */
1108 1, /* cond_not_taken_branch_cost. */
1109 };
1111 /* Generic64 should produce code tuned for Nocona and K8. */
1112 static const
1113 struct processor_costs generic64_cost = {
1114 COSTS_N_INSNS (1), /* cost of an add instruction */
1115 /* On all chips taken into consideration, lea takes 2 cycles or more. With
1116 this cost, however, our current implementation of synth_mult results in
1117 the use of unnecessary temporary registers, causing regressions on several
1118 SPECfp benchmarks. */
1119 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1120 COSTS_N_INSNS (1), /* variable shift costs */
1121 COSTS_N_INSNS (1), /* constant shift costs */
1122 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1123 COSTS_N_INSNS (4), /* HI */
1124 COSTS_N_INSNS (3), /* SI */
1125 COSTS_N_INSNS (4), /* DI */
1126 COSTS_N_INSNS (2)}, /* other */
1127 0, /* cost of multiply per each bit set */
1128 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1129 COSTS_N_INSNS (26), /* HI */
1130 COSTS_N_INSNS (42), /* SI */
1131 COSTS_N_INSNS (74), /* DI */
1132 COSTS_N_INSNS (74)}, /* other */
1133 COSTS_N_INSNS (1), /* cost of movsx */
1134 COSTS_N_INSNS (1), /* cost of movzx */
1135 8, /* "large" insn */
1136 17, /* MOVE_RATIO */
1137 4, /* cost for loading QImode using movzbl */
1138 {4, 4, 4}, /* cost of loading integer registers
1139 in QImode, HImode and SImode.
1140 Relative to reg-reg move (2). */
1141 {4, 4, 4}, /* cost of storing integer registers */
1142 4, /* cost of reg,reg fld/fst */
1143 {12, 12, 12}, /* cost of loading fp registers
1144 in SFmode, DFmode and XFmode */
1145 {6, 6, 8}, /* cost of storing fp registers
1146 in SFmode, DFmode and XFmode */
1147 2, /* cost of moving MMX register */
1148 {8, 8}, /* cost of loading MMX registers
1149 in SImode and DImode */
1150 {8, 8}, /* cost of storing MMX registers
1151 in SImode and DImode */
1152 2, /* cost of moving SSE register */
1153 {8, 8, 8}, /* cost of loading SSE registers
1154 in SImode, DImode and TImode */
1155 {8, 8, 8}, /* cost of storing SSE registers
1156 in SImode, DImode and TImode */
1157 5, /* MMX or SSE register to integer */
1158 32, /* size of l1 cache. */
1159 512, /* size of l2 cache. */
1160 64, /* size of prefetch block */
1161 6, /* number of parallel prefetches */
1162 /* Benchmarks show large regressions on the K8 sixtrack benchmark when this
1163 value is increased to the perhaps more appropriate value of 5. */
1164 3, /* Branch cost */
1165 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1166 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1167 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1168 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1169 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1170 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1171 {DUMMY_STRINGOP_ALGS,
1172 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1173 {DUMMY_STRINGOP_ALGS,
1174 {libcall, {{32, loop}, {8192, rep_prefix_8_byte}, {-1, libcall}}}},
1175 1, /* scalar_stmt_cost. */
1176 1, /* scalar load_cost. */
1177 1, /* scalar_store_cost. */
1178 1, /* vec_stmt_cost. */
1179 1, /* vec_to_scalar_cost. */
1180 1, /* scalar_to_vec_cost. */
1181 1, /* vec_align_load_cost. */
1182 2, /* vec_unalign_load_cost. */
1183 1, /* vec_store_cost. */
1184 3, /* cond_taken_branch_cost. */
1185 1, /* cond_not_taken_branch_cost. */
1186 };
1188 /* Generic32 should produce code tuned for Athlon, PPro, Pentium4, Nocona and K8. */
1189 static const
1190 struct processor_costs generic32_cost = {
1191 COSTS_N_INSNS (1), /* cost of an add instruction */
1192 COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
1193 COSTS_N_INSNS (1), /* variable shift costs */
1194 COSTS_N_INSNS (1), /* constant shift costs */
1195 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
1196 COSTS_N_INSNS (4), /* HI */
1197 COSTS_N_INSNS (3), /* SI */
1198 COSTS_N_INSNS (4), /* DI */
1199 COSTS_N_INSNS (2)}, /* other */
1200 0, /* cost of multiply per each bit set */
1201 {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
1202 COSTS_N_INSNS (26), /* HI */
1203 COSTS_N_INSNS (42), /* SI */
1204 COSTS_N_INSNS (74), /* DI */
1205 COSTS_N_INSNS (74)}, /* other */
1206 COSTS_N_INSNS (1), /* cost of movsx */
1207 COSTS_N_INSNS (1), /* cost of movzx */
1208 8, /* "large" insn */
1209 17, /* MOVE_RATIO */
1210 4, /* cost for loading QImode using movzbl */
1211 {4, 4, 4}, /* cost of loading integer registers
1212 in QImode, HImode and SImode.
1213 Relative to reg-reg move (2). */
1214 {4, 4, 4}, /* cost of storing integer registers */
1215 4, /* cost of reg,reg fld/fst */
1216 {12, 12, 12}, /* cost of loading fp registers
1217 in SFmode, DFmode and XFmode */
1218 {6, 6, 8}, /* cost of storing fp registers
1219 in SFmode, DFmode and XFmode */
1220 2, /* cost of moving MMX register */
1221 {8, 8}, /* cost of loading MMX registers
1222 in SImode and DImode */
1223 {8, 8}, /* cost of storing MMX registers
1224 in SImode and DImode */
1225 2, /* cost of moving SSE register */
1226 {8, 8, 8}, /* cost of loading SSE registers
1227 in SImode, DImode and TImode */
1228 {8, 8, 8}, /* cost of storing SSE registers
1229 in SImode, DImode and TImode */
1230 5, /* MMX or SSE register to integer */
1231 32, /* size of l1 cache. */
1232 256, /* size of l2 cache. */
1233 64, /* size of prefetch block */
1234 6, /* number of parallel prefetches */
1235 3, /* Branch cost */
1236 COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
1237 COSTS_N_INSNS (8), /* cost of FMUL instruction. */
1238 COSTS_N_INSNS (20), /* cost of FDIV instruction. */
1239 COSTS_N_INSNS (8), /* cost of FABS instruction. */
1240 COSTS_N_INSNS (8), /* cost of FCHS instruction. */
1241 COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
1242 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1243 DUMMY_STRINGOP_ALGS},
1244 {{libcall, {{32, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}},
1245 DUMMY_STRINGOP_ALGS},
1246 1, /* scalar_stmt_cost. */
1247 1, /* scalar load_cost. */
1248 1, /* scalar_store_cost. */
1249 1, /* vec_stmt_cost. */
1250 1, /* vec_to_scalar_cost. */
1251 1, /* scalar_to_vec_cost. */
1252 1, /* vec_align_load_cost. */
1253 2, /* vec_unalign_load_cost. */
1254 1, /* vec_store_cost. */
1255 3, /* cond_taken_branch_cost. */
1256 1, /* cond_not_taken_branch_cost. */
1257 };
1259 const struct processor_costs *ix86_cost = &pentium_cost;
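/* The rest of the backend consults the selected cost table through this
   pointer.  Roughly (a sketch of what the option-override code later in this
   file does), it is retargeted from the default pentium_cost according to
   -mtune / -Os:

     ix86_cost = optimize_size ? &ix86_size_cost
                               : processor_target_table[ix86_tune].cost;
*/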
1261 /* Processor feature/optimization bitmasks. */
1262 #define m_386 (1<<PROCESSOR_I386)
1263 #define m_486 (1<<PROCESSOR_I486)
1264 #define m_PENT (1<<PROCESSOR_PENTIUM)
1265 #define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
1266 #define m_PENT4 (1<<PROCESSOR_PENTIUM4)
1267 #define m_NOCONA (1<<PROCESSOR_NOCONA)
1268 #define m_CORE2 (1<<PROCESSOR_CORE2)
1269 #define m_ATOM (1<<PROCESSOR_ATOM)
1271 #define m_GEODE (1<<PROCESSOR_GEODE)
1272 #define m_K6 (1<<PROCESSOR_K6)
1273 #define m_K6_GEODE (m_K6 | m_GEODE)
1274 #define m_K8 (1<<PROCESSOR_K8)
1275 #define m_ATHLON (1<<PROCESSOR_ATHLON)
1276 #define m_ATHLON_K8 (m_K8 | m_ATHLON)
1277 #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
1278 #define m_AMD_MULTIPLE (m_K8 | m_ATHLON | m_AMDFAM10)
1280 #define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
1281 #define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
1283 /* Generic instruction choice should be a common subset of the supported
1284 CPUs (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
1285 #define m_GENERIC (m_GENERIC32 | m_GENERIC64)
1287 /* Feature tests against the various tunings. */
1288 unsigned char ix86_tune_features[X86_TUNE_LAST];
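/* How these masks are consumed (a sketch of what the option-override code
   later in this file does): each entry of initial_ix86_tune_features below is
   a bitmask of the m_* values above, and a tuning flag is turned on when the
   bit for the CPU selected with -mtune is set:

     ix86_tune_mask = 1 << ix86_tune;
     for (i = 0; i < X86_TUNE_LAST; ++i)
       ix86_tune_features[i]
	 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
*/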
1290 /* Feature tests against the various tunings used to create ix86_tune_features
1291 based on the processor mask. */
1292 static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
1293 /* X86_TUNE_USE_LEAVE: Leave does not affect Nocona SPEC2000 results
1294 negatively, so enabling for Generic64 seems like good code size
1295 tradeoff. We can't enable it for 32bit generic because it does not
1296 work well with PPro base chips. */
1297 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_CORE2 | m_GENERIC64,
1299 /* X86_TUNE_PUSH_MEMORY */
1300 m_386 | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4
1301 | m_NOCONA | m_CORE2 | m_GENERIC,
1303 /* X86_TUNE_ZERO_EXTEND_WITH_AND */
1304 m_486 | m_PENT,
1306 /* X86_TUNE_UNROLL_STRLEN */
1307 m_486 | m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_K6
1308 | m_CORE2 | m_GENERIC,
1310 /* X86_TUNE_DEEP_BRANCH_PREDICTION */
1311 m_ATOM | m_PPRO | m_K6_GEODE | m_AMD_MULTIPLE | m_PENT4 | m_GENERIC,
1313 /* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
1314 on simulation results. But after P4 was made, no performance benefit
1315 was observed with branch hints. It also increases the code size.
1316 As a result, icc never generates branch hints. */
1317 0,
1319 /* X86_TUNE_DOUBLE_WITH_ADD */
1320 ~m_386,
1322 /* X86_TUNE_USE_SAHF */
1323 m_ATOM | m_PPRO | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_PENT4
1324 | m_NOCONA | m_CORE2 | m_GENERIC,
1326 /* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
1327 partial dependencies. */
1328 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA
1329 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */,
1331 /* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
1332 register stalls on Generic32 compilation setting as well. However
1333 in current implementation the partial register stalls are not eliminated
1334 very well - they can be introduced via subregs synthesized by combine
1335 and can happen in caller/callee saving sequences. Because this option
1336 pays back little on PPro based chips and is in conflict with partial reg
1337 dependencies used by Athlon/P4 based chips, it is better to leave it off
1338 for generic32 for now. */
1339 m_PPRO,
1341 /* X86_TUNE_PARTIAL_FLAG_REG_STALL */
1342 m_CORE2 | m_GENERIC,
1344 /* X86_TUNE_USE_HIMODE_FIOP */
1345 m_386 | m_486 | m_K6_GEODE,
1347 /* X86_TUNE_USE_SIMODE_FIOP */
1348 ~(m_PPRO | m_AMD_MULTIPLE | m_PENT | m_ATOM | m_CORE2 | m_GENERIC),
1350 /* X86_TUNE_USE_MOV0 */
1351 m_K6,
1353 /* X86_TUNE_USE_CLTD */
1354 ~(m_PENT | m_ATOM | m_K6 | m_CORE2 | m_GENERIC),
1356 /* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
1357 m_PENT4,
1359 /* X86_TUNE_SPLIT_LONG_MOVES */
1360 m_PPRO,
1362 /* X86_TUNE_READ_MODIFY_WRITE */
1363 ~m_PENT,
1365 /* X86_TUNE_READ_MODIFY */
1366 ~(m_PENT | m_PPRO),
1368 /* X86_TUNE_PROMOTE_QIMODE */
1369 m_K6_GEODE | m_PENT | m_ATOM | m_386 | m_486 | m_AMD_MULTIPLE
1370 | m_CORE2 | m_GENERIC /* | m_PENT4 ? */,
1372 /* X86_TUNE_FAST_PREFIX */
1373 ~(m_PENT | m_486 | m_386),
1375 /* X86_TUNE_SINGLE_STRINGOP */
1376 m_386 | m_PENT4 | m_NOCONA,
1378 /* X86_TUNE_QIMODE_MATH */
1379 ~0,
1381 /* X86_TUNE_HIMODE_MATH: On PPro this flag is meant to avoid partial
1382 register stalls. Just like X86_TUNE_PARTIAL_REG_STALL this option
1383 might be considered for Generic32 if our scheme for avoiding partial
1384 stalls was more effective. */
1385 ~m_PPRO,
1387 /* X86_TUNE_PROMOTE_QI_REGS */
1388 0,
1390 /* X86_TUNE_PROMOTE_HI_REGS */
1391 m_PPRO,
1393 /* X86_TUNE_ADD_ESP_4: Enable if add/sub is preferred over 1/2 push/pop. */
1394 m_ATOM | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT4 | m_NOCONA
1395 | m_CORE2 | m_GENERIC,
1397 /* X86_TUNE_ADD_ESP_8 */
1398 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_K6_GEODE | m_386
1399 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1401 /* X86_TUNE_SUB_ESP_4 */
1402 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2
1403 | m_GENERIC,
1405 /* X86_TUNE_SUB_ESP_8 */
1406 m_AMD_MULTIPLE | m_ATOM | m_PPRO | m_386 | m_486
1407 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1409 /* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
1410 for DFmode copies */
1411 ~(m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1412 | m_GENERIC | m_GEODE),
1414 /* X86_TUNE_PARTIAL_REG_DEPENDENCY */
1415 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1417 /* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
1418 conflict here between PPro/Pentium4-based chips that treat 128-bit
1419 SSE registers as single units versus K8-based chips that divide SSE
1420 registers into two 64-bit halves. This knob promotes all store destinations
1421 to be 128-bit to allow register renaming on 128-bit SSE units, but usually
1422 results in one extra micro-op on 64-bit SSE units. Experimental results
1423 show that disabling this option on P4 causes over a 20% SPECfp regression,
1424 while enabling it on K8 causes roughly a 2.4% regression that can be partly
1425 masked by careful scheduling of moves. */
1426 m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC
1427 | m_AMDFAM10,
1429 /* X86_TUNE_SSE_UNALIGNED_MOVE_OPTIMAL */
1430 m_AMDFAM10,
1432 /* X86_TUNE_SSE_SPLIT_REGS: Set for machines where the type and dependencies
1433 are resolved on SSE register parts instead of whole registers, so we may
1434 maintain just the lower part of scalar values in the proper format, leaving
1435 the upper part undefined. */
1436 m_ATHLON_K8,
1438 /* X86_TUNE_SSE_TYPELESS_STORES */
1439 m_AMD_MULTIPLE,
1441 /* X86_TUNE_SSE_LOAD0_BY_PXOR */
1442 m_PPRO | m_PENT4 | m_NOCONA,
1444 /* X86_TUNE_MEMORY_MISMATCH_STALL */
1445 m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC,
1447 /* X86_TUNE_PROLOGUE_USING_MOVE */
1448 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1450 /* X86_TUNE_EPILOGUE_USING_MOVE */
1451 m_ATHLON_K8 | m_ATOM | m_PPRO | m_CORE2 | m_GENERIC,
1453 /* X86_TUNE_SHIFT1 */
1454 ~m_486,
1456 /* X86_TUNE_USE_FFREEP */
1457 m_AMD_MULTIPLE,
1459 /* X86_TUNE_INTER_UNIT_MOVES */
1460 ~(m_AMD_MULTIPLE | m_ATOM | m_GENERIC),
1462 /* X86_TUNE_INTER_UNIT_CONVERSIONS */
1463 ~(m_AMDFAM10),
1465 /* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
1466 than 4 branch instructions in the 16 byte window. */
1467 m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4 | m_NOCONA | m_CORE2
1468 | m_GENERIC,
1470 /* X86_TUNE_SCHEDULE */
1471 m_PPRO | m_AMD_MULTIPLE | m_K6_GEODE | m_PENT | m_ATOM | m_CORE2
1472 | m_GENERIC,
1474 /* X86_TUNE_USE_BT */
1475 m_AMD_MULTIPLE | m_ATOM | m_CORE2 | m_GENERIC,
1477 /* X86_TUNE_USE_INCDEC */
1478 ~(m_PENT4 | m_NOCONA | m_GENERIC | m_ATOM),
1480 /* X86_TUNE_PAD_RETURNS */
1481 m_AMD_MULTIPLE | m_CORE2 | m_GENERIC,
1483 /* X86_TUNE_EXT_80387_CONSTANTS */
1484 m_K6_GEODE | m_ATHLON_K8 | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO
1485 | m_CORE2 | m_GENERIC,
1487 /* X86_TUNE_SHORTEN_X87_SSE */
1488 ~m_K8,
1490 /* X86_TUNE_AVOID_VECTOR_DECODE */
1491 m_K8 | m_GENERIC64,
1493 /* X86_TUNE_PROMOTE_HIMODE_IMUL: Modern CPUs have same latency for HImode
1494 and SImode multiply, but 386 and 486 do HImode multiply faster. */
1495 ~(m_386 | m_486),
1497 /* X86_TUNE_SLOW_IMUL_IMM32_MEM: Imul of 32-bit constant and memory is
1498 vector path on AMD machines. */
1499 m_K8 | m_GENERIC64 | m_AMDFAM10,
1501 /* X86_TUNE_SLOW_IMUL_IMM8: Imul of 8-bit constant is vector path on AMD
1502 machines. */
1503 m_K8 | m_GENERIC64 | m_AMDFAM10,
1505 /* X86_TUNE_MOVE_M1_VIA_OR: On pentiums, it is faster to load -1 via OR
1506 than a MOV. */
1507 m_PENT,
1509 /* X86_TUNE_NOT_UNPAIRABLE: NOT is not pairable on Pentium, while XOR is,
1510 but one byte longer. */
1511 m_PENT,
1513 /* X86_TUNE_NOT_VECTORMODE: On AMD K6, NOT is vector decoded with a memory
1514 operand that cannot be represented using a modRM byte. The XOR
1515 replacement is long decoded, so this split helps here as well. */
1516 m_K6,
1518 /* X86_TUNE_USE_VECTOR_FP_CONVERTS: Prefer vector packed SSE conversion
1519 from FP to FP. */
1520 m_AMDFAM10 | m_GENERIC,
1522 /* X86_TUNE_USE_VECTOR_CONVERTS: Prefer vector packed SSE conversion
1523 from integer to FP. */
1524 m_AMDFAM10,
1526 /* X86_TUNE_FUSE_CMP_AND_BRANCH: Fuse a compare or test instruction
1527 with a subsequent conditional jump instruction into a single
1528 compare-and-branch uop. */
1529 m_CORE2,
1531 /* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
1532 will impact LEA instruction selection. */
1533 m_ATOM,
1536 /* Feature tests against the various architecture variations. */
1537 unsigned char ix86_arch_features[X86_ARCH_LAST];
1539 /* Feature tests against the various architecture variations, used to create
1540 ix86_arch_features based on the processor mask. */
1541 static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
1542 /* X86_ARCH_CMOVE: Conditional move was added for pentiumpro. */
1543 ~(m_386 | m_486 | m_PENT | m_K6),
1545 /* X86_ARCH_CMPXCHG: Compare and exchange was added for 80486. */
1546 ~m_386,
1548 /* X86_ARCH_CMPXCHG8B: Compare and exchange 8 bytes was added for pentium. */
1549 ~(m_386 | m_486),
1551 /* X86_ARCH_XADD: Exchange and add was added for 80486. */
1552 ~m_386,
1554 /* X86_ARCH_BSWAP: Byteswap was added for 80486. */
1555 ~m_386,
1558 static const unsigned int x86_accumulate_outgoing_args
1559 = m_AMD_MULTIPLE | m_ATOM | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1560 | m_GENERIC;
1562 static const unsigned int x86_arch_always_fancy_math_387
1563 = m_PENT | m_ATOM | m_PPRO | m_AMD_MULTIPLE | m_PENT4
1564 | m_NOCONA | m_CORE2 | m_GENERIC;
1566 static enum stringop_alg stringop_alg = no_stringop;
1568 /* In case the average insn count for a single function invocation is
1569 lower than this constant, emit fast (but longer) prologue and
1570 epilogue code. */
1571 #define FAST_PROLOGUE_INSN_COUNT 20
1573 /* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
1574 static const char *const qi_reg_name[] = QI_REGISTER_NAMES;
1575 static const char *const qi_high_reg_name[] = QI_HIGH_REGISTER_NAMES;
1576 static const char *const hi_reg_name[] = HI_REGISTER_NAMES;
1578 /* Array of the smallest class containing reg number REGNO, indexed by
1579 REGNO. Used by REGNO_REG_CLASS in i386.h. */
1581 enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER] =
1583 /* ax, dx, cx, bx */
1584 AREG, DREG, CREG, BREG,
1585 /* si, di, bp, sp */
1586 SIREG, DIREG, NON_Q_REGS, NON_Q_REGS,
1587 /* FP registers */
1588 FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
1589 FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
1590 /* arg pointer */
1591 NON_Q_REGS,
1592 /* flags, fpsr, fpcr, frame */
1593 NO_REGS, NO_REGS, NO_REGS, NON_Q_REGS,
1594 /* SSE registers */
1595 SSE_FIRST_REG, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1596 SSE_REGS, SSE_REGS,
1597 /* MMX registers */
1598 MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS, MMX_REGS,
1599 MMX_REGS, MMX_REGS,
1600 /* REX registers */
1601 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1602 NON_Q_REGS, NON_Q_REGS, NON_Q_REGS, NON_Q_REGS,
1603 /* SSE REX registers */
1604 SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS, SSE_REGS,
1605 SSE_REGS, SSE_REGS,
1608 /* The "default" register map used in 32bit mode. */
1610 int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
1612 0, 2, 1, 3, 6, 7, 4, 5, /* general regs */
1613 12, 13, 14, 15, 16, 17, 18, 19, /* fp regs */
1614 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1615 21, 22, 23, 24, 25, 26, 27, 28, /* SSE */
1616 29, 30, 31, 32, 33, 34, 35, 36, /* MMX */
1617 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1618 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
1621 /* The "default" register map used in 64bit mode. */
1623 int const dbx64_register_map[FIRST_PSEUDO_REGISTER] =
1625 0, 1, 2, 3, 4, 5, 6, 7, /* general regs */
1626 33, 34, 35, 36, 37, 38, 39, 40, /* fp regs */
1627 -1, -1, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1628 17, 18, 19, 20, 21, 22, 23, 24, /* SSE */
1629 41, 42, 43, 44, 45, 46, 47, 48, /* MMX */
1630 8,9,10,11,12,13,14,15, /* extended integer registers */
1631 25, 26, 27, 28, 29, 30, 31, 32, /* extended SSE registers */
1634 /* Define the register numbers to be used in Dwarf debugging information.
1635 The SVR4 reference port C compiler uses the following register numbers
1636 in its Dwarf output code:
1637 0 for %eax (gcc regno = 0)
1638 1 for %ecx (gcc regno = 2)
1639 2 for %edx (gcc regno = 1)
1640 3 for %ebx (gcc regno = 3)
1641 4 for %esp (gcc regno = 7)
1642 5 for %ebp (gcc regno = 6)
1643 6 for %esi (gcc regno = 4)
1644 7 for %edi (gcc regno = 5)
1645 The following three DWARF register numbers are never generated by
1646 the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
1647 believes these numbers have these meanings.
1648 8 for %eip (no gcc equivalent)
1649 9 for %eflags (gcc regno = 17)
1650 10 for %trapno (no gcc equivalent)
1651 It is not at all clear how we should number the FP stack registers
1652 for the x86 architecture. If the version of SDB on x86/svr4 were
1653 a bit less brain dead with respect to floating-point then we would
1654 have a precedent to follow with respect to DWARF register numbers
1655 for x86 FP registers, but the SDB on x86/svr4 is so completely
1656 broken with respect to FP registers that it is hardly worth thinking
1657 of it as something to strive for compatibility with.
1658 The version of x86/svr4 SDB I have at the moment does (partially)
1659 seem to believe that DWARF register number 11 is associated with
1660 the x86 register %st(0), but that's about all. Higher DWARF
1661 register numbers don't seem to be associated with anything in
1662 particular, and even for DWARF regno 11, SDB only seems to under-
1663 stand that it should say that a variable lives in %st(0) (when
1664 asked via an `=' command) if we said it was in DWARF regno 11,
1665 but SDB still prints garbage when asked for the value of the
1666 variable in question (via a `/' command).
1667 (Also note that the labels SDB prints for various FP stack regs
1668 when doing an `x' command are all wrong.)
1669 Note that these problems generally don't affect the native SVR4
1670 C compiler because it doesn't allow the use of -O with -g and
1671 because when it is *not* optimizing, it allocates a memory
1672 location for each floating-point variable, and the memory
1673 location is what gets described in the DWARF AT_location
1674 attribute for the variable in question.
1675 Regardless of the severe mental illness of the x86/svr4 SDB, we
1676 do something sensible here and we use the following DWARF
1677 register numbers. Note that these are all stack-top-relative
1678 numbers.
1679 11 for %st(0) (gcc regno = 8)
1680 12 for %st(1) (gcc regno = 9)
1681 13 for %st(2) (gcc regno = 10)
1682 14 for %st(3) (gcc regno = 11)
1683 15 for %st(4) (gcc regno = 12)
1684 16 for %st(5) (gcc regno = 13)
1685 17 for %st(6) (gcc regno = 14)
1686 18 for %st(7) (gcc regno = 15)
1688 int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER] =
1690 0, 2, 1, 3, 6, 7, 5, 4, /* general regs */
1691 11, 12, 13, 14, 15, 16, 17, 18, /* fp regs */
1692 -1, 9, -1, -1, -1, /* arg, flags, fpsr, fpcr, frame */
1693 21, 22, 23, 24, 25, 26, 27, 28, /* SSE registers */
1694 29, 30, 31, 32, 33, 34, 35, 36, /* MMX registers */
1695 -1, -1, -1, -1, -1, -1, -1, -1, /* extended integer registers */
1696 -1, -1, -1, -1, -1, -1, -1, -1, /* extended SSE registers */
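/* Worked example (derived from the two tables above, for illustration only):
   %ecx is gcc regno 2, so both the default 32bit map and the SVR4 map give
   DWARF register 1 for it; %ebp (gcc regno 6) maps to 4 in the default map
   but to 5 in the SVR4 map, the two maps swapping the numbers used for
   %ebp and %esp.  */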
1699 /* Test and compare insns in i386.md store the information needed to
1700 generate branch and scc insns here. */
1702 rtx ix86_compare_op0 = NULL_RTX;
1703 rtx ix86_compare_op1 = NULL_RTX;
1705 /* Define parameter passing and return registers. */
1707 static int const x86_64_int_parameter_registers[6] =
1709 DI_REG, SI_REG, DX_REG, CX_REG, R8_REG, R9_REG
1712 static int const x86_64_ms_abi_int_parameter_registers[4] =
1714 CX_REG, DX_REG, R8_REG, R9_REG
1717 static int const x86_64_int_return_registers[4] =
1719 AX_REG, DX_REG, DI_REG, SI_REG
1722 /* Define the structure for the machine field in struct function. */
1724 struct GTY(()) stack_local_entry {
1725 unsigned short mode;
1726 unsigned short n;
1727 rtx rtl;
1728 struct stack_local_entry *next;
1731 /* Structure describing stack frame layout.
1732 Stack grows downward:
1734 [arguments]
1735 <- ARG_POINTER
1736 saved pc
1738 saved frame pointer if frame_pointer_needed
1739 <- HARD_FRAME_POINTER
1740 [saved regs]
1742 [padding0]
1744 [saved SSE regs]
1746 [padding1] \
1748 [va_arg registers] (
1749 > to_allocate <- FRAME_POINTER
1750 [frame] (
1752 [padding2] /
1754 struct ix86_frame
1756 int padding0;
1757 int nsseregs;
1758 int nregs;
1759 int padding1;
1760 int va_arg_size;
1761 HOST_WIDE_INT frame;
1762 int padding2;
1763 int outgoing_arguments_size;
1764 int red_zone_size;
1766 HOST_WIDE_INT to_allocate;
1767 /* The offsets relative to ARG_POINTER. */
1768 HOST_WIDE_INT frame_pointer_offset;
1769 HOST_WIDE_INT hard_frame_pointer_offset;
1770 HOST_WIDE_INT stack_pointer_offset;
1772 /* When save_regs_using_mov is set, emit prologue using
1773 move instead of push instructions. */
1774 bool save_regs_using_mov;
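  /* Note (a reading of the layout diagram above, not new information):
     to_allocate spans the bracketed block, i.e. [padding1], the
     [va_arg registers], the [frame] proper and [padding2], matching the
     padding1, va_arg_size, frame and padding2 fields of this structure.  */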
1777 /* Code model option. */
1778 enum cmodel ix86_cmodel;
1779 /* Asm dialect. */
1780 enum asm_dialect ix86_asm_dialect = ASM_ATT;
1781 /* TLS dialects. */
1782 enum tls_dialect ix86_tls_dialect = TLS_DIALECT_GNU;
1784 /* Which unit we are generating floating point math for. */
1785 enum fpmath_unit ix86_fpmath;
1787 /* Which cpu are we scheduling for. */
1788 enum attr_cpu ix86_schedule;
1790 /* Which cpu are we optimizing for. */
1791 enum processor_type ix86_tune;
1793 /* Which instruction set architecture to use. */
1794 enum processor_type ix86_arch;
1796 /* true if sse prefetch instruction is not NOOP. */
1797 int x86_prefetch_sse;
1799 /* ix86_regparm_string as a number */
1800 static int ix86_regparm;
1802 /* -mstackrealign option */
1803 extern int ix86_force_align_arg_pointer;
1804 static const char ix86_force_align_arg_pointer_string[]
1805 = "force_align_arg_pointer";
1807 static rtx (*ix86_gen_leave) (void);
1808 static rtx (*ix86_gen_pop1) (rtx);
1809 static rtx (*ix86_gen_add3) (rtx, rtx, rtx);
1810 static rtx (*ix86_gen_sub3) (rtx, rtx, rtx);
1811 static rtx (*ix86_gen_sub3_carry) (rtx, rtx, rtx, rtx);
1812 static rtx (*ix86_gen_one_cmpl2) (rtx, rtx);
1813 static rtx (*ix86_gen_monitor) (rtx, rtx, rtx);
1814 static rtx (*ix86_gen_andsp) (rtx, rtx, rtx);
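/* Note (an observation, not from the original comments): these hooks let the
   rest of the backend emit word-size operations without testing TARGET_64BIT
   at every call site; when options are processed they are presumably pointed
   at the DImode generators (e.g. gen_adddi3) for 64bit code and at the SImode
   ones (e.g. gen_addsi3) otherwise.  The actual assignments live elsewhere in
   this file.  */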
1816 /* Preferred alignment for stack boundary in bits. */
1817 unsigned int ix86_preferred_stack_boundary;
1819 /* Alignment for incoming stack boundary in bits specified at
1820 command line. */
1821 static unsigned int ix86_user_incoming_stack_boundary;
1823 /* Default alignment for incoming stack boundary in bits. */
1824 static unsigned int ix86_default_incoming_stack_boundary;
1826 /* Alignment for incoming stack boundary in bits. */
1827 unsigned int ix86_incoming_stack_boundary;
1829 /* The abi used by target. */
1830 enum calling_abi ix86_abi;
1832 /* Values 1-5: see jump.c */
1833 int ix86_branch_cost;
1835 /* Calling abi specific va_list type nodes. */
1836 static GTY(()) tree sysv_va_list_type_node;
1837 static GTY(()) tree ms_va_list_type_node;
1839 /* Variables which are this size or smaller are put in the data/bss
1840 or ldata/lbss sections. */
1842 int ix86_section_threshold = 65536;
1844 /* Prefix built by ASM_GENERATE_INTERNAL_LABEL. */
1845 char internal_label_prefix[16];
1846 int internal_label_prefix_len;
1848 /* Fence to use after loop using movnt. */
1849 tree x86_mfence;
1851 /* Register class used for passing a given 64bit part of the argument.
1852 These represent classes as documented by the PS ABI, with the exception
1853 of the SSESF and SSEDF classes, which are basically the SSE class, except
1854 that gcc uses an SF or DFmode move instead of DImode to avoid reformatting
1856 penalties. Similarly we play games with INTEGERSI_CLASS to use cheaper
1857 SImode moves whenever possible (the upper half then contains only padding). */
1858 enum x86_64_reg_class
1860 X86_64_NO_CLASS,
1861 X86_64_INTEGER_CLASS,
1862 X86_64_INTEGERSI_CLASS,
1863 X86_64_SSE_CLASS,
1864 X86_64_SSESF_CLASS,
1865 X86_64_SSEDF_CLASS,
1866 X86_64_SSEUP_CLASS,
1867 X86_64_X87_CLASS,
1868 X86_64_X87UP_CLASS,
1869 X86_64_COMPLEX_X87_CLASS,
1870 X86_64_MEMORY_CLASS
1873 #define MAX_CLASSES 4
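/* Illustrative example (an assumption based on the SysV x86-64 psABI, not
   part of the original comment): a structure such as

       struct s { double d; int i; };

   occupies two eightbytes; the first (the double) would be classified as
   X86_64_SSEDF_CLASS and the second (the int plus tail padding) as
   X86_64_INTEGERSI_CLASS, so the aggregate is passed in one SSE register
   and the low half of one integer register.  */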
1875 /* Table of constants used by fldpi, fldln2, etc.... */
1876 static REAL_VALUE_TYPE ext_80387_constants_table [5];
1877 static bool ext_80387_constants_init = 0;
1880 static struct machine_function * ix86_init_machine_status (void);
1881 static rtx ix86_function_value (const_tree, const_tree, bool);
1882 static int ix86_function_regparm (const_tree, const_tree);
1883 static void ix86_compute_frame_layout (struct ix86_frame *);
1884 static bool ix86_expand_vector_init_one_nonzero (bool, enum machine_mode,
1885 rtx, rtx, int);
1886 static void ix86_add_new_builtins (int);
1888 enum ix86_function_specific_strings
1890 IX86_FUNCTION_SPECIFIC_ARCH,
1891 IX86_FUNCTION_SPECIFIC_TUNE,
1892 IX86_FUNCTION_SPECIFIC_FPMATH,
1893 IX86_FUNCTION_SPECIFIC_MAX
1896 static char *ix86_target_string (int, int, const char *, const char *,
1897 const char *, bool);
1898 static void ix86_debug_options (void) ATTRIBUTE_UNUSED;
1899 static void ix86_function_specific_save (struct cl_target_option *);
1900 static void ix86_function_specific_restore (struct cl_target_option *);
1901 static void ix86_function_specific_print (FILE *, int,
1902 struct cl_target_option *);
1903 static bool ix86_valid_target_attribute_p (tree, tree, tree, int);
1904 static bool ix86_valid_target_attribute_inner_p (tree, char *[]);
1905 static bool ix86_can_inline_p (tree, tree);
1906 static void ix86_set_current_function (tree);
1908 static enum calling_abi ix86_function_abi (const_tree);
1911 /* The svr4 ABI for the i386 says that records and unions are returned
1912 in memory. */
1913 #ifndef DEFAULT_PCC_STRUCT_RETURN
1914 #define DEFAULT_PCC_STRUCT_RETURN 1
1915 #endif
1917 /* Whether -mtune= or -march= were specified */
1918 static int ix86_tune_defaulted;
1919 static int ix86_arch_specified;
1921 /* Bit flags that specify the ISA we are compiling for. */
1922 int ix86_isa_flags = TARGET_64BIT_DEFAULT | TARGET_SUBTARGET_ISA_DEFAULT;
1924 /* A mask of ix86_isa_flags that includes bit X if X
1925 was set or cleared on the command line. */
1926 static int ix86_isa_flags_explicit;
1928 /* Define a set of ISAs which are available when a given ISA is
1929 enabled. MMX and SSE ISAs are handled separately. */
1931 #define OPTION_MASK_ISA_MMX_SET OPTION_MASK_ISA_MMX
1932 #define OPTION_MASK_ISA_3DNOW_SET \
1933 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_MMX_SET)
1935 #define OPTION_MASK_ISA_SSE_SET OPTION_MASK_ISA_SSE
1936 #define OPTION_MASK_ISA_SSE2_SET \
1937 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE_SET)
1938 #define OPTION_MASK_ISA_SSE3_SET \
1939 (OPTION_MASK_ISA_SSE3 | OPTION_MASK_ISA_SSE2_SET)
1940 #define OPTION_MASK_ISA_SSSE3_SET \
1941 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE3_SET)
1942 #define OPTION_MASK_ISA_SSE4_1_SET \
1943 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSSE3_SET)
1944 #define OPTION_MASK_ISA_SSE4_2_SET \
1945 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_SSE4_1_SET)
1946 #define OPTION_MASK_ISA_AVX_SET \
1947 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_SSE4_2_SET)
1948 #define OPTION_MASK_ISA_FMA_SET \
1949 (OPTION_MASK_ISA_FMA | OPTION_MASK_ISA_AVX_SET)
1951 /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
1952 as -msse4.2. */
1953 #define OPTION_MASK_ISA_SSE4_SET OPTION_MASK_ISA_SSE4_2_SET
1955 #define OPTION_MASK_ISA_SSE4A_SET \
1956 (OPTION_MASK_ISA_SSE4A | OPTION_MASK_ISA_SSE3_SET)
1958 /* AES and PCLMUL need SSE2 because they use xmm registers */
1959 #define OPTION_MASK_ISA_AES_SET \
1960 (OPTION_MASK_ISA_AES | OPTION_MASK_ISA_SSE2_SET)
1961 #define OPTION_MASK_ISA_PCLMUL_SET \
1962 (OPTION_MASK_ISA_PCLMUL | OPTION_MASK_ISA_SSE2_SET)
1964 #define OPTION_MASK_ISA_ABM_SET \
1965 (OPTION_MASK_ISA_ABM | OPTION_MASK_ISA_POPCNT)
1967 #define OPTION_MASK_ISA_POPCNT_SET OPTION_MASK_ISA_POPCNT
1968 #define OPTION_MASK_ISA_CX16_SET OPTION_MASK_ISA_CX16
1969 #define OPTION_MASK_ISA_SAHF_SET OPTION_MASK_ISA_SAHF
1970 #define OPTION_MASK_ISA_MOVBE_SET OPTION_MASK_ISA_MOVBE
1971 #define OPTION_MASK_ISA_CRC32_SET OPTION_MASK_ISA_CRC32
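/* Example of how the _SET chains expand (a restatement of the definitions
   above, not new policy): enabling -msse4.1 turns on

       OPTION_MASK_ISA_SSE4_1_SET
         == SSE4_1 | SSSE3 | SSE3 | SSE2 | SSE

   so every ISA the option logically depends on is switched on as well.  */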
1973 /* Define a set of ISAs which aren't available when a given ISA is
1974 disabled. MMX and SSE ISAs are handled separately. */
1976 #define OPTION_MASK_ISA_MMX_UNSET \
1977 (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_3DNOW_UNSET)
1978 #define OPTION_MASK_ISA_3DNOW_UNSET \
1979 (OPTION_MASK_ISA_3DNOW | OPTION_MASK_ISA_3DNOW_A_UNSET)
1980 #define OPTION_MASK_ISA_3DNOW_A_UNSET OPTION_MASK_ISA_3DNOW_A
1982 #define OPTION_MASK_ISA_SSE_UNSET \
1983 (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2_UNSET)
1984 #define OPTION_MASK_ISA_SSE2_UNSET \
1985 (OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE3_UNSET)
1986 #define OPTION_MASK_ISA_SSE3_UNSET \
1987 (OPTION_MASK_ISA_SSE3 \
1988 | OPTION_MASK_ISA_SSSE3_UNSET \
1989 | OPTION_MASK_ISA_SSE4A_UNSET )
1990 #define OPTION_MASK_ISA_SSSE3_UNSET \
1991 (OPTION_MASK_ISA_SSSE3 | OPTION_MASK_ISA_SSE4_1_UNSET)
1992 #define OPTION_MASK_ISA_SSE4_1_UNSET \
1993 (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_SSE4_2_UNSET)
1994 #define OPTION_MASK_ISA_SSE4_2_UNSET \
1995 (OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_AVX_UNSET )
1996 #define OPTION_MASK_ISA_AVX_UNSET \
1997 (OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_FMA_UNSET)
1998 #define OPTION_MASK_ISA_FMA_UNSET OPTION_MASK_ISA_FMA
2000 /* SSE4 includes both SSE4.1 and SSE4.2. -mno-sse4 should be the same
2001 as -mno-sse4.1. */
2002 #define OPTION_MASK_ISA_SSE4_UNSET OPTION_MASK_ISA_SSE4_1_UNSET
2004 #define OPTION_MASK_ISA_SSE4A_UNSET \
2005 (OPTION_MASK_ISA_SSE4A)
2006 #define OPTION_MASK_ISA_AES_UNSET OPTION_MASK_ISA_AES
2007 #define OPTION_MASK_ISA_PCLMUL_UNSET OPTION_MASK_ISA_PCLMUL
2008 #define OPTION_MASK_ISA_ABM_UNSET OPTION_MASK_ISA_ABM
2009 #define OPTION_MASK_ISA_POPCNT_UNSET OPTION_MASK_ISA_POPCNT
2010 #define OPTION_MASK_ISA_CX16_UNSET OPTION_MASK_ISA_CX16
2011 #define OPTION_MASK_ISA_SAHF_UNSET OPTION_MASK_ISA_SAHF
2012 #define OPTION_MASK_ISA_MOVBE_UNSET OPTION_MASK_ISA_MOVBE
2013 #define OPTION_MASK_ISA_CRC32_UNSET OPTION_MASK_ISA_CRC32
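/* Conversely, the _UNSET chains propagate through everything that depends on
   the ISA being disabled (again derived from the definitions above):
   -mno-sse3 clears

       OPTION_MASK_ISA_SSE3_UNSET
         == SSE3 | SSSE3 | SSE4_1 | SSE4_2 | AVX | FMA | SSE4A

   so nothing that requires SSE3 can remain enabled.  */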
2015 /* Vectorization library interface and handlers. */
2016 tree (*ix86_veclib_handler)(enum built_in_function, tree, tree) = NULL;
2017 static tree ix86_veclibabi_svml (enum built_in_function, tree, tree);
2018 static tree ix86_veclibabi_acml (enum built_in_function, tree, tree);
2020 /* Processor target table, indexed by processor number */
2021 struct ptt
2023 const struct processor_costs *cost; /* Processor costs */
2024 const int align_loop; /* Default alignments. */
2025 const int align_loop_max_skip;
2026 const int align_jump;
2027 const int align_jump_max_skip;
2028 const int align_func;
2031 static const struct ptt processor_target_table[PROCESSOR_max] =
2033 {&i386_cost, 4, 3, 4, 3, 4},
2034 {&i486_cost, 16, 15, 16, 15, 16},
2035 {&pentium_cost, 16, 7, 16, 7, 16},
2036 {&pentiumpro_cost, 16, 15, 16, 10, 16},
2037 {&geode_cost, 0, 0, 0, 0, 0},
2038 {&k6_cost, 32, 7, 32, 7, 32},
2039 {&athlon_cost, 16, 7, 16, 7, 16},
2040 {&pentium4_cost, 0, 0, 0, 0, 0},
2041 {&k8_cost, 16, 7, 16, 7, 16},
2042 {&nocona_cost, 0, 0, 0, 0, 0},
2043 {&core2_cost, 16, 10, 16, 10, 16},
2044 {&generic32_cost, 16, 7, 16, 7, 16},
2045 {&generic64_cost, 16, 10, 16, 10, 16},
2046 {&amdfam10_cost, 32, 24, 32, 7, 32},
2047 {&atom_cost, 16, 7, 16, 7, 16}
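/* Reading of the table above (illustration only): the row using
   &amdfam10_cost requests 32 byte loop and jump alignment with max skips of
   24 and 7 respectively, plus 32 byte function alignment; override_options
   copies these defaults into align_loops/align_jumps/align_functions when
   the user passed no explicit -falign-* options.  */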
2050 static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
2052 "generic",
2053 "i386",
2054 "i486",
2055 "pentium",
2056 "pentium-mmx",
2057 "pentiumpro",
2058 "pentium2",
2059 "pentium3",
2060 "pentium4",
2061 "pentium-m",
2062 "prescott",
2063 "nocona",
2064 "core2",
2065 "atom",
2066 "geode",
2067 "k6",
2068 "k6-2",
2069 "k6-3",
2070 "athlon",
2071 "athlon-4",
2072 "k8",
2073 "amdfam10"
2076 /* Implement TARGET_HANDLE_OPTION. */
2078 static bool
2079 ix86_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED, int value)
2081 switch (code)
2083 case OPT_mmmx:
2084 if (value)
2086 ix86_isa_flags |= OPTION_MASK_ISA_MMX_SET;
2087 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_SET;
2089 else
2091 ix86_isa_flags &= ~OPTION_MASK_ISA_MMX_UNSET;
2092 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MMX_UNSET;
2094 return true;
2096 case OPT_m3dnow:
2097 if (value)
2099 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_SET;
2100 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_SET;
2102 else
2104 ix86_isa_flags &= ~OPTION_MASK_ISA_3DNOW_UNSET;
2105 ix86_isa_flags_explicit |= OPTION_MASK_ISA_3DNOW_UNSET;
2107 return true;
2109 case OPT_m3dnowa:
2110 return false;
2112 case OPT_msse:
2113 if (value)
2115 ix86_isa_flags |= OPTION_MASK_ISA_SSE_SET;
2116 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_SET;
2118 else
2120 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE_UNSET;
2121 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE_UNSET;
2123 return true;
2125 case OPT_msse2:
2126 if (value)
2128 ix86_isa_flags |= OPTION_MASK_ISA_SSE2_SET;
2129 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_SET;
2131 else
2133 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE2_UNSET;
2134 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE2_UNSET;
2136 return true;
2138 case OPT_msse3:
2139 if (value)
2141 ix86_isa_flags |= OPTION_MASK_ISA_SSE3_SET;
2142 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_SET;
2144 else
2146 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE3_UNSET;
2147 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE3_UNSET;
2149 return true;
2151 case OPT_mssse3:
2152 if (value)
2154 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3_SET;
2155 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_SET;
2157 else
2159 ix86_isa_flags &= ~OPTION_MASK_ISA_SSSE3_UNSET;
2160 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSSE3_UNSET;
2162 return true;
2164 case OPT_msse4_1:
2165 if (value)
2167 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1_SET;
2168 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_SET;
2170 else
2172 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_1_UNSET;
2173 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_1_UNSET;
2175 return true;
2177 case OPT_msse4_2:
2178 if (value)
2180 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2_SET;
2181 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_SET;
2183 else
2185 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_2_UNSET;
2186 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_2_UNSET;
2188 return true;
2190 case OPT_mavx:
2191 if (value)
2193 ix86_isa_flags |= OPTION_MASK_ISA_AVX_SET;
2194 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_SET;
2196 else
2198 ix86_isa_flags &= ~OPTION_MASK_ISA_AVX_UNSET;
2199 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AVX_UNSET;
2201 return true;
2203 case OPT_mfma:
2204 if (value)
2206 ix86_isa_flags |= OPTION_MASK_ISA_FMA_SET;
2207 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_SET;
2209 else
2211 ix86_isa_flags &= ~OPTION_MASK_ISA_FMA_UNSET;
2212 ix86_isa_flags_explicit |= OPTION_MASK_ISA_FMA_UNSET;
2214 return true;
2216 case OPT_msse4:
2217 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_SET;
2218 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_SET;
2219 return true;
2221 case OPT_mno_sse4:
2222 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4_UNSET;
2223 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4_UNSET;
2224 return true;
2226 case OPT_msse4a:
2227 if (value)
2229 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A_SET;
2230 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_SET;
2232 else
2234 ix86_isa_flags &= ~OPTION_MASK_ISA_SSE4A_UNSET;
2235 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SSE4A_UNSET;
2237 return true;
2239 case OPT_mabm:
2240 if (value)
2242 ix86_isa_flags |= OPTION_MASK_ISA_ABM_SET;
2243 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_SET;
2245 else
2247 ix86_isa_flags &= ~OPTION_MASK_ISA_ABM_UNSET;
2248 ix86_isa_flags_explicit |= OPTION_MASK_ISA_ABM_UNSET;
2250 return true;
2252 case OPT_mpopcnt:
2253 if (value)
2255 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT_SET;
2256 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_SET;
2258 else
2260 ix86_isa_flags &= ~OPTION_MASK_ISA_POPCNT_UNSET;
2261 ix86_isa_flags_explicit |= OPTION_MASK_ISA_POPCNT_UNSET;
2263 return true;
2265 case OPT_msahf:
2266 if (value)
2268 ix86_isa_flags |= OPTION_MASK_ISA_SAHF_SET;
2269 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_SET;
2271 else
2273 ix86_isa_flags &= ~OPTION_MASK_ISA_SAHF_UNSET;
2274 ix86_isa_flags_explicit |= OPTION_MASK_ISA_SAHF_UNSET;
2276 return true;
2278 case OPT_mcx16:
2279 if (value)
2281 ix86_isa_flags |= OPTION_MASK_ISA_CX16_SET;
2282 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_SET;
2284 else
2286 ix86_isa_flags &= ~OPTION_MASK_ISA_CX16_UNSET;
2287 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CX16_UNSET;
2289 return true;
2291 case OPT_mmovbe:
2292 if (value)
2294 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE_SET;
2295 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_SET;
2297 else
2299 ix86_isa_flags &= ~OPTION_MASK_ISA_MOVBE_UNSET;
2300 ix86_isa_flags_explicit |= OPTION_MASK_ISA_MOVBE_UNSET;
2302 return true;
2304 case OPT_mcrc32:
2305 if (value)
2307 ix86_isa_flags |= OPTION_MASK_ISA_CRC32_SET;
2308 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_SET;
2310 else
2312 ix86_isa_flags &= ~OPTION_MASK_ISA_CRC32_UNSET;
2313 ix86_isa_flags_explicit |= OPTION_MASK_ISA_CRC32_UNSET;
2315 return true;
2317 case OPT_maes:
2318 if (value)
2320 ix86_isa_flags |= OPTION_MASK_ISA_AES_SET;
2321 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_SET;
2323 else
2325 ix86_isa_flags &= ~OPTION_MASK_ISA_AES_UNSET;
2326 ix86_isa_flags_explicit |= OPTION_MASK_ISA_AES_UNSET;
2328 return true;
2330 case OPT_mpclmul:
2331 if (value)
2333 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL_SET;
2334 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_SET;
2336 else
2338 ix86_isa_flags &= ~OPTION_MASK_ISA_PCLMUL_UNSET;
2339 ix86_isa_flags_explicit |= OPTION_MASK_ISA_PCLMUL_UNSET;
2341 return true;
2343 default:
2344 return true;
2348 /* Return a string that documents the current -m options. The caller is
2349 responsible for freeing the string. */
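/* For illustration (a hypothetical sketch, not part of the original comment):
   for a plain 64-bit -march=core2 -mfpmath=sse compile the returned string
   might read roughly

       "-march=core2 -mtune=core2 -m64 -mssse3 -msse3 -msse2 -msse -mmmx
        ... -mfpmath=sse"

   followed by whichever of the flag options below happen to be set.  */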
2351 static char *
2352 ix86_target_string (int isa, int flags, const char *arch, const char *tune,
2353 const char *fpmath, bool add_nl_p)
2355 struct ix86_target_opts
2357 const char *option; /* option string */
2358 int mask; /* isa mask options */
2361 /* This table is ordered so that options like -msse4.2 that imply
2362 preceding options are matched first. */
2363 static struct ix86_target_opts isa_opts[] =
2365 { "-m64", OPTION_MASK_ISA_64BIT },
2366 { "-msse4a", OPTION_MASK_ISA_SSE4A },
2367 { "-msse4.2", OPTION_MASK_ISA_SSE4_2 },
2368 { "-msse4.1", OPTION_MASK_ISA_SSE4_1 },
2369 { "-mssse3", OPTION_MASK_ISA_SSSE3 },
2370 { "-msse3", OPTION_MASK_ISA_SSE3 },
2371 { "-msse2", OPTION_MASK_ISA_SSE2 },
2372 { "-msse", OPTION_MASK_ISA_SSE },
2373 { "-m3dnow", OPTION_MASK_ISA_3DNOW },
2374 { "-m3dnowa", OPTION_MASK_ISA_3DNOW_A },
2375 { "-mmmx", OPTION_MASK_ISA_MMX },
2376 { "-mabm", OPTION_MASK_ISA_ABM },
2377 { "-mpopcnt", OPTION_MASK_ISA_POPCNT },
2378 { "-mmovbe", OPTION_MASK_ISA_MOVBE },
2379 { "-mcrc32", OPTION_MASK_ISA_CRC32 },
2380 { "-maes", OPTION_MASK_ISA_AES },
2381 { "-mpclmul", OPTION_MASK_ISA_PCLMUL },
2384 /* Flag options. */
2385 static struct ix86_target_opts flag_opts[] =
2387 { "-m128bit-long-double", MASK_128BIT_LONG_DOUBLE },
2388 { "-m80387", MASK_80387 },
2389 { "-maccumulate-outgoing-args", MASK_ACCUMULATE_OUTGOING_ARGS },
2390 { "-malign-double", MASK_ALIGN_DOUBLE },
2391 { "-mcld", MASK_CLD },
2392 { "-mfp-ret-in-387", MASK_FLOAT_RETURNS },
2393 { "-mieee-fp", MASK_IEEE_FP },
2394 { "-minline-all-stringops", MASK_INLINE_ALL_STRINGOPS },
2395 { "-minline-stringops-dynamically", MASK_INLINE_STRINGOPS_DYNAMICALLY },
2396 { "-mms-bitfields", MASK_MS_BITFIELD_LAYOUT },
2397 { "-mno-align-stringops", MASK_NO_ALIGN_STRINGOPS },
2398 { "-mno-fancy-math-387", MASK_NO_FANCY_MATH_387 },
2399 { "-mno-push-args", MASK_NO_PUSH_ARGS },
2400 { "-mno-red-zone", MASK_NO_RED_ZONE },
2401 { "-momit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER },
2402 { "-mrecip", MASK_RECIP },
2403 { "-mrtd", MASK_RTD },
2404 { "-msseregparm", MASK_SSEREGPARM },
2405 { "-mstack-arg-probe", MASK_STACK_PROBE },
2406 { "-mtls-direct-seg-refs", MASK_TLS_DIRECT_SEG_REFS },
2409 const char *opts[ARRAY_SIZE (isa_opts) + ARRAY_SIZE (flag_opts) + 6][2];
2411 char isa_other[40];
2412 char target_other[40];
2413 unsigned num = 0;
2414 unsigned i, j;
2415 char *ret;
2416 char *ptr;
2417 size_t len;
2418 size_t line_len;
2419 size_t sep_len;
2421 memset (opts, '\0', sizeof (opts));
2423 /* Add -march= option. */
2424 if (arch)
2426 opts[num][0] = "-march=";
2427 opts[num++][1] = arch;
2430 /* Add -mtune= option. */
2431 if (tune)
2433 opts[num][0] = "-mtune=";
2434 opts[num++][1] = tune;
2437 /* Pick out the options in isa options. */
2438 for (i = 0; i < ARRAY_SIZE (isa_opts); i++)
2440 if ((isa & isa_opts[i].mask) != 0)
2442 opts[num++][0] = isa_opts[i].option;
2443 isa &= ~ isa_opts[i].mask;
2447 if (isa && add_nl_p)
2449 opts[num++][0] = isa_other;
2450 sprintf (isa_other, "(other isa: 0x%x)", isa);
2453 /* Add flag options. */
2454 for (i = 0; i < ARRAY_SIZE (flag_opts); i++)
2456 if ((flags & flag_opts[i].mask) != 0)
2458 opts[num++][0] = flag_opts[i].option;
2459 flags &= ~ flag_opts[i].mask;
2463 if (flags && add_nl_p)
2465 opts[num++][0] = target_other;
2466 sprintf (target_other, "(other flags: 0x%x)", flags);
2469 /* Add -fpmath= option. */
2470 if (fpmath)
2472 opts[num][0] = "-mfpmath=";
2473 opts[num++][1] = fpmath;
2476 /* Any options? */
2477 if (num == 0)
2478 return NULL;
2480 gcc_assert (num < ARRAY_SIZE (opts));
2482 /* Size the string. */
2483 len = 0;
2484 sep_len = (add_nl_p) ? 3 : 1;
2485 for (i = 0; i < num; i++)
2487 len += sep_len;
2488 for (j = 0; j < 2; j++)
2489 if (opts[i][j])
2490 len += strlen (opts[i][j]);
2493 /* Build the string. */
2494 ret = ptr = (char *) xmalloc (len);
2495 line_len = 0;
2497 for (i = 0; i < num; i++)
2499 size_t len2[2];
2501 for (j = 0; j < 2; j++)
2502 len2[j] = (opts[i][j]) ? strlen (opts[i][j]) : 0;
2504 if (i != 0)
2506 *ptr++ = ' ';
2507 line_len++;
2509 if (add_nl_p && line_len + len2[0] + len2[1] > 70)
2511 *ptr++ = '\\';
2512 *ptr++ = '\n';
2513 line_len = 0;
2517 for (j = 0; j < 2; j++)
2518 if (opts[i][j])
2520 memcpy (ptr, opts[i][j], len2[j]);
2521 ptr += len2[j];
2522 line_len += len2[j];
2526 *ptr = '\0';
2527 gcc_assert (ret + len >= ptr);
2529 return ret;
2532 /* Function that is callable from the debugger to print the current
2533 options. */
2534 void
2535 ix86_debug_options (void)
2537 char *opts = ix86_target_string (ix86_isa_flags, target_flags,
2538 ix86_arch_string, ix86_tune_string,
2539 ix86_fpmath_string, true);
2541 if (opts)
2543 fprintf (stderr, "%s\n\n", opts);
2544 free (opts);
2546 else
2547 fputs ("<no options>\n\n", stderr);
2549 return;
2552 /* Sometimes certain combinations of command options do not make
2553 sense on a particular target machine. You can define a macro
2554 `OVERRIDE_OPTIONS' to take account of this. This macro, if
2555 defined, is executed once just after all the command options have
2556 been parsed.
2558 Don't use this macro to turn on various extra optimizations for
2559 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
2561 void
2562 override_options (bool main_args_p)
2564 int i;
2565 unsigned int ix86_arch_mask, ix86_tune_mask;
2566 const char *prefix;
2567 const char *suffix;
2568 const char *sw;
2570 /* Comes from final.c -- no real reason to change it. */
2571 #define MAX_CODE_ALIGN 16
2573 enum pta_flags
2575 PTA_SSE = 1 << 0,
2576 PTA_SSE2 = 1 << 1,
2577 PTA_SSE3 = 1 << 2,
2578 PTA_MMX = 1 << 3,
2579 PTA_PREFETCH_SSE = 1 << 4,
2580 PTA_3DNOW = 1 << 5,
2581 PTA_3DNOW_A = 1 << 6,
2582 PTA_64BIT = 1 << 7,
2583 PTA_SSSE3 = 1 << 8,
2584 PTA_CX16 = 1 << 9,
2585 PTA_POPCNT = 1 << 10,
2586 PTA_ABM = 1 << 11,
2587 PTA_SSE4A = 1 << 12,
2588 PTA_NO_SAHF = 1 << 13,
2589 PTA_SSE4_1 = 1 << 14,
2590 PTA_SSE4_2 = 1 << 15,
2591 PTA_AES = 1 << 16,
2592 PTA_PCLMUL = 1 << 17,
2593 PTA_AVX = 1 << 18,
2594 PTA_FMA = 1 << 19,
2595 PTA_MOVBE = 1 << 20
2598 static struct pta
2600 const char *const name; /* processor name or nickname. */
2601 const enum processor_type processor;
2602 const enum attr_cpu schedule;
2603 const unsigned /*enum pta_flags*/ flags;
2605 const processor_alias_table[] =
2607 {"i386", PROCESSOR_I386, CPU_NONE, 0},
2608 {"i486", PROCESSOR_I486, CPU_NONE, 0},
2609 {"i586", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2610 {"pentium", PROCESSOR_PENTIUM, CPU_PENTIUM, 0},
2611 {"pentium-mmx", PROCESSOR_PENTIUM, CPU_PENTIUM, PTA_MMX},
2612 {"winchip-c6", PROCESSOR_I486, CPU_NONE, PTA_MMX},
2613 {"winchip2", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2614 {"c3", PROCESSOR_I486, CPU_NONE, PTA_MMX | PTA_3DNOW},
2615 {"c3-2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX | PTA_SSE},
2616 {"i686", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2617 {"pentiumpro", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, 0},
2618 {"pentium2", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO, PTA_MMX},
2619 {"pentium3", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2620 PTA_MMX | PTA_SSE},
2621 {"pentium3m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2622 PTA_MMX | PTA_SSE},
2623 {"pentium-m", PROCESSOR_PENTIUMPRO, CPU_PENTIUMPRO,
2624 PTA_MMX | PTA_SSE | PTA_SSE2},
2625 {"pentium4", PROCESSOR_PENTIUM4, CPU_NONE,
2626 PTA_MMX |PTA_SSE | PTA_SSE2},
2627 {"pentium4m", PROCESSOR_PENTIUM4, CPU_NONE,
2628 PTA_MMX | PTA_SSE | PTA_SSE2},
2629 {"prescott", PROCESSOR_NOCONA, CPU_NONE,
2630 PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3},
2631 {"nocona", PROCESSOR_NOCONA, CPU_NONE,
2632 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2633 | PTA_CX16 | PTA_NO_SAHF},
2634 {"core2", PROCESSOR_CORE2, CPU_CORE2,
2635 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2636 | PTA_SSSE3 | PTA_CX16},
2637 {"atom", PROCESSOR_ATOM, CPU_ATOM,
2638 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
2639 | PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
2640 {"geode", PROCESSOR_GEODE, CPU_GEODE,
2641 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A |PTA_PREFETCH_SSE},
2642 {"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
2643 {"k6-2", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2644 {"k6-3", PROCESSOR_K6, CPU_K6, PTA_MMX | PTA_3DNOW},
2645 {"athlon", PROCESSOR_ATHLON, CPU_ATHLON,
2646 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2647 {"athlon-tbird", PROCESSOR_ATHLON, CPU_ATHLON,
2648 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
2649 {"athlon-4", PROCESSOR_ATHLON, CPU_ATHLON,
2650 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2651 {"athlon-xp", PROCESSOR_ATHLON, CPU_ATHLON,
2652 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2653 {"athlon-mp", PROCESSOR_ATHLON, CPU_ATHLON,
2654 PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE},
2655 {"x86-64", PROCESSOR_K8, CPU_K8,
2656 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_NO_SAHF},
2657 {"k8", PROCESSOR_K8, CPU_K8,
2658 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2659 | PTA_SSE2 | PTA_NO_SAHF},
2660 {"k8-sse3", PROCESSOR_K8, CPU_K8,
2661 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2662 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2663 {"opteron", PROCESSOR_K8, CPU_K8,
2664 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2665 | PTA_SSE2 | PTA_NO_SAHF},
2666 {"opteron-sse3", PROCESSOR_K8, CPU_K8,
2667 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2668 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2669 {"athlon64", PROCESSOR_K8, CPU_K8,
2670 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2671 | PTA_SSE2 | PTA_NO_SAHF},
2672 {"athlon64-sse3", PROCESSOR_K8, CPU_K8,
2673 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2674 | PTA_SSE2 | PTA_SSE3 | PTA_NO_SAHF},
2675 {"athlon-fx", PROCESSOR_K8, CPU_K8,
2676 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2677 | PTA_SSE2 | PTA_NO_SAHF},
2678 {"amdfam10", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2679 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2680 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2681 {"barcelona", PROCESSOR_AMDFAM10, CPU_AMDFAM10,
2682 PTA_64BIT | PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_SSE
2683 | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM},
2684 {"generic32", PROCESSOR_GENERIC32, CPU_PENTIUMPRO,
2685 0 /* flags are only used for -march switch. */ },
2686 {"generic64", PROCESSOR_GENERIC64, CPU_GENERIC64,
2687 PTA_64BIT /* flags are only used for -march switch. */ },
2690 int const pta_size = ARRAY_SIZE (processor_alias_table);
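  /* Usage sketch (restating the loop further down, not new behaviour): with
     -march=core2 the table row above supplies PROCESSOR_CORE2, CPU_CORE2 and
     the PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
     | PTA_CX16 bits, which the loop below translates into the corresponding
     OPTION_MASK_ISA_* bits in ix86_isa_flags unless the user overrode them
     explicitly on the command line.  */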
2692 /* Set up prefix/suffix so the error messages refer to either the command
2693 line argument, or the attribute(target). */
2694 if (main_args_p)
2696 prefix = "-m";
2697 suffix = "";
2698 sw = "switch";
2700 else
2702 prefix = "option(\"";
2703 suffix = "\")";
2704 sw = "attribute";
2707 #ifdef SUBTARGET_OVERRIDE_OPTIONS
2708 SUBTARGET_OVERRIDE_OPTIONS;
2709 #endif
2711 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
2712 SUBSUBTARGET_OVERRIDE_OPTIONS;
2713 #endif
2715 /* -fPIC is the default for x86_64. */
2716 if (TARGET_MACHO && TARGET_64BIT)
2717 flag_pic = 2;
2719 /* Set the default values for switches whose default depends on TARGET_64BIT
2720 in case they weren't overwritten by command line options. */
2721 if (TARGET_64BIT)
2723 /* Mach-O doesn't support omitting the frame pointer for now. */
2724 if (flag_omit_frame_pointer == 2)
2725 flag_omit_frame_pointer = (TARGET_MACHO ? 0 : 1);
2726 if (flag_asynchronous_unwind_tables == 2)
2727 flag_asynchronous_unwind_tables = 1;
2728 if (flag_pcc_struct_return == 2)
2729 flag_pcc_struct_return = 0;
2731 else
2733 if (flag_omit_frame_pointer == 2)
2734 flag_omit_frame_pointer = 0;
2735 if (flag_asynchronous_unwind_tables == 2)
2736 flag_asynchronous_unwind_tables = 0;
2737 if (flag_pcc_struct_return == 2)
2738 flag_pcc_struct_return = DEFAULT_PCC_STRUCT_RETURN;
2741 /* Need to check -mtune=generic first. */
2742 if (ix86_tune_string)
2744 if (!strcmp (ix86_tune_string, "generic")
2745 || !strcmp (ix86_tune_string, "i686")
2746 /* As special support for cross compilers we read -mtune=native
2747 as -mtune=generic. With native compilers we won't see the
2748 -mtune=native, as it was changed by the driver. */
2749 || !strcmp (ix86_tune_string, "native"))
2751 if (TARGET_64BIT)
2752 ix86_tune_string = "generic64";
2753 else
2754 ix86_tune_string = "generic32";
2756 /* If this call is for setting the option attribute, allow the
2757 generic32/generic64 that was previously set. */
2758 else if (!main_args_p
2759 && (!strcmp (ix86_tune_string, "generic32")
2760 || !strcmp (ix86_tune_string, "generic64")))
2762 else if (!strncmp (ix86_tune_string, "generic", 7))
2763 error ("bad value (%s) for %stune=%s %s",
2764 ix86_tune_string, prefix, suffix, sw);
2766 else
2768 if (ix86_arch_string)
2769 ix86_tune_string = ix86_arch_string;
2770 if (!ix86_tune_string)
2772 ix86_tune_string = cpu_names[TARGET_CPU_DEFAULT];
2773 ix86_tune_defaulted = 1;
2776 /* ix86_tune_string is set to ix86_arch_string or defaulted. We
2777 need to use a sensible tune option. */
2778 if (!strcmp (ix86_tune_string, "generic")
2779 || !strcmp (ix86_tune_string, "x86-64")
2780 || !strcmp (ix86_tune_string, "i686"))
2782 if (TARGET_64BIT)
2783 ix86_tune_string = "generic64";
2784 else
2785 ix86_tune_string = "generic32";
2788 if (ix86_stringop_string)
2790 if (!strcmp (ix86_stringop_string, "rep_byte"))
2791 stringop_alg = rep_prefix_1_byte;
2792 else if (!strcmp (ix86_stringop_string, "libcall"))
2793 stringop_alg = libcall;
2794 else if (!strcmp (ix86_stringop_string, "rep_4byte"))
2795 stringop_alg = rep_prefix_4_byte;
2796 else if (!strcmp (ix86_stringop_string, "rep_8byte")
2797 && TARGET_64BIT)
2798 /* rep; movq isn't available in 32-bit code. */
2799 stringop_alg = rep_prefix_8_byte;
2800 else if (!strcmp (ix86_stringop_string, "byte_loop"))
2801 stringop_alg = loop_1_byte;
2802 else if (!strcmp (ix86_stringop_string, "loop"))
2803 stringop_alg = loop;
2804 else if (!strcmp (ix86_stringop_string, "unrolled_loop"))
2805 stringop_alg = unrolled_loop;
2806 else
2807 error ("bad value (%s) for %sstringop-strategy=%s %s",
2808 ix86_stringop_string, prefix, suffix, sw);
2810 if (!strcmp (ix86_tune_string, "x86-64"))
2811 warning (OPT_Wdeprecated, "%stune=x86-64%s is deprecated. Use "
2812 "%stune=k8%s or %stune=generic%s instead as appropriate.",
2813 prefix, suffix, prefix, suffix, prefix, suffix);
2815 if (!ix86_arch_string)
2816 ix86_arch_string = TARGET_64BIT ? "x86-64" : "i386";
2817 else
2818 ix86_arch_specified = 1;
2820 if (!strcmp (ix86_arch_string, "generic"))
2821 error ("generic CPU can be used only for %stune=%s %s",
2822 prefix, suffix, sw);
2823 if (!strncmp (ix86_arch_string, "generic", 7))
2824 error ("bad value (%s) for %sarch=%s %s",
2825 ix86_arch_string, prefix, suffix, sw);
2827 /* Validate -mabi= value. */
2828 if (ix86_abi_string)
2830 if (strcmp (ix86_abi_string, "sysv") == 0)
2831 ix86_abi = SYSV_ABI;
2832 else if (strcmp (ix86_abi_string, "ms") == 0)
2833 ix86_abi = MS_ABI;
2834 else
2835 error ("unknown ABI (%s) for %sabi=%s %s",
2836 ix86_abi_string, prefix, suffix, sw);
2838 else
2839 ix86_abi = DEFAULT_ABI;
2841 if (ix86_cmodel_string != 0)
2843 if (!strcmp (ix86_cmodel_string, "small"))
2844 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2845 else if (!strcmp (ix86_cmodel_string, "medium"))
2846 ix86_cmodel = flag_pic ? CM_MEDIUM_PIC : CM_MEDIUM;
2847 else if (!strcmp (ix86_cmodel_string, "large"))
2848 ix86_cmodel = flag_pic ? CM_LARGE_PIC : CM_LARGE;
2849 else if (flag_pic)
2850 error ("code model %s does not support PIC mode", ix86_cmodel_string);
2851 else if (!strcmp (ix86_cmodel_string, "32"))
2852 ix86_cmodel = CM_32;
2853 else if (!strcmp (ix86_cmodel_string, "kernel") && !flag_pic)
2854 ix86_cmodel = CM_KERNEL;
2855 else
2856 error ("bad value (%s) for %scmodel=%s %s",
2857 ix86_cmodel_string, prefix, suffix, sw);
2859 else
2861 /* For TARGET_64BIT and MS_ABI, force pic on, in order to enable the
2862 use of rip-relative addressing. This eliminates fixups that
2863 would otherwise be needed if this object is to be placed in a
2864 DLL, and is essentially just as efficient as direct addressing. */
2865 if (TARGET_64BIT && DEFAULT_ABI == MS_ABI)
2866 ix86_cmodel = CM_SMALL_PIC, flag_pic = 1;
2867 else if (TARGET_64BIT)
2868 ix86_cmodel = flag_pic ? CM_SMALL_PIC : CM_SMALL;
2869 else
2870 ix86_cmodel = CM_32;
2872 if (ix86_asm_string != 0)
2874 if (! TARGET_MACHO
2875 && !strcmp (ix86_asm_string, "intel"))
2876 ix86_asm_dialect = ASM_INTEL;
2877 else if (!strcmp (ix86_asm_string, "att"))
2878 ix86_asm_dialect = ASM_ATT;
2879 else
2880 error ("bad value (%s) for %sasm=%s %s",
2881 ix86_asm_string, prefix, suffix, sw);
2883 if ((TARGET_64BIT == 0) != (ix86_cmodel == CM_32))
2884 error ("code model %qs not supported in the %s bit mode",
2885 ix86_cmodel_string, TARGET_64BIT ? "64" : "32");
2886 if ((TARGET_64BIT != 0) != ((ix86_isa_flags & OPTION_MASK_ISA_64BIT) != 0))
2887 sorry ("%i-bit mode not compiled in",
2888 (ix86_isa_flags & OPTION_MASK_ISA_64BIT) ? 64 : 32);
2890 for (i = 0; i < pta_size; i++)
2891 if (! strcmp (ix86_arch_string, processor_alias_table[i].name))
2893 ix86_schedule = processor_alias_table[i].schedule;
2894 ix86_arch = processor_alias_table[i].processor;
2895 /* Default cpu tuning to the architecture. */
2896 ix86_tune = ix86_arch;
2898 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2899 error ("CPU you selected does not support x86-64 "
2900 "instruction set");
2902 if (processor_alias_table[i].flags & PTA_MMX
2903 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MMX))
2904 ix86_isa_flags |= OPTION_MASK_ISA_MMX;
2905 if (processor_alias_table[i].flags & PTA_3DNOW
2906 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW))
2907 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW;
2908 if (processor_alias_table[i].flags & PTA_3DNOW_A
2909 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_3DNOW_A))
2910 ix86_isa_flags |= OPTION_MASK_ISA_3DNOW_A;
2911 if (processor_alias_table[i].flags & PTA_SSE
2912 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE))
2913 ix86_isa_flags |= OPTION_MASK_ISA_SSE;
2914 if (processor_alias_table[i].flags & PTA_SSE2
2915 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE2))
2916 ix86_isa_flags |= OPTION_MASK_ISA_SSE2;
2917 if (processor_alias_table[i].flags & PTA_SSE3
2918 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE3))
2919 ix86_isa_flags |= OPTION_MASK_ISA_SSE3;
2920 if (processor_alias_table[i].flags & PTA_SSSE3
2921 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSSE3))
2922 ix86_isa_flags |= OPTION_MASK_ISA_SSSE3;
2923 if (processor_alias_table[i].flags & PTA_SSE4_1
2924 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_1))
2925 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_1;
2926 if (processor_alias_table[i].flags & PTA_SSE4_2
2927 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4_2))
2928 ix86_isa_flags |= OPTION_MASK_ISA_SSE4_2;
2929 if (processor_alias_table[i].flags & PTA_AVX
2930 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AVX))
2931 ix86_isa_flags |= OPTION_MASK_ISA_AVX;
2932 if (processor_alias_table[i].flags & PTA_FMA
2933 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_FMA))
2934 ix86_isa_flags |= OPTION_MASK_ISA_FMA;
2935 if (processor_alias_table[i].flags & PTA_SSE4A
2936 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SSE4A))
2937 ix86_isa_flags |= OPTION_MASK_ISA_SSE4A;
2938 if (processor_alias_table[i].flags & PTA_ABM
2939 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_ABM))
2940 ix86_isa_flags |= OPTION_MASK_ISA_ABM;
2941 if (processor_alias_table[i].flags & PTA_CX16
2942 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_CX16))
2943 ix86_isa_flags |= OPTION_MASK_ISA_CX16;
2944 if (processor_alias_table[i].flags & (PTA_POPCNT | PTA_ABM)
2945 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_POPCNT))
2946 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT;
2947 if (!(TARGET_64BIT && (processor_alias_table[i].flags & PTA_NO_SAHF))
2948 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_SAHF))
2949 ix86_isa_flags |= OPTION_MASK_ISA_SAHF;
2950 if (processor_alias_table[i].flags & PTA_MOVBE
2951 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_MOVBE))
2952 ix86_isa_flags |= OPTION_MASK_ISA_MOVBE;
2953 if (processor_alias_table[i].flags & PTA_AES
2954 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_AES))
2955 ix86_isa_flags |= OPTION_MASK_ISA_AES;
2956 if (processor_alias_table[i].flags & PTA_PCLMUL
2957 && !(ix86_isa_flags_explicit & OPTION_MASK_ISA_PCLMUL))
2958 ix86_isa_flags |= OPTION_MASK_ISA_PCLMUL;
2959 if (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE))
2960 x86_prefetch_sse = true;
2962 break;
2965 if (i == pta_size)
2966 error ("bad value (%s) for %sarch=%s %s",
2967 ix86_arch_string, prefix, suffix, sw);
2969 ix86_arch_mask = 1u << ix86_arch;
2970 for (i = 0; i < X86_ARCH_LAST; ++i)
2971 ix86_arch_features[i] = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
2973 for (i = 0; i < pta_size; i++)
2974 if (! strcmp (ix86_tune_string, processor_alias_table[i].name))
2976 ix86_schedule = processor_alias_table[i].schedule;
2977 ix86_tune = processor_alias_table[i].processor;
2978 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
2980 if (ix86_tune_defaulted)
2982 ix86_tune_string = "x86-64";
2983 for (i = 0; i < pta_size; i++)
2984 if (! strcmp (ix86_tune_string,
2985 processor_alias_table[i].name))
2986 break;
2987 ix86_schedule = processor_alias_table[i].schedule;
2988 ix86_tune = processor_alias_table[i].processor;
2990 else
2991 error ("CPU you selected does not support x86-64 "
2992 "instruction set");
2994 /* Intel CPUs have always interpreted SSE prefetch instructions as
2995 NOPs; so, we can enable SSE prefetch instructions even when
2996 -mtune (rather than -march) points us to a processor that has them.
2997 However, the VIA C3 gives a SIGILL, so we only do that for i686 and
2998 higher processors. */
2999 if (TARGET_CMOVE
3000 && (processor_alias_table[i].flags & (PTA_PREFETCH_SSE | PTA_SSE)))
3001 x86_prefetch_sse = true;
3002 break;
3004 if (i == pta_size)
3005 error ("bad value (%s) for %stune=%s %s",
3006 ix86_tune_string, prefix, suffix, sw);
3008 ix86_tune_mask = 1u << ix86_tune;
3009 for (i = 0; i < X86_TUNE_LAST; ++i)
3010 ix86_tune_features[i] = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3012 if (optimize_size)
3013 ix86_cost = &ix86_size_cost;
3014 else
3015 ix86_cost = processor_target_table[ix86_tune].cost;
3017 /* Arrange to set up i386_stack_locals for all functions. */
3018 init_machine_status = ix86_init_machine_status;
3020 /* Validate -mregparm= value. */
3021 if (ix86_regparm_string)
3023 if (TARGET_64BIT)
3024 warning (0, "%sregparm%s is ignored in 64-bit mode", prefix, suffix);
3025 i = atoi (ix86_regparm_string);
3026 if (i < 0 || i > REGPARM_MAX)
3027 error ("%sregparm=%d%s is not between 0 and %d",
3028 prefix, i, suffix, REGPARM_MAX);
3029 else
3030 ix86_regparm = i;
3032 if (TARGET_64BIT)
3033 ix86_regparm = REGPARM_MAX;
3035 /* If the user has provided any of the -malign-* options,
3036 warn and use that value only if -falign-* is not set.
3037 Remove this code in GCC 3.2 or later. */
3038 if (ix86_align_loops_string)
3040 warning (0, "%salign-loops%s is obsolete, use -falign-loops%s",
3041 prefix, suffix, suffix);
3042 if (align_loops == 0)
3044 i = atoi (ix86_align_loops_string);
3045 if (i < 0 || i > MAX_CODE_ALIGN)
3046 error ("%salign-loops=%d%s is not between 0 and %d",
3047 prefix, i, suffix, MAX_CODE_ALIGN);
3048 else
3049 align_loops = 1 << i;
3053 if (ix86_align_jumps_string)
3055 warning (0, "%salign-jumps%s is obsolete, use -falign-jumps%s",
3056 prefix, suffix, suffix);
3057 if (align_jumps == 0)
3059 i = atoi (ix86_align_jumps_string);
3060 if (i < 0 || i > MAX_CODE_ALIGN)
3061 error ("%salign-loops=%d%s is not between 0 and %d",
3062 prefix, i, suffix, MAX_CODE_ALIGN);
3063 else
3064 align_jumps = 1 << i;
3068 if (ix86_align_funcs_string)
3070 warning (0, "%salign-functions%s is obsolete, use -falign-functions%s",
3071 prefix, suffix, suffix);
3072 if (align_functions == 0)
3074 i = atoi (ix86_align_funcs_string);
3075 if (i < 0 || i > MAX_CODE_ALIGN)
3076 error ("%salign-loops=%d%s is not between 0 and %d",
3077 prefix, i, suffix, MAX_CODE_ALIGN);
3078 else
3079 align_functions = 1 << i;
3083 /* Default align_* from the processor table. */
3084 if (align_loops == 0)
3086 align_loops = processor_target_table[ix86_tune].align_loop;
3087 align_loops_max_skip = processor_target_table[ix86_tune].align_loop_max_skip;
3089 if (align_jumps == 0)
3091 align_jumps = processor_target_table[ix86_tune].align_jump;
3092 align_jumps_max_skip = processor_target_table[ix86_tune].align_jump_max_skip;
3094 if (align_functions == 0)
3096 align_functions = processor_target_table[ix86_tune].align_func;
3099 /* Validate -mbranch-cost= value, or provide default. */
3100 ix86_branch_cost = ix86_cost->branch_cost;
3101 if (ix86_branch_cost_string)
3103 i = atoi (ix86_branch_cost_string);
3104 if (i < 0 || i > 5)
3105 error ("%sbranch-cost=%d%s is not between 0 and 5", prefix, i, suffix);
3106 else
3107 ix86_branch_cost = i;
3109 if (ix86_section_threshold_string)
3111 i = atoi (ix86_section_threshold_string);
3112 if (i < 0)
3113 error ("%slarge-data-threshold=%d%s is negative", prefix, i, suffix);
3114 else
3115 ix86_section_threshold = i;
3118 if (ix86_tls_dialect_string)
3120 if (strcmp (ix86_tls_dialect_string, "gnu") == 0)
3121 ix86_tls_dialect = TLS_DIALECT_GNU;
3122 else if (strcmp (ix86_tls_dialect_string, "gnu2") == 0)
3123 ix86_tls_dialect = TLS_DIALECT_GNU2;
3124 else if (strcmp (ix86_tls_dialect_string, "sun") == 0)
3125 ix86_tls_dialect = TLS_DIALECT_SUN;
3126 else
3127 error ("bad value (%s) for %stls-dialect=%s %s",
3128 ix86_tls_dialect_string, prefix, suffix, sw);
3131 if (ix87_precision_string)
3133 i = atoi (ix87_precision_string);
3134 if (i != 32 && i != 64 && i != 80)
3135 error ("pc%d is not valid precision setting (32, 64 or 80)", i);
3138 if (TARGET_64BIT)
3140 target_flags |= TARGET_SUBTARGET64_DEFAULT & ~target_flags_explicit;
3142 /* Enable by default the SSE and MMX builtins. Do allow the user to
3143 explicitly disable any of these. In particular, disabling SSE and
3144 MMX for kernel code is extremely useful. */
3145 if (!ix86_arch_specified)
3146 ix86_isa_flags
3147 |= ((OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_MMX
3148 | TARGET_SUBTARGET64_ISA_DEFAULT) & ~ix86_isa_flags_explicit);
3150 if (TARGET_RTD)
3151 warning (0, "%srtd%s is ignored in 64bit mode", prefix, suffix);
3153 else
3155 target_flags |= TARGET_SUBTARGET32_DEFAULT & ~target_flags_explicit;
3157 if (!ix86_arch_specified)
3158 ix86_isa_flags
3159 |= TARGET_SUBTARGET32_ISA_DEFAULT & ~ix86_isa_flags_explicit;
3161 /* i386 ABI does not specify red zone. It still makes sense to use it
3162 when the programmer takes care to keep the stack from being destroyed. */
3163 if (!(target_flags_explicit & MASK_NO_RED_ZONE))
3164 target_flags |= MASK_NO_RED_ZONE;
3167 /* Keep nonleaf frame pointers. */
3168 if (flag_omit_frame_pointer)
3169 target_flags &= ~MASK_OMIT_LEAF_FRAME_POINTER;
3170 else if (TARGET_OMIT_LEAF_FRAME_POINTER)
3171 flag_omit_frame_pointer = 1;
3173 /* If we're doing fast math, we don't care about comparison order
3174 wrt NaNs. This lets us use a shorter comparison sequence. */
3175 if (flag_finite_math_only)
3176 target_flags &= ~MASK_IEEE_FP;
3178 /* If the architecture always has an FPU, turn off NO_FANCY_MATH_387,
3179 since the insns won't need emulation. */
3180 if (x86_arch_always_fancy_math_387 & ix86_arch_mask)
3181 target_flags &= ~MASK_NO_FANCY_MATH_387;
3183 /* Likewise, if the target doesn't have a 387, or we've specified
3184 software floating point, don't use 387 inline intrinsics. */
3185 if (!TARGET_80387)
3186 target_flags |= MASK_NO_FANCY_MATH_387;
3188 /* Turn on MMX builtins for -msse. */
3189 if (TARGET_SSE)
3191 ix86_isa_flags |= OPTION_MASK_ISA_MMX & ~ix86_isa_flags_explicit;
3192 x86_prefetch_sse = true;
3195 /* Turn on popcnt instruction for -msse4.2 or -mabm. */
3196 if (TARGET_SSE4_2 || TARGET_ABM)
3197 ix86_isa_flags |= OPTION_MASK_ISA_POPCNT & ~ix86_isa_flags_explicit;
3199 /* Validate -mpreferred-stack-boundary= value or default it to
3200 PREFERRED_STACK_BOUNDARY_DEFAULT. */
3201 ix86_preferred_stack_boundary = PREFERRED_STACK_BOUNDARY_DEFAULT;
3202 if (ix86_preferred_stack_boundary_string)
3204 i = atoi (ix86_preferred_stack_boundary_string);
3205 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3206 error ("%spreferred-stack-boundary=%d%s is not between %d and 12",
3207 prefix, i, suffix, TARGET_64BIT ? 4 : 2);
3208 else
3209 ix86_preferred_stack_boundary = (1 << i) * BITS_PER_UNIT;
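/* Illustrative note (not part of the original source): the option value is an
   exponent, not a byte count.  A minimal worked example, using the 64-bit
   minimum of 4 checked above:

     -mpreferred-stack-boundary=4   =>   (1 << 4) * BITS_PER_UNIT
                                    =>   16 * 8 = 128 bits = 16 bytes

   so the stack is kept 16-byte aligned, which is what the x86-64 psABI
   expects; 32-bit code may go as low as 2 (4-byte alignment).  */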
3212 /* Set the default value for -mstackrealign. */
3213 if (ix86_force_align_arg_pointer == -1)
3214 ix86_force_align_arg_pointer = STACK_REALIGN_DEFAULT;
3216 /* Validate -mincoming-stack-boundary= value or default it to
3217 MIN_STACK_BOUNDARY/PREFERRED_STACK_BOUNDARY. */
3218 if (ix86_force_align_arg_pointer)
3219 ix86_default_incoming_stack_boundary = MIN_STACK_BOUNDARY;
3220 else
3221 ix86_default_incoming_stack_boundary = PREFERRED_STACK_BOUNDARY;
3222 ix86_incoming_stack_boundary = ix86_default_incoming_stack_boundary;
3223 if (ix86_incoming_stack_boundary_string)
3225 i = atoi (ix86_incoming_stack_boundary_string);
3226 if (i < (TARGET_64BIT ? 4 : 2) || i > 12)
3227 error ("-mincoming-stack-boundary=%d is not between %d and 12",
3228 i, TARGET_64BIT ? 4 : 2);
3229 else
3231 ix86_user_incoming_stack_boundary = (1 << i) * BITS_PER_UNIT;
3232 ix86_incoming_stack_boundary
3233 = ix86_user_incoming_stack_boundary;
3237 /* Accept -msseregparm only if at least SSE support is enabled. */
3238 if (TARGET_SSEREGPARM
3239 && ! TARGET_SSE)
3240 error ("%ssseregparm%s used without SSE enabled", prefix, suffix);
3242 ix86_fpmath = TARGET_FPMATH_DEFAULT;
3243 if (ix86_fpmath_string != 0)
3245 if (! strcmp (ix86_fpmath_string, "387"))
3246 ix86_fpmath = FPMATH_387;
3247 else if (! strcmp (ix86_fpmath_string, "sse"))
3249 if (!TARGET_SSE)
3251 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3252 ix86_fpmath = FPMATH_387;
3254 else
3255 ix86_fpmath = FPMATH_SSE;
3257 else if (! strcmp (ix86_fpmath_string, "387,sse")
3258 || ! strcmp (ix86_fpmath_string, "387+sse")
3259 || ! strcmp (ix86_fpmath_string, "sse,387")
3260 || ! strcmp (ix86_fpmath_string, "sse+387")
3261 || ! strcmp (ix86_fpmath_string, "both"))
3263 if (!TARGET_SSE)
3265 warning (0, "SSE instruction set disabled, using 387 arithmetics");
3266 ix86_fpmath = FPMATH_387;
3268 else if (!TARGET_80387)
3270 warning (0, "387 instruction set disabled, using SSE arithmetics");
3271 ix86_fpmath = FPMATH_SSE;
3273 else
3274 ix86_fpmath = (enum fpmath_unit) (FPMATH_SSE | FPMATH_387);
3276 else
3277 error ("bad value (%s) for %sfpmath=%s %s",
3278 ix86_fpmath_string, prefix, suffix, sw);
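/* Illustrative note (not part of the original source): the strings accepted
   above correspond to command-line uses such as

     gcc -m32 -msse2 -mfpmath=sse       (scalar FP in SSE registers)
     gcc -m32 -msse2 -mfpmath=both      (FPMATH_SSE | FPMATH_387)
     gcc -m32 -mno-sse -mfpmath=sse     (warns, falls back to FPMATH_387)

   Any spelling outside the ones checked above reaches the error call.  */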
3281 /* If the i387 is disabled, then do not return values in it. */
3282 if (!TARGET_80387)
3283 target_flags &= ~MASK_FLOAT_RETURNS;
3285 /* Use external vectorized library in vectorizing intrinsics. */
3286 if (ix86_veclibabi_string)
3288 if (strcmp (ix86_veclibabi_string, "svml") == 0)
3289 ix86_veclib_handler = ix86_veclibabi_svml;
3290 else if (strcmp (ix86_veclibabi_string, "acml") == 0)
3291 ix86_veclib_handler = ix86_veclibabi_acml;
3292 else
3293 error ("unknown vectorization library ABI type (%s) for "
3294 "%sveclibabi=%s %s", ix86_veclibabi_string,
3295 prefix, suffix, sw);
3298 if ((x86_accumulate_outgoing_args & ix86_tune_mask)
3299 && !(target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3300 && !optimize_size)
3301 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3303 /* ??? Unwind info is not correct around the CFG unless either a frame
3304 pointer is present or M_A_O_A is set. Fixing this requires rewriting
3305 unwind info generation to be aware of the CFG and propagating states
3306 around edges. */
3307 if ((flag_unwind_tables || flag_asynchronous_unwind_tables
3308 || flag_exceptions || flag_non_call_exceptions)
3309 && flag_omit_frame_pointer
3310 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3312 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3313 warning (0, "unwind tables currently require either a frame pointer "
3314 "or %saccumulate-outgoing-args%s for correctness",
3315 prefix, suffix);
3316 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3319 /* If stack probes are required, the space used for large function
3320 arguments on the stack must also be probed, so enable
3321 -maccumulate-outgoing-args so this happens in the prologue. */
3322 if (TARGET_STACK_PROBE
3323 && !(target_flags & MASK_ACCUMULATE_OUTGOING_ARGS))
3325 if (target_flags_explicit & MASK_ACCUMULATE_OUTGOING_ARGS)
3326 warning (0, "stack probing requires %saccumulate-outgoing-args%s "
3327 "for correctness", prefix, suffix);
3328 target_flags |= MASK_ACCUMULATE_OUTGOING_ARGS;
3331 /* For sane SSE instruction set generation we need the fcomi instruction.  It is
3332 It is safe to enable all CMOVE instructions. */
3333 if (TARGET_SSE)
3334 TARGET_CMOVE = 1;
3336 /* Figure out what ASM_GENERATE_INTERNAL_LABEL builds as a prefix. */
3338 char *p;
3339 ASM_GENERATE_INTERNAL_LABEL (internal_label_prefix, "LX", 0);
3340 p = strchr (internal_label_prefix, 'X');
3341 internal_label_prefix_len = p - internal_label_prefix;
3342 *p = '\0';
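/* Illustrative note (not part of the original source): the trick here is that
   ASM_GENERATE_INTERNAL_LABEL is asked for a label named "LX"; everything it
   emits before the literal 'X' is the target's internal-label prefix.  On a
   typical ELF configuration the generated label looks roughly like ".LX0",
   leaving internal_label_prefix = ".L" and internal_label_prefix_len = 2,
   but the exact spelling is target-dependent.  */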
3345 /* When scheduling description is not available, disable scheduler pass
3346 so it won't slow down the compilation and make x87 code slower. */
3347 if (!TARGET_SCHEDULE)
3348 flag_schedule_insns_after_reload = flag_schedule_insns = 0;
3350 if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
3351 set_param_value ("simultaneous-prefetches",
3352 ix86_cost->simultaneous_prefetches);
3353 if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
3354 set_param_value ("l1-cache-line-size", ix86_cost->prefetch_block);
3355 if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
3356 set_param_value ("l1-cache-size", ix86_cost->l1_cache_size);
3357 if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
3358 set_param_value ("l2-cache-size", ix86_cost->l2_cache_size);
3360 /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
3361 can be optimized to ap = __builtin_next_arg (0). */
3362 if (!TARGET_64BIT)
3363 targetm.expand_builtin_va_start = NULL;
3365 if (TARGET_64BIT)
3367 ix86_gen_leave = gen_leave_rex64;
3368 ix86_gen_pop1 = gen_popdi1;
3369 ix86_gen_add3 = gen_adddi3;
3370 ix86_gen_sub3 = gen_subdi3;
3371 ix86_gen_sub3_carry = gen_subdi3_carry_rex64;
3372 ix86_gen_one_cmpl2 = gen_one_cmpldi2;
3373 ix86_gen_monitor = gen_sse3_monitor64;
3374 ix86_gen_andsp = gen_anddi3;
3376 else
3378 ix86_gen_leave = gen_leave;
3379 ix86_gen_pop1 = gen_popsi1;
3380 ix86_gen_add3 = gen_addsi3;
3381 ix86_gen_sub3 = gen_subsi3;
3382 ix86_gen_sub3_carry = gen_subsi3_carry;
3383 ix86_gen_one_cmpl2 = gen_one_cmplsi2;
3384 ix86_gen_monitor = gen_sse3_monitor;
3385 ix86_gen_andsp = gen_andsi3;
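/* Illustrative sketch (not part of the original source): these function
   pointers let later code emit word-size-correct RTL without testing
   TARGET_64BIT at every call site.  For instance, prologue/epilogue code can
   adjust the stack pointer with something along the lines of

     emit_insn (ix86_gen_add3 (stack_pointer_rtx, stack_pointer_rtx,
                               GEN_INT (-allocate)));

   (with "allocate" standing in for the frame size to reserve, a hypothetical
   local here), and the same source line expands to adddi3 on 64-bit targets
   and addsi3 on 32-bit targets.  */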
3388 #ifdef USE_IX86_CLD
3389 /* Use -mcld by default for 32-bit code if configured with --enable-cld. */
3390 if (!TARGET_64BIT)
3391 target_flags |= MASK_CLD & ~target_flags_explicit;
3392 #endif
3394 /* Save the initial options in case the user uses function-specific options.  */
3395 if (main_args_p)
3396 target_option_default_node = target_option_current_node
3397 = build_target_option_node ();
3400 /* Update register usage after having seen the compiler flags. */
3402 void
3403 ix86_conditional_register_usage (void)
3405 int i;
3406 unsigned int j;
3408 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3410 if (fixed_regs[i] > 1)
3411 fixed_regs[i] = (fixed_regs[i] == (TARGET_64BIT ? 3 : 2));
3412 if (call_used_regs[i] > 1)
3413 call_used_regs[i] = (call_used_regs[i] == (TARGET_64BIT ? 3 : 2));
3416 /* The PIC register, if it exists, is fixed. */
3417 j = PIC_OFFSET_TABLE_REGNUM;
3418 if (j != INVALID_REGNUM)
3419 fixed_regs[j] = call_used_regs[j] = 1;
3421 /* The MS_ABI changes the set of call-used registers. */
3422 if (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)
3424 call_used_regs[SI_REG] = 0;
3425 call_used_regs[DI_REG] = 0;
3426 call_used_regs[XMM6_REG] = 0;
3427 call_used_regs[XMM7_REG] = 0;
3428 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3429 call_used_regs[i] = 0;
3432 /* The default setting of CLOBBERED_REGS is for 32-bit; add in the
3433 other call-clobbered regs for 64-bit. */
3434 if (TARGET_64BIT)
3436 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
3438 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3439 if (TEST_HARD_REG_BIT (reg_class_contents[(int)GENERAL_REGS], i)
3440 && call_used_regs[i])
3441 SET_HARD_REG_BIT (reg_class_contents[(int)CLOBBERED_REGS], i);
3444 /* If MMX is disabled, squash the registers. */
3445 if (! TARGET_MMX)
3446 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3447 if (TEST_HARD_REG_BIT (reg_class_contents[(int)MMX_REGS], i))
3448 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3450 /* If SSE is disabled, squash the registers. */
3451 if (! TARGET_SSE)
3452 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3453 if (TEST_HARD_REG_BIT (reg_class_contents[(int)SSE_REGS], i))
3454 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3456 /* If the FPU is disabled, squash the registers. */
3457 if (! (TARGET_80387 || TARGET_FLOAT_RETURNS_IN_80387))
3458 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3459 if (TEST_HARD_REG_BIT (reg_class_contents[(int)FLOAT_REGS], i))
3460 fixed_regs[i] = call_used_regs[i] = 1, reg_names[i] = "";
3462 /* If 32-bit, squash the 64-bit registers. */
3463 if (! TARGET_64BIT)
3465 for (i = FIRST_REX_INT_REG; i <= LAST_REX_INT_REG; i++)
3466 reg_names[i] = "";
3467 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
3468 reg_names[i] = "";
3473 /* Save the current options */
3475 static void
3476 ix86_function_specific_save (struct cl_target_option *ptr)
3478 ptr->arch = ix86_arch;
3479 ptr->schedule = ix86_schedule;
3480 ptr->tune = ix86_tune;
3481 ptr->fpmath = ix86_fpmath;
3482 ptr->branch_cost = ix86_branch_cost;
3483 ptr->tune_defaulted = ix86_tune_defaulted;
3484 ptr->arch_specified = ix86_arch_specified;
3485 ptr->ix86_isa_flags_explicit = ix86_isa_flags_explicit;
3486 ptr->target_flags_explicit = target_flags_explicit;
3488 /* The fields are char but the variables are not; make sure the
3489 values fit in the fields. */
3490 gcc_assert (ptr->arch == ix86_arch);
3491 gcc_assert (ptr->schedule == ix86_schedule);
3492 gcc_assert (ptr->tune == ix86_tune);
3493 gcc_assert (ptr->fpmath == ix86_fpmath);
3494 gcc_assert (ptr->branch_cost == ix86_branch_cost);
3497 /* Restore the current options */
3499 static void
3500 ix86_function_specific_restore (struct cl_target_option *ptr)
3502 enum processor_type old_tune = ix86_tune;
3503 enum processor_type old_arch = ix86_arch;
3504 unsigned int ix86_arch_mask, ix86_tune_mask;
3505 int i;
3507 ix86_arch = (enum processor_type) ptr->arch;
3508 ix86_schedule = (enum attr_cpu) ptr->schedule;
3509 ix86_tune = (enum processor_type) ptr->tune;
3510 ix86_fpmath = (enum fpmath_unit) ptr->fpmath;
3511 ix86_branch_cost = ptr->branch_cost;
3512 ix86_tune_defaulted = ptr->tune_defaulted;
3513 ix86_arch_specified = ptr->arch_specified;
3514 ix86_isa_flags_explicit = ptr->ix86_isa_flags_explicit;
3515 target_flags_explicit = ptr->target_flags_explicit;
3517 /* Recreate the arch feature tests if the arch changed */
3518 if (old_arch != ix86_arch)
3520 ix86_arch_mask = 1u << ix86_arch;
3521 for (i = 0; i < X86_ARCH_LAST; ++i)
3522 ix86_arch_features[i]
3523 = !!(initial_ix86_arch_features[i] & ix86_arch_mask);
3526 /* Recreate the tune optimization tests */
3527 if (old_tune != ix86_tune)
3529 ix86_tune_mask = 1u << ix86_tune;
3530 for (i = 0; i < X86_TUNE_LAST; ++i)
3531 ix86_tune_features[i]
3532 = !!(initial_ix86_tune_features[i] & ix86_tune_mask);
3536 /* Print the current options */
3538 static void
3539 ix86_function_specific_print (FILE *file, int indent,
3540 struct cl_target_option *ptr)
3542 char *target_string
3543 = ix86_target_string (ptr->ix86_isa_flags, ptr->target_flags,
3544 NULL, NULL, NULL, false);
3546 fprintf (file, "%*sarch = %d (%s)\n",
3547 indent, "",
3548 ptr->arch,
3549 ((ptr->arch < TARGET_CPU_DEFAULT_max)
3550 ? cpu_names[ptr->arch]
3551 : "<unknown>"));
3553 fprintf (file, "%*stune = %d (%s)\n",
3554 indent, "",
3555 ptr->tune,
3556 ((ptr->tune < TARGET_CPU_DEFAULT_max)
3557 ? cpu_names[ptr->tune]
3558 : "<unknown>"));
3560 fprintf (file, "%*sfpmath = %d%s%s\n", indent, "", ptr->fpmath,
3561 (ptr->fpmath & FPMATH_387) ? ", 387" : "",
3562 (ptr->fpmath & FPMATH_SSE) ? ", sse" : "");
3563 fprintf (file, "%*sbranch_cost = %d\n", indent, "", ptr->branch_cost);
3565 if (target_string)
3567 fprintf (file, "%*s%s\n", indent, "", target_string);
3568 free (target_string);
3573 /* Inner function to process the attribute((target(...))), take an argument and
3574 set the current options from the argument. If we have a list, recursively go
3575 over the list. */
3577 static bool
3578 ix86_valid_target_attribute_inner_p (tree args, char *p_strings[])
3580 char *next_optstr;
3581 bool ret = true;
3583 #define IX86_ATTR_ISA(S,O) { S, sizeof (S)-1, ix86_opt_isa, O, 0 }
3584 #define IX86_ATTR_STR(S,O) { S, sizeof (S)-1, ix86_opt_str, O, 0 }
3585 #define IX86_ATTR_YES(S,O,M) { S, sizeof (S)-1, ix86_opt_yes, O, M }
3586 #define IX86_ATTR_NO(S,O,M) { S, sizeof (S)-1, ix86_opt_no, O, M }
3588 enum ix86_opt_type
3590 ix86_opt_unknown,
3591 ix86_opt_yes,
3592 ix86_opt_no,
3593 ix86_opt_str,
3594 ix86_opt_isa
3597 static const struct
3599 const char *string;
3600 size_t len;
3601 enum ix86_opt_type type;
3602 int opt;
3603 int mask;
3604 } attrs[] = {
3605 /* isa options */
3606 IX86_ATTR_ISA ("3dnow", OPT_m3dnow),
3607 IX86_ATTR_ISA ("abm", OPT_mabm),
3608 IX86_ATTR_ISA ("aes", OPT_maes),
3609 IX86_ATTR_ISA ("avx", OPT_mavx),
3610 IX86_ATTR_ISA ("mmx", OPT_mmmx),
3611 IX86_ATTR_ISA ("pclmul", OPT_mpclmul),
3612 IX86_ATTR_ISA ("popcnt", OPT_mpopcnt),
3613 IX86_ATTR_ISA ("sse", OPT_msse),
3614 IX86_ATTR_ISA ("sse2", OPT_msse2),
3615 IX86_ATTR_ISA ("sse3", OPT_msse3),
3616 IX86_ATTR_ISA ("sse4", OPT_msse4),
3617 IX86_ATTR_ISA ("sse4.1", OPT_msse4_1),
3618 IX86_ATTR_ISA ("sse4.2", OPT_msse4_2),
3619 IX86_ATTR_ISA ("sse4a", OPT_msse4a),
3620 IX86_ATTR_ISA ("ssse3", OPT_mssse3),
3622 /* string options */
3623 IX86_ATTR_STR ("arch=", IX86_FUNCTION_SPECIFIC_ARCH),
3624 IX86_ATTR_STR ("fpmath=", IX86_FUNCTION_SPECIFIC_FPMATH),
3625 IX86_ATTR_STR ("tune=", IX86_FUNCTION_SPECIFIC_TUNE),
3627 /* flag options */
3628 IX86_ATTR_YES ("cld",
3629 OPT_mcld,
3630 MASK_CLD),
3632 IX86_ATTR_NO ("fancy-math-387",
3633 OPT_mfancy_math_387,
3634 MASK_NO_FANCY_MATH_387),
3636 IX86_ATTR_YES ("ieee-fp",
3637 OPT_mieee_fp,
3638 MASK_IEEE_FP),
3640 IX86_ATTR_YES ("inline-all-stringops",
3641 OPT_minline_all_stringops,
3642 MASK_INLINE_ALL_STRINGOPS),
3644 IX86_ATTR_YES ("inline-stringops-dynamically",
3645 OPT_minline_stringops_dynamically,
3646 MASK_INLINE_STRINGOPS_DYNAMICALLY),
3648 IX86_ATTR_NO ("align-stringops",
3649 OPT_mno_align_stringops,
3650 MASK_NO_ALIGN_STRINGOPS),
3652 IX86_ATTR_YES ("recip",
3653 OPT_mrecip,
3654 MASK_RECIP),
3658 /* If this is a list, recurse to get the options. */
3659 if (TREE_CODE (args) == TREE_LIST)
3661 bool ret = true;
3663 for (; args; args = TREE_CHAIN (args))
3664 if (TREE_VALUE (args)
3665 && !ix86_valid_target_attribute_inner_p (TREE_VALUE (args), p_strings))
3666 ret = false;
3668 return ret;
3671 else if (TREE_CODE (args) != STRING_CST)
3672 gcc_unreachable ();
3674 /* Handle multiple arguments separated by commas. */
3675 next_optstr = ASTRDUP (TREE_STRING_POINTER (args));
3677 while (next_optstr && *next_optstr != '\0')
3679 char *p = next_optstr;
3680 char *orig_p = p;
3681 char *comma = strchr (next_optstr, ',');
3682 const char *opt_string;
3683 size_t len, opt_len;
3684 int opt;
3685 bool opt_set_p;
3686 char ch;
3687 unsigned i;
3688 enum ix86_opt_type type = ix86_opt_unknown;
3689 int mask = 0;
3691 if (comma)
3693 *comma = '\0';
3694 len = comma - next_optstr;
3695 next_optstr = comma + 1;
3697 else
3699 len = strlen (p);
3700 next_optstr = NULL;
3703 /* Recognize no-xxx. */
3704 if (len > 3 && p[0] == 'n' && p[1] == 'o' && p[2] == '-')
3706 opt_set_p = false;
3707 p += 3;
3708 len -= 3;
3710 else
3711 opt_set_p = true;
3713 /* Find the option. */
3714 ch = *p;
3715 opt = N_OPTS;
3716 for (i = 0; i < ARRAY_SIZE (attrs); i++)
3718 type = attrs[i].type;
3719 opt_len = attrs[i].len;
3720 if (ch == attrs[i].string[0]
3721 && ((type != ix86_opt_str) ? len == opt_len : len > opt_len)
3722 && memcmp (p, attrs[i].string, opt_len) == 0)
3724 opt = attrs[i].opt;
3725 mask = attrs[i].mask;
3726 opt_string = attrs[i].string;
3727 break;
3731 /* Process the option. */
3732 if (opt == N_OPTS)
3734 error ("attribute(target(\"%s\")) is unknown", orig_p);
3735 ret = false;
3738 else if (type == ix86_opt_isa)
3739 ix86_handle_option (opt, p, opt_set_p);
3741 else if (type == ix86_opt_yes || type == ix86_opt_no)
3743 if (type == ix86_opt_no)
3744 opt_set_p = !opt_set_p;
3746 if (opt_set_p)
3747 target_flags |= mask;
3748 else
3749 target_flags &= ~mask;
3752 else if (type == ix86_opt_str)
3754 if (p_strings[opt])
3756 error ("option(\"%s\") was already specified", opt_string);
3757 ret = false;
3759 else
3760 p_strings[opt] = xstrdup (p + opt_len);
3763 else
3764 gcc_unreachable ();
3767 return ret;
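/* Illustrative example (not part of the original source): source code that
   exercises the parser above might look like

     __attribute__((target("sse4.2,no-fancy-math-387,arch=core2")))
     int foo (int x);

   "sse4.2" hits an IX86_ATTR_ISA entry and goes through ix86_handle_option;
   the "no-" prefix on "fancy-math-387" flips opt_set_p, which the
   ix86_opt_no entry flips back, so MASK_NO_FANCY_MATH_387 gets set; and
   "arch=core2" is an ix86_opt_str entry, so the text after "arch=" is saved
   in p_strings[] for override_options to consume later.  */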
3770 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
3772 tree
3773 ix86_valid_target_attribute_tree (tree args)
3775 const char *orig_arch_string = ix86_arch_string;
3776 const char *orig_tune_string = ix86_tune_string;
3777 const char *orig_fpmath_string = ix86_fpmath_string;
3778 int orig_tune_defaulted = ix86_tune_defaulted;
3779 int orig_arch_specified = ix86_arch_specified;
3780 char *option_strings[IX86_FUNCTION_SPECIFIC_MAX] = { NULL, NULL, NULL };
3781 tree t = NULL_TREE;
3782 int i;
3783 struct cl_target_option *def
3784 = TREE_TARGET_OPTION (target_option_default_node);
3786 /* Process each of the options on the chain. */
3787 if (! ix86_valid_target_attribute_inner_p (args, option_strings))
3788 return NULL_TREE;
3790 /* If the changed options are different from the default, rerun override_options,
3791 and then save the options away.  The string options are attribute options,
3792 and will be undone when we copy the save structure. */
3793 if (ix86_isa_flags != def->ix86_isa_flags
3794 || target_flags != def->target_flags
3795 || option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
3796 || option_strings[IX86_FUNCTION_SPECIFIC_TUNE]
3797 || option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3799 /* If we are using the default tune= or arch=, undo the string assigned,
3800 and use the default. */
3801 if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH])
3802 ix86_arch_string = option_strings[IX86_FUNCTION_SPECIFIC_ARCH];
3803 else if (!orig_arch_specified)
3804 ix86_arch_string = NULL;
3806 if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
3807 ix86_tune_string = option_strings[IX86_FUNCTION_SPECIFIC_TUNE];
3808 else if (orig_tune_defaulted)
3809 ix86_tune_string = NULL;
3811 /* If fpmath= is not set, and we now have sse2 on 32-bit, use it. */
3812 if (option_strings[IX86_FUNCTION_SPECIFIC_FPMATH])
3813 ix86_fpmath_string = option_strings[IX86_FUNCTION_SPECIFIC_FPMATH];
3814 else if (!TARGET_64BIT && TARGET_SSE)
3815 ix86_fpmath_string = "sse,387";
3817 /* Do any overrides, such as arch=xxx, or tune=xxx support. */
3818 override_options (false);
3820 /* Add any builtin functions with the new isa if any. */
3821 ix86_add_new_builtins (ix86_isa_flags);
3823 /* Save the current options unless we are validating options for
3824 #pragma. */
3825 t = build_target_option_node ();
3827 ix86_arch_string = orig_arch_string;
3828 ix86_tune_string = orig_tune_string;
3829 ix86_fpmath_string = orig_fpmath_string;
3831 /* Free up memory allocated to hold the strings */
3832 for (i = 0; i < IX86_FUNCTION_SPECIFIC_MAX; i++)
3833 if (option_strings[i])
3834 free (option_strings[i]);
3837 return t;
3840 /* Hook to validate attribute((target("string"))). */
3842 static bool
3843 ix86_valid_target_attribute_p (tree fndecl,
3844 tree ARG_UNUSED (name),
3845 tree args,
3846 int ARG_UNUSED (flags))
3848 struct cl_target_option cur_target;
3849 bool ret = true;
3850 tree old_optimize = build_optimization_node ();
3851 tree new_target, new_optimize;
3852 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
3854 /* If the function changed the optimization levels as well as setting target
3855 options, start with the optimizations specified. */
3856 if (func_optimize && func_optimize != old_optimize)
3857 cl_optimization_restore (TREE_OPTIMIZATION (func_optimize));
3859 /* The target attributes may also change some optimization flags, so update
3860 the optimization options if necessary. */
3861 cl_target_option_save (&cur_target);
3862 new_target = ix86_valid_target_attribute_tree (args);
3863 new_optimize = build_optimization_node ();
3865 if (!new_target)
3866 ret = false;
3868 else if (fndecl)
3870 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
3872 if (old_optimize != new_optimize)
3873 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
3876 cl_target_option_restore (&cur_target);
3878 if (old_optimize != new_optimize)
3879 cl_optimization_restore (TREE_OPTIMIZATION (old_optimize));
3881 return ret;
3885 /* Hook to determine if one function can safely inline another. */
3887 static bool
3888 ix86_can_inline_p (tree caller, tree callee)
3890 bool ret = false;
3891 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
3892 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
3894 /* If callee has no option attributes, then it is ok to inline. */
3895 if (!callee_tree)
3896 ret = true;
3898 /* If caller has no option attributes, but callee does then it is not ok to
3899 inline. */
3900 else if (!caller_tree)
3901 ret = false;
3903 else
3905 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
3906 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
3908 /* Callee's ISA options should be a subset of the caller's, i.e. an SSE4
3909 function can inline an SSE2 function, but an SSE2 function can't inline
3910 an SSE4 function. */
3911 if ((caller_opts->ix86_isa_flags & callee_opts->ix86_isa_flags)
3912 != callee_opts->ix86_isa_flags)
3913 ret = false;
3915 /* See if we have the same non-isa options. */
3916 else if (caller_opts->target_flags != callee_opts->target_flags)
3917 ret = false;
3919 /* See if arch, tune, etc. are the same. */
3920 else if (caller_opts->arch != callee_opts->arch)
3921 ret = false;
3923 else if (caller_opts->tune != callee_opts->tune)
3924 ret = false;
3926 else if (caller_opts->fpmath != callee_opts->fpmath)
3927 ret = false;
3929 else if (caller_opts->branch_cost != callee_opts->branch_cost)
3930 ret = false;
3932 else
3933 ret = true;
3936 return ret;
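/* Illustrative example (not part of the original source): with the subset
   test above,

     __attribute__((target("sse4.2"))) void caller (void);
     __attribute__((target("sse2")))   void callee (void);

   caller may inline callee because the callee's ISA bits are contained in
   the caller's; the reverse combination is rejected, since inlining would
   let SSE4.2 code leak into a function that only promised SSE2.  */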
3940 /* Remember the last target of ix86_set_current_function. */
3941 static GTY(()) tree ix86_previous_fndecl;
3943 /* Establish appropriate back-end context for processing the function
3944 FNDECL. The argument might be NULL to indicate processing at top
3945 level, outside of any function scope. */
3946 static void
3947 ix86_set_current_function (tree fndecl)
3949 /* Only change the context if the function changes. This hook is called
3950 several times in the course of compiling a function, and we don't want to
3951 slow things down too much or call target_reinit when it isn't safe. */
3952 if (fndecl && fndecl != ix86_previous_fndecl)
3954 tree old_tree = (ix86_previous_fndecl
3955 ? DECL_FUNCTION_SPECIFIC_TARGET (ix86_previous_fndecl)
3956 : NULL_TREE);
3958 tree new_tree = (fndecl
3959 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
3960 : NULL_TREE);
3962 ix86_previous_fndecl = fndecl;
3963 if (old_tree == new_tree)
3966 else if (new_tree)
3968 cl_target_option_restore (TREE_TARGET_OPTION (new_tree));
3969 target_reinit ();
3972 else if (old_tree)
3974 struct cl_target_option *def
3975 = TREE_TARGET_OPTION (target_option_current_node);
3977 cl_target_option_restore (def);
3978 target_reinit ();
3984 /* Return true if this goes in large data/bss. */
3986 static bool
3987 ix86_in_large_data_p (tree exp)
3989 if (ix86_cmodel != CM_MEDIUM && ix86_cmodel != CM_MEDIUM_PIC)
3990 return false;
3992 /* Functions are never large data. */
3993 if (TREE_CODE (exp) == FUNCTION_DECL)
3994 return false;
3996 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
3998 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
3999 if (strcmp (section, ".ldata") == 0
4000 || strcmp (section, ".lbss") == 0)
4001 return true;
4002 return false;
4004 else
4006 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
4008 /* If this is an incomplete type with size 0, then we can't put it
4009 in data because it might be too big when completed. */
4010 if (!size || size > ix86_section_threshold)
4011 return true;
4014 return false;
4017 /* Switch to the appropriate section for output of DECL.
4018 DECL is either a `VAR_DECL' node or a constant of some sort.
4019 RELOC indicates whether forming the initial value of DECL requires
4020 link-time relocations. */
4022 static section * x86_64_elf_select_section (tree, int, unsigned HOST_WIDE_INT)
4023 ATTRIBUTE_UNUSED;
4025 static section *
4026 x86_64_elf_select_section (tree decl, int reloc,
4027 unsigned HOST_WIDE_INT align)
4029 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4030 && ix86_in_large_data_p (decl))
4032 const char *sname = NULL;
4033 unsigned int flags = SECTION_WRITE;
4034 switch (categorize_decl_for_section (decl, reloc))
4036 case SECCAT_DATA:
4037 sname = ".ldata";
4038 break;
4039 case SECCAT_DATA_REL:
4040 sname = ".ldata.rel";
4041 break;
4042 case SECCAT_DATA_REL_LOCAL:
4043 sname = ".ldata.rel.local";
4044 break;
4045 case SECCAT_DATA_REL_RO:
4046 sname = ".ldata.rel.ro";
4047 break;
4048 case SECCAT_DATA_REL_RO_LOCAL:
4049 sname = ".ldata.rel.ro.local";
4050 break;
4051 case SECCAT_BSS:
4052 sname = ".lbss";
4053 flags |= SECTION_BSS;
4054 break;
4055 case SECCAT_RODATA:
4056 case SECCAT_RODATA_MERGE_STR:
4057 case SECCAT_RODATA_MERGE_STR_INIT:
4058 case SECCAT_RODATA_MERGE_CONST:
4059 sname = ".lrodata";
4060 flags = 0;
4061 break;
4062 case SECCAT_SRODATA:
4063 case SECCAT_SDATA:
4064 case SECCAT_SBSS:
4065 gcc_unreachable ();
4066 case SECCAT_TEXT:
4067 case SECCAT_TDATA:
4068 case SECCAT_TBSS:
4069 /* We don't split these for the medium model.  Place them into
4070 default sections and hope for the best. */
4071 break;
4072 case SECCAT_EMUTLS_VAR:
4073 case SECCAT_EMUTLS_TMPL:
4074 gcc_unreachable ();
4076 if (sname)
4078 /* We might get called with string constants, but get_named_section
4079 doesn't like them as they are not DECLs. Also, we need to set
4080 flags in that case. */
4081 if (!DECL_P (decl))
4082 return get_section (sname, flags, NULL);
4083 return get_named_section (decl, sname, reloc);
4086 return default_elf_select_section (decl, reloc, align);
4089 /* Build up a unique section name, expressed as a
4090 STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
4091 RELOC indicates whether the initial value of EXP requires
4092 link-time relocations. */
4094 static void ATTRIBUTE_UNUSED
4095 x86_64_elf_unique_section (tree decl, int reloc)
4097 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4098 && ix86_in_large_data_p (decl))
4100 const char *prefix = NULL;
4101 /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
4102 bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
4104 switch (categorize_decl_for_section (decl, reloc))
4106 case SECCAT_DATA:
4107 case SECCAT_DATA_REL:
4108 case SECCAT_DATA_REL_LOCAL:
4109 case SECCAT_DATA_REL_RO:
4110 case SECCAT_DATA_REL_RO_LOCAL:
4111 prefix = one_only ? ".ld" : ".ldata";
4112 break;
4113 case SECCAT_BSS:
4114 prefix = one_only ? ".lb" : ".lbss";
4115 break;
4116 case SECCAT_RODATA:
4117 case SECCAT_RODATA_MERGE_STR:
4118 case SECCAT_RODATA_MERGE_STR_INIT:
4119 case SECCAT_RODATA_MERGE_CONST:
4120 prefix = one_only ? ".lr" : ".lrodata";
4121 break;
4122 case SECCAT_SRODATA:
4123 case SECCAT_SDATA:
4124 case SECCAT_SBSS:
4125 gcc_unreachable ();
4126 case SECCAT_TEXT:
4127 case SECCAT_TDATA:
4128 case SECCAT_TBSS:
4129 /* We don't split these for the medium model.  Place them into
4130 default sections and hope for the best. */
4131 break;
4132 case SECCAT_EMUTLS_VAR:
4133 prefix = targetm.emutls.var_section;
4134 break;
4135 case SECCAT_EMUTLS_TMPL:
4136 prefix = targetm.emutls.tmpl_section;
4137 break;
4139 if (prefix)
4141 const char *name, *linkonce;
4142 char *string;
4144 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
4145 name = targetm.strip_name_encoding (name);
4147 /* If we're using one_only, then there needs to be a .gnu.linkonce
4148 prefix to the section name. */
4149 linkonce = one_only ? ".gnu.linkonce" : "";
4151 string = ACONCAT ((linkonce, prefix, ".", name, NULL));
4153 DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
4154 return;
4157 default_unique_section (decl, reloc);
4160 #ifdef COMMON_ASM_OP
4161 /* This says how to output assembler code to declare an
4162 uninitialized external linkage data object.
4164 For medium model x86-64 we need to use the .largecomm directive for
4165 large objects. */
4166 void
4167 x86_elf_aligned_common (FILE *file,
4168 const char *name, unsigned HOST_WIDE_INT size,
4169 int align)
4171 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4172 && size > (unsigned int)ix86_section_threshold)
4173 fputs (".largecomm\t", file);
4174 else
4175 fputs (COMMON_ASM_OP, file);
4176 assemble_name (file, name);
4177 fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
4178 size, align / BITS_PER_UNIT);
4180 #endif
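/* Illustrative example (not part of the original source): for a common
   object larger than ix86_section_threshold, built with -mcmodel=medium,
   the function above emits a line of the form

     .largecomm	NAME,SIZE,ALIGN

   with SIZE in bytes and ALIGN converted from bits to bytes, instead of the
   ordinary COMMON_ASM_OP (".comm") path used for small objects.  */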
4182 /* Utility function for targets to use in implementing
4183 ASM_OUTPUT_ALIGNED_BSS. */
4185 void
4186 x86_output_aligned_bss (FILE *file, tree decl ATTRIBUTE_UNUSED,
4187 const char *name, unsigned HOST_WIDE_INT size,
4188 int align)
4190 if ((ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_MEDIUM_PIC)
4191 && size > (unsigned int)ix86_section_threshold)
4192 switch_to_section (get_named_section (decl, ".lbss", 0));
4193 else
4194 switch_to_section (bss_section);
4195 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
4196 #ifdef ASM_DECLARE_OBJECT_NAME
4197 last_assemble_variable_decl = decl;
4198 ASM_DECLARE_OBJECT_NAME (file, name, decl);
4199 #else
4200 /* The standard thing is just to output a label for the object. */
4201 ASM_OUTPUT_LABEL (file, name);
4202 #endif /* ASM_DECLARE_OBJECT_NAME */
4203 ASM_OUTPUT_SKIP (file, size ? size : 1);
4206 void
4207 optimization_options (int level, int size ATTRIBUTE_UNUSED)
4209 /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
4210 make the problem with not enough registers even worse. */
4211 #ifdef INSN_SCHEDULING
4212 if (level > 1)
4213 flag_schedule_insns = 0;
4214 #endif
4216 if (TARGET_MACHO)
4217 /* The Darwin libraries never set errno, so we might as well
4218 avoid calling them when that's the only reason we would. */
4219 flag_errno_math = 0;
4221 /* The default values of these switches depend on the TARGET_64BIT
4222 that is not known at this moment. Mark these values with 2 and
4223 let the user override these.  In case there is no command line option
4224 specifying them, we will set the defaults in override_options. */
4225 if (optimize >= 1)
4226 flag_omit_frame_pointer = 2;
4227 flag_pcc_struct_return = 2;
4228 flag_asynchronous_unwind_tables = 2;
4229 flag_vect_cost_model = 1;
4230 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
4231 SUBTARGET_OPTIMIZATION_OPTIONS;
4232 #endif
4235 /* Decide whether we can make a sibling call to a function. DECL is the
4236 declaration of the function being targeted by the call and EXP is the
4237 CALL_EXPR representing the call. */
4239 static bool
4240 ix86_function_ok_for_sibcall (tree decl, tree exp)
4242 tree type, decl_or_type;
4243 rtx a, b;
4245 /* If we are generating position-independent code, we cannot sibcall
4246 optimize any indirect call, or a direct call to a global function,
4247 as the PLT requires %ebx be live. */
4248 if (!TARGET_64BIT && flag_pic && (!decl || !targetm.binds_local_p (decl)))
4249 return false;
4251 /* If we need to align the outgoing stack, then sibcalling would
4252 unalign the stack, which may break the called function. */
4253 if (ix86_incoming_stack_boundary < PREFERRED_STACK_BOUNDARY)
4254 return false;
4256 if (decl)
4258 decl_or_type = decl;
4259 type = TREE_TYPE (decl);
4261 else
4263 /* We're looking at the CALL_EXPR, we need the type of the function. */
4264 type = CALL_EXPR_FN (exp); /* pointer expression */
4265 type = TREE_TYPE (type); /* pointer type */
4266 type = TREE_TYPE (type); /* function type */
4267 decl_or_type = type;
4270 /* Check that the return value locations are the same.  For example,
4271 if we are returning floats on the 80387 register stack, we cannot
4272 make a sibcall from a function that doesn't return a float to a
4273 function that does or, conversely, from a function that does return
4274 a float to a function that doesn't; the necessary stack adjustment
4275 would not be executed. This is also the place we notice
4276 differences in the return value ABI. Note that it is ok for one
4277 of the functions to have void return type as long as the return
4278 value of the other is passed in a register. */
4279 a = ix86_function_value (TREE_TYPE (exp), decl_or_type, false);
4280 b = ix86_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
4281 cfun->decl, false);
4282 if (STACK_REG_P (a) || STACK_REG_P (b))
4284 if (!rtx_equal_p (a, b))
4285 return false;
4287 else if (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
4289 else if (!rtx_equal_p (a, b))
4290 return false;
4292 if (TARGET_64BIT)
4294 /* The SYSV ABI has more call-clobbered registers;
4295 disallow sibcalls from MS to SYSV. */
4296 if (cfun->machine->call_abi == MS_ABI
4297 && ix86_function_type_abi (type) == SYSV_ABI)
4298 return false;
4300 else
4302 /* If this call is indirect, we'll need to be able to use a
4303 call-clobbered register for the address of the target function.
4304 Make sure that all such registers are not used for passing
4305 parameters. Note that DLLIMPORT functions are indirect. */
4306 if (!decl
4307 || (TARGET_DLLIMPORT_DECL_ATTRIBUTES && DECL_DLLIMPORT_P (decl)))
4309 if (ix86_function_regparm (type, NULL) >= 3)
4311 /* ??? Need to count the actual number of registers to be used,
4312 not the possible number of registers. Fix later. */
4313 return false;
4318 /* Otherwise okay. That also includes certain types of indirect calls. */
4319 return true;
4322 /* Handle "cdecl", "stdcall", "fastcall", "regparm" and "sseregparm"
4323 calling convention attributes;
4324 arguments as in struct attribute_spec.handler. */
4326 static tree
4327 ix86_handle_cconv_attribute (tree *node, tree name,
4328 tree args,
4329 int flags ATTRIBUTE_UNUSED,
4330 bool *no_add_attrs)
4332 if (TREE_CODE (*node) != FUNCTION_TYPE
4333 && TREE_CODE (*node) != METHOD_TYPE
4334 && TREE_CODE (*node) != FIELD_DECL
4335 && TREE_CODE (*node) != TYPE_DECL)
4337 warning (OPT_Wattributes, "%qE attribute only applies to functions",
4338 name);
4339 *no_add_attrs = true;
4340 return NULL_TREE;
4343 /* Can combine regparm with all attributes but fastcall. */
4344 if (is_attribute_p ("regparm", name))
4346 tree cst;
4348 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4350 error ("fastcall and regparm attributes are not compatible");
4353 cst = TREE_VALUE (args);
4354 if (TREE_CODE (cst) != INTEGER_CST)
4356 warning (OPT_Wattributes,
4357 "%qE attribute requires an integer constant argument",
4358 name);
4359 *no_add_attrs = true;
4361 else if (compare_tree_int (cst, REGPARM_MAX) > 0)
4363 warning (OPT_Wattributes, "argument to %qE attribute larger than %d",
4364 name, REGPARM_MAX);
4365 *no_add_attrs = true;
4368 return NULL_TREE;
4371 if (TARGET_64BIT)
4373 /* Do not warn when emulating the MS ABI. */
4374 if (TREE_CODE (*node) != FUNCTION_TYPE
4375 || ix86_function_type_abi (*node) != MS_ABI)
4376 warning (OPT_Wattributes, "%qE attribute ignored",
4377 name);
4378 *no_add_attrs = true;
4379 return NULL_TREE;
4382 /* Can combine fastcall with stdcall (redundant) and sseregparm. */
4383 if (is_attribute_p ("fastcall", name))
4385 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4387 error ("fastcall and cdecl attributes are not compatible");
4389 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4391 error ("fastcall and stdcall attributes are not compatible");
4393 if (lookup_attribute ("regparm", TYPE_ATTRIBUTES (*node)))
4395 error ("fastcall and regparm attributes are not compatible");
4399 /* Can combine stdcall with fastcall (redundant), regparm and
4400 sseregparm. */
4401 else if (is_attribute_p ("stdcall", name))
4403 if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (*node)))
4405 error ("stdcall and cdecl attributes are not compatible");
4407 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4409 error ("stdcall and fastcall attributes are not compatible");
4413 /* Can combine cdecl with regparm and sseregparm. */
4414 else if (is_attribute_p ("cdecl", name))
4416 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (*node)))
4418 error ("stdcall and cdecl attributes are not compatible");
4420 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (*node)))
4422 error ("fastcall and cdecl attributes are not compatible");
4426 /* Can combine sseregparm with all attributes. */
4428 return NULL_TREE;
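/* Illustrative example (not part of the original source): declarations the
   handler above accepts or rejects, assuming 32-bit code:

     void __attribute__((stdcall))              f (int, int);   (ok)
     void __attribute__((fastcall))             g (int, int);   (ok)
     void __attribute__((regparm(2)))           h (int, int);   (ok)
     void __attribute__((fastcall, regparm(3))) k (int);        (error: not compatible)

   On 64-bit targets these attributes are dropped, with a warning unless the
   function type already uses the MS ABI.  */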
4431 /* Return 0 if the attributes for two types are incompatible, 1 if they
4432 are compatible, and 2 if they are nearly compatible (which causes a
4433 warning to be generated). */
4435 static int
4436 ix86_comp_type_attributes (const_tree type1, const_tree type2)
4438 /* Check for mismatch of non-default calling convention. */
4439 const char *const rtdstr = TARGET_RTD ? "cdecl" : "stdcall";
4441 if (TREE_CODE (type1) != FUNCTION_TYPE
4442 && TREE_CODE (type1) != METHOD_TYPE)
4443 return 1;
4445 /* Check for mismatched fastcall/regparm types. */
4446 if ((!lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type1))
4447 != !lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type2)))
4448 || (ix86_function_regparm (type1, NULL)
4449 != ix86_function_regparm (type2, NULL)))
4450 return 0;
4452 /* Check for mismatched sseregparm types. */
4453 if (!lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type1))
4454 != !lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type2)))
4455 return 0;
4457 /* Check for mismatched return types (cdecl vs stdcall). */
4458 if (!lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type1))
4459 != !lookup_attribute (rtdstr, TYPE_ATTRIBUTES (type2)))
4460 return 0;
4462 return 1;
4465 /* Return the regparm value for a function with the indicated TYPE and DECL.
4466 DECL may be NULL when calling function indirectly
4467 or considering a libcall. */
4469 static int
4470 ix86_function_regparm (const_tree type, const_tree decl)
4472 tree attr;
4473 int regparm;
4475 static bool error_issued;
4477 if (TARGET_64BIT)
4478 return (ix86_function_type_abi (type) == SYSV_ABI
4479 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4481 regparm = ix86_regparm;
4482 attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type));
4483 if (attr)
4485 regparm
4486 = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
4488 if (decl && TREE_CODE (decl) == FUNCTION_DECL)
4490 /* We can't use regparm(3) for nested functions because
4491 these pass the static chain pointer in the %ecx register. */
4492 if (!error_issued && regparm == 3
4493 && decl_function_context (decl)
4494 && !DECL_NO_STATIC_CHAIN (decl))
4496 error ("nested functions are limited to 2 register parameters");
4497 error_issued = true;
4498 return 0;
4502 return regparm;
4505 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
4506 return 2;
4508 /* Use register calling convention for local functions when possible. */
4509 if (decl
4510 && TREE_CODE (decl) == FUNCTION_DECL
4511 && optimize
4512 && !profile_flag)
4514 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4515 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4516 if (i && i->local)
4518 int local_regparm, globals = 0, regno;
4520 /* Make sure no regparm register is taken by a
4521 fixed register variable. */
4522 for (local_regparm = 0; local_regparm < REGPARM_MAX; local_regparm++)
4523 if (fixed_regs[local_regparm])
4524 break;
4526 /* We can't use regparm(3) for nested functions as these use
4527 the static chain pointer in the third argument. */
4528 if (local_regparm == 3
4529 && decl_function_context (decl)
4530 && !DECL_NO_STATIC_CHAIN (decl))
4531 local_regparm = 2;
4533 /* Each fixed register usage increases register pressure,
4534 so fewer registers should be used for argument passing.
4535 This functionality can be overridden by an explicit
4536 regparm value. */
4537 for (regno = 0; regno <= DI_REG; regno++)
4538 if (fixed_regs[regno])
4539 globals++;
4541 local_regparm
4542 = globals < local_regparm ? local_regparm - globals : 0;
4544 if (local_regparm > regparm)
4545 regparm = local_regparm;
4549 return regparm;
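/* Illustrative example (not part of the original source): for 32-bit code,

     int __attribute__((regparm(3))) f (int a, int b, int c);

   passes a, b and c in EAX, EDX and ECX instead of on the stack, which is
   what the value computed above ends up controlling.  fastcall yields 2,
   and local functions may be promoted to a register convention
   automatically when optimizing, minus any registers claimed by fixed
   register variables.  */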
4552 /* Return 1 or 2, if we can pass up to SSE_REGPARM_MAX SFmode (1) and
4553 DFmode (2) arguments in SSE registers for a function with the
4554 indicated TYPE and DECL. DECL may be NULL when calling function
4555 indirectly or considering a libcall. Otherwise return 0. */
4557 static int
4558 ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
4560 gcc_assert (!TARGET_64BIT);
4562 /* Use SSE registers to pass SFmode and DFmode arguments if requested
4563 by the sseregparm attribute. */
4564 if (TARGET_SSEREGPARM
4565 || (type && lookup_attribute ("sseregparm", TYPE_ATTRIBUTES (type))))
4567 if (!TARGET_SSE)
4569 if (warn)
4571 if (decl)
4572 error ("Calling %qD with attribute sseregparm without "
4573 "SSE/SSE2 enabled", decl);
4574 else
4575 error ("Calling %qT with attribute sseregparm without "
4576 "SSE/SSE2 enabled", type);
4578 return 0;
4581 return 2;
4584 /* For local functions, pass up to SSE_REGPARM_MAX SFmode
4585 (and DFmode for SSE2) arguments in SSE registers. */
4586 if (decl && TARGET_SSE_MATH && optimize && !profile_flag)
4588 /* FIXME: remove this CONST_CAST when cgraph.[ch] is constified. */
4589 struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
4590 if (i && i->local)
4591 return TARGET_SSE2 ? 2 : 1;
4594 return 0;
4597 /* Return true if EAX is live at the start of the function. Used by
4598 ix86_expand_prologue to determine if we need special help before
4599 calling allocate_stack_worker. */
4601 static bool
4602 ix86_eax_live_at_start_p (void)
4604 /* Cheat. Don't bother working forward from ix86_function_regparm
4605 to the function type to whether an actual argument is located in
4606 eax. Instead just look at cfg info, which is still close enough
4607 to correct at this point. This gives false positives for broken
4608 functions that might use uninitialized data that happens to be
4609 allocated in eax, but who cares? */
4610 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR), 0);
4613 /* Value is the number of bytes of arguments automatically
4614 popped when returning from a subroutine call.
4615 FUNDECL is the declaration node of the function (as a tree),
4616 FUNTYPE is the data type of the function (as a tree),
4617 or for a library call it is an identifier node for the subroutine name.
4618 SIZE is the number of bytes of arguments passed on the stack.
4620 On the 80386, the RTD insn may be used to pop them if the number
4621 of args is fixed, but if the number is variable then the caller
4622 must pop them all. RTD can't be used for library calls now
4623 because the library is compiled with the Unix compiler.
4624 Use of RTD is a selectable option, since it is incompatible with
4625 standard Unix calling sequences. If the option is not selected,
4626 the caller must always pop the args.
4628 The attribute stdcall is equivalent to RTD on a per module basis. */
4631 ix86_return_pops_args (tree fundecl, tree funtype, int size)
4633 int rtd;
4635 /* None of the 64-bit ABIs pop arguments. */
4636 if (TARGET_64BIT)
4637 return 0;
4639 rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
4641 /* Cdecl functions override -mrtd, and never pop the stack. */
4642 if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
4644 /* Stdcall and fastcall functions will pop the stack if not
4645 variable args. */
4646 if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype))
4647 || lookup_attribute ("fastcall", TYPE_ATTRIBUTES (funtype)))
4648 rtd = 1;
4650 if (rtd && ! stdarg_p (funtype))
4651 return size;
4654 /* Lose any fake structure return argument if it is passed on the stack. */
4655 if (aggregate_value_p (TREE_TYPE (funtype), fundecl)
4656 && !KEEP_AGGREGATE_RETURN_POINTER)
4658 int nregs = ix86_function_regparm (funtype, fundecl);
4659 if (nregs == 0)
4660 return GET_MODE_SIZE (Pmode);
4663 return 0;
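/* Illustrative example (not part of the original source): for

     int __attribute__((stdcall)) f (int a, int b);

   the function above returns 8, so the callee's return is emitted as
   "ret $8" and the caller does not pop the arguments; a plain cdecl
   function returns 0 here and leaves the cleanup to the caller.  */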
4666 /* Argument support functions. */
4668 /* Return true when register may be used to pass function parameters. */
4669 bool
4670 ix86_function_arg_regno_p (int regno)
4672 int i;
4673 const int *parm_regs;
4675 if (!TARGET_64BIT)
4677 if (TARGET_MACHO)
4678 return (regno < REGPARM_MAX
4679 || (TARGET_SSE && SSE_REGNO_P (regno) && !fixed_regs[regno]));
4680 else
4681 return (regno < REGPARM_MAX
4682 || (TARGET_MMX && MMX_REGNO_P (regno)
4683 && (regno < FIRST_MMX_REG + MMX_REGPARM_MAX))
4684 || (TARGET_SSE && SSE_REGNO_P (regno)
4685 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX)));
4688 if (TARGET_MACHO)
4690 if (SSE_REGNO_P (regno) && TARGET_SSE)
4691 return true;
4693 else
4695 if (TARGET_SSE && SSE_REGNO_P (regno)
4696 && (regno < FIRST_SSE_REG + SSE_REGPARM_MAX))
4697 return true;
4700 /* TODO: The function should depend on current function ABI but
4701 builtins.c would need updating then. Therefore we use the
4702 default ABI. */
4704 /* RAX is used as hidden argument to va_arg functions. */
4705 if (ix86_abi == SYSV_ABI && regno == AX_REG)
4706 return true;
4708 if (ix86_abi == MS_ABI)
4709 parm_regs = x86_64_ms_abi_int_parameter_registers;
4710 else
4711 parm_regs = x86_64_int_parameter_registers;
4712 for (i = 0; i < (ix86_abi == MS_ABI
4713 ? X86_64_MS_REGPARM_MAX : X86_64_REGPARM_MAX); i++)
4714 if (regno == parm_regs[i])
4715 return true;
4716 return false;
4719 /* Return true if we do not know how to pass TYPE solely in registers. */
4721 static bool
4722 ix86_must_pass_in_stack (enum machine_mode mode, const_tree type)
4724 if (must_pass_in_stack_var_size_or_pad (mode, type))
4725 return true;
4727 /* For 32-bit, we want TImode aggregates to go on the stack. But watch out!
4728 The layout_type routine is crafty and tries to trick us into passing
4729 currently unsupported vector types on the stack by using TImode. */
4730 return (!TARGET_64BIT && mode == TImode
4731 && type && TREE_CODE (type) != VECTOR_TYPE);
4734 /* Return the size, in bytes, of the area reserved for arguments passed
4735 in registers for the function represented by FNDECL, depending on the
4736 ABI used by that function. */
4738 ix86_reg_parm_stack_space (const_tree fndecl)
4740 enum calling_abi call_abi = SYSV_ABI;
4741 if (fndecl != NULL_TREE && TREE_CODE (fndecl) == FUNCTION_DECL)
4742 call_abi = ix86_function_abi (fndecl);
4743 else
4744 call_abi = ix86_function_type_abi (fndecl);
4745 if (call_abi == MS_ABI)
4746 return 32;
4747 return 0;
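/* Illustrative note (not part of the original source): the 32 bytes returned
   for MS_ABI are the Win64 "shadow" (home) area: the caller always reserves
   four 8-byte slots just above the return address into which the callee may
   spill its first four register arguments; the SysV x86-64 and 32-bit ABIs
   reserve nothing, hence 0.  */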
4750 /* Return SYSV_ABI or MS_ABI, depending on FNTYPE, specifying the
4751 call ABI used. */
4752 enum calling_abi
4753 ix86_function_type_abi (const_tree fntype)
4755 if (TARGET_64BIT && fntype != NULL)
4757 enum calling_abi abi = ix86_abi;
4758 if (abi == SYSV_ABI)
4760 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (fntype)))
4761 abi = MS_ABI;
4763 else if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (fntype)))
4764 abi = SYSV_ABI;
4765 return abi;
4767 return ix86_abi;
4770 static enum calling_abi
4771 ix86_function_abi (const_tree fndecl)
4773 if (! fndecl)
4774 return ix86_abi;
4775 return ix86_function_type_abi (TREE_TYPE (fndecl));
4778 /* Return SYSV_ABI or MS_ABI, depending on cfun, specifying the
4779 call ABI used. */
4780 enum calling_abi
4781 ix86_cfun_abi (void)
4783 if (! cfun || ! TARGET_64BIT)
4784 return ix86_abi;
4785 return cfun->machine->call_abi;
4788 /* regclass.c */
4789 extern void init_regs (void);
4791 /* Implementation of the call ABI switching target hook.  For FNDECL,
4792 the appropriate call-used register sets are selected.  See also
4793 CONDITIONAL_REGISTER_USAGE for more details. */
4794 void
4795 ix86_call_abi_override (const_tree fndecl)
4797 if (fndecl == NULL_TREE)
4798 cfun->machine->call_abi = ix86_abi;
4799 else
4800 cfun->machine->call_abi = ix86_function_type_abi (TREE_TYPE (fndecl));
4803 /* The MS and SYSV ABIs have different sets of call-used registers.  Avoid expensive
4804 re-initialization of init_regs each time we switch function context since
4805 this is needed only during RTL expansion. */
4806 static void
4807 ix86_maybe_switch_abi (void)
4809 if (TARGET_64BIT &&
4810 call_used_regs[SI_REG] == (cfun->machine->call_abi == MS_ABI))
4811 reinit_regs ();
4814 /* Initialize a variable CUM of type CUMULATIVE_ARGS
4815 for a call to a function whose data type is FNTYPE.
4816 For a library call, FNTYPE is 0. */
4818 void
4819 init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
4820 tree fntype, /* tree ptr for function decl */
4821 rtx libname, /* SYMBOL_REF of library name or 0 */
4822 tree fndecl)
4824 struct cgraph_local_info *i = fndecl ? cgraph_local_info (fndecl) : NULL;
4825 memset (cum, 0, sizeof (*cum));
4827 if (fndecl)
4828 cum->call_abi = ix86_function_abi (fndecl);
4829 else
4830 cum->call_abi = ix86_function_type_abi (fntype);
4831 /* Set up the number of registers to use for passing arguments. */
4833 if (cum->call_abi == MS_ABI && !ACCUMULATE_OUTGOING_ARGS)
4834 sorry ("ms_abi attribute requires -maccumulate-outgoing-args "
4835 "or subtarget optimization implying it");
4836 cum->nregs = ix86_regparm;
4837 if (TARGET_64BIT)
4839 if (cum->call_abi != ix86_abi)
4840 cum->nregs = (ix86_abi != SYSV_ABI
4841 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
4843 if (TARGET_SSE)
4845 cum->sse_nregs = SSE_REGPARM_MAX;
4846 if (TARGET_64BIT)
4848 if (cum->call_abi != ix86_abi)
4849 cum->sse_nregs = (ix86_abi != SYSV_ABI
4850 ? X86_64_SSE_REGPARM_MAX
4851 : X86_64_MS_SSE_REGPARM_MAX);
4854 if (TARGET_MMX)
4855 cum->mmx_nregs = MMX_REGPARM_MAX;
4856 cum->warn_avx = true;
4857 cum->warn_sse = true;
4858 cum->warn_mmx = true;
4860 /* Because the type might mismatch between caller and callee, we need to
4861 use the actual type of the function for local calls.
4862 FIXME: cgraph_analyze can be told to actually record if function uses
4863 va_start so for local functions maybe_vaarg can be made aggressive
4864 helping K&R code.
4865 FIXME: once the type system is fixed, we won't need this code anymore. */
4866 if (i && i->local)
4867 fntype = TREE_TYPE (fndecl);
4868 cum->maybe_vaarg = (fntype
4869 ? (!prototype_p (fntype) || stdarg_p (fntype))
4870 : !libname);
4872 if (!TARGET_64BIT)
4874 /* If there are variable arguments, then we won't pass anything
4875 in registers in 32-bit mode. */
4876 if (stdarg_p (fntype))
4878 cum->nregs = 0;
4879 cum->sse_nregs = 0;
4880 cum->mmx_nregs = 0;
4881 cum->warn_avx = 0;
4882 cum->warn_sse = 0;
4883 cum->warn_mmx = 0;
4884 return;
4887 /* Use ecx and edx registers if function has fastcall attribute,
4888 else look for regparm information. */
4889 if (fntype)
4891 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (fntype)))
4893 cum->nregs = 2;
4894 cum->fastcall = 1;
4896 else
4897 cum->nregs = ix86_function_regparm (fntype, fndecl);
4900 /* Set up the number of SSE registers used for passing SFmode
4901 and DFmode arguments. Warn for mismatching ABI. */
4902 cum->float_in_sse = ix86_function_sseregparm (fntype, fndecl, true);
4906 /* Return the "natural" mode for TYPE. In most cases, this is just TYPE_MODE.
4907 But in the case of vector types, it is some vector mode.
4909 When we have only some of our vector isa extensions enabled, then there
4910 are some modes for which vector_mode_supported_p is false. For these
4911 modes, the generic vector support in gcc will choose some non-vector mode
4912 in order to implement the type. By computing the natural mode, we'll
4913 select the proper ABI location for the operand and not depend on whatever
4914 the middle-end decides to do with these vector types.
4916 The middle-end can't deal with vector types > 16 bytes.  In this
4917 case, we return the original mode and warn ABI change if CUM isn't
4918 NULL. */
4920 static enum machine_mode
4921 type_natural_mode (const_tree type, CUMULATIVE_ARGS *cum)
4923 enum machine_mode mode = TYPE_MODE (type);
4925 if (TREE_CODE (type) == VECTOR_TYPE && !VECTOR_MODE_P (mode))
4927 HOST_WIDE_INT size = int_size_in_bytes (type);
4928 if ((size == 8 || size == 16 || size == 32)
4929 /* ??? Generic code allows us to create width 1 vectors. Ignore. */
4930 && TYPE_VECTOR_SUBPARTS (type) > 1)
4932 enum machine_mode innermode = TYPE_MODE (TREE_TYPE (type));
4934 if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4935 mode = MIN_MODE_VECTOR_FLOAT;
4936 else
4937 mode = MIN_MODE_VECTOR_INT;
4939 /* Get the mode which has this inner mode and number of units. */
4940 for (; mode != VOIDmode; mode = GET_MODE_WIDER_MODE (mode))
4941 if (GET_MODE_NUNITS (mode) == TYPE_VECTOR_SUBPARTS (type)
4942 && GET_MODE_INNER (mode) == innermode)
4944 if (size == 32 && !TARGET_AVX)
4946 static bool warnedavx;
4948 if (cum
4949 && !warnedavx
4950 && cum->warn_avx)
4952 warnedavx = true;
4953 warning (0, "AVX vector argument without AVX "
4954 "enabled changes the ABI");
4956 return TYPE_MODE (type);
4958 else
4959 return mode;
4962 gcc_unreachable ();
4966 return mode;
4969 /* We want to pass a value in REGNO whose "natural" mode is MODE. However,
4970 this may not agree with the mode that the type system has chosen for the
4971 register, which is ORIG_MODE. If ORIG_MODE is not BLKmode, then we can
4972 go ahead and use it. Otherwise we have to build a PARALLEL instead. */
4974 static rtx
4975 gen_reg_or_parallel (enum machine_mode mode, enum machine_mode orig_mode,
4976 unsigned int regno)
4978 rtx tmp;
4980 if (orig_mode != BLKmode)
4981 tmp = gen_rtx_REG (orig_mode, regno);
4982 else
4984 tmp = gen_rtx_REG (mode, regno);
4985 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, const0_rtx);
4986 tmp = gen_rtx_PARALLEL (orig_mode, gen_rtvec (1, tmp));
4989 return tmp;
4992 /* x86-64 register passing implementation.  See the x86-64 ABI for details.
4993 The goal of this code is to classify each 8-byte chunk of the incoming
4994 argument by register class and assign registers accordingly. */
4996 /* Return the union class of CLASS1 and CLASS2.
4997 See the x86-64 PS ABI for details. */
4999 static enum x86_64_reg_class
5000 merge_classes (enum x86_64_reg_class class1, enum x86_64_reg_class class2)
5002 /* Rule #1: If both classes are equal, this is the resulting class. */
5003 if (class1 == class2)
5004 return class1;
5006 /* Rule #2: If one of the classes is NO_CLASS, the resulting class is
5007 the other class. */
5008 if (class1 == X86_64_NO_CLASS)
5009 return class2;
5010 if (class2 == X86_64_NO_CLASS)
5011 return class1;
5013 /* Rule #3: If one of the classes is MEMORY, the result is MEMORY. */
5014 if (class1 == X86_64_MEMORY_CLASS || class2 == X86_64_MEMORY_CLASS)
5015 return X86_64_MEMORY_CLASS;
5017 /* Rule #4: If one of the classes is INTEGER, the result is INTEGER. */
5018 if ((class1 == X86_64_INTEGERSI_CLASS && class2 == X86_64_SSESF_CLASS)
5019 || (class2 == X86_64_INTEGERSI_CLASS && class1 == X86_64_SSESF_CLASS))
5020 return X86_64_INTEGERSI_CLASS;
5021 if (class1 == X86_64_INTEGER_CLASS || class1 == X86_64_INTEGERSI_CLASS
5022 || class2 == X86_64_INTEGER_CLASS || class2 == X86_64_INTEGERSI_CLASS)
5023 return X86_64_INTEGER_CLASS;
5025 /* Rule #5: If one of the classes is X87, X87UP, or COMPLEX_X87 class,
5026 MEMORY is used. */
5027 if (class1 == X86_64_X87_CLASS
5028 || class1 == X86_64_X87UP_CLASS
5029 || class1 == X86_64_COMPLEX_X87_CLASS
5030 || class2 == X86_64_X87_CLASS
5031 || class2 == X86_64_X87UP_CLASS
5032 || class2 == X86_64_COMPLEX_X87_CLASS)
5033 return X86_64_MEMORY_CLASS;
5035 /* Rule #6: Otherwise class SSE is used. */
5036 return X86_64_SSE_CLASS;
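/* Illustrative example (not part of the original source): classifying

     struct s { double d; long l; };   (16 bytes, SysV x86-64 ABI)

   splits it into two eightbytes; the first gets an SSE class from the
   double and the second INTEGER from the long, so the struct is passed in
   one XMM register plus one general-purpose register.  The merge rules
   above come into play when fields share an eightbyte, e.g. merging
   INTEGER with SSE yields INTEGER (rule #4).  */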
5039 /* Classify the argument of type TYPE and mode MODE.
5040 CLASSES will be filled by the register class used to pass each word
5041 of the operand. The number of words is returned. In case the parameter
5042 should be passed in memory, 0 is returned. As a special case for zero
5043 sized containers, classes[0] will be NO_CLASS and 1 is returned.
5045 BIT_OFFSET is used internally for handling records and specifies the
5046 offset in bits modulo 256 to avoid overflow cases.
5048 See the x86-64 PS ABI for details.
5051 static int
5052 classify_argument (enum machine_mode mode, const_tree type,
5053 enum x86_64_reg_class classes[MAX_CLASSES], int bit_offset)
5055 HOST_WIDE_INT bytes =
5056 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5057 int words = (bytes + (bit_offset % 64) / 8 + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5059 /* Variable sized entities are always passed/returned in memory. */
5060 if (bytes < 0)
5061 return 0;
5063 if (mode != VOIDmode
5064 && targetm.calls.must_pass_in_stack (mode, type))
5065 return 0;
5067 if (type && AGGREGATE_TYPE_P (type))
5069 int i;
5070 tree field;
5071 enum x86_64_reg_class subclasses[MAX_CLASSES];
5073 /* On x86-64 we pass structures larger than 32 bytes on the stack. */
5074 if (bytes > 32)
5075 return 0;
5077 for (i = 0; i < words; i++)
5078 classes[i] = X86_64_NO_CLASS;
5080 /* Zero sized arrays or structures are NO_CLASS. We return 0 to
5081 signal the memory class, so handle this as a special case. */
5082 if (!words)
5084 classes[0] = X86_64_NO_CLASS;
5085 return 1;
5088 /* Classify each field of record and merge classes. */
5089 switch (TREE_CODE (type))
5091 case RECORD_TYPE:
5092 /* And now merge the fields of structure. */
5093 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5095 if (TREE_CODE (field) == FIELD_DECL)
5097 int num;
5099 if (TREE_TYPE (field) == error_mark_node)
5100 continue;
5102 /* Bitfields are always classified as integer. Handle them
5103 early, since later code would consider them to be
5104 misaligned integers. */
5105 if (DECL_BIT_FIELD (field))
5107 for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5108 i < ((int_bit_position (field) + (bit_offset % 64))
5109 + tree_low_cst (DECL_SIZE (field), 0)
5110 + 63) / 8 / 8; i++)
5111 classes[i] =
5112 merge_classes (X86_64_INTEGER_CLASS,
5113 classes[i]);
5115 else
5117 int pos;
5119 type = TREE_TYPE (field);
5121 /* Flexible array member is ignored. */
5122 if (TYPE_MODE (type) == BLKmode
5123 && TREE_CODE (type) == ARRAY_TYPE
5124 && TYPE_SIZE (type) == NULL_TREE
5125 && TYPE_DOMAIN (type) != NULL_TREE
5126 && (TYPE_MAX_VALUE (TYPE_DOMAIN (type))
5127 == NULL_TREE))
5129 static bool warned;
5131 if (!warned && warn_psabi)
5133 warned = true;
5134 inform (input_location,
5135 "The ABI of passing struct with"
5136 " a flexible array member has"
5137 " changed in GCC 4.4");
5139 continue;
5141 num = classify_argument (TYPE_MODE (type), type,
5142 subclasses,
5143 (int_bit_position (field)
5144 + bit_offset) % 256);
5145 if (!num)
5146 return 0;
5147 pos = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8;
5148 for (i = 0; i < num && (i + pos) < words; i++)
5149 classes[i + pos] =
5150 merge_classes (subclasses[i], classes[i + pos]);
5154 break;
5156 case ARRAY_TYPE:
5157 /* Arrays are handled as small records. */
5159 int num;
5160 num = classify_argument (TYPE_MODE (TREE_TYPE (type)),
5161 TREE_TYPE (type), subclasses, bit_offset);
5162 if (!num)
5163 return 0;
5165 /* The partial classes are now full classes. */
5166 if (subclasses[0] == X86_64_SSESF_CLASS && bytes != 4)
5167 subclasses[0] = X86_64_SSE_CLASS;
5168 if (subclasses[0] == X86_64_INTEGERSI_CLASS
5169 && !((bit_offset % 64) == 0 && bytes == 4))
5170 subclasses[0] = X86_64_INTEGER_CLASS;
5172 for (i = 0; i < words; i++)
5173 classes[i] = subclasses[i % num];
5175 break;
5177 case UNION_TYPE:
5178 case QUAL_UNION_TYPE:
5179 /* Unions are similar to RECORD_TYPE but offset is always 0.
5181 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5183 if (TREE_CODE (field) == FIELD_DECL)
5185 int num;
5187 if (TREE_TYPE (field) == error_mark_node)
5188 continue;
5190 num = classify_argument (TYPE_MODE (TREE_TYPE (field)),
5191 TREE_TYPE (field), subclasses,
5192 bit_offset);
5193 if (!num)
5194 return 0;
5195 for (i = 0; i < num; i++)
5196 classes[i] = merge_classes (subclasses[i], classes[i]);
5199 break;
5201 default:
5202 gcc_unreachable ();
5205 if (words > 2)
5207 /* When the size exceeds 16 bytes, everything is passed in
5208 memory unless the first class is X86_64_SSE_CLASS and all
5209 remaining classes are X86_64_SSEUP_CLASS. */
5211 if (classes[0] != X86_64_SSE_CLASS)
5212 return 0;
5214 for (i = 1; i < words; i++)
5215 if (classes[i] != X86_64_SSEUP_CLASS)
5216 return 0;
5219 /* Final merger cleanup. */
5220 for (i = 0; i < words; i++)
5222 /* If one class is MEMORY, everything should be passed in
5223 memory. */
5224 if (classes[i] == X86_64_MEMORY_CLASS)
5225 return 0;
5227 /* X86_64_SSEUP_CLASS should always be preceded by
5228 X86_64_SSE_CLASS or X86_64_SSEUP_CLASS. */
5229 if (classes[i] == X86_64_SSEUP_CLASS
5230 && classes[i - 1] != X86_64_SSE_CLASS
5231 && classes[i - 1] != X86_64_SSEUP_CLASS)
5233 /* The first one should never be X86_64_SSEUP_CLASS. */
5234 gcc_assert (i != 0);
5235 classes[i] = X86_64_SSE_CLASS;
5238 /* If X86_64_X87UP_CLASS isn't preceded by X86_64_X87_CLASS,
5239 everything should be passed in memory. */
5240 if (classes[i] == X86_64_X87UP_CLASS
5241 && (classes[i - 1] != X86_64_X87_CLASS))
5243 static bool warned;
5245 /* The first one should never be X86_64_X87UP_CLASS. */
5246 gcc_assert (i != 0);
5247 if (!warned && warn_psabi)
5249 warned = true;
5250 inform (input_location,
5251 "The ABI of passing union with long double"
5252 " has changed in GCC 4.4");
5254 return 0;
5257 return words;
5260 /* Compute the alignment needed. We align all types to their natural
5261 boundaries, with the exception of XFmode, which is aligned to 64 bits. */
5262 if (mode != VOIDmode && mode != BLKmode)
5264 int mode_alignment = GET_MODE_BITSIZE (mode);
5266 if (mode == XFmode)
5267 mode_alignment = 128;
5268 else if (mode == XCmode)
5269 mode_alignment = 256;
5270 if (COMPLEX_MODE_P (mode))
5271 mode_alignment /= 2;
5272 /* Misaligned fields are always returned in memory. */
5273 if (bit_offset % mode_alignment)
5274 return 0;
5277 /* For V1xx modes, just use the base mode. */
5278 if (VECTOR_MODE_P (mode) && mode != V1DImode
5279 && GET_MODE_SIZE (GET_MODE_INNER (mode)) == bytes)
5280 mode = GET_MODE_INNER (mode);
5282 /* Classification of atomic types. */
5283 switch (mode)
5285 case SDmode:
5286 case DDmode:
5287 classes[0] = X86_64_SSE_CLASS;
5288 return 1;
5289 case TDmode:
5290 classes[0] = X86_64_SSE_CLASS;
5291 classes[1] = X86_64_SSEUP_CLASS;
5292 return 2;
5293 case DImode:
5294 case SImode:
5295 case HImode:
5296 case QImode:
5297 case CSImode:
5298 case CHImode:
5299 case CQImode:
5301 int size = (bit_offset % 64)+ (int) GET_MODE_BITSIZE (mode);
5303 if (size <= 32)
5305 classes[0] = X86_64_INTEGERSI_CLASS;
5306 return 1;
5308 else if (size <= 64)
5310 classes[0] = X86_64_INTEGER_CLASS;
5311 return 1;
5313 else if (size <= 64+32)
5315 classes[0] = X86_64_INTEGER_CLASS;
5316 classes[1] = X86_64_INTEGERSI_CLASS;
5317 return 2;
5319 else if (size <= 64+64)
5321 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5322 return 2;
5324 else
5325 gcc_unreachable ();
5327 case CDImode:
5328 case TImode:
5329 classes[0] = classes[1] = X86_64_INTEGER_CLASS;
5330 return 2;
5331 case COImode:
5332 case OImode:
5333 /* OImode shouldn't be used directly. */
5334 gcc_unreachable ();
5335 case CTImode:
5336 return 0;
5337 case SFmode:
5338 if (!(bit_offset % 64))
5339 classes[0] = X86_64_SSESF_CLASS;
5340 else
5341 classes[0] = X86_64_SSE_CLASS;
5342 return 1;
5343 case DFmode:
5344 classes[0] = X86_64_SSEDF_CLASS;
5345 return 1;
5346 case XFmode:
5347 classes[0] = X86_64_X87_CLASS;
5348 classes[1] = X86_64_X87UP_CLASS;
5349 return 2;
5350 case TFmode:
5351 classes[0] = X86_64_SSE_CLASS;
5352 classes[1] = X86_64_SSEUP_CLASS;
5353 return 2;
5354 case SCmode:
5355 classes[0] = X86_64_SSE_CLASS;
5356 if (!(bit_offset % 64))
5357 return 1;
5358 else
5360 static bool warned;
5362 if (!warned && warn_psabi)
5364 warned = true;
5365 inform (input_location,
5366 "The ABI of passing structure with complex float"
5367 " member has changed in GCC 4.4");
5369 classes[1] = X86_64_SSESF_CLASS;
5370 return 2;
5372 case DCmode:
5373 classes[0] = X86_64_SSEDF_CLASS;
5374 classes[1] = X86_64_SSEDF_CLASS;
5375 return 2;
5376 case XCmode:
5377 classes[0] = X86_64_COMPLEX_X87_CLASS;
5378 return 1;
5379 case TCmode:
5380 /* This mode is larger than 16 bytes. */
5381 return 0;
5382 case V8SFmode:
5383 case V8SImode:
5384 case V32QImode:
5385 case V16HImode:
5386 case V4DFmode:
5387 case V4DImode:
5388 classes[0] = X86_64_SSE_CLASS;
5389 classes[1] = X86_64_SSEUP_CLASS;
5390 classes[2] = X86_64_SSEUP_CLASS;
5391 classes[3] = X86_64_SSEUP_CLASS;
5392 return 4;
5393 case V4SFmode:
5394 case V4SImode:
5395 case V16QImode:
5396 case V8HImode:
5397 case V2DFmode:
5398 case V2DImode:
5399 classes[0] = X86_64_SSE_CLASS;
5400 classes[1] = X86_64_SSEUP_CLASS;
5401 return 2;
5402 case V1DImode:
5403 case V2SFmode:
5404 case V2SImode:
5405 case V4HImode:
5406 case V8QImode:
5407 classes[0] = X86_64_SSE_CLASS;
5408 return 1;
5409 case BLKmode:
5410 case VOIDmode:
5411 return 0;
5412 default:
5413 gcc_assert (VECTOR_MODE_P (mode));
5415 if (bytes > 16)
5416 return 0;
5418 gcc_assert (GET_MODE_CLASS (GET_MODE_INNER (mode)) == MODE_INT);
5420 if (bit_offset + GET_MODE_BITSIZE (mode) <= 32)
5421 classes[0] = X86_64_INTEGERSI_CLASS;
5422 else
5423 classes[0] = X86_64_INTEGER_CLASS;
5424 classes[1] = X86_64_INTEGER_CLASS;
5425 return 1 + (bytes > 8);
5429 /* Examine the argument and set the number of registers required in each
5430 class. Return 0 iff the parameter should be passed in memory. */
5431 static int
5432 examine_argument (enum machine_mode mode, const_tree type, int in_return,
5433 int *int_nregs, int *sse_nregs)
5435 enum x86_64_reg_class regclass[MAX_CLASSES];
5436 int n = classify_argument (mode, type, regclass, 0);
5438 *int_nregs = 0;
5439 *sse_nregs = 0;
5440 if (!n)
5441 return 0;
5442 for (n--; n >= 0; n--)
5443 switch (regclass[n])
5445 case X86_64_INTEGER_CLASS:
5446 case X86_64_INTEGERSI_CLASS:
5447 (*int_nregs)++;
5448 break;
5449 case X86_64_SSE_CLASS:
5450 case X86_64_SSESF_CLASS:
5451 case X86_64_SSEDF_CLASS:
5452 (*sse_nregs)++;
5453 break;
5454 case X86_64_NO_CLASS:
5455 case X86_64_SSEUP_CLASS:
5456 break;
5457 case X86_64_X87_CLASS:
5458 case X86_64_X87UP_CLASS:
5459 if (!in_return)
5460 return 0;
5461 break;
5462 case X86_64_COMPLEX_X87_CLASS:
5463 return in_return ? 2 : 0;
5464 case X86_64_MEMORY_CLASS:
5465 gcc_unreachable ();
5467 return 1;
5470 /* Construct container for the argument used by GCC interface. See
5471 FUNCTION_ARG for the detailed description. */
5473 static rtx
5474 construct_container (enum machine_mode mode, enum machine_mode orig_mode,
5475 const_tree type, int in_return, int nintregs, int nsseregs,
5476 const int *intreg, int sse_regno)
5478 /* The following variables hold the static issued_error state. */
5479 static bool issued_sse_arg_error;
5480 static bool issued_sse_ret_error;
5481 static bool issued_x87_ret_error;
5483 enum machine_mode tmpmode;
5484 int bytes =
5485 (mode == BLKmode) ? int_size_in_bytes (type) : (int) GET_MODE_SIZE (mode);
5486 enum x86_64_reg_class regclass[MAX_CLASSES];
5487 int n;
5488 int i;
5489 int nexps = 0;
5490 int needed_sseregs, needed_intregs;
5491 rtx exp[MAX_CLASSES];
5492 rtx ret;
5494 n = classify_argument (mode, type, regclass, 0);
5495 if (!n)
5496 return NULL;
5497 if (!examine_argument (mode, type, in_return, &needed_intregs,
5498 &needed_sseregs))
5499 return NULL;
5500 if (needed_intregs > nintregs || needed_sseregs > nsseregs)
5501 return NULL;
5503 /* We allowed the user to turn off SSE for kernel mode. Don't crash if
5504 some less clueful developer tries to use floating-point anyway. */
5505 if (needed_sseregs && !TARGET_SSE)
5507 if (in_return)
5509 if (!issued_sse_ret_error)
5511 error ("SSE register return with SSE disabled");
5512 issued_sse_ret_error = true;
5515 else if (!issued_sse_arg_error)
5517 error ("SSE register argument with SSE disabled");
5518 issued_sse_arg_error = true;
5520 return NULL;
5523 /* Likewise, error if the ABI requires us to return values in the
5524 x87 registers and the user specified -mno-80387. */
5525 if (!TARGET_80387 && in_return)
5526 for (i = 0; i < n; i++)
5527 if (regclass[i] == X86_64_X87_CLASS
5528 || regclass[i] == X86_64_X87UP_CLASS
5529 || regclass[i] == X86_64_COMPLEX_X87_CLASS)
5531 if (!issued_x87_ret_error)
5533 error ("x87 register return with x87 disabled");
5534 issued_x87_ret_error = true;
5536 return NULL;
5539 /* First construct simple cases. Avoid SCmode, since we want to use
5540 single register to pass this type. */
5541 if (n == 1 && mode != SCmode)
5542 switch (regclass[0])
5544 case X86_64_INTEGER_CLASS:
5545 case X86_64_INTEGERSI_CLASS:
5546 return gen_rtx_REG (mode, intreg[0]);
5547 case X86_64_SSE_CLASS:
5548 case X86_64_SSESF_CLASS:
5549 case X86_64_SSEDF_CLASS:
5550 if (mode != BLKmode)
5551 return gen_reg_or_parallel (mode, orig_mode,
5552 SSE_REGNO (sse_regno));
5553 break;
5554 case X86_64_X87_CLASS:
5555 case X86_64_COMPLEX_X87_CLASS:
5556 return gen_rtx_REG (mode, FIRST_STACK_REG);
5557 case X86_64_NO_CLASS:
5558 /* Zero sized array, struct or class. */
5559 return NULL;
5560 default:
5561 gcc_unreachable ();
5563 if (n == 2 && regclass[0] == X86_64_SSE_CLASS
5564 && regclass[1] == X86_64_SSEUP_CLASS && mode != BLKmode)
5565 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5566 if (n == 4
5567 && regclass[0] == X86_64_SSE_CLASS
5568 && regclass[1] == X86_64_SSEUP_CLASS
5569 && regclass[2] == X86_64_SSEUP_CLASS
5570 && regclass[3] == X86_64_SSEUP_CLASS
5571 && mode != BLKmode)
5572 return gen_rtx_REG (mode, SSE_REGNO (sse_regno));
5574 if (n == 2
5575 && regclass[0] == X86_64_X87_CLASS && regclass[1] == X86_64_X87UP_CLASS)
5576 return gen_rtx_REG (XFmode, FIRST_STACK_REG);
5577 if (n == 2 && regclass[0] == X86_64_INTEGER_CLASS
5578 && regclass[1] == X86_64_INTEGER_CLASS
5579 && (mode == CDImode || mode == TImode || mode == TFmode)
5580 && intreg[0] + 1 == intreg[1])
5581 return gen_rtx_REG (mode, intreg[0]);
5583 /* Otherwise figure out the entries of the PARALLEL. */
5584 for (i = 0; i < n; i++)
5586 int pos;
5588 switch (regclass[i])
5590 case X86_64_NO_CLASS:
5591 break;
5592 case X86_64_INTEGER_CLASS:
5593 case X86_64_INTEGERSI_CLASS:
5594 /* Merge TImodes on aligned occasions here too. */
5595 if (i * 8 + 8 > bytes)
5596 tmpmode = mode_for_size ((bytes - i * 8) * BITS_PER_UNIT, MODE_INT, 0);
5597 else if (regclass[i] == X86_64_INTEGERSI_CLASS)
5598 tmpmode = SImode;
5599 else
5600 tmpmode = DImode;
5601 /* We've requested 24 bytes we don't have a mode for. Use DImode. */
5602 if (tmpmode == BLKmode)
5603 tmpmode = DImode;
5604 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5605 gen_rtx_REG (tmpmode, *intreg),
5606 GEN_INT (i*8));
5607 intreg++;
5608 break;
5609 case X86_64_SSESF_CLASS:
5610 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5611 gen_rtx_REG (SFmode,
5612 SSE_REGNO (sse_regno)),
5613 GEN_INT (i*8));
5614 sse_regno++;
5615 break;
5616 case X86_64_SSEDF_CLASS:
5617 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5618 gen_rtx_REG (DFmode,
5619 SSE_REGNO (sse_regno)),
5620 GEN_INT (i*8));
5621 sse_regno++;
5622 break;
5623 case X86_64_SSE_CLASS:
5624 pos = i;
5625 switch (n)
5627 case 1:
5628 tmpmode = DImode;
5629 break;
5630 case 2:
5631 if (i == 0 && regclass[1] == X86_64_SSEUP_CLASS)
5633 tmpmode = TImode;
5634 i++;
5636 else
5637 tmpmode = DImode;
5638 break;
5639 case 4:
5640 gcc_assert (i == 0
5641 && regclass[1] == X86_64_SSEUP_CLASS
5642 && regclass[2] == X86_64_SSEUP_CLASS
5643 && regclass[3] == X86_64_SSEUP_CLASS);
5644 tmpmode = OImode;
5645 i += 3;
5646 break;
5647 default:
5648 gcc_unreachable ();
5650 exp [nexps++] = gen_rtx_EXPR_LIST (VOIDmode,
5651 gen_rtx_REG (tmpmode,
5652 SSE_REGNO (sse_regno)),
5653 GEN_INT (pos*8));
5654 sse_regno++;
5655 break;
5656 default:
5657 gcc_unreachable ();
5661 /* Empty aligned struct, union or class. */
5662 if (nexps == 0)
5663 return NULL;
5665 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nexps));
5666 for (i = 0; i < nexps; i++)
5667 XVECEXP (ret, 0, i) = exp [i];
5668 return ret;
5671 /* Update the data in CUM to advance over an argument of mode MODE
5672 and data type TYPE. (TYPE is null for libcalls where that information
5673 may not be available.) */
5675 static void
5676 function_arg_advance_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5677 tree type, HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5679 switch (mode)
5681 default:
5682 break;
5684 case BLKmode:
5685 if (bytes < 0)
5686 break;
5687 /* FALLTHRU */
5689 case DImode:
5690 case SImode:
5691 case HImode:
5692 case QImode:
5693 cum->words += words;
5694 cum->nregs -= words;
5695 cum->regno += words;
5697 if (cum->nregs <= 0)
5699 cum->nregs = 0;
5700 cum->regno = 0;
5702 break;
5704 case OImode:
5705 /* OImode shouldn't be used directly. */
5706 gcc_unreachable ();
5708 case DFmode:
5709 if (cum->float_in_sse < 2)
5710 break;
5711 case SFmode:
5712 if (cum->float_in_sse < 1)
5713 break;
5714 /* FALLTHRU */
5716 case V8SFmode:
5717 case V8SImode:
5718 case V32QImode:
5719 case V16HImode:
5720 case V4DFmode:
5721 case V4DImode:
5722 case TImode:
5723 case V16QImode:
5724 case V8HImode:
5725 case V4SImode:
5726 case V2DImode:
5727 case V4SFmode:
5728 case V2DFmode:
5729 if (!type || !AGGREGATE_TYPE_P (type))
5731 cum->sse_words += words;
5732 cum->sse_nregs -= 1;
5733 cum->sse_regno += 1;
5734 if (cum->sse_nregs <= 0)
5736 cum->sse_nregs = 0;
5737 cum->sse_regno = 0;
5740 break;
5742 case V8QImode:
5743 case V4HImode:
5744 case V2SImode:
5745 case V2SFmode:
5746 case V1DImode:
5747 if (!type || !AGGREGATE_TYPE_P (type))
5749 cum->mmx_words += words;
5750 cum->mmx_nregs -= 1;
5751 cum->mmx_regno += 1;
5752 if (cum->mmx_nregs <= 0)
5754 cum->mmx_nregs = 0;
5755 cum->mmx_regno = 0;
5758 break;
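/* 64-bit SysV counterpart of function_arg_advance_32: consume the integer
   and SSE registers computed by examine_argument, or count stack words
   when the argument is passed in memory.  Unnamed 256-bit vector arguments
   are passed on the stack and leave CUM untouched.  */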
5762 static void
5763 function_arg_advance_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5764 tree type, HOST_WIDE_INT words, int named)
5766 int int_nregs, sse_nregs;
5768 /* Unnamed 256bit vector mode parameters are passed on stack. */
5769 if (!named && VALID_AVX256_REG_MODE (mode))
5770 return;
5772 if (!examine_argument (mode, type, 0, &int_nregs, &sse_nregs))
5773 cum->words += words;
5774 else if (sse_nregs <= cum->sse_nregs && int_nregs <= cum->nregs)
5776 cum->nregs -= int_nregs;
5777 cum->sse_nregs -= sse_nregs;
5778 cum->regno += int_nregs;
5779 cum->sse_regno += sse_nregs;
5781 else
5782 cum->words += words;
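/* Win64 counterpart: every argument occupies one stack slot, and at most
   one parameter register is consumed per argument.  */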
5785 static void
5786 function_arg_advance_ms_64 (CUMULATIVE_ARGS *cum, HOST_WIDE_INT bytes,
5787 HOST_WIDE_INT words)
5789 /* Otherwise, this should be passed indirectly. */
5790 gcc_assert (bytes == 1 || bytes == 2 || bytes == 4 || bytes == 8);
5792 cum->words += words;
5793 if (cum->nregs > 0)
5795 cum->nregs -= 1;
5796 cum->regno += 1;
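/* Update CUM to advance past an argument, dispatching to the Win64,
   64-bit SysV or 32-bit helper depending on the target ABI.  */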
5800 void
5801 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5802 tree type, int named)
5804 HOST_WIDE_INT bytes, words;
5806 if (mode == BLKmode)
5807 bytes = int_size_in_bytes (type);
5808 else
5809 bytes = GET_MODE_SIZE (mode);
5810 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5812 if (type)
5813 mode = type_natural_mode (type, NULL);
5815 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
5816 function_arg_advance_ms_64 (cum, bytes, words);
5817 else if (TARGET_64BIT)
5818 function_arg_advance_64 (cum, mode, type, words, named);
5819 else
5820 function_arg_advance_32 (cum, mode, type, bytes, words);
5823 /* Define where to put the arguments to a function.
5824 Value is zero to push the argument on the stack,
5825 or a hard register in which to store the argument.
5827 MODE is the argument's machine mode.
5828 TYPE is the data type of the argument (as a tree).
5829 This is null for libcalls where that information may
5830 not be available.
5831 CUM is a variable of type CUMULATIVE_ARGS which gives info about
5832 the preceding args and about the function being called.
5833 NAMED is nonzero if this argument is a named parameter
5834 (otherwise it is an extra parameter matching an ellipsis). */
5836 static rtx
5837 function_arg_32 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5838 enum machine_mode orig_mode, tree type,
5839 HOST_WIDE_INT bytes, HOST_WIDE_INT words)
5841 static bool warnedsse, warnedmmx;
5843 /* Avoid the AL settings for the Unix64 ABI. */
5844 if (mode == VOIDmode)
5845 return constm1_rtx;
5847 switch (mode)
5849 default:
5850 break;
5852 case BLKmode:
5853 if (bytes < 0)
5854 break;
5855 /* FALLTHRU */
5856 case DImode:
5857 case SImode:
5858 case HImode:
5859 case QImode:
5860 if (words <= cum->nregs)
5862 int regno = cum->regno;
5864 /* Fastcall allocates the first two DWORD-sized (SImode) or
5865 smaller arguments to ECX and EDX, provided the argument is
5866 not an aggregate type. */
5867 if (cum->fastcall)
5869 if (mode == BLKmode
5870 || mode == DImode
5871 || (type && AGGREGATE_TYPE_P (type)))
5872 break;
5874 /* ECX not EAX is the first allocated register. */
5875 if (regno == AX_REG)
5876 regno = CX_REG;
5878 return gen_rtx_REG (mode, regno);
5880 break;
5882 case DFmode:
5883 if (cum->float_in_sse < 2)
5884 break;
5885 case SFmode:
5886 if (cum->float_in_sse < 1)
5887 break;
5888 /* FALLTHRU */
5889 case TImode:
5890 /* In 32bit, we pass TImode in xmm registers. */
5891 case V16QImode:
5892 case V8HImode:
5893 case V4SImode:
5894 case V2DImode:
5895 case V4SFmode:
5896 case V2DFmode:
5897 if (!type || !AGGREGATE_TYPE_P (type))
5899 if (!TARGET_SSE && !warnedsse && cum->warn_sse)
5901 warnedsse = true;
5902 warning (0, "SSE vector argument without SSE enabled "
5903 "changes the ABI");
5905 if (cum->sse_nregs)
5906 return gen_reg_or_parallel (mode, orig_mode,
5907 cum->sse_regno + FIRST_SSE_REG);
5909 break;
5911 case OImode:
5912 /* OImode shouldn't be used directly. */
5913 gcc_unreachable ();
5915 case V8SFmode:
5916 case V8SImode:
5917 case V32QImode:
5918 case V16HImode:
5919 case V4DFmode:
5920 case V4DImode:
5921 if (!type || !AGGREGATE_TYPE_P (type))
5923 if (cum->sse_nregs)
5924 return gen_reg_or_parallel (mode, orig_mode,
5925 cum->sse_regno + FIRST_SSE_REG);
5927 break;
5929 case V8QImode:
5930 case V4HImode:
5931 case V2SImode:
5932 case V2SFmode:
5933 case V1DImode:
5934 if (!type || !AGGREGATE_TYPE_P (type))
5936 if (!TARGET_MMX && !warnedmmx && cum->warn_mmx)
5938 warnedmmx = true;
5939 warning (0, "MMX vector argument without MMX enabled "
5940 "changes the ABI");
5942 if (cum->mmx_nregs)
5943 return gen_reg_or_parallel (mode, orig_mode,
5944 cum->mmx_regno + FIRST_MMX_REG);
5946 break;
5949 return NULL_RTX;
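/* 64-bit SysV version of function_arg.  Return the register or PARALLEL
   used to pass the argument, or NULL when it is passed on the stack.  */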
5952 static rtx
5953 function_arg_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5954 enum machine_mode orig_mode, tree type, int named)
5956 /* Handle a hidden AL argument containing the number of SSE registers
5957 used for varargs x86-64 functions. */
5958 if (mode == VOIDmode)
5959 return GEN_INT (cum->maybe_vaarg
5960 ? (cum->sse_nregs < 0
5961 ? (cum->call_abi == ix86_abi
5962 ? SSE_REGPARM_MAX
5963 : (ix86_abi != SYSV_ABI
5964 ? X86_64_SSE_REGPARM_MAX
5965 : X86_64_MS_SSE_REGPARM_MAX))
5966 : cum->sse_regno)
5967 : -1);
5969 switch (mode)
5971 default:
5972 break;
5974 case V8SFmode:
5975 case V8SImode:
5976 case V32QImode:
5977 case V16HImode:
5978 case V4DFmode:
5979 case V4DImode:
5980 /* Unnamed 256bit vector mode parameters are passed on stack. */
5981 if (!named)
5982 return NULL;
5983 break;
5986 return construct_container (mode, orig_mode, type, 0, cum->nregs,
5987 cum->sse_nregs,
5988 &x86_64_int_parameter_registers [cum->regno],
5989 cum->sse_regno);
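/* Win64 version of function_arg.  Each argument occupies a single slot;
   named SFmode/DFmode values go in SSE registers, while unnamed ones are
   duplicated into both the SSE and the integer register for that slot.  */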
5992 static rtx
5993 function_arg_ms_64 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5994 enum machine_mode orig_mode, int named,
5995 HOST_WIDE_INT bytes)
5997 unsigned int regno;
5999 /* We need to add a clobber for MS_ABI->SYSV ABI calls in expand_call.
6000 We use the value -2 to specify that the current function call is MS ABI. */
6001 if (mode == VOIDmode)
6002 return GEN_INT (-2);
6004 /* If we've run out of registers, it goes on the stack. */
6005 if (cum->nregs == 0)
6006 return NULL_RTX;
6008 regno = x86_64_ms_abi_int_parameter_registers[cum->regno];
6010 /* Only floating point modes are passed in anything but integer regs. */
6011 if (TARGET_SSE && (mode == SFmode || mode == DFmode))
6013 if (named)
6014 regno = cum->regno + FIRST_SSE_REG;
6015 else
6017 rtx t1, t2;
6019 /* Unnamed floating parameters are passed in both the
6020 SSE and integer registers. */
6021 t1 = gen_rtx_REG (mode, cum->regno + FIRST_SSE_REG);
6022 t2 = gen_rtx_REG (mode, regno);
6023 t1 = gen_rtx_EXPR_LIST (VOIDmode, t1, const0_rtx);
6024 t2 = gen_rtx_EXPR_LIST (VOIDmode, t2, const0_rtx);
6025 return gen_rtx_PARALLEL (mode, gen_rtvec (2, t1, t2));
6028 /* Handle aggregate types passed in a register. */
6029 if (orig_mode == BLKmode)
6031 if (bytes > 0 && bytes <= 8)
6032 mode = (bytes > 4 ? DImode : SImode);
6033 if (mode == BLKmode)
6034 mode = DImode;
6037 return gen_reg_or_parallel (mode, orig_mode, regno);
6041 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode omode,
6042 tree type, int named)
6044 enum machine_mode mode = omode;
6045 HOST_WIDE_INT bytes, words;
6047 if (mode == BLKmode)
6048 bytes = int_size_in_bytes (type);
6049 else
6050 bytes = GET_MODE_SIZE (mode);
6051 words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6053 /* To simplify the code below, represent vector types with a vector mode
6054 even if MMX/SSE are not active. */
6055 if (type && TREE_CODE (type) == VECTOR_TYPE)
6056 mode = type_natural_mode (type, cum);
6058 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6059 return function_arg_ms_64 (cum, mode, omode, named, bytes);
6060 else if (TARGET_64BIT)
6061 return function_arg_64 (cum, mode, omode, type, named);
6062 else
6063 return function_arg_32 (cum, mode, omode, type, bytes, words);
6066 /* A C expression that indicates when an argument must be passed by
6067 reference. If nonzero for an argument, a copy of that argument is
6068 made in memory and a pointer to the argument is passed instead of
6069 the argument itself. The pointer is passed in whatever way is
6070 appropriate for passing a pointer to that type. */
6072 static bool
6073 ix86_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6074 enum machine_mode mode ATTRIBUTE_UNUSED,
6075 const_tree type, bool named ATTRIBUTE_UNUSED)
6077 /* See Windows x64 Software Convention. */
6078 if (TARGET_64BIT && (cum ? cum->call_abi : ix86_abi) == MS_ABI)
6080 int msize = (int) GET_MODE_SIZE (mode);
6081 if (type)
6083 /* Arrays are passed by reference. */
6084 if (TREE_CODE (type) == ARRAY_TYPE)
6085 return true;
6087 if (AGGREGATE_TYPE_P (type))
6089 /* Structs/unions of sizes other than 8, 16, 32, or 64 bits
6090 are passed by reference. */
6091 msize = int_size_in_bytes (type);
6095 /* __m128 is passed by reference. */
6096 switch (msize) {
6097 case 1: case 2: case 4: case 8:
6098 break;
6099 default:
6100 return true;
6103 else if (TARGET_64BIT && type && int_size_in_bytes (type) == -1)
6104 return 1;
6106 return 0;
6109 /* Return true when TYPE should be 128-bit aligned for the 32-bit argument
6110 passing ABI. */
6111 static bool
6112 contains_aligned_value_p (tree type)
6114 enum machine_mode mode = TYPE_MODE (type);
6115 if (((TARGET_SSE && SSE_REG_MODE_P (mode))
6116 || mode == TDmode
6117 || mode == TFmode
6118 || mode == TCmode)
6119 && (!TYPE_USER_ALIGN (type) || TYPE_ALIGN (type) > 128))
6120 return true;
6121 if (TYPE_ALIGN (type) < 128)
6122 return false;
6124 if (AGGREGATE_TYPE_P (type))
6126 /* Walk the aggregates recursively. */
6127 switch (TREE_CODE (type))
6129 case RECORD_TYPE:
6130 case UNION_TYPE:
6131 case QUAL_UNION_TYPE:
6133 tree field;
6135 /* Walk all the structure fields. */
6136 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6138 if (TREE_CODE (field) == FIELD_DECL
6139 && contains_aligned_value_p (TREE_TYPE (field)))
6140 return true;
6142 break;
6145 case ARRAY_TYPE:
6146 /* Just for use if some languages pass arrays by value. */
6147 if (contains_aligned_value_p (TREE_TYPE (type)))
6148 return true;
6149 break;
6151 default:
6152 gcc_unreachable ();
6155 return false;
6158 /* Gives the alignment boundary, in bits, of an argument with the
6159 specified mode and type. */
6162 ix86_function_arg_boundary (enum machine_mode mode, tree type)
6164 int align;
6165 if (type)
6167 /* Since the canonical type is used for the call, convert the
6168 type to its canonical form if needed. */
6169 if (!TYPE_STRUCTURAL_EQUALITY_P (type))
6170 type = TYPE_CANONICAL (type);
6171 align = TYPE_ALIGN (type);
6173 else
6174 align = GET_MODE_ALIGNMENT (mode);
6175 if (align < PARM_BOUNDARY)
6176 align = PARM_BOUNDARY;
6177 /* In 32bit, only _Decimal128 and __float128 are aligned to their
6178 natural boundaries. */
6179 if (!TARGET_64BIT && mode != TDmode && mode != TFmode)
6181 /* i386 ABI defines all arguments to be 4 byte aligned. We have to
6182 make an exception for SSE modes since these require 128bit
6183 alignment.
6185 The handling here differs from field_alignment. ICC aligns MMX
6186 arguments to 4 byte boundaries, while structure fields are aligned
6187 to 8 byte boundaries. */
6188 if (!type)
6190 if (!(TARGET_SSE && SSE_REG_MODE_P (mode)))
6191 align = PARM_BOUNDARY;
6193 else
6195 if (!contains_aligned_value_p (type))
6196 align = PARM_BOUNDARY;
6199 if (align > BIGGEST_ALIGNMENT)
6200 align = BIGGEST_ALIGNMENT;
6201 return align;
6204 /* Return true if REGNO is a possible register number for a function value. */
6206 bool
6207 ix86_function_value_regno_p (int regno)
6209 switch (regno)
6211 case 0:
6212 return true;
6214 case FIRST_FLOAT_REG:
6215 /* TODO: The function should depend on current function ABI but
6216 builtins.c would need updating then. Therefore we use the
6217 default ABI. */
6218 if (TARGET_64BIT && ix86_abi == MS_ABI)
6219 return false;
6220 return TARGET_FLOAT_RETURNS_IN_80387;
6222 case FIRST_SSE_REG:
6223 return TARGET_SSE;
6225 case FIRST_MMX_REG:
6226 if (TARGET_MACHO || TARGET_64BIT)
6227 return false;
6228 return TARGET_MMX;
6231 return false;
6234 /* Define how to find the value returned by a function.
6235 VALTYPE is the data type of the value (as a tree).
6236 If the precise function being called is known, FUNC is its FUNCTION_DECL;
6237 otherwise, FUNC is 0. */
6239 static rtx
6240 function_value_32 (enum machine_mode orig_mode, enum machine_mode mode,
6241 const_tree fntype, const_tree fn)
6243 unsigned int regno;
6245 /* 8-byte vector modes in %mm0. See ix86_return_in_memory for where
6246 we normally prevent this case when mmx is not available. However
6247 some ABIs may require the result to be returned like DImode. */
6248 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6249 regno = TARGET_MMX ? FIRST_MMX_REG : 0;
6251 /* 16-byte vector modes in %xmm0. See ix86_return_in_memory for where
6252 we prevent this case when sse is not available. However some ABIs
6253 may require the result to be returned like integer TImode. */
6254 else if (mode == TImode
6255 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6256 regno = TARGET_SSE ? FIRST_SSE_REG : 0;
6258 /* 32-byte vector modes in %ymm0. */
6259 else if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 32)
6260 regno = TARGET_AVX ? FIRST_SSE_REG : 0;
6262 /* Floating point return values in %st(0) (unless -mno-fp-ret-in-387). */
6263 else if (X87_FLOAT_MODE_P (mode) && TARGET_FLOAT_RETURNS_IN_80387)
6264 regno = FIRST_FLOAT_REG;
6265 else
6266 /* Most things go in %eax. */
6267 regno = AX_REG;
6269 /* Override FP return register with %xmm0 for local functions when
6270 SSE math is enabled or for functions with sseregparm attribute. */
6271 if ((fn || fntype) && (mode == SFmode || mode == DFmode))
6273 int sse_level = ix86_function_sseregparm (fntype, fn, false);
6274 if ((sse_level >= 1 && mode == SFmode)
6275 || (sse_level == 2 && mode == DFmode))
6276 regno = FIRST_SSE_REG;
6279 /* OImode shouldn't be used directly. */
6280 gcc_assert (mode != OImode);
6282 return gen_rtx_REG (orig_mode, regno);
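/* Return the rtx for a value returned under the 64-bit SysV ABI.  Libcalls
   carry no type node and are handled by mode alone.  */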
6285 static rtx
6286 function_value_64 (enum machine_mode orig_mode, enum machine_mode mode,
6287 const_tree valtype)
6289 rtx ret;
6291 /* Handle libcalls, which don't provide a type node. */
6292 if (valtype == NULL)
6294 switch (mode)
6296 case SFmode:
6297 case SCmode:
6298 case DFmode:
6299 case DCmode:
6300 case TFmode:
6301 case SDmode:
6302 case DDmode:
6303 case TDmode:
6304 return gen_rtx_REG (mode, FIRST_SSE_REG);
6305 case XFmode:
6306 case XCmode:
6307 return gen_rtx_REG (mode, FIRST_FLOAT_REG);
6308 case TCmode:
6309 return NULL;
6310 default:
6311 return gen_rtx_REG (mode, AX_REG);
6315 ret = construct_container (mode, orig_mode, valtype, 1,
6316 X86_64_REGPARM_MAX, X86_64_SSE_REGPARM_MAX,
6317 x86_64_int_return_registers, 0);
6319 /* For zero sized structures, construct_container returns NULL, but we
6320 need to keep the rest of the compiler happy by returning a meaningful value. */
6321 if (!ret)
6322 ret = gen_rtx_REG (orig_mode, AX_REG);
6324 return ret;
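/* Win64 version: values come back in AX, or in XMM0 for SFmode/DFmode and
   for 16-byte integer or vector modes when SSE is enabled.  */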
6327 static rtx
6328 function_value_ms_64 (enum machine_mode orig_mode, enum machine_mode mode)
6330 unsigned int regno = AX_REG;
6332 if (TARGET_SSE)
6334 switch (GET_MODE_SIZE (mode))
6336 case 16:
6337 if((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6338 && !COMPLEX_MODE_P (mode))
6339 regno = FIRST_SSE_REG;
6340 break;
6341 case 8:
6342 case 4:
6343 if (mode == SFmode || mode == DFmode)
6344 regno = FIRST_SSE_REG;
6345 break;
6346 default:
6347 break;
6350 return gen_rtx_REG (orig_mode, regno);
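/* Common worker for ix86_function_value and ix86_libcall_value: pick the
   ABI specific handler based on the function type or decl.  */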
6353 static rtx
6354 ix86_function_value_1 (const_tree valtype, const_tree fntype_or_decl,
6355 enum machine_mode orig_mode, enum machine_mode mode)
6357 const_tree fn, fntype;
6359 fn = NULL_TREE;
6360 if (fntype_or_decl && DECL_P (fntype_or_decl))
6361 fn = fntype_or_decl;
6362 fntype = fn ? TREE_TYPE (fn) : fntype_or_decl;
6364 if (TARGET_64BIT && ix86_function_type_abi (fntype) == MS_ABI)
6365 return function_value_ms_64 (orig_mode, mode);
6366 else if (TARGET_64BIT)
6367 return function_value_64 (orig_mode, mode, valtype);
6368 else
6369 return function_value_32 (orig_mode, mode, fntype, fn);
6372 static rtx
6373 ix86_function_value (const_tree valtype, const_tree fntype_or_decl,
6374 bool outgoing ATTRIBUTE_UNUSED)
6376 enum machine_mode mode, orig_mode;
6378 orig_mode = TYPE_MODE (valtype);
6379 mode = type_natural_mode (valtype, NULL);
6380 return ix86_function_value_1 (valtype, fntype_or_decl, orig_mode, mode);
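/* Return the place a libcall value of mode MODE is returned in; no type
   information is available for libcalls.  */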
6384 ix86_libcall_value (enum machine_mode mode)
6386 return ix86_function_value_1 (NULL, NULL, mode, mode);
6389 /* Return true iff type is returned in memory. */
6391 static int ATTRIBUTE_UNUSED
6392 return_in_memory_32 (const_tree type, enum machine_mode mode)
6394 HOST_WIDE_INT size;
6396 if (mode == BLKmode)
6397 return 1;
6399 size = int_size_in_bytes (type);
6401 if (MS_AGGREGATE_RETURN && AGGREGATE_TYPE_P (type) && size <= 8)
6402 return 0;
6404 if (VECTOR_MODE_P (mode) || mode == TImode)
6406 /* User-created vectors small enough to fit in EAX. */
6407 if (size < 8)
6408 return 0;
6410 /* MMX/3dNow values are returned in MM0,
6411 except when it doesn't exist. */
6412 if (size == 8)
6413 return (TARGET_MMX ? 0 : 1);
6415 /* SSE values are returned in XMM0, except when it doesn't exist. */
6416 if (size == 16)
6417 return (TARGET_SSE ? 0 : 1);
6419 /* AVX values are returned in YMM0, except when it doesn't exist. */
6420 if (size == 32)
6421 return TARGET_AVX ? 0 : 1;
6424 if (mode == XFmode)
6425 return 0;
6427 if (size > 12)
6428 return 1;
6430 /* OImode shouldn't be used directly. */
6431 gcc_assert (mode != OImode);
6433 return 0;
6436 static int ATTRIBUTE_UNUSED
6437 return_in_memory_64 (const_tree type, enum machine_mode mode)
6439 int needed_intregs, needed_sseregs;
6440 return !examine_argument (mode, type, 1, &needed_intregs, &needed_sseregs);
6443 static int ATTRIBUTE_UNUSED
6444 return_in_memory_ms_64 (const_tree type, enum machine_mode mode)
6446 HOST_WIDE_INT size = int_size_in_bytes (type);
6448 /* __m128 is returned in xmm0. */
6449 if ((SCALAR_INT_MODE_P (mode) || VECTOR_MODE_P (mode))
6450 && !COMPLEX_MODE_P (mode) && (GET_MODE_SIZE (mode) == 16 || size == 16))
6451 return 0;
6453 /* Otherwise, the size must be exactly 1, 2, 4, or 8 bytes. */
6454 return (size != 1 && size != 2 && size != 4 && size != 8);
6457 static bool
6458 ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6460 #ifdef SUBTARGET_RETURN_IN_MEMORY
6461 return SUBTARGET_RETURN_IN_MEMORY (type, fntype);
6462 #else
6463 const enum machine_mode mode = type_natural_mode (type, NULL);
6465 if (TARGET_64BIT)
6467 if (ix86_function_type_abi (fntype) == MS_ABI)
6468 return return_in_memory_ms_64 (type, mode);
6469 else
6470 return return_in_memory_64 (type, mode);
6472 else
6473 return return_in_memory_32 (type, mode);
6474 #endif
6477 /* Return true iff TYPE is returned in memory. This version is used
6478 on Solaris 10. It is similar to the generic ix86_return_in_memory,
6479 but differs notably in that when MMX is available, 8-byte vectors
6480 are returned in memory, rather than in MMX registers. */
6482 bool
6483 ix86_sol10_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
6485 int size;
6486 enum machine_mode mode = type_natural_mode (type, NULL);
6488 if (TARGET_64BIT)
6489 return return_in_memory_64 (type, mode);
6491 if (mode == BLKmode)
6492 return 1;
6494 size = int_size_in_bytes (type);
6496 if (VECTOR_MODE_P (mode))
6498 /* Return in memory only if MMX registers *are* available. This
6499 seems backwards, but it is consistent with the existing
6500 Solaris x86 ABI. */
6501 if (size == 8)
6502 return TARGET_MMX;
6503 if (size == 16)
6504 return !TARGET_SSE;
6506 else if (mode == TImode)
6507 return !TARGET_SSE;
6508 else if (mode == XFmode)
6509 return 0;
6511 return size > 12;
6514 /* When returning SSE vector types, we have a choice of either
6515 (1) being abi incompatible with a -march switch, or
6516 (2) generating an error.
6517 Given no good solution, I think the safest thing is one warning.
6518 The user won't be able to use -Werror, but....
6520 Choose the STRUCT_VALUE_RTX hook because that's (at present) only
6521 called in response to actually generating a caller or callee that
6522 uses such a type. As opposed to TARGET_RETURN_IN_MEMORY, which is called
6523 via aggregate_value_p for general type probing from tree-ssa. */
6525 static rtx
6526 ix86_struct_value_rtx (tree type, int incoming ATTRIBUTE_UNUSED)
6528 static bool warnedsse, warnedmmx;
6530 if (!TARGET_64BIT && type)
6532 /* Look at the return type of the function, not the function type. */
6533 enum machine_mode mode = TYPE_MODE (TREE_TYPE (type));
6535 if (!TARGET_SSE && !warnedsse)
6537 if (mode == TImode
6538 || (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 16))
6540 warnedsse = true;
6541 warning (0, "SSE vector return without SSE enabled "
6542 "changes the ABI");
6546 if (!TARGET_MMX && !warnedmmx)
6548 if (VECTOR_MODE_P (mode) && GET_MODE_SIZE (mode) == 8)
6550 warnedmmx = true;
6551 warning (0, "MMX vector return without MMX enabled "
6552 "changes the ABI");
6557 return NULL;
6561 /* Create the va_list data type. */
6563 /* Returns the calling convention specific va_list data type.
6564 The argument ABI can be DEFAULT_ABI, MS_ABI, or SYSV_ABI. */
6566 static tree
6567 ix86_build_builtin_va_list_abi (enum calling_abi abi)
6569 tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
6571 /* For i386 we use a plain pointer to the argument area. */
6572 if (!TARGET_64BIT || abi == MS_ABI)
6573 return build_pointer_type (char_type_node);
6575 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6576 type_decl = build_decl (BUILTINS_LOCATION,
6577 TYPE_DECL, get_identifier ("__va_list_tag"), record);
6579 f_gpr = build_decl (BUILTINS_LOCATION,
6580 FIELD_DECL, get_identifier ("gp_offset"),
6581 unsigned_type_node);
6582 f_fpr = build_decl (BUILTINS_LOCATION,
6583 FIELD_DECL, get_identifier ("fp_offset"),
6584 unsigned_type_node);
6585 f_ovf = build_decl (BUILTINS_LOCATION,
6586 FIELD_DECL, get_identifier ("overflow_arg_area"),
6587 ptr_type_node);
6588 f_sav = build_decl (BUILTINS_LOCATION,
6589 FIELD_DECL, get_identifier ("reg_save_area"),
6590 ptr_type_node);
6592 va_list_gpr_counter_field = f_gpr;
6593 va_list_fpr_counter_field = f_fpr;
6595 DECL_FIELD_CONTEXT (f_gpr) = record;
6596 DECL_FIELD_CONTEXT (f_fpr) = record;
6597 DECL_FIELD_CONTEXT (f_ovf) = record;
6598 DECL_FIELD_CONTEXT (f_sav) = record;
6600 TREE_CHAIN (record) = type_decl;
6601 TYPE_NAME (record) = type_decl;
6602 TYPE_FIELDS (record) = f_gpr;
6603 TREE_CHAIN (f_gpr) = f_fpr;
6604 TREE_CHAIN (f_fpr) = f_ovf;
6605 TREE_CHAIN (f_ovf) = f_sav;
6607 layout_type (record);
6609 /* The correct type is an array type of one element. */
6610 return build_array_type (record, build_index_type (size_zero_node));
6613 /* Set up the builtin va_list data type and, for 64-bit, the additional
6614 calling convention specific va_list data types. */
6616 static tree
6617 ix86_build_builtin_va_list (void)
6619 tree ret = ix86_build_builtin_va_list_abi (ix86_abi);
6621 /* Initialize abi specific va_list builtin types. */
6622 if (TARGET_64BIT)
6624 tree t;
6625 if (ix86_abi == MS_ABI)
6627 t = ix86_build_builtin_va_list_abi (SYSV_ABI);
6628 if (TREE_CODE (t) != RECORD_TYPE)
6629 t = build_variant_type_copy (t);
6630 sysv_va_list_type_node = t;
6632 else
6634 t = ret;
6635 if (TREE_CODE (t) != RECORD_TYPE)
6636 t = build_variant_type_copy (t);
6637 sysv_va_list_type_node = t;
6639 if (ix86_abi != MS_ABI)
6641 t = ix86_build_builtin_va_list_abi (MS_ABI);
6642 if (TREE_CODE (t) != RECORD_TYPE)
6643 t = build_variant_type_copy (t);
6644 ms_va_list_type_node = t;
6646 else
6648 t = ret;
6649 if (TREE_CODE (t) != RECORD_TYPE)
6650 t = build_variant_type_copy (t);
6651 ms_va_list_type_node = t;
6655 return ret;
6658 /* Worker function for TARGET_SETUP_INCOMING_VARARGS. */
6660 static void
6661 setup_incoming_varargs_64 (CUMULATIVE_ARGS *cum)
6663 rtx save_area, mem;
6664 rtx label;
6665 rtx label_ref;
6666 rtx tmp_reg;
6667 rtx nsse_reg;
6668 alias_set_type set;
6669 int i;
6670 int regparm = ix86_regparm;
6672 if (cum->call_abi != ix86_abi)
6673 regparm = (ix86_abi != SYSV_ABI
6674 ? X86_64_REGPARM_MAX : X86_64_MS_REGPARM_MAX);
6676 /* GPR size of varargs save area. */
6677 if (cfun->va_list_gpr_size)
6678 ix86_varargs_gpr_size = X86_64_REGPARM_MAX * UNITS_PER_WORD;
6679 else
6680 ix86_varargs_gpr_size = 0;
6682 /* FPR size of varargs save area. We don't need it if we don't pass
6683 anything in SSE registers. */
6684 if (cum->sse_nregs && cfun->va_list_fpr_size)
6685 ix86_varargs_fpr_size = X86_64_SSE_REGPARM_MAX * 16;
6686 else
6687 ix86_varargs_fpr_size = 0;
6689 if (! ix86_varargs_gpr_size && ! ix86_varargs_fpr_size)
6690 return;
6692 save_area = frame_pointer_rtx;
6693 set = get_varargs_alias_set ();
6695 for (i = cum->regno;
6696 i < regparm
6697 && i < cum->regno + cfun->va_list_gpr_size / UNITS_PER_WORD;
6698 i++)
6700 mem = gen_rtx_MEM (Pmode,
6701 plus_constant (save_area, i * UNITS_PER_WORD));
6702 MEM_NOTRAP_P (mem) = 1;
6703 set_mem_alias_set (mem, set);
6704 emit_move_insn (mem, gen_rtx_REG (Pmode,
6705 x86_64_int_parameter_registers[i]));
6708 if (ix86_varargs_fpr_size)
6710 /* Now emit code to save SSE registers. The AX parameter contains the
6711 number of SSE parameter registers used to call this function. We use
6712 the sse_prologue_save insn template, which produces a computed jump
6713 across the SSE saves. We need some preparation work to get this working. */
6715 label = gen_label_rtx ();
6716 label_ref = gen_rtx_LABEL_REF (Pmode, label);
6718 /* Compute the address to jump to:
6719 label - eax*4 + nnamed_sse_arguments*4, or
6720 label - eax*5 + nnamed_sse_arguments*5 for AVX. */
6721 tmp_reg = gen_reg_rtx (Pmode);
6722 nsse_reg = gen_reg_rtx (Pmode);
6723 emit_insn (gen_zero_extendqidi2 (nsse_reg, gen_rtx_REG (QImode, AX_REG)));
6724 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6725 gen_rtx_MULT (Pmode, nsse_reg,
6726 GEN_INT (4))));
6728 /* vmovaps is one byte longer than movaps. */
6729 if (TARGET_AVX)
6730 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6731 gen_rtx_PLUS (Pmode, tmp_reg,
6732 nsse_reg)));
6734 if (cum->sse_regno)
6735 emit_move_insn
6736 (nsse_reg,
6737 gen_rtx_CONST (DImode,
6738 gen_rtx_PLUS (DImode,
6739 label_ref,
6740 GEN_INT (cum->sse_regno
6741 * (TARGET_AVX ? 5 : 4)))));
6742 else
6743 emit_move_insn (nsse_reg, label_ref);
6744 emit_insn (gen_subdi3 (nsse_reg, nsse_reg, tmp_reg));
6746 /* Compute the address of the memory block we save into. We always use a
6747 pointer pointing 127 bytes after the first byte to store; this is needed
6748 to keep the instruction size limited to 4 bytes (5 bytes for AVX) with a
6749 one byte displacement. */
6750 tmp_reg = gen_reg_rtx (Pmode);
6751 emit_insn (gen_rtx_SET (VOIDmode, tmp_reg,
6752 plus_constant (save_area,
6753 ix86_varargs_gpr_size + 127)));
6754 mem = gen_rtx_MEM (BLKmode, plus_constant (tmp_reg, -127));
6755 MEM_NOTRAP_P (mem) = 1;
6756 set_mem_alias_set (mem, set);
6757 set_mem_align (mem, BITS_PER_WORD);
6759 /* And finally do the dirty job! */
6760 emit_insn (gen_sse_prologue_save (mem, nsse_reg,
6761 GEN_INT (cum->sse_regno), label));
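/* Win64 worker for TARGET_SETUP_INCOMING_VARARGS: store the remaining
   register parameters into their home slots in the caller-allocated
   shadow area.  */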
6765 static void
6766 setup_incoming_varargs_ms_64 (CUMULATIVE_ARGS *cum)
6768 alias_set_type set = get_varargs_alias_set ();
6769 int i;
6771 for (i = cum->regno; i < X86_64_MS_REGPARM_MAX; i++)
6773 rtx reg, mem;
6775 mem = gen_rtx_MEM (Pmode,
6776 plus_constant (virtual_incoming_args_rtx,
6777 i * UNITS_PER_WORD));
6778 MEM_NOTRAP_P (mem) = 1;
6779 set_mem_alias_set (mem, set);
6781 reg = gen_rtx_REG (Pmode, x86_64_ms_abi_int_parameter_registers[i]);
6782 emit_move_insn (mem, reg);
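/* Implement TARGET_SETUP_INCOMING_VARARGS: skip the last named argument
   for stdarg functions and let the ABI specific worker above save the
   remaining register parameters.  */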
6786 static void
6787 ix86_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
6788 tree type, int *pretend_size ATTRIBUTE_UNUSED,
6789 int no_rtl)
6791 CUMULATIVE_ARGS next_cum;
6792 tree fntype;
6794 /* This argument doesn't appear to be used anymore. Which is good,
6795 because the old code here didn't suppress rtl generation. */
6796 gcc_assert (!no_rtl);
6798 if (!TARGET_64BIT)
6799 return;
6801 fntype = TREE_TYPE (current_function_decl);
6803 /* For varargs, we do not want to skip the dummy va_dcl argument.
6804 For stdargs, we do want to skip the last named argument. */
6805 next_cum = *cum;
6806 if (stdarg_p (fntype))
6807 function_arg_advance (&next_cum, mode, type, 1);
6809 if (cum->call_abi == MS_ABI)
6810 setup_incoming_varargs_ms_64 (&next_cum);
6811 else
6812 setup_incoming_varargs_64 (&next_cum);
6815 /* Check whether TYPE is a char * style va_list. */
6817 static bool
6818 is_va_list_char_pointer (tree type)
6820 tree canonic;
6822 /* For 32-bit it is always true. */
6823 if (!TARGET_64BIT)
6824 return true;
6825 canonic = ix86_canonical_va_list_type (type);
6826 return (canonic == ms_va_list_type_node
6827 || (ix86_abi == MS_ABI && canonic == va_list_type_node));
6830 /* Implement va_start. */
6832 static void
6833 ix86_va_start (tree valist, rtx nextarg)
6835 HOST_WIDE_INT words, n_gpr, n_fpr;
6836 tree f_gpr, f_fpr, f_ovf, f_sav;
6837 tree gpr, fpr, ovf, sav, t;
6838 tree type;
6840 /* Only 64bit target needs something special. */
6841 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6843 std_expand_builtin_va_start (valist, nextarg);
6844 return;
6847 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6848 f_fpr = TREE_CHAIN (f_gpr);
6849 f_ovf = TREE_CHAIN (f_fpr);
6850 f_sav = TREE_CHAIN (f_ovf);
6852 valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
6853 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
6854 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6855 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6856 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6858 /* Count number of gp and fp argument registers used. */
6859 words = crtl->args.info.words;
6860 n_gpr = crtl->args.info.regno;
6861 n_fpr = crtl->args.info.sse_regno;
6863 if (cfun->va_list_gpr_size)
6865 type = TREE_TYPE (gpr);
6866 t = build2 (MODIFY_EXPR, type,
6867 gpr, build_int_cst (type, n_gpr * 8));
6868 TREE_SIDE_EFFECTS (t) = 1;
6869 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6872 if (TARGET_SSE && cfun->va_list_fpr_size)
6874 type = TREE_TYPE (fpr);
6875 t = build2 (MODIFY_EXPR, type, fpr,
6876 build_int_cst (type, n_fpr * 16 + 8*X86_64_REGPARM_MAX));
6877 TREE_SIDE_EFFECTS (t) = 1;
6878 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6881 /* Find the overflow area. */
6882 type = TREE_TYPE (ovf);
6883 t = make_tree (type, crtl->args.internal_arg_pointer);
6884 if (words != 0)
6885 t = build2 (POINTER_PLUS_EXPR, type, t,
6886 size_int (words * UNITS_PER_WORD));
6887 t = build2 (MODIFY_EXPR, type, ovf, t);
6888 TREE_SIDE_EFFECTS (t) = 1;
6889 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6891 if (ix86_varargs_gpr_size || ix86_varargs_fpr_size)
6893 /* Find the register save area.
6894 The function prologue saves it right above the stack frame. */
6895 type = TREE_TYPE (sav);
6896 t = make_tree (type, frame_pointer_rtx);
6897 if (!ix86_varargs_gpr_size)
6898 t = build2 (POINTER_PLUS_EXPR, type, t,
6899 size_int (-8 * X86_64_REGPARM_MAX));
6900 t = build2 (MODIFY_EXPR, type, sav, t);
6901 TREE_SIDE_EFFECTS (t) = 1;
6902 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6906 /* Implement va_arg. */
6908 static tree
6909 ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
6910 gimple_seq *post_p)
6912 static const int intreg[6] = { 0, 1, 2, 3, 4, 5 };
6913 tree f_gpr, f_fpr, f_ovf, f_sav;
6914 tree gpr, fpr, ovf, sav, t;
6915 int size, rsize;
6916 tree lab_false, lab_over = NULL_TREE;
6917 tree addr, t2;
6918 rtx container;
6919 int indirect_p = 0;
6920 tree ptrtype;
6921 enum machine_mode nat_mode;
6922 int arg_boundary;
6924 /* Only 64bit target needs something special. */
6925 if (!TARGET_64BIT || is_va_list_char_pointer (TREE_TYPE (valist)))
6926 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6928 f_gpr = TYPE_FIELDS (TREE_TYPE (sysv_va_list_type_node));
6929 f_fpr = TREE_CHAIN (f_gpr);
6930 f_ovf = TREE_CHAIN (f_fpr);
6931 f_sav = TREE_CHAIN (f_ovf);
6933 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr),
6934 build_va_arg_indirect_ref (valist), f_gpr, NULL_TREE);
6935 valist = build_va_arg_indirect_ref (valist);
6936 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr, NULL_TREE);
6937 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf, NULL_TREE);
6938 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav, NULL_TREE);
6940 indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, false);
6941 if (indirect_p)
6942 type = build_pointer_type (type);
6943 size = int_size_in_bytes (type);
6944 rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
6946 nat_mode = type_natural_mode (type, NULL);
6947 switch (nat_mode)
6949 case V8SFmode:
6950 case V8SImode:
6951 case V32QImode:
6952 case V16HImode:
6953 case V4DFmode:
6954 case V4DImode:
6955 /* Unnamed 256bit vector mode parameters are passed on stack. */
6956 if (ix86_cfun_abi () == SYSV_ABI)
6958 container = NULL;
6959 break;
6962 default:
6963 container = construct_container (nat_mode, TYPE_MODE (type),
6964 type, 0, X86_64_REGPARM_MAX,
6965 X86_64_SSE_REGPARM_MAX, intreg,
6967 break;
6970 /* Pull the value out of the saved registers. */
6972 addr = create_tmp_var (ptr_type_node, "addr");
6974 if (container)
6976 int needed_intregs, needed_sseregs;
6977 bool need_temp;
6978 tree int_addr, sse_addr;
6980 lab_false = create_artificial_label (UNKNOWN_LOCATION);
6981 lab_over = create_artificial_label (UNKNOWN_LOCATION);
6983 examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
6985 need_temp = (!REG_P (container)
6986 && ((needed_intregs && TYPE_ALIGN (type) > 64)
6987 || TYPE_ALIGN (type) > 128));
6989 /* In case we are passing a structure, verify that it is a consecutive
6990 block in the register save area. If not, we need to do moves. */
6991 if (!need_temp && !REG_P (container))
6993 /* Verify that all registers are strictly consecutive. */
6994 if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
6996 int i;
6998 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7000 rtx slot = XVECEXP (container, 0, i);
7001 if (REGNO (XEXP (slot, 0)) != FIRST_SSE_REG + (unsigned int) i
7002 || INTVAL (XEXP (slot, 1)) != i * 16)
7003 need_temp = 1;
7006 else
7008 int i;
7010 for (i = 0; i < XVECLEN (container, 0) && !need_temp; i++)
7012 rtx slot = XVECEXP (container, 0, i);
7013 if (REGNO (XEXP (slot, 0)) != (unsigned int) i
7014 || INTVAL (XEXP (slot, 1)) != i * 8)
7015 need_temp = 1;
7019 if (!need_temp)
7021 int_addr = addr;
7022 sse_addr = addr;
7024 else
7026 int_addr = create_tmp_var (ptr_type_node, "int_addr");
7027 sse_addr = create_tmp_var (ptr_type_node, "sse_addr");
7030 /* First ensure that we fit completely in registers. */
7031 if (needed_intregs)
7033 t = build_int_cst (TREE_TYPE (gpr),
7034 (X86_64_REGPARM_MAX - needed_intregs + 1) * 8);
7035 t = build2 (GE_EXPR, boolean_type_node, gpr, t);
7036 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7037 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7038 gimplify_and_add (t, pre_p);
7040 if (needed_sseregs)
7042 t = build_int_cst (TREE_TYPE (fpr),
7043 (X86_64_SSE_REGPARM_MAX - needed_sseregs + 1) * 16
7044 + X86_64_REGPARM_MAX * 8);
7045 t = build2 (GE_EXPR, boolean_type_node, fpr, t);
7046 t2 = build1 (GOTO_EXPR, void_type_node, lab_false);
7047 t = build3 (COND_EXPR, void_type_node, t, t2, NULL_TREE);
7048 gimplify_and_add (t, pre_p);
7051 /* Compute index to start of area used for integer regs. */
7052 if (needed_intregs)
7054 /* int_addr = gpr + sav; */
7055 t = fold_convert (sizetype, gpr);
7056 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7057 gimplify_assign (int_addr, t, pre_p);
7059 if (needed_sseregs)
7061 /* sse_addr = fpr + sav; */
7062 t = fold_convert (sizetype, fpr);
7063 t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, t);
7064 gimplify_assign (sse_addr, t, pre_p);
7066 if (need_temp)
7068 int i;
7069 tree temp = create_tmp_var (type, "va_arg_tmp");
7071 /* addr = &temp; */
7072 t = build1 (ADDR_EXPR, build_pointer_type (type), temp);
7073 gimplify_assign (addr, t, pre_p);
7075 for (i = 0; i < XVECLEN (container, 0); i++)
7077 rtx slot = XVECEXP (container, 0, i);
7078 rtx reg = XEXP (slot, 0);
7079 enum machine_mode mode = GET_MODE (reg);
7080 tree piece_type = lang_hooks.types.type_for_mode (mode, 1);
7081 tree addr_type = build_pointer_type (piece_type);
7082 tree daddr_type = build_pointer_type_for_mode (piece_type,
7083 ptr_mode, true);
7084 tree src_addr, src;
7085 int src_offset;
7086 tree dest_addr, dest;
7088 if (SSE_REGNO_P (REGNO (reg)))
7090 src_addr = sse_addr;
7091 src_offset = (REGNO (reg) - FIRST_SSE_REG) * 16;
7093 else
7095 src_addr = int_addr;
7096 src_offset = REGNO (reg) * 8;
7098 src_addr = fold_convert (addr_type, src_addr);
7099 src_addr = fold_build2 (POINTER_PLUS_EXPR, addr_type, src_addr,
7100 size_int (src_offset));
7101 src = build_va_arg_indirect_ref (src_addr);
7103 dest_addr = fold_convert (daddr_type, addr);
7104 dest_addr = fold_build2 (POINTER_PLUS_EXPR, daddr_type, dest_addr,
7105 size_int (INTVAL (XEXP (slot, 1))));
7106 dest = build_va_arg_indirect_ref (dest_addr);
7108 gimplify_assign (dest, src, pre_p);
7112 if (needed_intregs)
7114 t = build2 (PLUS_EXPR, TREE_TYPE (gpr), gpr,
7115 build_int_cst (TREE_TYPE (gpr), needed_intregs * 8));
7116 gimplify_assign (gpr, t, pre_p);
7119 if (needed_sseregs)
7121 t = build2 (PLUS_EXPR, TREE_TYPE (fpr), fpr,
7122 build_int_cst (TREE_TYPE (fpr), needed_sseregs * 16));
7123 gimplify_assign (fpr, t, pre_p);
7126 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
7128 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_false));
7131 /* ... otherwise out of the overflow area. */
7133 /* When the caller aligns a parameter on the stack, a parameter whose
7134 alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT will be aligned
7135 at MAX_SUPPORTED_STACK_ALIGNMENT. We match the callee here with
7136 the caller. */
7137 arg_boundary = FUNCTION_ARG_BOUNDARY (VOIDmode, type);
7138 if ((unsigned int) arg_boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
7139 arg_boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
7141 /* Care for on-stack alignment if needed. */
7142 if (arg_boundary <= 64
7143 || integer_zerop (TYPE_SIZE (type)))
7144 t = ovf;
7145 else
7147 HOST_WIDE_INT align = arg_boundary / 8;
7148 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), ovf,
7149 size_int (align - 1));
7150 t = fold_convert (sizetype, t);
7151 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
7152 size_int (-align));
7153 t = fold_convert (TREE_TYPE (ovf), t);
7155 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
7156 gimplify_assign (addr, t, pre_p);
7158 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
7159 size_int (rsize * UNITS_PER_WORD));
7160 gimplify_assign (unshare_expr (ovf), t, pre_p);
7162 if (container)
7163 gimple_seq_add_stmt (pre_p, gimple_build_label (lab_over));
7165 ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
7166 addr = fold_convert (ptrtype, addr);
7168 if (indirect_p)
7169 addr = build_va_arg_indirect_ref (addr);
7170 return build_va_arg_indirect_ref (addr);
7173 /* Return nonzero if OPNUM's MEM should be matched
7174 in movabs* patterns. */
7177 ix86_check_movabs (rtx insn, int opnum)
7179 rtx set, mem;
7181 set = PATTERN (insn);
7182 if (GET_CODE (set) == PARALLEL)
7183 set = XVECEXP (set, 0, 0);
7184 gcc_assert (GET_CODE (set) == SET);
7185 mem = XEXP (set, opnum);
7186 while (GET_CODE (mem) == SUBREG)
7187 mem = SUBREG_REG (mem);
7188 gcc_assert (MEM_P (mem));
7189 return (volatile_ok || !MEM_VOLATILE_P (mem));
7192 /* Initialize the table of extra 80387 mathematical constants. */
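/* In order, the strings below are log10(2) (fldlg2), ln(2) (fldln2),
   log2(e) (fldl2e), log2(10) (fldl2t) and pi (fldpi), kept as decimal
   strings so each can be rounded to the target's XFmode format.  */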
7194 static void
7195 init_ext_80387_constants (void)
7197 static const char * cst[5] =
7199 "0.3010299956639811952256464283594894482", /* 0: fldlg2 */
7200 "0.6931471805599453094286904741849753009", /* 1: fldln2 */
7201 "1.4426950408889634073876517827983434472", /* 2: fldl2e */
7202 "3.3219280948873623478083405569094566090", /* 3: fldl2t */
7203 "3.1415926535897932385128089594061862044", /* 4: fldpi */
7205 int i;
7207 for (i = 0; i < 5; i++)
7209 real_from_string (&ext_80387_constants_table[i], cst[i]);
7210 /* Ensure each constant is rounded to XFmode precision. */
7211 real_convert (&ext_80387_constants_table[i],
7212 XFmode, &ext_80387_constants_table[i]);
7215 ext_80387_constants_init = 1;
7218 /* Return nonzero if the constant X can be loaded with a special 80387
7219 instruction; the return value indexes standard_80387_constant_opcode.  */
7222 standard_80387_constant_p (rtx x)
7224 enum machine_mode mode = GET_MODE (x);
7226 REAL_VALUE_TYPE r;
7228 if (!(X87_FLOAT_MODE_P (mode) && (GET_CODE (x) == CONST_DOUBLE)))
7229 return -1;
7231 if (x == CONST0_RTX (mode))
7232 return 1;
7233 if (x == CONST1_RTX (mode))
7234 return 2;
7236 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7238 /* For XFmode constants, try to find a special 80387 instruction when
7239 optimizing for size or on those CPUs that benefit from them. */
7240 if (mode == XFmode
7241 && (optimize_function_for_size_p (cfun) || TARGET_EXT_80387_CONSTANTS))
7243 int i;
7245 if (! ext_80387_constants_init)
7246 init_ext_80387_constants ();
7248 for (i = 0; i < 5; i++)
7249 if (real_identical (&r, &ext_80387_constants_table[i]))
7250 return i + 3;
7253 /* Load of the constant -0.0 or -1.0 will be split as
7254 fldz;fchs or fld1;fchs sequence. */
7255 if (real_isnegzero (&r))
7256 return 8;
7257 if (real_identical (&r, &dconstm1))
7258 return 9;
7260 return 0;
7263 /* Return the opcode of the special instruction to be used to load
7264 the constant X. */
7266 const char *
7267 standard_80387_constant_opcode (rtx x)
7269 switch (standard_80387_constant_p (x))
7271 case 1:
7272 return "fldz";
7273 case 2:
7274 return "fld1";
7275 case 3:
7276 return "fldlg2";
7277 case 4:
7278 return "fldln2";
7279 case 5:
7280 return "fldl2e";
7281 case 6:
7282 return "fldl2t";
7283 case 7:
7284 return "fldpi";
7285 case 8:
7286 case 9:
7287 return "#";
7288 default:
7289 gcc_unreachable ();
7293 /* Return the CONST_DOUBLE representing the 80387 constant that is
7294 loaded by the specified special instruction. The argument IDX
7295 matches the return value from standard_80387_constant_p. */
7298 standard_80387_constant_rtx (int idx)
7300 int i;
7302 if (! ext_80387_constants_init)
7303 init_ext_80387_constants ();
7305 switch (idx)
7307 case 3:
7308 case 4:
7309 case 5:
7310 case 6:
7311 case 7:
7312 i = idx - 3;
7313 break;
7315 default:
7316 gcc_unreachable ();
7319 return CONST_DOUBLE_FROM_REAL_VALUE (ext_80387_constants_table[i],
7320 XFmode);
7323 /* Return 1 if X is all zeros and 2 if X is all ones
7324 in a supported SSE vector mode; return 0 otherwise.  */
7327 standard_sse_constant_p (rtx x)
7329 enum machine_mode mode = GET_MODE (x);
7331 if (x == const0_rtx || x == CONST0_RTX (GET_MODE (x)))
7332 return 1;
7333 if (vector_all_ones_operand (x, mode))
7334 switch (mode)
7336 case V16QImode:
7337 case V8HImode:
7338 case V4SImode:
7339 case V2DImode:
7340 if (TARGET_SSE2)
7341 return 2;
7342 default:
7343 break;
7346 return 0;
7349 /* Return the opcode of the special instruction to be used to load
7350 the constant X. */
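/* An all-zero vector is cheapest to materialize as a register XORed
   with itself, and an all-ones vector as a pcmpeqd of a register with
   itself; the mode attribute of INSN selects between the ps/pd/integer
   forms (and, under AVX, the VEX-encoded three-operand forms).  */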
7352 const char *
7353 standard_sse_constant_opcode (rtx insn, rtx x)
7355 switch (standard_sse_constant_p (x))
7357 case 1:
7358 switch (get_attr_mode (insn))
7360 case MODE_V4SF:
7361 return TARGET_AVX ? "vxorps\t%0, %0, %0" : "xorps\t%0, %0";
7362 case MODE_V2DF:
7363 return TARGET_AVX ? "vxorpd\t%0, %0, %0" : "xorpd\t%0, %0";
7364 case MODE_TI:
7365 return TARGET_AVX ? "vpxor\t%0, %0, %0" : "pxor\t%0, %0";
7366 case MODE_V8SF:
7367 return "vxorps\t%x0, %x0, %x0";
7368 case MODE_V4DF:
7369 return "vxorpd\t%x0, %x0, %x0";
7370 case MODE_OI:
7371 return "vpxor\t%x0, %x0, %x0";
7372 default:
7373 break;
7375 case 2:
7376 return TARGET_AVX ? "vpcmpeqd\t%0, %0, %0" : "pcmpeqd\t%0, %0";
7377 default:
7378 break;
7380 gcc_unreachable ();
7383 /* Return 1 if OP contains a symbol reference.  */
7386 symbolic_reference_mentioned_p (rtx op)
7388 const char *fmt;
7389 int i;
7391 if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
7392 return 1;
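/* Otherwise scan OP's operands, guided by its RTX format string:
   'E' marks a vector of subexpressions and 'e' a single one.  */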
7394 fmt = GET_RTX_FORMAT (GET_CODE (op));
7395 for (i = GET_RTX_LENGTH (GET_CODE (op)) - 1; i >= 0; i--)
7397 if (fmt[i] == 'E')
7399 int j;
7401 for (j = XVECLEN (op, i) - 1; j >= 0; j--)
7402 if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
7403 return 1;
7406 else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
7407 return 1;
7410 return 0;
7413 /* Return 1 if it is appropriate to emit `ret' instructions in the
7414 body of a function. Do this only if the epilogue is simple, needing a
7415 couple of insns. Prior to reloading, we can't tell how many registers
7416 must be saved, so return 0 then. Return 0 if there is no frame
7417 marker to de-allocate. */
7420 ix86_can_use_return_insn_p (void)
7422 struct ix86_frame frame;
7424 if (! reload_completed || frame_pointer_needed)
7425 return 0;
7427 /* Don't allow more than 32K bytes of popped arguments, since that's
7428 all we can do with one instruction.  */
7429 if (crtl->args.pops_args
7430 && crtl->args.size >= 32768)
7431 return 0;
7433 ix86_compute_frame_layout (&frame);
7434 return frame.to_allocate == 0 && frame.padding0 == 0
7435 && (frame.nregs + frame.nsseregs) == 0;
7438 /* Value should be nonzero if functions must have frame pointers.
7439 Zero means the frame pointer need not be set up (and parms may
7440 be accessed via the stack pointer) in functions that seem suitable. */
7442 static bool
7443 ix86_frame_pointer_required (void)
7445 /* If we accessed previous frames, then the generated code expects
7446 to be able to access the saved ebp value in our frame. */
7447 if (cfun->machine->accesses_prev_frame)
7448 return true;
7450 /* Several x86 OSes need a frame pointer for other reasons,
7451 usually pertaining to setjmp.  */
7452 if (SUBTARGET_FRAME_POINTER_REQUIRED)
7453 return true;
7455 /* In override_options, TARGET_OMIT_LEAF_FRAME_POINTER turns off
7456 the frame pointer by default. Turn it back on now if we've not
7457 got a leaf function. */
7458 if (TARGET_OMIT_LEAF_FRAME_POINTER
7459 && (!current_function_is_leaf
7460 || ix86_current_function_calls_tls_descriptor))
7461 return true;
7463 if (crtl->profile)
7464 return true;
7466 return false;
7469 /* Record that the current function accesses previous call frames. */
7471 void
7472 ix86_setup_frame_addresses (void)
7474 cfun->machine->accesses_prev_frame = 1;
7477 #ifndef USE_HIDDEN_LINKONCE
7478 # if (defined(HAVE_GAS_HIDDEN) && (SUPPORTS_ONE_ONLY - 0)) || TARGET_MACHO
7479 # define USE_HIDDEN_LINKONCE 1
7480 # else
7481 # define USE_HIDDEN_LINKONCE 0
7482 # endif
7483 #endif
7485 static int pic_labels_used;
7487 /* Fills in the label name that should be used for a pc thunk for
7488 the given register. */
7490 static void
7491 get_pc_thunk_name (char name[32], unsigned int regno)
7493 gcc_assert (!TARGET_64BIT);
7495 if (USE_HIDDEN_LINKONCE)
7496 sprintf (name, "__i686.get_pc_thunk.%s", reg_names[regno]);
7497 else
7498 ASM_GENERATE_INTERNAL_LABEL (name, "LPR", regno);
7502 /* Emit the pc thunks used by -fpic code: each one loads its register
7503 with the return address of its caller and then returns.  */
7505 void
7506 ix86_file_end (void)
7508 rtx xops[2];
7509 int regno;
7511 for (regno = 0; regno < 8; ++regno)
7513 char name[32];
7515 if (! ((pic_labels_used >> regno) & 1))
7516 continue;
7518 get_pc_thunk_name (name, regno);
7520 #if TARGET_MACHO
7521 if (TARGET_MACHO)
7523 switch_to_section (darwin_sections[text_coal_section]);
7524 fputs ("\t.weak_definition\t", asm_out_file);
7525 assemble_name (asm_out_file, name);
7526 fputs ("\n\t.private_extern\t", asm_out_file);
7527 assemble_name (asm_out_file, name);
7528 fputs ("\n", asm_out_file);
7529 ASM_OUTPUT_LABEL (asm_out_file, name);
7531 else
7532 #endif
7533 if (USE_HIDDEN_LINKONCE)
7535 tree decl;
7537 decl = build_decl (BUILTINS_LOCATION,
7538 FUNCTION_DECL, get_identifier (name),
7539 error_mark_node);
7540 TREE_PUBLIC (decl) = 1;
7541 TREE_STATIC (decl) = 1;
7542 DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);
7544 (*targetm.asm_out.unique_section) (decl, 0);
7545 switch_to_section (get_named_section (decl, NULL, 0));
7547 (*targetm.asm_out.globalize_label) (asm_out_file, name);
7548 fputs ("\t.hidden\t", asm_out_file);
7549 assemble_name (asm_out_file, name);
7550 putc ('\n', asm_out_file);
7551 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
7553 else
7555 switch_to_section (text_section);
7556 ASM_OUTPUT_LABEL (asm_out_file, name);
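/* The thunk body is just "mov (%esp), %REG; ret": it copies its own
   return address (the address of the insn following the call in the
   caller) into the chosen register and returns.  */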
7559 xops[0] = gen_rtx_REG (Pmode, regno);
7560 xops[1] = gen_rtx_MEM (Pmode, stack_pointer_rtx);
7561 output_asm_insn ("mov%z0\t{%1, %0|%0, %1}", xops);
7562 output_asm_insn ("ret", xops);
7565 if (NEED_INDICATE_EXEC_STACK)
7566 file_end_indicate_exec_stack ();
7569 /* Emit code for the SET_GOT patterns. */
7571 const char *
7572 output_set_got (rtx dest, rtx label ATTRIBUTE_UNUSED)
7574 rtx xops[3];
7576 xops[0] = dest;
7578 if (TARGET_VXWORKS_RTP && flag_pic)
7580 /* Load (*VXWORKS_GOTT_BASE) into the PIC register. */
7581 xops[2] = gen_rtx_MEM (Pmode,
7582 gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE));
7583 output_asm_insn ("mov{l}\t{%2, %0|%0, %2}", xops);
7585 /* Load (*VXWORKS_GOTT_BASE)[VXWORKS_GOTT_INDEX] into the PIC register.
7586 Use %P and a local symbol in order to print VXWORKS_GOTT_INDEX as
7587 an unadorned address. */
7588 xops[2] = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7589 SYMBOL_REF_FLAGS (xops[2]) |= SYMBOL_FLAG_LOCAL;
7590 output_asm_insn ("mov{l}\t{%P2(%0), %0|%0, DWORD PTR %P2[%0]}", xops);
7591 return "";
7594 xops[1] = gen_rtx_SYMBOL_REF (Pmode, GOT_SYMBOL_NAME);
7596 if (! TARGET_DEEP_BRANCH_PREDICTION || !flag_pic)
7598 xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
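/* Without deep branch prediction the PC is materialized inline: for
   PIC, call the local label emitted just below and pop the return
   address; for non-PIC a plain move of the label's address suffices.  */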
7600 if (!flag_pic)
7601 output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
7602 else
7603 output_asm_insn ("call\t%a2", xops);
7605 #if TARGET_MACHO
7606 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7607 is what will be referenced by the Mach-O PIC subsystem. */
7608 if (!label)
7609 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7610 #endif
7612 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7613 CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
7615 if (flag_pic)
7616 output_asm_insn ("pop%z0\t%0", xops);
7618 else
7620 char name[32];
7621 get_pc_thunk_name (name, REGNO (dest));
7622 pic_labels_used |= 1 << REGNO (dest);
7624 xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
7625 xops[2] = gen_rtx_MEM (QImode, xops[2]);
7626 output_asm_insn ("call\t%X2", xops);
7627 /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
7628 is what will be referenced by the Mach-O PIC subsystem. */
7629 #if TARGET_MACHO
7630 if (!label)
7631 ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
7632 else
7633 targetm.asm_out.internal_label (asm_out_file, "L",
7634 CODE_LABEL_NUMBER (label));
7635 #endif
7638 if (TARGET_MACHO)
7639 return "";
7641 if (!flag_pic || TARGET_DEEP_BRANCH_PREDICTION)
7642 output_asm_insn ("add%z0\t{%1, %0|%0, %1}", xops);
7643 else
7644 output_asm_insn ("add%z0\t{%1+[.-%a2], %0|%0, %1+(.-%a2)}", xops);
7646 return "";
7649 /* Generate a "push" pattern for input ARG.  */
7651 static rtx
7652 gen_push (rtx arg)
7654 if (ix86_cfa_state->reg == stack_pointer_rtx)
7655 ix86_cfa_state->offset += UNITS_PER_WORD;
7657 return gen_rtx_SET (VOIDmode,
7658 gen_rtx_MEM (Pmode,
7659 gen_rtx_PRE_DEC (Pmode,
7660 stack_pointer_rtx)),
7661 arg);
7664 /* Return the number of an unused call-clobbered register available
7665 for the entire function, or INVALID_REGNUM if there is none.  */
7667 static unsigned int
7668 ix86_select_alt_pic_regnum (void)
7670 if (current_function_is_leaf && !crtl->profile
7671 && !ix86_current_function_calls_tls_descriptor)
7673 int i, drap;
7674 /* Can't use the same register for both PIC and DRAP. */
7675 if (crtl->drap_reg)
7676 drap = REGNO (crtl->drap_reg);
7677 else
7678 drap = -1;
7679 for (i = 2; i >= 0; --i)
7680 if (i != drap && !df_regs_ever_live_p (i))
7681 return i;
7684 return INVALID_REGNUM;
7687 /* Return 1 if we need to save REGNO. */
7688 static int
7689 ix86_save_reg (unsigned int regno, int maybe_eh_return)
7691 if (pic_offset_table_rtx
7692 && regno == REAL_PIC_OFFSET_TABLE_REGNUM
7693 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
7694 || crtl->profile
7695 || crtl->calls_eh_return
7696 || crtl->uses_const_pool))
7698 if (ix86_select_alt_pic_regnum () != INVALID_REGNUM)
7699 return 0;
7700 return 1;
7703 if (crtl->calls_eh_return && maybe_eh_return)
7705 unsigned i;
7706 for (i = 0; ; i++)
7708 unsigned test = EH_RETURN_DATA_REGNO (i);
7709 if (test == INVALID_REGNUM)
7710 break;
7711 if (test == regno)
7712 return 1;
7716 if (crtl->drap_reg && regno == REGNO (crtl->drap_reg))
7717 return 1;
7719 return (df_regs_ever_live_p (regno)
7720 && !call_used_regs[regno]
7721 && !fixed_regs[regno]
7722 && (regno != HARD_FRAME_POINTER_REGNUM || !frame_pointer_needed));
7725 /* Return the number of saved general purpose registers.  */
7727 static int
7728 ix86_nsaved_regs (void)
7730 int nregs = 0;
7731 int regno;
7733 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7734 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7735 nregs ++;
7736 return nregs;
7739 /* Return the number of saved SSE registers.  */
7741 static int
7742 ix86_nsaved_sseregs (void)
7744 int nregs = 0;
7745 int regno;
7747 if (ix86_cfun_abi () != MS_ABI)
7748 return 0;
7749 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7750 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7751 nregs ++;
7752 return nregs;
7755 /* Given FROM and TO register numbers, say whether this elimination is
7756 allowed. If stack alignment is needed, we can only replace argument
7757 pointer with hard frame pointer, or replace frame pointer with stack
7758 pointer. Otherwise, frame pointer elimination is automatically
7759 handled and all other eliminations are valid. */
7761 static bool
7762 ix86_can_eliminate (const int from, const int to)
7764 if (stack_realign_fp)
7765 return ((from == ARG_POINTER_REGNUM
7766 && to == HARD_FRAME_POINTER_REGNUM)
7767 || (from == FRAME_POINTER_REGNUM
7768 && to == STACK_POINTER_REGNUM));
7769 else
7770 return to == STACK_POINTER_REGNUM ? !frame_pointer_needed : true;
7773 /* Return the offset between two registers, one to be eliminated, and the other
7774 its replacement, at the start of a routine. */
7776 HOST_WIDE_INT
7777 ix86_initial_elimination_offset (int from, int to)
7779 struct ix86_frame frame;
7780 ix86_compute_frame_layout (&frame);
7782 if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7783 return frame.hard_frame_pointer_offset;
7784 else if (from == FRAME_POINTER_REGNUM
7785 && to == HARD_FRAME_POINTER_REGNUM)
7786 return frame.hard_frame_pointer_offset - frame.frame_pointer_offset;
7787 else
7789 gcc_assert (to == STACK_POINTER_REGNUM);
7791 if (from == ARG_POINTER_REGNUM)
7792 return frame.stack_pointer_offset;
7794 gcc_assert (from == FRAME_POINTER_REGNUM);
7795 return frame.stack_pointer_offset - frame.frame_pointer_offset;
7799 /* In a dynamically-aligned function, we can't know the offset from
7800 stack pointer to frame pointer, so we must ensure that setjmp
7801 eliminates fp against the hard fp (%ebp) rather than trying to
7802 index from %esp up to the top of the frame across a gap that is
7803 of unknown (at compile-time) size. */
7804 static rtx
7805 ix86_builtin_setjmp_frame_value (void)
7807 return stack_realign_fp ? hard_frame_pointer_rtx : virtual_stack_vars_rtx;
7810 /* Fill structure ix86_frame about frame of currently computed function. */
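/* From the incoming return address towards lower addresses, the layout
   computed below is: return address, saved frame pointer (if any),
   saved general registers, padding0 (to 16-byte align the SSE save
   area), saved SSE registers, the va_arg register save area, padding1
   (to align the local frame), local variables, the outgoing argument
   area and padding2 (to the preferred stack boundary).  */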
7812 static void
7813 ix86_compute_frame_layout (struct ix86_frame *frame)
7815 unsigned int stack_alignment_needed;
7816 HOST_WIDE_INT offset;
7817 unsigned int preferred_alignment;
7818 HOST_WIDE_INT size = get_frame_size ();
7820 frame->nregs = ix86_nsaved_regs ();
7821 frame->nsseregs = ix86_nsaved_sseregs ();
7823 stack_alignment_needed = crtl->stack_alignment_needed / BITS_PER_UNIT;
7824 preferred_alignment = crtl->preferred_stack_boundary / BITS_PER_UNIT;
7826 /* The MS ABI seems to require stack alignment to always be 16 except for
7827 function prologues.  */
7828 if (ix86_cfun_abi () == MS_ABI && preferred_alignment < 16)
7830 preferred_alignment = 16;
7831 stack_alignment_needed = 16;
7832 crtl->preferred_stack_boundary = 128;
7833 crtl->stack_alignment_needed = 128;
7836 gcc_assert (!size || stack_alignment_needed);
7837 gcc_assert (preferred_alignment >= STACK_BOUNDARY / BITS_PER_UNIT);
7838 gcc_assert (preferred_alignment <= stack_alignment_needed);
7840 /* During reload iteration the number of registers saved can change.
7841 Recompute the value as needed.  Do not recompute when the number of registers
7842 didn't change, as reload makes multiple calls to this function and does not
7843 expect the decision to change within a single iteration.  */
7844 if (!optimize_function_for_size_p (cfun)
7845 && cfun->machine->use_fast_prologue_epilogue_nregs != frame->nregs)
7847 int count = frame->nregs;
7849 cfun->machine->use_fast_prologue_epilogue_nregs = count;
7850 /* The fast prologue uses moves instead of pushes to save registers.  This
7851 is significantly longer, but it also executes faster, as modern hardware
7852 can execute the moves in parallel but can't do that for push/pop.
7854 Be careful about choosing which prologue to emit: when the function takes
7855 many instructions to execute, we may as well use the slow version, and
7856 likewise when the function is known to be outside a hot spot (known only
7857 with profile feedback).  Weight the size of the function by the number of
7858 registers to save, as it is cheap to use one or two push instructions but
7859 very slow to use many of them.  */
7860 if (count)
7861 count = (count - 1) * FAST_PROLOGUE_INSN_COUNT;
7862 if (cfun->function_frequency < FUNCTION_FREQUENCY_NORMAL
7863 || (flag_branch_probabilities
7864 && cfun->function_frequency < FUNCTION_FREQUENCY_HOT))
7865 cfun->machine->use_fast_prologue_epilogue = false;
7866 else
7867 cfun->machine->use_fast_prologue_epilogue
7868 = !expensive_function_p (count);
7870 if (TARGET_PROLOGUE_USING_MOVE
7871 && cfun->machine->use_fast_prologue_epilogue)
7872 frame->save_regs_using_mov = true;
7873 else
7874 frame->save_regs_using_mov = false;
7877 /* Skip return address and saved base pointer. */
7878 offset = frame_pointer_needed ? UNITS_PER_WORD * 2 : UNITS_PER_WORD;
7880 frame->hard_frame_pointer_offset = offset;
7882 /* Set offset to aligned because the realigned frame starts from
7883 here. */
7884 if (stack_realign_fp)
7885 offset = (offset + stack_alignment_needed -1) & -stack_alignment_needed;
7887 /* Register save area */
7888 offset += frame->nregs * UNITS_PER_WORD;
7890 /* Align SSE reg save area. */
7891 if (frame->nsseregs)
7892 frame->padding0 = ((offset + 16 - 1) & -16) - offset;
7893 else
7894 frame->padding0 = 0;
7896 /* SSE register save area. */
7897 offset += frame->padding0 + frame->nsseregs * 16;
7899 /* Va-arg area */
7900 frame->va_arg_size = ix86_varargs_gpr_size + ix86_varargs_fpr_size;
7901 offset += frame->va_arg_size;
7903 /* Align start of frame for local function. */
7904 frame->padding1 = ((offset + stack_alignment_needed - 1)
7905 & -stack_alignment_needed) - offset;
7907 offset += frame->padding1;
7909 /* Frame pointer points here. */
7910 frame->frame_pointer_offset = offset;
7912 offset += size;
7914 /* Add the outgoing arguments area.  It can be skipped if we eliminated
7915 all the function calls as dead code.
7916 Skipping is however impossible when the function calls alloca.  The alloca
7917 expander assumes that the last crtl->outgoing_args_size bytes
7918 of the stack frame are unused.  */
7919 if (ACCUMULATE_OUTGOING_ARGS
7920 && (!current_function_is_leaf || cfun->calls_alloca
7921 || ix86_current_function_calls_tls_descriptor))
7923 offset += crtl->outgoing_args_size;
7924 frame->outgoing_arguments_size = crtl->outgoing_args_size;
7926 else
7927 frame->outgoing_arguments_size = 0;
7929 /* Align stack boundary. Only needed if we're calling another function
7930 or using alloca. */
7931 if (!current_function_is_leaf || cfun->calls_alloca
7932 || ix86_current_function_calls_tls_descriptor)
7933 frame->padding2 = ((offset + preferred_alignment - 1)
7934 & -preferred_alignment) - offset;
7935 else
7936 frame->padding2 = 0;
7938 offset += frame->padding2;
7940 /* We've reached end of stack frame. */
7941 frame->stack_pointer_offset = offset;
7943 /* Size prologue needs to allocate. */
7944 frame->to_allocate =
7945 (size + frame->padding1 + frame->padding2
7946 + frame->outgoing_arguments_size + frame->va_arg_size);
7948 if ((!frame->to_allocate && frame->nregs <= 1)
7949 || (TARGET_64BIT && frame->to_allocate >= (HOST_WIDE_INT) 0x80000000))
7950 frame->save_regs_using_mov = false;
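/* On x86-64 SysV targets, leaf functions may keep their whole frame in
   the 128-byte red zone below the stack pointer and skip adjusting it;
   the Windows ABI has no red zone, hence the TARGET_64BIT_MS_ABI test.  */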
7952 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
7953 && current_function_sp_is_unchanging
7954 && current_function_is_leaf
7955 && !ix86_current_function_calls_tls_descriptor)
7957 frame->red_zone_size = frame->to_allocate;
7958 if (frame->save_regs_using_mov)
7959 frame->red_zone_size += frame->nregs * UNITS_PER_WORD;
7960 if (frame->red_zone_size > RED_ZONE_SIZE - RED_ZONE_RESERVE)
7961 frame->red_zone_size = RED_ZONE_SIZE - RED_ZONE_RESERVE;
7963 else
7964 frame->red_zone_size = 0;
7965 frame->to_allocate -= frame->red_zone_size;
7966 frame->stack_pointer_offset -= frame->red_zone_size;
7969 /* Emit code to save registers in the prologue. */
7971 static void
7972 ix86_emit_save_regs (void)
7974 unsigned int regno;
7975 rtx insn;
7977 for (regno = FIRST_PSEUDO_REGISTER - 1; regno-- > 0; )
7978 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7980 insn = emit_insn (gen_push (gen_rtx_REG (Pmode, regno)));
7981 RTX_FRAME_RELATED_P (insn) = 1;
7985 /* Emit code to save registers using MOV insns.  The first register
7986 is stored at POINTER + OFFSET.  */
7987 static void
7988 ix86_emit_save_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
7990 unsigned int regno;
7991 rtx insn;
7993 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7994 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
7996 insn = emit_move_insn (adjust_address (gen_rtx_MEM (Pmode, pointer),
7997 Pmode, offset),
7998 gen_rtx_REG (Pmode, regno));
7999 RTX_FRAME_RELATED_P (insn) = 1;
8000 offset += UNITS_PER_WORD;
8004 /* Emit code to save SSE registers using MOV insns.  The first register
8005 is stored at POINTER + OFFSET.  */
8006 static void
8007 ix86_emit_save_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset)
8009 unsigned int regno;
8010 rtx insn;
8011 rtx mem;
8013 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8014 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, true))
8016 mem = adjust_address (gen_rtx_MEM (TImode, pointer), TImode, offset);
8017 set_mem_align (mem, 128);
8018 insn = emit_move_insn (mem, gen_rtx_REG (TImode, regno));
8019 RTX_FRAME_RELATED_P (insn) = 1;
8020 offset += 16;
8024 static GTY(()) rtx queued_cfa_restores;
8026 /* Add a REG_CFA_RESTORE note for REG to INSN, or queue it until the next
8027 stack manipulation insn.  Don't add it if the previously
8028 saved value will be left untouched within the stack red zone until return,
8029 as unwinders can find the same value in the register and
8030 on the stack.  */
8032 static void
8033 ix86_add_cfa_restore_note (rtx insn, rtx reg, HOST_WIDE_INT red_offset)
8035 if (TARGET_RED_ZONE
8036 && !TARGET_64BIT_MS_ABI
8037 && red_offset + RED_ZONE_SIZE >= 0
8038 && crtl->args.pops_args < 65536)
8039 return;
8041 if (insn)
8043 add_reg_note (insn, REG_CFA_RESTORE, reg);
8044 RTX_FRAME_RELATED_P (insn) = 1;
8046 else
8047 queued_cfa_restores
8048 = alloc_reg_note (REG_CFA_RESTORE, reg, queued_cfa_restores);
8051 /* Add queued REG_CFA_RESTORE notes if any to INSN. */
8053 static void
8054 ix86_add_queued_cfa_restore_notes (rtx insn)
8056 rtx last;
8057 if (!queued_cfa_restores)
8058 return;
8059 for (last = queued_cfa_restores; XEXP (last, 1); last = XEXP (last, 1))
8061 XEXP (last, 1) = REG_NOTES (insn);
8062 REG_NOTES (insn) = queued_cfa_restores;
8063 queued_cfa_restores = NULL_RTX;
8064 RTX_FRAME_RELATED_P (insn) = 1;
8067 /* Expand a prologue or epilogue stack adjustment.
8068 The pattern exists to put a dependency on all ebp-based memory accesses.
8069 STYLE should be negative if instructions should be marked as frame related,
8070 zero if the %r11 register is live and cannot be freely used, and positive
8071 otherwise.  */
8073 static void
8074 pro_epilogue_adjust_stack (rtx dest, rtx src, rtx offset,
8075 int style, bool set_cfa)
8077 rtx insn;
8079 if (! TARGET_64BIT)
8080 insn = emit_insn (gen_pro_epilogue_adjust_stack_1 (dest, src, offset));
8081 else if (x86_64_immediate_operand (offset, DImode))
8082 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64 (dest, src, offset));
8083 else
8085 rtx r11;
8086 /* r11 is used by indirect sibcall return as well, set before the
8087 epilogue and used after the epilogue. ATM indirect sibcall
8088 shouldn't be used together with huge frame sizes in one
8089 function because of the frame_size check in sibcall.c. */
8090 gcc_assert (style);
8091 r11 = gen_rtx_REG (DImode, R11_REG);
8092 insn = emit_insn (gen_rtx_SET (DImode, r11, offset));
8093 if (style < 0)
8094 RTX_FRAME_RELATED_P (insn) = 1;
8095 insn = emit_insn (gen_pro_epilogue_adjust_stack_rex64_2 (dest, src, r11,
8096 offset));
8099 if (style >= 0)
8100 ix86_add_queued_cfa_restore_notes (insn);
8102 if (set_cfa)
8104 rtx r;
8106 gcc_assert (ix86_cfa_state->reg == src);
8107 ix86_cfa_state->offset += INTVAL (offset);
8108 ix86_cfa_state->reg = dest;
8110 r = gen_rtx_PLUS (Pmode, src, offset);
8111 r = gen_rtx_SET (VOIDmode, dest, r);
8112 add_reg_note (insn, REG_CFA_ADJUST_CFA, r);
8113 RTX_FRAME_RELATED_P (insn) = 1;
8115 else if (style < 0)
8116 RTX_FRAME_RELATED_P (insn) = 1;
8119 /* Find an available register to be used as the dynamic realign argument
8120 pointer register.  Such a register will be written in the prologue and
8121 used at the beginning of the body, so it must not be
8122 1. a parameter passing register.
8123 2. the GOT pointer.
8124 We reuse the static-chain register if it is available.  Otherwise, we
8125 use DI for i386 and R13 for x86-64.  We chose R13 since it has a
8126 shorter encoding.
8128 Return: the regno of the chosen register.  */
8130 static unsigned int
8131 find_drap_reg (void)
8133 tree decl = cfun->decl;
8135 if (TARGET_64BIT)
8137 /* Use R13 for a nested function or a function that needs a static chain.
8138 Since a function with a tail call may use any caller-saved
8139 register in its epilogue, DRAP must not use a caller-saved
8140 register in that case.  */
8141 if ((decl_function_context (decl)
8142 && !DECL_NO_STATIC_CHAIN (decl))
8143 || crtl->tail_call_emit)
8144 return R13_REG;
8146 return R10_REG;
8148 else
8150 /* Use DI for a nested function or a function that needs a static chain.
8151 Since a function with a tail call may use any caller-saved
8152 register in its epilogue, DRAP must not use a caller-saved
8153 register in that case.  */
8154 if ((decl_function_context (decl)
8155 && !DECL_NO_STATIC_CHAIN (decl))
8156 || crtl->tail_call_emit)
8157 return DI_REG;
8159 /* Reuse static chain register if it isn't used for parameter
8160 passing. */
8161 if (ix86_function_regparm (TREE_TYPE (decl), decl) <= 2
8162 && !lookup_attribute ("fastcall",
8163 TYPE_ATTRIBUTES (TREE_TYPE (decl))))
8164 return CX_REG;
8165 else
8166 return DI_REG;
8170 /* Update incoming stack boundary and estimated stack alignment. */
8172 static void
8173 ix86_update_stack_boundary (void)
8175 /* Prefer the one specified at command line. */
8176 ix86_incoming_stack_boundary
8177 = (ix86_user_incoming_stack_boundary
8178 ? ix86_user_incoming_stack_boundary
8179 : ix86_default_incoming_stack_boundary);
8181 /* Incoming stack alignment can be changed on individual functions
8182 via force_align_arg_pointer attribute. We use the smallest
8183 incoming stack boundary. */
8184 if (ix86_incoming_stack_boundary > MIN_STACK_BOUNDARY
8185 && lookup_attribute (ix86_force_align_arg_pointer_string,
8186 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
8187 ix86_incoming_stack_boundary = MIN_STACK_BOUNDARY;
8189 /* The incoming stack frame has to be aligned at least at
8190 parm_stack_boundary. */
8191 if (ix86_incoming_stack_boundary < crtl->parm_stack_boundary)
8192 ix86_incoming_stack_boundary = crtl->parm_stack_boundary;
8194 /* Stack at entrance of main is aligned by runtime. We use the
8195 smallest incoming stack boundary. */
8196 if (ix86_incoming_stack_boundary > MAIN_STACK_BOUNDARY
8197 && DECL_NAME (current_function_decl)
8198 && MAIN_NAME_P (DECL_NAME (current_function_decl))
8199 && DECL_FILE_SCOPE_P (current_function_decl))
8200 ix86_incoming_stack_boundary = MAIN_STACK_BOUNDARY;
8202 /* x86_64 varargs need 16-byte stack alignment for the register save
8203 area.  */
8204 if (TARGET_64BIT
8205 && cfun->stdarg
8206 && crtl->stack_alignment_estimated < 128)
8207 crtl->stack_alignment_estimated = 128;
8210 /* Handle the TARGET_GET_DRAP_RTX hook. Return NULL if no DRAP is
8211 needed or an rtx for DRAP otherwise. */
8213 static rtx
8214 ix86_get_drap_rtx (void)
8216 if (ix86_force_drap || !ACCUMULATE_OUTGOING_ARGS)
8217 crtl->need_drap = true;
8219 if (stack_realign_drap)
8221 /* Assign DRAP to vDRAP and return vDRAP.  */
8222 unsigned int regno = find_drap_reg ();
8223 rtx drap_vreg;
8224 rtx arg_ptr;
8225 rtx seq, insn;
8227 arg_ptr = gen_rtx_REG (Pmode, regno);
8228 crtl->drap_reg = arg_ptr;
8230 start_sequence ();
8231 drap_vreg = copy_to_reg (arg_ptr);
8232 seq = get_insns ();
8233 end_sequence ();
8235 insn = emit_insn_before (seq, NEXT_INSN (entry_of_function ()));
8236 RTX_FRAME_RELATED_P (insn) = 1;
8237 return drap_vreg;
8239 else
8240 return NULL;
8243 /* Handle the TARGET_INTERNAL_ARG_POINTER hook. */
8245 static rtx
8246 ix86_internal_arg_pointer (void)
8248 return virtual_incoming_args_rtx;
8251 /* Finalize stack_realign_needed flag, which will guide prologue/epilogue
8252 to be generated in correct form. */
8253 static void
8254 ix86_finalize_stack_realign_flags (void)
8256 /* Check if stack realignment is really needed after reload, and
8257 store the result in cfun.  */
8258 unsigned int incoming_stack_boundary
8259 = (crtl->parm_stack_boundary > ix86_incoming_stack_boundary
8260 ? crtl->parm_stack_boundary : ix86_incoming_stack_boundary);
8261 unsigned int stack_realign = (incoming_stack_boundary
8262 < (current_function_is_leaf
8263 ? crtl->max_used_stack_slot_alignment
8264 : crtl->stack_alignment_needed));
8266 if (crtl->stack_realign_finalized)
8268 /* After stack_realign_needed is finalized, we can no longer
8269 change it.  */
8270 gcc_assert (crtl->stack_realign_needed == stack_realign);
8272 else
8274 crtl->stack_realign_needed = stack_realign;
8275 crtl->stack_realign_finalized = true;
8279 /* Expand the prologue into a bunch of separate insns. */
8281 void
8282 ix86_expand_prologue (void)
8284 rtx insn;
8285 bool pic_reg_used;
8286 struct ix86_frame frame;
8287 HOST_WIDE_INT allocate;
8289 ix86_finalize_stack_realign_flags ();
8291 /* DRAP should not coexist with stack_realign_fp */
8292 gcc_assert (!(crtl->drap_reg && stack_realign_fp));
8294 /* Initialize CFA state for before the prologue. */
8295 ix86_cfa_state->reg = stack_pointer_rtx;
8296 ix86_cfa_state->offset = INCOMING_FRAME_SP_OFFSET;
8298 ix86_compute_frame_layout (&frame);
8300 /* Emit prologue code to adjust stack alignment and set up DRAP, in case
8301 DRAP is needed and stack realignment is really needed after reload.  */
8302 if (crtl->drap_reg && crtl->stack_realign_needed)
8304 rtx x, y;
8305 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8306 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8307 ? 0 : UNITS_PER_WORD);
8309 gcc_assert (stack_realign_drap);
8311 /* Grab the argument pointer. */
8312 x = plus_constant (stack_pointer_rtx,
8313 (UNITS_PER_WORD + param_ptr_offset));
8314 y = crtl->drap_reg;
8316 /* Only need to push the parameter pointer reg if it is a
8317 caller-saved reg.  */
8318 if (!call_used_regs[REGNO (crtl->drap_reg)])
8320 /* Push arg pointer reg */
8321 insn = emit_insn (gen_push (y));
8322 RTX_FRAME_RELATED_P (insn) = 1;
8325 insn = emit_insn (gen_rtx_SET (VOIDmode, y, x));
8326 RTX_FRAME_RELATED_P (insn) = 1;
8327 ix86_cfa_state->reg = crtl->drap_reg;
8329 /* Align the stack. */
8330 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8331 stack_pointer_rtx,
8332 GEN_INT (-align_bytes)));
8333 RTX_FRAME_RELATED_P (insn) = 1;
8335 /* Replicate the return address on the stack so that return
8336 address can be reached via (argp - 1) slot. This is needed
8337 to implement macro RETURN_ADDR_RTX and intrinsic function
8338 expand_builtin_return_addr etc. */
8339 x = crtl->drap_reg;
8340 x = gen_frame_mem (Pmode,
8341 plus_constant (x, -UNITS_PER_WORD));
8342 insn = emit_insn (gen_push (x));
8343 RTX_FRAME_RELATED_P (insn) = 1;
8346 /* Note: AT&T enter does NOT have reversed args. Enter is probably
8347 slower on all targets. Also sdb doesn't like it. */
8349 if (frame_pointer_needed)
8351 insn = emit_insn (gen_push (hard_frame_pointer_rtx));
8352 RTX_FRAME_RELATED_P (insn) = 1;
8354 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
8355 RTX_FRAME_RELATED_P (insn) = 1;
8357 if (ix86_cfa_state->reg == stack_pointer_rtx)
8358 ix86_cfa_state->reg = hard_frame_pointer_rtx;
8361 if (stack_realign_fp)
8363 int align_bytes = crtl->stack_alignment_needed / BITS_PER_UNIT;
8364 gcc_assert (align_bytes > MIN_STACK_BOUNDARY / BITS_PER_UNIT);
8366 /* Align the stack. */
8367 insn = emit_insn ((*ix86_gen_andsp) (stack_pointer_rtx,
8368 stack_pointer_rtx,
8369 GEN_INT (-align_bytes)));
8370 RTX_FRAME_RELATED_P (insn) = 1;
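/* ALLOCATE is what the prologue must subtract from the stack pointer:
   frame.to_allocate (locals, paddings, va_arg and outgoing-argument
   areas, less the red zone) plus the SSE save area, and also the GPR
   save area when registers are saved with moves rather than pushes.  */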
8373 allocate = frame.to_allocate + frame.nsseregs * 16 + frame.padding0;
8375 if (!frame.save_regs_using_mov)
8376 ix86_emit_save_regs ();
8377 else
8378 allocate += frame.nregs * UNITS_PER_WORD;
8380 /* When using the red zone we may start saving registers before allocating
8381 the stack frame, saving one cycle of the prologue.  However, avoid
8382 doing this if we are going to have to probe the stack, since
8383 at least on x86_64 the stack probe can turn into a call that clobbers
8384 a red zone location.  */
8385 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE && frame.save_regs_using_mov
8386 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT))
8387 ix86_emit_save_regs_using_mov ((frame_pointer_needed
8388 && !crtl->stack_realign_needed)
8389 ? hard_frame_pointer_rtx
8390 : stack_pointer_rtx,
8391 -frame.nregs * UNITS_PER_WORD);
8393 if (allocate == 0)
8395 else if (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)
8396 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8397 GEN_INT (-allocate), -1,
8398 ix86_cfa_state->reg == stack_pointer_rtx);
8399 else
8401 /* Only valid for Win32. */
8402 rtx eax = gen_rtx_REG (Pmode, AX_REG);
8403 bool eax_live;
8404 rtx t;
8406 gcc_assert (!TARGET_64BIT || cfun->machine->call_abi == MS_ABI);
8408 if (cfun->machine->call_abi == MS_ABI)
8409 eax_live = false;
8410 else
8411 eax_live = ix86_eax_live_at_start_p ();
8413 if (eax_live)
8415 emit_insn (gen_push (eax));
8416 allocate -= UNITS_PER_WORD;
8419 emit_move_insn (eax, GEN_INT (allocate));
8421 if (TARGET_64BIT)
8422 insn = gen_allocate_stack_worker_64 (eax, eax);
8423 else
8424 insn = gen_allocate_stack_worker_32 (eax, eax);
8425 insn = emit_insn (insn);
8427 if (ix86_cfa_state->reg == stack_pointer_rtx)
8429 ix86_cfa_state->offset += allocate;
8430 t = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (-allocate));
8431 t = gen_rtx_SET (VOIDmode, stack_pointer_rtx, t);
8432 add_reg_note (insn, REG_CFA_ADJUST_CFA, t);
8433 RTX_FRAME_RELATED_P (insn) = 1;
8436 if (eax_live)
8438 if (frame_pointer_needed)
8439 t = plus_constant (hard_frame_pointer_rtx,
8440 allocate
8441 - frame.to_allocate
8442 - frame.nregs * UNITS_PER_WORD);
8443 else
8444 t = plus_constant (stack_pointer_rtx, allocate);
8445 emit_move_insn (eax, gen_rtx_MEM (Pmode, t));
8449 if (frame.save_regs_using_mov
8450 && !(!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE
8451 && (! TARGET_STACK_PROBE || allocate < CHECK_STACK_LIMIT)))
8453 if (!frame_pointer_needed
8454 || !(frame.to_allocate + frame.padding0)
8455 || crtl->stack_realign_needed)
8456 ix86_emit_save_regs_using_mov (stack_pointer_rtx,
8457 frame.to_allocate
8458 + frame.nsseregs * 16 + frame.padding0);
8459 else
8460 ix86_emit_save_regs_using_mov (hard_frame_pointer_rtx,
8461 -frame.nregs * UNITS_PER_WORD);
8463 if (!frame_pointer_needed
8464 || !(frame.to_allocate + frame.padding0)
8465 || crtl->stack_realign_needed)
8466 ix86_emit_save_sse_regs_using_mov (stack_pointer_rtx,
8467 frame.to_allocate);
8468 else
8469 ix86_emit_save_sse_regs_using_mov (hard_frame_pointer_rtx,
8470 - frame.nregs * UNITS_PER_WORD
8471 - frame.nsseregs * 16
8472 - frame.padding0);
8474 pic_reg_used = false;
8475 if (pic_offset_table_rtx
8476 && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
8477 || crtl->profile))
8479 unsigned int alt_pic_reg_used = ix86_select_alt_pic_regnum ();
8481 if (alt_pic_reg_used != INVALID_REGNUM)
8482 SET_REGNO (pic_offset_table_rtx, alt_pic_reg_used);
8484 pic_reg_used = true;
8487 if (pic_reg_used)
8489 if (TARGET_64BIT)
8491 if (ix86_cmodel == CM_LARGE_PIC)
8493 rtx tmp_reg = gen_rtx_REG (DImode, R11_REG);
8494 rtx label = gen_label_rtx ();
8495 emit_label (label);
8496 LABEL_PRESERVE_P (label) = 1;
8497 gcc_assert (REGNO (pic_offset_table_rtx) != REGNO (tmp_reg));
8498 insn = emit_insn (gen_set_rip_rex64 (pic_offset_table_rtx, label));
8499 insn = emit_insn (gen_set_got_offset_rex64 (tmp_reg, label));
8500 insn = emit_insn (gen_adddi3 (pic_offset_table_rtx,
8501 pic_offset_table_rtx, tmp_reg));
8503 else
8504 insn = emit_insn (gen_set_got_rex64 (pic_offset_table_rtx));
8506 else
8507 insn = emit_insn (gen_set_got (pic_offset_table_rtx));
8510 /* In the pic_reg_used case, make sure that the got load isn't deleted
8511 when mcount needs it. Blockage to avoid call movement across mcount
8512 call is emitted in generic code after the NOTE_INSN_PROLOGUE_END
8513 note. */
8514 if (crtl->profile && pic_reg_used)
8515 emit_insn (gen_prologue_use (pic_offset_table_rtx));
8517 if (crtl->drap_reg && !crtl->stack_realign_needed)
8519 /* vDRAP is set up, but after reload it turns out stack realignment
8520 isn't necessary; here we emit prologue code to set up DRAP
8521 without the stack realignment adjustment.  */
8522 int drap_bp_offset = UNITS_PER_WORD * 2;
8523 rtx x = plus_constant (hard_frame_pointer_rtx, drap_bp_offset);
8524 insn = emit_insn (gen_rtx_SET (VOIDmode, crtl->drap_reg, x));
8527 /* Prevent instructions from being scheduled into the register save push
8528 sequence when access to the red zone area is done through the frame pointer.
8529 The offset between the frame pointer and the stack pointer is calculated
8530 relative to the value of the stack pointer at the end of the function
8531 prologue, and moving instructions that access the red zone area via the frame
8532 pointer inside the push sequence violates this assumption.  */
8533 if (frame_pointer_needed && frame.red_zone_size)
8534 emit_insn (gen_memory_blockage ());
8536 /* Emit cld instruction if stringops are used in the function. */
8537 if (TARGET_CLD && ix86_current_function_needs_cld)
8538 emit_insn (gen_cld ());
8541 /* Emit code to restore REG using a POP insn. */
8543 static void
8544 ix86_emit_restore_reg_using_pop (rtx reg, HOST_WIDE_INT red_offset)
8546 rtx insn = emit_insn (ix86_gen_pop1 (reg));
8548 if (ix86_cfa_state->reg == crtl->drap_reg
8549 && REGNO (reg) == REGNO (crtl->drap_reg))
8551 /* Previously we'd represented the CFA as an expression
8552 like *(%ebp - 8). We've just popped that value from
8553 the stack, which means we need to reset the CFA to
8554 the drap register. This will remain until we restore
8555 the stack pointer. */
8556 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8557 RTX_FRAME_RELATED_P (insn) = 1;
8558 return;
8561 if (ix86_cfa_state->reg == stack_pointer_rtx)
8563 ix86_cfa_state->offset -= UNITS_PER_WORD;
8564 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8565 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
8566 RTX_FRAME_RELATED_P (insn) = 1;
8569 /* When the frame pointer is the CFA, and we pop it, we are
8570 swapping back to the stack pointer as the CFA. This happens
8571 for stack frames that don't allocate other data, so we assume
8572 the stack pointer is now pointing at the return address, i.e.
8573 the function entry state, which makes the offset be 1 word. */
8574 else if (ix86_cfa_state->reg == hard_frame_pointer_rtx
8575 && reg == hard_frame_pointer_rtx)
8577 ix86_cfa_state->reg = stack_pointer_rtx;
8578 ix86_cfa_state->offset = UNITS_PER_WORD;
8580 add_reg_note (insn, REG_CFA_DEF_CFA,
8581 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
8582 GEN_INT (UNITS_PER_WORD)));
8583 RTX_FRAME_RELATED_P (insn) = 1;
8586 ix86_add_cfa_restore_note (insn, reg, red_offset);
8589 /* Emit code to restore saved registers using POP insns. */
8591 static void
8592 ix86_emit_restore_regs_using_pop (HOST_WIDE_INT red_offset)
8594 int regno;
8596 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8597 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, false))
8599 ix86_emit_restore_reg_using_pop (gen_rtx_REG (Pmode, regno),
8600 red_offset);
8601 red_offset += UNITS_PER_WORD;
8605 /* Emit code and notes for the LEAVE instruction. */
8607 static void
8608 ix86_emit_leave (HOST_WIDE_INT red_offset)
8610 rtx insn = emit_insn (ix86_gen_leave ());
8612 ix86_add_queued_cfa_restore_notes (insn);
8614 if (ix86_cfa_state->reg == hard_frame_pointer_rtx)
8616 add_reg_note (insn, REG_CFA_ADJUST_CFA,
8617 copy_rtx (XVECEXP (PATTERN (insn), 0, 0)));
8618 RTX_FRAME_RELATED_P (insn) = 1;
8619 ix86_add_cfa_restore_note (insn, hard_frame_pointer_rtx, red_offset);
8623 /* Emit code to restore saved registers using MOV insns. First register
8624 is restored from POINTER + OFFSET. */
8625 static void
8626 ix86_emit_restore_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8627 HOST_WIDE_INT red_offset,
8628 int maybe_eh_return)
8630 unsigned int regno;
8631 rtx base_address = gen_rtx_MEM (Pmode, pointer);
8632 rtx insn;
8634 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8635 if (!SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8637 rtx reg = gen_rtx_REG (Pmode, regno);
8639 /* Ensure that adjust_address won't be forced to produce pointer
8640 out of range allowed by x86-64 instruction set. */
8641 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8643 rtx r11;
8645 r11 = gen_rtx_REG (DImode, R11_REG);
8646 emit_move_insn (r11, GEN_INT (offset));
8647 emit_insn (gen_adddi3 (r11, r11, pointer));
8648 base_address = gen_rtx_MEM (Pmode, r11);
8649 offset = 0;
8651 insn = emit_move_insn (reg,
8652 adjust_address (base_address, Pmode, offset));
8653 offset += UNITS_PER_WORD;
8655 if (ix86_cfa_state->reg == crtl->drap_reg
8656 && regno == REGNO (crtl->drap_reg))
8658 /* Previously we'd represented the CFA as an expression
8659 like *(%ebp - 8). We've just popped that value from
8660 the stack, which means we need to reset the CFA to
8661 the drap register. This will remain until we restore
8662 the stack pointer. */
8663 add_reg_note (insn, REG_CFA_DEF_CFA, reg);
8664 RTX_FRAME_RELATED_P (insn) = 1;
8666 else
8667 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8669 red_offset += UNITS_PER_WORD;
8673 /* Emit code to restore saved SSE registers using MOV insns.  The first
8674 register is restored from POINTER + OFFSET.  */
8675 static void
8676 ix86_emit_restore_sse_regs_using_mov (rtx pointer, HOST_WIDE_INT offset,
8677 HOST_WIDE_INT red_offset,
8678 int maybe_eh_return)
8680 int regno;
8681 rtx base_address = gen_rtx_MEM (TImode, pointer);
8682 rtx mem, insn;
8684 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
8685 if (SSE_REGNO_P (regno) && ix86_save_reg (regno, maybe_eh_return))
8687 rtx reg = gen_rtx_REG (TImode, regno);
8689 /* Ensure that adjust_address won't be forced to produce pointer
8690 out of range allowed by x86-64 instruction set. */
8691 if (TARGET_64BIT && offset != trunc_int_for_mode (offset, SImode))
8693 rtx r11;
8695 r11 = gen_rtx_REG (DImode, R11_REG);
8696 emit_move_insn (r11, GEN_INT (offset));
8697 emit_insn (gen_adddi3 (r11, r11, pointer));
8698 base_address = gen_rtx_MEM (TImode, r11);
8699 offset = 0;
8701 mem = adjust_address (base_address, TImode, offset);
8702 set_mem_align (mem, 128);
8703 insn = emit_move_insn (reg, mem);
8704 offset += 16;
8706 ix86_add_cfa_restore_note (NULL_RTX, reg, red_offset);
8708 red_offset += 16;
8712 /* Restore function stack, frame, and registers. */
8714 void
8715 ix86_expand_epilogue (int style)
8717 int sp_valid;
8718 struct ix86_frame frame;
8719 HOST_WIDE_INT offset, red_offset;
8720 struct machine_cfa_state cfa_state_save = *ix86_cfa_state;
8721 bool using_drap;
8723 ix86_finalize_stack_realign_flags ();
8725 /* When stack is realigned, SP must be valid. */
8726 sp_valid = (!frame_pointer_needed
8727 || current_function_sp_is_unchanging
8728 || stack_realign_fp);
8730 ix86_compute_frame_layout (&frame);
8732 /* See the comment about red zone and frame
8733 pointer usage in ix86_expand_prologue. */
8734 if (frame_pointer_needed && frame.red_zone_size)
8735 emit_insn (gen_memory_blockage ());
8737 using_drap = crtl->drap_reg && crtl->stack_realign_needed;
8738 gcc_assert (!using_drap || ix86_cfa_state->reg == crtl->drap_reg);
8740 /* Calculate start of saved registers relative to ebp. Special care
8741 must be taken for the normal return case of a function using
8742 eh_return: the eax and edx registers are marked as saved, but not
8743 restored along this path. */
8744 offset = frame.nregs;
8745 if (crtl->calls_eh_return && style != 2)
8746 offset -= 2;
8747 offset *= -UNITS_PER_WORD;
8748 offset -= frame.nsseregs * 16 + frame.padding0;
8750 /* Calculate start of saved registers relative to esp on entry of the
8751 function. When realigning stack, this needs to be the most negative
8752 value possible at runtime. */
8753 red_offset = offset;
8754 if (using_drap)
8755 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8756 + UNITS_PER_WORD;
8757 else if (stack_realign_fp)
8758 red_offset -= crtl->stack_alignment_needed / BITS_PER_UNIT
8759 - UNITS_PER_WORD;
8760 if (frame_pointer_needed)
8761 red_offset -= UNITS_PER_WORD;
8763 /* If we're only restoring one register and sp is not valid then
8764 use a move instruction to restore the register, since it's
8765 less work than reloading sp and popping the register.
8767 The default code results in a stack adjustment using an add/lea instruction,
8768 while this code results in a LEAVE instruction (or discrete equivalent),
8769 so it is profitable in some other cases as well, especially when there
8770 are no registers to restore.  We also use this code when TARGET_USE_LEAVE
8771 and there is exactly one register to pop.  This heuristic may need some
8772 tuning in the future.  */
8773 if ((!sp_valid && (frame.nregs + frame.nsseregs) <= 1)
8774 || (TARGET_EPILOGUE_USING_MOVE
8775 && cfun->machine->use_fast_prologue_epilogue
8776 && ((frame.nregs + frame.nsseregs) > 1
8777 || (frame.to_allocate + frame.padding0) != 0))
8778 || (frame_pointer_needed && !(frame.nregs + frame.nsseregs)
8779 && (frame.to_allocate + frame.padding0) != 0)
8780 || (frame_pointer_needed && TARGET_USE_LEAVE
8781 && cfun->machine->use_fast_prologue_epilogue
8782 && (frame.nregs + frame.nsseregs) == 1)
8783 || crtl->calls_eh_return)
8785 /* Restore registers.  We can use ebp or esp to address the memory
8786 locations.  If both are available, default to ebp, since offsets
8787 are known to be small.  The only exception is esp pointing directly
8788 to the end of the block of saved registers, where we may simplify
8789 the addressing mode.
8791 If we are realigning the stack with bp and sp, register restores can't
8792 be addressed via bp; sp must be used instead.  */
8794 if (!frame_pointer_needed
8795 || (sp_valid && !(frame.to_allocate + frame.padding0))
8796 || stack_realign_fp)
8798 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8799 frame.to_allocate, red_offset,
8800 style == 2);
8801 ix86_emit_restore_regs_using_mov (stack_pointer_rtx,
8802 frame.to_allocate
8803 + frame.nsseregs * 16
8804 + frame.padding0,
8805 red_offset
8806 + frame.nsseregs * 16
8807 + frame.padding0, style == 2);
8809 else
8811 ix86_emit_restore_sse_regs_using_mov (hard_frame_pointer_rtx,
8812 offset, red_offset,
8813 style == 2);
8814 ix86_emit_restore_regs_using_mov (hard_frame_pointer_rtx,
8815 offset
8816 + frame.nsseregs * 16
8817 + frame.padding0,
8818 red_offset
8819 + frame.nsseregs * 16
8820 + frame.padding0, style == 2);
8823 red_offset -= offset;
8825 /* eh_return epilogues need %ecx added to the stack pointer. */
8826 if (style == 2)
8828 rtx tmp, sa = EH_RETURN_STACKADJ_RTX;
8830 /* Stack align doesn't work with eh_return. */
8831 gcc_assert (!crtl->stack_realign_needed);
8833 if (frame_pointer_needed)
8835 tmp = gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx, sa);
8836 tmp = plus_constant (tmp, UNITS_PER_WORD);
8837 tmp = emit_insn (gen_rtx_SET (VOIDmode, sa, tmp));
8839 tmp = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
8840 tmp = emit_move_insn (hard_frame_pointer_rtx, tmp);
8842 /* Note that we use SA as a temporary CFA, as the return
8843 address is at the proper place relative to it. We
8844 pretend this happens at the FP restore insn because
8845 prior to this insn the FP would be stored at the wrong
8846 offset relative to SA, and after this insn we have no
8847 other reasonable register to use for the CFA. We don't
8848 bother resetting the CFA to the SP for the duration of
8849 the return insn. */
8850 add_reg_note (tmp, REG_CFA_DEF_CFA,
8851 plus_constant (sa, UNITS_PER_WORD));
8852 ix86_add_queued_cfa_restore_notes (tmp);
8853 add_reg_note (tmp, REG_CFA_RESTORE, hard_frame_pointer_rtx);
8854 RTX_FRAME_RELATED_P (tmp) = 1;
8855 ix86_cfa_state->reg = sa;
8856 ix86_cfa_state->offset = UNITS_PER_WORD;
8858 pro_epilogue_adjust_stack (stack_pointer_rtx, sa,
8859 const0_rtx, style, false);
8861 else
8863 tmp = gen_rtx_PLUS (Pmode, stack_pointer_rtx, sa);
8864 tmp = plus_constant (tmp, (frame.to_allocate
8865 + frame.nregs * UNITS_PER_WORD
8866 + frame.nsseregs * 16
8867 + frame.padding0));
8868 tmp = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx, tmp));
8869 ix86_add_queued_cfa_restore_notes (tmp);
8871 gcc_assert (ix86_cfa_state->reg == stack_pointer_rtx);
8872 if (ix86_cfa_state->offset != UNITS_PER_WORD)
8874 ix86_cfa_state->offset = UNITS_PER_WORD;
8875 add_reg_note (tmp, REG_CFA_DEF_CFA,
8876 plus_constant (stack_pointer_rtx,
8877 UNITS_PER_WORD));
8878 RTX_FRAME_RELATED_P (tmp) = 1;
8882 else if (!frame_pointer_needed)
8883 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8884 GEN_INT (frame.to_allocate
8885 + frame.nregs * UNITS_PER_WORD
8886 + frame.nsseregs * 16
8887 + frame.padding0),
8888 style, !using_drap);
8889 /* If not an i386, mov & pop is faster than "leave". */
8890 else if (TARGET_USE_LEAVE || optimize_function_for_size_p (cfun)
8891 || !cfun->machine->use_fast_prologue_epilogue)
8892 ix86_emit_leave (red_offset);
8893 else
8895 pro_epilogue_adjust_stack (stack_pointer_rtx,
8896 hard_frame_pointer_rtx,
8897 const0_rtx, style, !using_drap);
8899 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx, red_offset);
8902 else
8904 /* The first step is to deallocate the stack frame so that we can
8905 pop the registers.
8907 If we realign the stack with the frame pointer, the stack pointer
8908 can't be recovered via lea $offset(%bp), %sp, because
8909 there is a padding area between bp and sp for the realignment.
8910 "add $to_allocate, %sp" must be used instead.  */
8911 if (!sp_valid)
8913 gcc_assert (frame_pointer_needed);
8914 gcc_assert (!stack_realign_fp);
8915 pro_epilogue_adjust_stack (stack_pointer_rtx,
8916 hard_frame_pointer_rtx,
8917 GEN_INT (offset), style, false);
8918 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8919 frame.to_allocate, red_offset,
8920 style == 2);
8921 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8922 GEN_INT (frame.nsseregs * 16 + frame.padding0),
8923 style, false);
8925 else if (frame.to_allocate || frame.padding0 || frame.nsseregs)
8927 ix86_emit_restore_sse_regs_using_mov (stack_pointer_rtx,
8928 frame.to_allocate, red_offset,
8929 style == 2);
8930 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
8931 GEN_INT (frame.to_allocate
8932 + frame.nsseregs * 16
8933 + frame.padding0), style,
8934 !using_drap && !frame_pointer_needed);
8937 ix86_emit_restore_regs_using_pop (red_offset + frame.nsseregs * 16
8938 + frame.padding0);
8939 red_offset -= offset;
8941 if (frame_pointer_needed)
8943 /* Leave results in shorter dependency chains on CPUs that are
8944 able to grok it fast. */
8945 if (TARGET_USE_LEAVE)
8946 ix86_emit_leave (red_offset);
8947 else
8949 /* When stack realignment really happened, recovering the stack
8950 pointer from the hard frame pointer is a must before popping it,
8951 if we are not using leave.  */
8952 if (stack_realign_fp)
8953 pro_epilogue_adjust_stack (stack_pointer_rtx,
8954 hard_frame_pointer_rtx,
8955 const0_rtx, style, !using_drap);
8956 ix86_emit_restore_reg_using_pop (hard_frame_pointer_rtx,
8957 red_offset);
8962 if (using_drap)
8964 int param_ptr_offset = (call_used_regs[REGNO (crtl->drap_reg)]
8965 ? 0 : UNITS_PER_WORD);
8966 rtx insn;
8968 gcc_assert (stack_realign_drap);
8970 insn = emit_insn ((*ix86_gen_add3) (stack_pointer_rtx,
8971 crtl->drap_reg,
8972 GEN_INT (-(UNITS_PER_WORD
8973 + param_ptr_offset))));
8975 ix86_cfa_state->reg = stack_pointer_rtx;
8976 ix86_cfa_state->offset = UNITS_PER_WORD + param_ptr_offset;
8978 add_reg_note (insn, REG_CFA_DEF_CFA,
8979 gen_rtx_PLUS (Pmode, ix86_cfa_state->reg,
8980 GEN_INT (ix86_cfa_state->offset)));
8981 RTX_FRAME_RELATED_P (insn) = 1;
8983 if (param_ptr_offset)
8984 ix86_emit_restore_reg_using_pop (crtl->drap_reg, -UNITS_PER_WORD);
8987 /* Sibcall epilogues don't want a return instruction. */
8988 if (style == 0)
8990 *ix86_cfa_state = cfa_state_save;
8991 return;
8994 if (crtl->args.pops_args && crtl->args.size)
8996 rtx popc = GEN_INT (crtl->args.pops_args);
8998 /* i386 can only pop 64K bytes.  If asked to pop more, pop the return
8999 address, do an explicit add, and jump indirectly to the caller.  */
9001 if (crtl->args.pops_args >= 65536)
9003 rtx ecx = gen_rtx_REG (SImode, CX_REG);
9004 rtx insn;
9006 /* There is no "pascal" calling convention in any 64bit ABI. */
9007 gcc_assert (!TARGET_64BIT);
9009 insn = emit_insn (gen_popsi1 (ecx));
9010 ix86_cfa_state->offset -= UNITS_PER_WORD;
9012 add_reg_note (insn, REG_CFA_ADJUST_CFA,
9013 copy_rtx (XVECEXP (PATTERN (insn), 0, 1)));
9014 add_reg_note (insn, REG_CFA_REGISTER,
9015 gen_rtx_SET (VOIDmode, ecx, pc_rtx));
9016 RTX_FRAME_RELATED_P (insn) = 1;
9018 pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
9019 popc, -1, true);
9020 emit_jump_insn (gen_return_indirect_internal (ecx));
9022 else
9023 emit_jump_insn (gen_return_pop_internal (popc));
9025 else
9026 emit_jump_insn (gen_return_internal ());
9028 /* Restore the state back to the state from the prologue,
9029 so that it's correct for the next epilogue. */
9030 *ix86_cfa_state = cfa_state_save;
9033 /* Reset from the function's potential modifications. */
9035 static void
9036 ix86_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
9037 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
9039 if (pic_offset_table_rtx)
9040 SET_REGNO (pic_offset_table_rtx, REAL_PIC_OFFSET_TABLE_REGNUM);
9041 #if TARGET_MACHO
9042 /* Mach-O doesn't support labels at the end of objects, so if
9043 it looks like we might want one, insert a NOP. */
9045 rtx insn = get_last_insn ();
9046 while (insn
9047 && NOTE_P (insn)
9048 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
9049 insn = PREV_INSN (insn);
9050 if (insn
9051 && (LABEL_P (insn)
9052 || (NOTE_P (insn)
9053 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
9054 fputs ("\tnop\n", file);
9056 #endif
9060 /* Extract the parts of an RTL expression that is a valid memory address
9061 for an instruction. Return 0 if the structure of the address is
9062 grossly off. Return -1 if the address contains ASHIFT, so it is not
9063 strictly valid, but still used for computing the length of an lea instruction. */
9066 ix86_decompose_address (rtx addr, struct ix86_address *out)
9068 rtx base = NULL_RTX, index = NULL_RTX, disp = NULL_RTX;
9069 rtx base_reg, index_reg;
9070 HOST_WIDE_INT scale = 1;
9071 rtx scale_rtx = NULL_RTX;
9072 int retval = 1;
9073 enum ix86_address_seg seg = SEG_DEFAULT;
9075 if (REG_P (addr) || GET_CODE (addr) == SUBREG)
9076 base = addr;
9077 else if (GET_CODE (addr) == PLUS)
9079 rtx addends[4], op;
9080 int n = 0, i;
9082 op = addr;
9085 if (n >= 4)
9086 return 0;
9087 addends[n++] = XEXP (op, 1);
9088 op = XEXP (op, 0);
9090 while (GET_CODE (op) == PLUS);
9091 if (n >= 4)
9092 return 0;
9093 addends[n] = op;
9095 for (i = n; i >= 0; --i)
9097 op = addends[i];
9098 switch (GET_CODE (op))
9100 case MULT:
9101 if (index)
9102 return 0;
9103 index = XEXP (op, 0);
9104 scale_rtx = XEXP (op, 1);
9105 break;
9107 case UNSPEC:
9108 if (XINT (op, 1) == UNSPEC_TP
9109 && TARGET_TLS_DIRECT_SEG_REFS
9110 && seg == SEG_DEFAULT)
9111 seg = TARGET_64BIT ? SEG_FS : SEG_GS;
9112 else
9113 return 0;
9114 break;
9116 case REG:
9117 case SUBREG:
9118 if (!base)
9119 base = op;
9120 else if (!index)
9121 index = op;
9122 else
9123 return 0;
9124 break;
9126 case CONST:
9127 case CONST_INT:
9128 case SYMBOL_REF:
9129 case LABEL_REF:
9130 if (disp)
9131 return 0;
9132 disp = op;
9133 break;
9135 default:
9136 return 0;
9140 else if (GET_CODE (addr) == MULT)
9142 index = XEXP (addr, 0); /* index*scale */
9143 scale_rtx = XEXP (addr, 1);
9145 else if (GET_CODE (addr) == ASHIFT)
9147 rtx tmp;
9149 /* We're called for lea too, which implements ashift on occasion. */
9150 index = XEXP (addr, 0);
9151 tmp = XEXP (addr, 1);
9152 if (!CONST_INT_P (tmp))
9153 return 0;
9154 scale = INTVAL (tmp);
9155 if ((unsigned HOST_WIDE_INT) scale > 3)
9156 return 0;
9157 scale = 1 << scale;
9158 retval = -1;
9160 else
9161 disp = addr; /* displacement */
9163 /* Extract the integral value of scale. */
9164 if (scale_rtx)
9166 if (!CONST_INT_P (scale_rtx))
9167 return 0;
9168 scale = INTVAL (scale_rtx);
9171 base_reg = base && GET_CODE (base) == SUBREG ? SUBREG_REG (base) : base;
9172 index_reg = index && GET_CODE (index) == SUBREG ? SUBREG_REG (index) : index;
9174 /* Avoid useless 0 displacement. */
9175 if (disp == const0_rtx && (base || index))
9176 disp = NULL_RTX;
9178 /* Allow arg pointer and stack pointer as index if there is no scaling. */
9179 if (base_reg && index_reg && scale == 1
9180 && (index_reg == arg_pointer_rtx
9181 || index_reg == frame_pointer_rtx
9182 || (REG_P (index_reg) && REGNO (index_reg) == STACK_POINTER_REGNUM)))
9184 rtx tmp;
9185 tmp = base, base = index, index = tmp;
9186 tmp = base_reg, base_reg = index_reg, index_reg = tmp;
9189 /* Special case: %ebp cannot be encoded as a base without a displacement.
9190 Similarly %r13. */
9191 if (!disp
9192 && base_reg
9193 && (base_reg == hard_frame_pointer_rtx
9194 || base_reg == frame_pointer_rtx
9195 || base_reg == arg_pointer_rtx
9196 || (REG_P (base_reg)
9197 && (REGNO (base_reg) == HARD_FRAME_POINTER_REGNUM
9198 || REGNO (base_reg) == R13_REG))))
9199 disp = const0_rtx;
9201 /* Special case: on K6, [%esi] forces the instruction to be vector decoded.
9202 Avoid this by transforming to [%esi+0].
9203 Reload calls address legitimization without cfun defined, so we need
9204 to test cfun for being non-NULL. */
9205 if (TARGET_K6 && cfun && optimize_function_for_speed_p (cfun)
9206 && base_reg && !index_reg && !disp
9207 && REG_P (base_reg)
9208 && REGNO_REG_CLASS (REGNO (base_reg)) == SIREG)
9209 disp = const0_rtx;
9211 /* Special case: encode reg+reg instead of reg*2. */
9212 if (!base && index && scale == 2)
9213 base = index, base_reg = index_reg, scale = 1;
9215 /* Special case: scaling cannot be encoded without base or displacement. */
9216 if (!base && !disp && index && scale != 1)
9217 disp = const0_rtx;
9219 out->base = base;
9220 out->index = index;
9221 out->disp = disp;
9222 out->scale = scale;
9223 out->seg = seg;
9225 return retval;
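/* A standalone sketch (illustrative only, not GCC internals; the example_*
   names are introduced here for illustration): the parts recovered above
   describe the x86 memory-operand form base + index*scale + disp, with the
   scale restricted to 1, 2, 4 or 8.  */

struct example_address
{
  long base;   /* value held in the base register, if any */
  long index;  /* value held in the index register, if any */
  long scale;  /* 1, 2, 4 or 8 */
  long disp;   /* constant displacement */
};

/* Effective address the hardware would form for such an operand.  */
static long
example_effective_address (const struct example_address *a)
{
  return a->base + a->index * a->scale + a->disp;
}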
9228 /* Return the cost of the memory address x.
9229 For i386, it is better to use a complex address than let gcc copy
9230 the address into a reg and make a new pseudo. But not if the address
9231 requires two regs - that would mean more pseudos with longer
9232 lifetimes. */
9233 static int
9234 ix86_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
9236 struct ix86_address parts;
9237 int cost = 1;
9238 int ok = ix86_decompose_address (x, &parts);
9240 gcc_assert (ok);
9242 if (parts.base && GET_CODE (parts.base) == SUBREG)
9243 parts.base = SUBREG_REG (parts.base);
9244 if (parts.index && GET_CODE (parts.index) == SUBREG)
9245 parts.index = SUBREG_REG (parts.index);
9247 /* Attempt to minimize number of registers in the address. */
9248 if ((parts.base
9249 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER))
9250 || (parts.index
9251 && (!REG_P (parts.index)
9252 || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)))
9253 cost++;
9255 if (parts.base
9256 && (!REG_P (parts.base) || REGNO (parts.base) >= FIRST_PSEUDO_REGISTER)
9257 && parts.index
9258 && (!REG_P (parts.index) || REGNO (parts.index) >= FIRST_PSEUDO_REGISTER)
9259 && parts.base != parts.index)
9260 cost++;
9262 /* AMD-K6 doesn't like addresses with ModR/M set to 00_xxx_100b,
9263 since its predecode logic can't detect the length of such instructions
9264 and decoding degenerates to the vector decoder. Increase the cost of such
9265 addresses here. The penalty is at least 2 cycles. It may be worthwhile
9266 to split such addresses or even to refuse them entirely.
9268 The following addressing modes are affected:
9269 [base+scale*index]
9270 [scale*index+disp]
9271 [base+index]
9273 The first and last case may be avoidable by explicitly coding a zero
9274 displacement in the memory address, but I don't have an AMD-K6 machine
9275 handy to check this theory. */
9277 if (TARGET_K6
9278 && ((!parts.disp && parts.base && parts.index && parts.scale != 1)
9279 || (parts.disp && !parts.base && parts.index && parts.scale != 1)
9280 || (!parts.disp && parts.base && parts.index && parts.scale == 1)))
9281 cost += 10;
9283 return cost;
9286 /* Allow {LABEL | SYMBOL}_REF - SYMBOL_REF-FOR-PICBASE for Mach-O, as
9287 this is used to form addresses to local data when -fPIC is in
9288 use. */
9290 static bool
9291 darwin_local_data_pic (rtx disp)
9293 return (GET_CODE (disp) == UNSPEC
9294 && XINT (disp, 1) == UNSPEC_MACHOPIC_OFFSET);
9297 /* Determine if a given RTX is a valid constant. We already know this
9298 satisfies CONSTANT_P. */
9300 bool
9301 legitimate_constant_p (rtx x)
9303 switch (GET_CODE (x))
9305 case CONST:
9306 x = XEXP (x, 0);
9308 if (GET_CODE (x) == PLUS)
9310 if (!CONST_INT_P (XEXP (x, 1)))
9311 return false;
9312 x = XEXP (x, 0);
9315 if (TARGET_MACHO && darwin_local_data_pic (x))
9316 return true;
9318 /* Only some unspecs are valid as "constants". */
9319 if (GET_CODE (x) == UNSPEC)
9320 switch (XINT (x, 1))
9322 case UNSPEC_GOT:
9323 case UNSPEC_GOTOFF:
9324 case UNSPEC_PLTOFF:
9325 return TARGET_64BIT;
9326 case UNSPEC_TPOFF:
9327 case UNSPEC_NTPOFF:
9328 x = XVECEXP (x, 0, 0);
9329 return (GET_CODE (x) == SYMBOL_REF
9330 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9331 case UNSPEC_DTPOFF:
9332 x = XVECEXP (x, 0, 0);
9333 return (GET_CODE (x) == SYMBOL_REF
9334 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
9335 default:
9336 return false;
9339 /* We must have drilled down to a symbol. */
9340 if (GET_CODE (x) == LABEL_REF)
9341 return true;
9342 if (GET_CODE (x) != SYMBOL_REF)
9343 return false;
9344 /* FALLTHRU */
9346 case SYMBOL_REF:
9347 /* TLS symbols are never valid. */
9348 if (SYMBOL_REF_TLS_MODEL (x))
9349 return false;
9351 /* DLLIMPORT symbols are never valid. */
9352 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
9353 && SYMBOL_REF_DLLIMPORT_P (x))
9354 return false;
9355 break;
9357 case CONST_DOUBLE:
9358 if (GET_MODE (x) == TImode
9359 && x != CONST0_RTX (TImode)
9360 && !TARGET_64BIT)
9361 return false;
9362 break;
9364 case CONST_VECTOR:
9365 if (!standard_sse_constant_p (x))
9366 return false;
9368 default:
9369 break;
9372 /* Otherwise we handle everything else in the move patterns. */
9373 return true;
9376 /* Determine if it's legal to put X into the constant pool. This
9377 is not possible for the address of thread-local symbols, which
9378 is checked above. */
9380 static bool
9381 ix86_cannot_force_const_mem (rtx x)
9383 /* We can always put integral constants and vectors in memory. */
9384 switch (GET_CODE (x))
9386 case CONST_INT:
9387 case CONST_DOUBLE:
9388 case CONST_VECTOR:
9389 return false;
9391 default:
9392 break;
9394 return !legitimate_constant_p (x);
9398 /* Nonzero if the constant value X is a legitimate general operand
9399 when generating PIC code. It is given that flag_pic is on and
9400 that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
9402 bool
9403 legitimate_pic_operand_p (rtx x)
9405 rtx inner;
9407 switch (GET_CODE (x))
9409 case CONST:
9410 inner = XEXP (x, 0);
9411 if (GET_CODE (inner) == PLUS
9412 && CONST_INT_P (XEXP (inner, 1)))
9413 inner = XEXP (inner, 0);
9415 /* Only some unspecs are valid as "constants". */
9416 if (GET_CODE (inner) == UNSPEC)
9417 switch (XINT (inner, 1))
9419 case UNSPEC_GOT:
9420 case UNSPEC_GOTOFF:
9421 case UNSPEC_PLTOFF:
9422 return TARGET_64BIT;
9423 case UNSPEC_TPOFF:
9424 x = XVECEXP (inner, 0, 0);
9425 return (GET_CODE (x) == SYMBOL_REF
9426 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
9427 case UNSPEC_MACHOPIC_OFFSET:
9428 return legitimate_pic_address_disp_p (x);
9429 default:
9430 return false;
9432 /* FALLTHRU */
9434 case SYMBOL_REF:
9435 case LABEL_REF:
9436 return legitimate_pic_address_disp_p (x);
9438 default:
9439 return true;
9443 /* Determine if a given CONST RTX is a valid memory displacement
9444 in PIC mode. */
9447 legitimate_pic_address_disp_p (rtx disp)
9449 bool saw_plus;
9451 /* In 64bit mode we can allow direct addresses of symbols and labels
9452 when they are not dynamic symbols. */
9453 if (TARGET_64BIT)
9455 rtx op0 = disp, op1;
9457 switch (GET_CODE (disp))
9459 case LABEL_REF:
9460 return true;
9462 case CONST:
9463 if (GET_CODE (XEXP (disp, 0)) != PLUS)
9464 break;
9465 op0 = XEXP (XEXP (disp, 0), 0);
9466 op1 = XEXP (XEXP (disp, 0), 1);
9467 if (!CONST_INT_P (op1)
9468 || INTVAL (op1) >= 16*1024*1024
9469 || INTVAL (op1) < -16*1024*1024)
9470 break;
9471 if (GET_CODE (op0) == LABEL_REF)
9472 return true;
9473 if (GET_CODE (op0) != SYMBOL_REF)
9474 break;
9475 /* FALLTHRU */
9477 case SYMBOL_REF:
9478 /* TLS references should always be enclosed in UNSPEC. */
9479 if (SYMBOL_REF_TLS_MODEL (op0))
9480 return false;
9481 if (!SYMBOL_REF_FAR_ADDR_P (op0) && SYMBOL_REF_LOCAL_P (op0)
9482 && ix86_cmodel != CM_LARGE_PIC)
9483 return true;
9484 break;
9486 default:
9487 break;
9490 if (GET_CODE (disp) != CONST)
9491 return 0;
9492 disp = XEXP (disp, 0);
9494 if (TARGET_64BIT)
9496 /* It is unsafe to allow PLUS expressions here, as that would limit the
9497 allowed distance of GOT tables. We should not need these anyway. */
9498 if (GET_CODE (disp) != UNSPEC
9499 || (XINT (disp, 1) != UNSPEC_GOTPCREL
9500 && XINT (disp, 1) != UNSPEC_GOTOFF
9501 && XINT (disp, 1) != UNSPEC_PLTOFF))
9502 return 0;
9504 if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
9505 && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
9506 return 0;
9507 return 1;
9510 saw_plus = false;
9511 if (GET_CODE (disp) == PLUS)
9513 if (!CONST_INT_P (XEXP (disp, 1)))
9514 return 0;
9515 disp = XEXP (disp, 0);
9516 saw_plus = true;
9519 if (TARGET_MACHO && darwin_local_data_pic (disp))
9520 return 1;
9522 if (GET_CODE (disp) != UNSPEC)
9523 return 0;
9525 switch (XINT (disp, 1))
9527 case UNSPEC_GOT:
9528 if (saw_plus)
9529 return false;
9530 /* We need to check for both symbols and labels because VxWorks loads
9531 text labels with @GOT rather than @GOTOFF. See gotoff_operand for
9532 details. */
9533 return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9534 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
9535 case UNSPEC_GOTOFF:
9536 /* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
9537 The ABI also specifies a 32bit relocation, but we don't produce it in
9538 the small PIC model at all. */
9539 if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
9540 || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
9541 && !TARGET_64BIT)
9542 return gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
9543 return false;
9544 case UNSPEC_GOTTPOFF:
9545 case UNSPEC_GOTNTPOFF:
9546 case UNSPEC_INDNTPOFF:
9547 if (saw_plus)
9548 return false;
9549 disp = XVECEXP (disp, 0, 0);
9550 return (GET_CODE (disp) == SYMBOL_REF
9551 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
9552 case UNSPEC_NTPOFF:
9553 disp = XVECEXP (disp, 0, 0);
9554 return (GET_CODE (disp) == SYMBOL_REF
9555 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
9556 case UNSPEC_DTPOFF:
9557 disp = XVECEXP (disp, 0, 0);
9558 return (GET_CODE (disp) == SYMBOL_REF
9559 && SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
9562 return 0;
9565 /* Recognizes RTL expressions that are valid memory addresses for an
9566 instruction. The MODE argument is the machine mode for the MEM
9567 expression that wants to use this address.
9569 It only recognizes addresses in canonical form. LEGITIMIZE_ADDRESS should
9570 convert common non-canonical forms to canonical form so that they will
9571 be recognized. */
9573 static bool
9574 ix86_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
9575 rtx addr, bool strict)
9577 struct ix86_address parts;
9578 rtx base, index, disp;
9579 HOST_WIDE_INT scale;
9581 if (ix86_decompose_address (addr, &parts) <= 0)
9582 /* Decomposition failed. */
9583 return false;
9585 base = parts.base;
9586 index = parts.index;
9587 disp = parts.disp;
9588 scale = parts.scale;
9590 /* Validate base register.
9592 Don't allow SUBREG's that span more than a word here. It can lead to spill
9593 failures when the base is one word out of a two word structure, which is
9594 represented internally as a DImode int. */
9596 if (base)
9598 rtx reg;
9600 if (REG_P (base))
9601 reg = base;
9602 else if (GET_CODE (base) == SUBREG
9603 && REG_P (SUBREG_REG (base))
9604 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (base)))
9605 <= UNITS_PER_WORD)
9606 reg = SUBREG_REG (base);
9607 else
9608 /* Base is not a register. */
9609 return false;
9611 if (GET_MODE (base) != Pmode)
9612 /* Base is not in Pmode. */
9613 return false;
9615 if ((strict && ! REG_OK_FOR_BASE_STRICT_P (reg))
9616 || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (reg)))
9617 /* Base is not valid. */
9618 return false;
9621 /* Validate index register.
9623 Don't allow SUBREG's that span more than a word here -- same as above. */
9625 if (index)
9627 rtx reg;
9629 if (REG_P (index))
9630 reg = index;
9631 else if (GET_CODE (index) == SUBREG
9632 && REG_P (SUBREG_REG (index))
9633 && GET_MODE_SIZE (GET_MODE (SUBREG_REG (index)))
9634 <= UNITS_PER_WORD)
9635 reg = SUBREG_REG (index);
9636 else
9637 /* Index is not a register. */
9638 return false;
9640 if (GET_MODE (index) != Pmode)
9641 /* Index is not in Pmode. */
9642 return false;
9644 if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (reg))
9645 || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (reg)))
9646 /* Index is not valid. */
9647 return false;
9650 /* Validate scale factor. */
9651 if (scale != 1)
9653 if (!index)
9654 /* Scale without index. */
9655 return false;
9657 if (scale != 2 && scale != 4 && scale != 8)
9658 /* Scale is not a valid multiplier. */
9659 return false;
9662 /* Validate displacement. */
9663 if (disp)
9665 if (GET_CODE (disp) == CONST
9666 && GET_CODE (XEXP (disp, 0)) == UNSPEC
9667 && XINT (XEXP (disp, 0), 1) != UNSPEC_MACHOPIC_OFFSET)
9668 switch (XINT (XEXP (disp, 0), 1))
9670 /* Refuse GOTOFF and GOT in 64bit mode since they are always 64bit when
9671 used. While the ABI also specifies 32bit relocations, we don't produce
9672 them at all and use IP-relative addressing instead. */
9673 case UNSPEC_GOT:
9674 case UNSPEC_GOTOFF:
9675 gcc_assert (flag_pic);
9676 if (!TARGET_64BIT)
9677 goto is_legitimate_pic;
9679 /* 64bit address unspec. */
9680 return false;
9682 case UNSPEC_GOTPCREL:
9683 gcc_assert (flag_pic);
9684 goto is_legitimate_pic;
9686 case UNSPEC_GOTTPOFF:
9687 case UNSPEC_GOTNTPOFF:
9688 case UNSPEC_INDNTPOFF:
9689 case UNSPEC_NTPOFF:
9690 case UNSPEC_DTPOFF:
9691 break;
9693 default:
9694 /* Invalid address unspec. */
9695 return false;
9698 else if (SYMBOLIC_CONST (disp)
9699 && (flag_pic
9700 || (TARGET_MACHO
9701 #if TARGET_MACHO
9702 && MACHOPIC_INDIRECT
9703 && !machopic_operand_p (disp)
9704 #endif
9708 is_legitimate_pic:
9709 if (TARGET_64BIT && (index || base))
9711 /* foo@dtpoff(%rX) is ok. */
9712 if (GET_CODE (disp) != CONST
9713 || GET_CODE (XEXP (disp, 0)) != PLUS
9714 || GET_CODE (XEXP (XEXP (disp, 0), 0)) != UNSPEC
9715 || !CONST_INT_P (XEXP (XEXP (disp, 0), 1))
9716 || (XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_DTPOFF
9717 && XINT (XEXP (XEXP (disp, 0), 0), 1) != UNSPEC_NTPOFF))
9718 /* Non-constant pic memory reference. */
9719 return false;
9721 else if (! legitimate_pic_address_disp_p (disp))
9722 /* Displacement is an invalid pic construct. */
9723 return false;
9725 /* This code used to verify that a symbolic pic displacement
9726 includes the pic_offset_table_rtx register.
9728 While this is a good idea, unfortunately these constructs may
9729 be created by the "adds using lea" optimization for incorrect
9730 code like:
9732 int a;
9733 int foo(int i)
9735 return *(&a+i);
9738 This code is nonsensical, but results in addressing the
9739 GOT table with pic_offset_table_rtx as the base. We can't
9740 just refuse it easily, since it gets matched by the
9741 "addsi3" pattern, which later gets split to lea when the
9742 output register differs from the input. While this
9743 could be handled by a separate addsi pattern for this case
9744 that never results in lea, disabling this test seems to be
9745 the easier and correct fix for the crash. */
9747 else if (GET_CODE (disp) != LABEL_REF
9748 && !CONST_INT_P (disp)
9749 && (GET_CODE (disp) != CONST
9750 || !legitimate_constant_p (disp))
9751 && (GET_CODE (disp) != SYMBOL_REF
9752 || !legitimate_constant_p (disp)))
9753 /* Displacement is not constant. */
9754 return false;
9755 else if (TARGET_64BIT
9756 && !x86_64_immediate_operand (disp, VOIDmode))
9757 /* Displacement is out of range. */
9758 return false;
9761 /* Everything looks valid. */
9762 return true;
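/* A hedged worked example of the checks above (illustrative only): an
   address decomposing to base %ebx, index %ecx, scale 4 and a small constant
   displacement is accepted, while a scale of 3, a scale without an index, or
   a base/index register not in Pmode is rejected.  The standalone helper
   below (its name is introduced here for illustration, it is not part of
   GCC) mirrors the scale test.  */
static int
example_valid_scale (long scale)
{
  return scale == 1 || scale == 2 || scale == 4 || scale == 8;
}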
9765 /* Determine if a given RTX is a valid constant address. */
9767 bool
9768 constant_address_p (rtx x)
9770 return CONSTANT_P (x) && ix86_legitimate_address_p (Pmode, x, 1);
9773 /* Return a unique alias set for the GOT. */
9775 static alias_set_type
9776 ix86_GOT_alias_set (void)
9778 static alias_set_type set = -1;
9779 if (set == -1)
9780 set = new_alias_set ();
9781 return set;
9784 /* Return a legitimate reference for ORIG (an address) using the
9785 register REG. If REG is 0, a new pseudo is generated.
9787 There are two types of references that must be handled:
9789 1. Global data references must load the address from the GOT, via
9790 the PIC reg. An insn is emitted to do this load, and the reg is
9791 returned.
9793 2. Static data references, constant pool addresses, and code labels
9794 compute the address as an offset from the GOT, whose base is in
9795 the PIC reg. Static data objects have SYMBOL_FLAG_LOCAL set to
9796 differentiate them from global data objects. The returned
9797 address is the PIC reg + an unspec constant.
9799 TARGET_LEGITIMATE_ADDRESS_P rejects symbolic references unless the PIC
9800 reg also appears in the address. */
9802 static rtx
9803 legitimize_pic_address (rtx orig, rtx reg)
9805 rtx addr = orig;
9806 rtx new_rtx = orig;
9807 rtx base;
9809 #if TARGET_MACHO
9810 if (TARGET_MACHO && !TARGET_64BIT)
9812 if (reg == 0)
9813 reg = gen_reg_rtx (Pmode);
9814 /* Use the generic Mach-O PIC machinery. */
9815 return machopic_legitimize_pic_address (orig, GET_MODE (orig), reg);
9817 #endif
9819 if (TARGET_64BIT && legitimate_pic_address_disp_p (addr))
9820 new_rtx = addr;
9821 else if (TARGET_64BIT
9822 && ix86_cmodel != CM_SMALL_PIC
9823 && gotoff_operand (addr, Pmode))
9825 rtx tmpreg;
9826 /* This symbol may be referenced via a displacement from the PIC
9827 base address (@GOTOFF). */
9829 if (reload_in_progress)
9830 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9831 if (GET_CODE (addr) == CONST)
9832 addr = XEXP (addr, 0);
9833 if (GET_CODE (addr) == PLUS)
9835 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9836 UNSPEC_GOTOFF);
9837 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9839 else
9840 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9841 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9842 if (!reg)
9843 tmpreg = gen_reg_rtx (Pmode);
9844 else
9845 tmpreg = reg;
9846 emit_move_insn (tmpreg, new_rtx);
9848 if (reg != 0)
9850 new_rtx = expand_simple_binop (Pmode, PLUS, reg, pic_offset_table_rtx,
9851 tmpreg, 1, OPTAB_DIRECT);
9852 new_rtx = reg;
9854 else new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, tmpreg);
9856 else if (!TARGET_64BIT && gotoff_operand (addr, Pmode))
9858 /* This symbol may be referenced via a displacement from the PIC
9859 base address (@GOTOFF). */
9861 if (reload_in_progress)
9862 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9863 if (GET_CODE (addr) == CONST)
9864 addr = XEXP (addr, 0);
9865 if (GET_CODE (addr) == PLUS)
9867 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, XEXP (addr, 0)),
9868 UNSPEC_GOTOFF);
9869 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, XEXP (addr, 1));
9871 else
9872 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTOFF);
9873 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9874 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9876 if (reg != 0)
9878 emit_move_insn (reg, new_rtx);
9879 new_rtx = reg;
9882 else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
9883 /* We can't use @GOTOFF for text labels on VxWorks;
9884 see gotoff_operand. */
9885 || (TARGET_VXWORKS_RTP && GET_CODE (addr) == LABEL_REF))
9887 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
9889 if (GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (addr))
9890 return legitimize_dllimport_symbol (addr, true);
9891 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS
9892 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF
9893 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (addr, 0), 0)))
9895 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (addr, 0), 0), true);
9896 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (addr, 0), 1));
9900 if (TARGET_64BIT && ix86_cmodel != CM_LARGE_PIC)
9902 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOTPCREL);
9903 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9904 new_rtx = gen_const_mem (Pmode, new_rtx);
9905 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9907 if (reg == 0)
9908 reg = gen_reg_rtx (Pmode);
9909 /* Use gen_movsi directly; otherwise the address is loaded
9910 into a register for CSE. We don't want to CSE these addresses;
9911 instead we CSE addresses from the GOT table, so skip this. */
9912 emit_insn (gen_movsi (reg, new_rtx));
9913 new_rtx = reg;
9915 else
9917 /* This symbol must be referenced via a load from the
9918 Global Offset Table (@GOT). */
9920 if (reload_in_progress)
9921 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9922 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), UNSPEC_GOT);
9923 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9924 if (TARGET_64BIT)
9925 new_rtx = force_reg (Pmode, new_rtx);
9926 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9927 new_rtx = gen_const_mem (Pmode, new_rtx);
9928 set_mem_alias_set (new_rtx, ix86_GOT_alias_set ());
9930 if (reg == 0)
9931 reg = gen_reg_rtx (Pmode);
9932 emit_move_insn (reg, new_rtx);
9933 new_rtx = reg;
9936 else
9938 if (CONST_INT_P (addr)
9939 && !x86_64_immediate_operand (addr, VOIDmode))
9941 if (reg)
9943 emit_move_insn (reg, addr);
9944 new_rtx = reg;
9946 else
9947 new_rtx = force_reg (Pmode, addr);
9949 else if (GET_CODE (addr) == CONST)
9951 addr = XEXP (addr, 0);
9953 /* We must match stuff we generate before. Assume the only
9954 unspecs that can get here are ours. Not that we could do
9955 anything with them anyway.... */
9956 if (GET_CODE (addr) == UNSPEC
9957 || (GET_CODE (addr) == PLUS
9958 && GET_CODE (XEXP (addr, 0)) == UNSPEC))
9959 return orig;
9960 gcc_assert (GET_CODE (addr) == PLUS);
9962 if (GET_CODE (addr) == PLUS)
9964 rtx op0 = XEXP (addr, 0), op1 = XEXP (addr, 1);
9966 /* Check first to see if this is a constant offset from a @GOTOFF
9967 symbol reference. */
9968 if (gotoff_operand (op0, Pmode)
9969 && CONST_INT_P (op1))
9971 if (!TARGET_64BIT)
9973 if (reload_in_progress)
9974 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
9975 new_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op0),
9976 UNSPEC_GOTOFF);
9977 new_rtx = gen_rtx_PLUS (Pmode, new_rtx, op1);
9978 new_rtx = gen_rtx_CONST (Pmode, new_rtx);
9979 new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
9981 if (reg != 0)
9983 emit_move_insn (reg, new_rtx);
9984 new_rtx = reg;
9987 else
9989 if (INTVAL (op1) < -16*1024*1024
9990 || INTVAL (op1) >= 16*1024*1024)
9992 if (!x86_64_immediate_operand (op1, Pmode))
9993 op1 = force_reg (Pmode, op1);
9994 new_rtx = gen_rtx_PLUS (Pmode, force_reg (Pmode, op0), op1);
9998 else
10000 base = legitimize_pic_address (XEXP (addr, 0), reg);
10001 new_rtx = legitimize_pic_address (XEXP (addr, 1),
10002 base == reg ? NULL_RTX : reg);
10004 if (CONST_INT_P (new_rtx))
10005 new_rtx = plus_constant (base, INTVAL (new_rtx));
10006 else
10008 if (GET_CODE (new_rtx) == PLUS && CONSTANT_P (XEXP (new_rtx, 1)))
10010 base = gen_rtx_PLUS (Pmode, base, XEXP (new_rtx, 0));
10011 new_rtx = XEXP (new_rtx, 1);
10013 new_rtx = gen_rtx_PLUS (Pmode, base, new_rtx);
10018 return new_rtx;
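/* A hedged user-level illustration (not compiler code; names are
   illustrative) of the two cases handled above when compiling 32-bit code
   with -fPIC: a reference to 'global_var' is loaded from the GOT (case 1 in
   the comment before this function), while 'local_var' is addressed as the
   PIC register plus an @GOTOFF displacement (case 2).  */
int global_var;
static int local_var;

int
example_pic_sum (void)
{
  return global_var + local_var;
}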
10021 /* Load the thread pointer. If TO_REG is true, force it into a register. */
10023 static rtx
10024 get_thread_pointer (int to_reg)
10026 rtx tp, reg, insn;
10028 tp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_TP);
10029 if (!to_reg)
10030 return tp;
10032 reg = gen_reg_rtx (Pmode);
10033 insn = gen_rtx_SET (VOIDmode, reg, tp);
10034 insn = emit_insn (insn);
10036 return reg;
10039 /* A subroutine of ix86_legitimize_address and ix86_expand_move. FOR_MOV is
10040 false if we expect this to be used for a memory address and true if
10041 we expect to load the address into a register. */
10043 static rtx
10044 legitimize_tls_address (rtx x, enum tls_model model, int for_mov)
10046 rtx dest, base, off, pic, tp;
10047 int type;
10049 switch (model)
10051 case TLS_MODEL_GLOBAL_DYNAMIC:
10052 dest = gen_reg_rtx (Pmode);
10053 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10055 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10057 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns;
10059 start_sequence ();
10060 emit_call_insn (gen_tls_global_dynamic_64 (rax, x));
10061 insns = get_insns ();
10062 end_sequence ();
10064 RTL_CONST_CALL_P (insns) = 1;
10065 emit_libcall_block (insns, dest, rax, x);
10067 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10068 emit_insn (gen_tls_global_dynamic_64 (dest, x));
10069 else
10070 emit_insn (gen_tls_global_dynamic_32 (dest, x));
10072 if (TARGET_GNU2_TLS)
10074 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tp, dest));
10076 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10078 break;
10080 case TLS_MODEL_LOCAL_DYNAMIC:
10081 base = gen_reg_rtx (Pmode);
10082 tp = TARGET_GNU2_TLS ? get_thread_pointer (1) : 0;
10084 if (TARGET_64BIT && ! TARGET_GNU2_TLS)
10086 rtx rax = gen_rtx_REG (Pmode, AX_REG), insns, note;
10088 start_sequence ();
10089 emit_call_insn (gen_tls_local_dynamic_base_64 (rax));
10090 insns = get_insns ();
10091 end_sequence ();
10093 note = gen_rtx_EXPR_LIST (VOIDmode, const0_rtx, NULL);
10094 note = gen_rtx_EXPR_LIST (VOIDmode, ix86_tls_get_addr (), note);
10095 RTL_CONST_CALL_P (insns) = 1;
10096 emit_libcall_block (insns, base, rax, note);
10098 else if (TARGET_64BIT && TARGET_GNU2_TLS)
10099 emit_insn (gen_tls_local_dynamic_base_64 (base));
10100 else
10101 emit_insn (gen_tls_local_dynamic_base_32 (base));
10103 if (TARGET_GNU2_TLS)
10105 rtx x = ix86_tls_module_base ();
10107 set_unique_reg_note (get_last_insn (), REG_EQUIV,
10108 gen_rtx_MINUS (Pmode, x, tp));
10111 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPOFF);
10112 off = gen_rtx_CONST (Pmode, off);
10114 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, off));
10116 if (TARGET_GNU2_TLS)
10118 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, dest, tp));
10120 set_unique_reg_note (get_last_insn (), REG_EQUIV, x);
10123 break;
10125 case TLS_MODEL_INITIAL_EXEC:
10126 if (TARGET_64BIT)
10128 pic = NULL;
10129 type = UNSPEC_GOTNTPOFF;
10131 else if (flag_pic)
10133 if (reload_in_progress)
10134 df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
10135 pic = pic_offset_table_rtx;
10136 type = TARGET_ANY_GNU_TLS ? UNSPEC_GOTNTPOFF : UNSPEC_GOTTPOFF;
10138 else if (!TARGET_ANY_GNU_TLS)
10140 pic = gen_reg_rtx (Pmode);
10141 emit_insn (gen_set_got (pic));
10142 type = UNSPEC_GOTTPOFF;
10144 else
10146 pic = NULL;
10147 type = UNSPEC_INDNTPOFF;
10150 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), type);
10151 off = gen_rtx_CONST (Pmode, off);
10152 if (pic)
10153 off = gen_rtx_PLUS (Pmode, pic, off);
10154 off = gen_const_mem (Pmode, off);
10155 set_mem_alias_set (off, ix86_GOT_alias_set ());
10157 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10159 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10160 off = force_reg (Pmode, off);
10161 return gen_rtx_PLUS (Pmode, base, off);
10163 else
10165 base = get_thread_pointer (true);
10166 dest = gen_reg_rtx (Pmode);
10167 emit_insn (gen_subsi3 (dest, base, off));
10169 break;
10171 case TLS_MODEL_LOCAL_EXEC:
10172 off = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x),
10173 (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10174 ? UNSPEC_NTPOFF : UNSPEC_TPOFF);
10175 off = gen_rtx_CONST (Pmode, off);
10177 if (TARGET_64BIT || TARGET_ANY_GNU_TLS)
10179 base = get_thread_pointer (for_mov || !TARGET_TLS_DIRECT_SEG_REFS);
10180 return gen_rtx_PLUS (Pmode, base, off);
10182 else
10184 base = get_thread_pointer (true);
10185 dest = gen_reg_rtx (Pmode);
10186 emit_insn (gen_subsi3 (dest, base, off));
10188 break;
10190 default:
10191 gcc_unreachable ();
10194 return dest;
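/* A hedged user-level illustration (not compiler code; names are
   illustrative): the kind of access the models above lower.  Depending on
   -fpic/-fPIC, symbol visibility and optimization, the compiler picks a
   global-dynamic, local-dynamic, initial-exec or local-exec sequence for
   'tls_counter'.  */
__thread int tls_counter;

int
example_tls_bump (void)
{
  return ++tls_counter;
}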
10197 /* Create or return the unique __imp_DECL dllimport symbol corresponding
10198 to symbol DECL. */
10200 static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
10201 htab_t dllimport_map;
10203 static tree
10204 get_dllimport_decl (tree decl)
10206 struct tree_map *h, in;
10207 void **loc;
10208 const char *name;
10209 const char *prefix;
10210 size_t namelen, prefixlen;
10211 char *imp_name;
10212 tree to;
10213 rtx rtl;
10215 if (!dllimport_map)
10216 dllimport_map = htab_create_ggc (512, tree_map_hash, tree_map_eq, 0);
10218 in.hash = htab_hash_pointer (decl);
10219 in.base.from = decl;
10220 loc = htab_find_slot_with_hash (dllimport_map, &in, in.hash, INSERT);
10221 h = (struct tree_map *) *loc;
10222 if (h)
10223 return h->to;
10225 *loc = h = GGC_NEW (struct tree_map);
10226 h->hash = in.hash;
10227 h->base.from = decl;
10228 h->to = to = build_decl (DECL_SOURCE_LOCATION (decl),
10229 VAR_DECL, NULL, ptr_type_node);
10230 DECL_ARTIFICIAL (to) = 1;
10231 DECL_IGNORED_P (to) = 1;
10232 DECL_EXTERNAL (to) = 1;
10233 TREE_READONLY (to) = 1;
10235 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
10236 name = targetm.strip_name_encoding (name);
10237 prefix = name[0] == FASTCALL_PREFIX || user_label_prefix[0] == 0
10238 ? "*__imp_" : "*__imp__";
10239 namelen = strlen (name);
10240 prefixlen = strlen (prefix);
10241 imp_name = (char *) alloca (namelen + prefixlen + 1);
10242 memcpy (imp_name, prefix, prefixlen);
10243 memcpy (imp_name + prefixlen, name, namelen + 1);
10245 name = ggc_alloc_string (imp_name, namelen + prefixlen);
10246 rtl = gen_rtx_SYMBOL_REF (Pmode, name);
10247 SET_SYMBOL_REF_DECL (rtl, to);
10248 SYMBOL_REF_FLAGS (rtl) = SYMBOL_FLAG_LOCAL;
10250 rtl = gen_const_mem (Pmode, rtl);
10251 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
10253 SET_DECL_RTL (to, rtl);
10254 SET_DECL_ASSEMBLER_NAME (to, get_identifier (name));
10256 return to;
10259 /* Expand SYMBOL into its corresponding dllimport symbol. WANT_REG is
10260 true if we require the result be a register. */
10262 static rtx
10263 legitimize_dllimport_symbol (rtx symbol, bool want_reg)
10265 tree imp_decl;
10266 rtx x;
10268 gcc_assert (SYMBOL_REF_DECL (symbol));
10269 imp_decl = get_dllimport_decl (SYMBOL_REF_DECL (symbol));
10271 x = DECL_RTL (imp_decl);
10272 if (want_reg)
10273 x = force_reg (Pmode, x);
10274 return x;
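/* A hedged user-level illustration (mingw/cygwin targets; not compiler code;
   names are illustrative): a dllimport reference such as the one below is
   rewritten by the routines above to load through the corresponding
   "__imp_"-prefixed pointer that get_dllimport_decl creates.  */
extern int imported_var __attribute__ ((dllimport));

int
example_read_import (void)
{
  return imported_var;
}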
10277 /* Try machine-dependent ways of modifying an illegitimate address
10278 to be legitimate. If we find one, return the new, valid address.
10279 This macro is used in only one place: `memory_address' in explow.c.
10281 OLDX is the address as it was before break_out_memory_refs was called.
10282 In some cases it is useful to look at this to decide what needs to be done.
10284 It is always safe for this macro to do nothing. It exists to recognize
10285 opportunities to optimize the output.
10287 For the 80386, we handle X+REG by loading X into a register R and
10288 using R+REG. R will go in a general reg and indexing will be used.
10289 However, if REG is a broken-out memory address or multiplication,
10290 nothing needs to be done because REG can certainly go in a general reg.
10292 When -fpic is used, special handling is needed for symbolic references.
10293 See comments by legitimize_pic_address in i386.c for details. */
10295 static rtx
10296 ix86_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
10297 enum machine_mode mode)
10299 int changed = 0;
10300 unsigned log;
10302 log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
10303 if (log)
10304 return legitimize_tls_address (x, (enum tls_model) log, false);
10305 if (GET_CODE (x) == CONST
10306 && GET_CODE (XEXP (x, 0)) == PLUS
10307 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10308 && (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
10310 rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
10311 (enum tls_model) log, false);
10312 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10315 if (TARGET_DLLIMPORT_DECL_ATTRIBUTES)
10317 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DLLIMPORT_P (x))
10318 return legitimize_dllimport_symbol (x, true);
10319 if (GET_CODE (x) == CONST
10320 && GET_CODE (XEXP (x, 0)) == PLUS
10321 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
10322 && SYMBOL_REF_DLLIMPORT_P (XEXP (XEXP (x, 0), 0)))
10324 rtx t = legitimize_dllimport_symbol (XEXP (XEXP (x, 0), 0), true);
10325 return gen_rtx_PLUS (Pmode, t, XEXP (XEXP (x, 0), 1));
10329 if (flag_pic && SYMBOLIC_CONST (x))
10330 return legitimize_pic_address (x, 0);
10332 /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
10333 if (GET_CODE (x) == ASHIFT
10334 && CONST_INT_P (XEXP (x, 1))
10335 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (x, 1)) < 4)
10337 changed = 1;
10338 log = INTVAL (XEXP (x, 1));
10339 x = gen_rtx_MULT (Pmode, force_reg (Pmode, XEXP (x, 0)),
10340 GEN_INT (1 << log));
10343 if (GET_CODE (x) == PLUS)
10345 /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
10347 if (GET_CODE (XEXP (x, 0)) == ASHIFT
10348 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10349 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1)) < 4)
10351 changed = 1;
10352 log = INTVAL (XEXP (XEXP (x, 0), 1));
10353 XEXP (x, 0) = gen_rtx_MULT (Pmode,
10354 force_reg (Pmode, XEXP (XEXP (x, 0), 0)),
10355 GEN_INT (1 << log));
10358 if (GET_CODE (XEXP (x, 1)) == ASHIFT
10359 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10360 && (unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 1), 1)) < 4)
10362 changed = 1;
10363 log = INTVAL (XEXP (XEXP (x, 1), 1));
10364 XEXP (x, 1) = gen_rtx_MULT (Pmode,
10365 force_reg (Pmode, XEXP (XEXP (x, 1), 0)),
10366 GEN_INT (1 << log));
10369 /* Put multiply first if it isn't already. */
10370 if (GET_CODE (XEXP (x, 1)) == MULT)
10372 rtx tmp = XEXP (x, 0);
10373 XEXP (x, 0) = XEXP (x, 1);
10374 XEXP (x, 1) = tmp;
10375 changed = 1;
10378 /* Canonicalize (plus (mult (reg) (const)) (plus (reg) (const)))
10379 into (plus (plus (mult (reg) (const)) (reg)) (const)). This can be
10380 created by virtual register instantiation, register elimination, and
10381 similar optimizations. */
10382 if (GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == PLUS)
10384 changed = 1;
10385 x = gen_rtx_PLUS (Pmode,
10386 gen_rtx_PLUS (Pmode, XEXP (x, 0),
10387 XEXP (XEXP (x, 1), 0)),
10388 XEXP (XEXP (x, 1), 1));
10391 /* Canonicalize
10392 (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
10393 into (plus (plus (mult (reg) (const)) (reg)) (const)). */
10394 else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
10395 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10396 && GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
10397 && CONSTANT_P (XEXP (x, 1)))
10399 rtx constant;
10400 rtx other = NULL_RTX;
10402 if (CONST_INT_P (XEXP (x, 1)))
10404 constant = XEXP (x, 1);
10405 other = XEXP (XEXP (XEXP (x, 0), 1), 1);
10407 else if (CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 1), 1)))
10409 constant = XEXP (XEXP (XEXP (x, 0), 1), 1);
10410 other = XEXP (x, 1);
10412 else
10413 constant = 0;
10415 if (constant)
10417 changed = 1;
10418 x = gen_rtx_PLUS (Pmode,
10419 gen_rtx_PLUS (Pmode, XEXP (XEXP (x, 0), 0),
10420 XEXP (XEXP (XEXP (x, 0), 1), 0)),
10421 plus_constant (other, INTVAL (constant)));
10425 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10426 return x;
10428 if (GET_CODE (XEXP (x, 0)) == MULT)
10430 changed = 1;
10431 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
10434 if (GET_CODE (XEXP (x, 1)) == MULT)
10436 changed = 1;
10437 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
10440 if (changed
10441 && REG_P (XEXP (x, 1))
10442 && REG_P (XEXP (x, 0)))
10443 return x;
10445 if (flag_pic && SYMBOLIC_CONST (XEXP (x, 1)))
10447 changed = 1;
10448 x = legitimize_pic_address (x, 0);
10451 if (changed && ix86_legitimate_address_p (mode, x, FALSE))
10452 return x;
10454 if (REG_P (XEXP (x, 0)))
10456 rtx temp = gen_reg_rtx (Pmode);
10457 rtx val = force_operand (XEXP (x, 1), temp);
10458 if (val != temp)
10459 emit_move_insn (temp, val);
10461 XEXP (x, 1) = temp;
10462 return x;
10465 else if (REG_P (XEXP (x, 1)))
10467 rtx temp = gen_reg_rtx (Pmode);
10468 rtx val = force_operand (XEXP (x, 0), temp);
10469 if (val != temp)
10470 emit_move_insn (temp, val);
10472 XEXP (x, 0) = temp;
10473 return x;
10477 return x;
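/* A standalone sketch (illustrative only; the example_* name is introduced
   here): the canonicalizations above turn address arithmetic such as
   p + (i << 2) into the base + index*scale shape, which x86 can form with a
   single lea or fold into a memory operand (e.g. something like
   "leal (%eax,%ebx,4), %ecx" in AT&T syntax).  */
static long
example_scaled_index (long base, long index, int log)
{
  return base + index * (1L << log);  /* (plus (mult index 2^log) base) */
}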
10480 /* Print an integer constant expression in assembler syntax. Addition
10481 and subtraction are the only arithmetic that may appear in these
10482 expressions. FILE is the stdio stream to write to, X is the rtx, and
10483 CODE is the operand print code from the output string. */
10485 static void
10486 output_pic_addr_const (FILE *file, rtx x, int code)
10488 char buf[256];
10490 switch (GET_CODE (x))
10492 case PC:
10493 gcc_assert (flag_pic);
10494 putc ('.', file);
10495 break;
10497 case SYMBOL_REF:
10498 if (! TARGET_MACHO || TARGET_64BIT)
10499 output_addr_const (file, x);
10500 else
10502 const char *name = XSTR (x, 0);
10504 /* Mark the decl as referenced so that cgraph will
10505 output the function. */
10506 if (SYMBOL_REF_DECL (x))
10507 mark_decl_referenced (SYMBOL_REF_DECL (x));
10509 #if TARGET_MACHO
10510 if (MACHOPIC_INDIRECT
10511 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
10512 name = machopic_indirection_name (x, /*stub_p=*/true);
10513 #endif
10514 assemble_name (file, name);
10516 if (!TARGET_MACHO && !(TARGET_64BIT && DEFAULT_ABI == MS_ABI)
10517 && code == 'P' && ! SYMBOL_REF_LOCAL_P (x))
10518 fputs ("@PLT", file);
10519 break;
10521 case LABEL_REF:
10522 x = XEXP (x, 0);
10523 /* FALLTHRU */
10524 case CODE_LABEL:
10525 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (x));
10526 assemble_name (asm_out_file, buf);
10527 break;
10529 case CONST_INT:
10530 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
10531 break;
10533 case CONST:
10534 /* This used to output parentheses around the expression,
10535 but that does not work on the 386 (either ATT or BSD assembler). */
10536 output_pic_addr_const (file, XEXP (x, 0), code);
10537 break;
10539 case CONST_DOUBLE:
10540 if (GET_MODE (x) == VOIDmode)
10542 /* We can use %d if the number is <32 bits and positive. */
10543 if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
10544 fprintf (file, "0x%lx%08lx",
10545 (unsigned long) CONST_DOUBLE_HIGH (x),
10546 (unsigned long) CONST_DOUBLE_LOW (x));
10547 else
10548 fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
10550 else
10551 /* We can't handle floating point constants;
10552 PRINT_OPERAND must handle them. */
10553 output_operand_lossage ("floating constant misused");
10554 break;
10556 case PLUS:
10557 /* Some assemblers need integer constants to appear first. */
10558 if (CONST_INT_P (XEXP (x, 0)))
10560 output_pic_addr_const (file, XEXP (x, 0), code);
10561 putc ('+', file);
10562 output_pic_addr_const (file, XEXP (x, 1), code);
10564 else
10566 gcc_assert (CONST_INT_P (XEXP (x, 1)));
10567 output_pic_addr_const (file, XEXP (x, 1), code);
10568 putc ('+', file);
10569 output_pic_addr_const (file, XEXP (x, 0), code);
10571 break;
10573 case MINUS:
10574 if (!TARGET_MACHO)
10575 putc (ASSEMBLER_DIALECT == ASM_INTEL ? '(' : '[', file);
10576 output_pic_addr_const (file, XEXP (x, 0), code);
10577 putc ('-', file);
10578 output_pic_addr_const (file, XEXP (x, 1), code);
10579 if (!TARGET_MACHO)
10580 putc (ASSEMBLER_DIALECT == ASM_INTEL ? ')' : ']', file);
10581 break;
10583 case UNSPEC:
10584 gcc_assert (XVECLEN (x, 0) == 1);
10585 output_pic_addr_const (file, XVECEXP (x, 0, 0), code);
10586 switch (XINT (x, 1))
10588 case UNSPEC_GOT:
10589 fputs ("@GOT", file);
10590 break;
10591 case UNSPEC_GOTOFF:
10592 fputs ("@GOTOFF", file);
10593 break;
10594 case UNSPEC_PLTOFF:
10595 fputs ("@PLTOFF", file);
10596 break;
10597 case UNSPEC_GOTPCREL:
10598 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10599 "@GOTPCREL(%rip)" : "@GOTPCREL[rip]", file);
10600 break;
10601 case UNSPEC_GOTTPOFF:
10602 /* FIXME: This might be @TPOFF in Sun ld too. */
10603 fputs ("@GOTTPOFF", file);
10604 break;
10605 case UNSPEC_TPOFF:
10606 fputs ("@TPOFF", file);
10607 break;
10608 case UNSPEC_NTPOFF:
10609 if (TARGET_64BIT)
10610 fputs ("@TPOFF", file);
10611 else
10612 fputs ("@NTPOFF", file);
10613 break;
10614 case UNSPEC_DTPOFF:
10615 fputs ("@DTPOFF", file);
10616 break;
10617 case UNSPEC_GOTNTPOFF:
10618 if (TARGET_64BIT)
10619 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
10620 "@GOTTPOFF(%rip)": "@GOTTPOFF[rip]", file);
10621 else
10622 fputs ("@GOTNTPOFF", file);
10623 break;
10624 case UNSPEC_INDNTPOFF:
10625 fputs ("@INDNTPOFF", file);
10626 break;
10627 #if TARGET_MACHO
10628 case UNSPEC_MACHOPIC_OFFSET:
10629 putc ('-', file);
10630 machopic_output_function_base_name (file);
10631 break;
10632 #endif
10633 default:
10634 output_operand_lossage ("invalid UNSPEC as operand");
10635 break;
10637 break;
10639 default:
10640 output_operand_lossage ("invalid expression as operand");
10644 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
10645 We need to emit DTP-relative relocations. */
10647 static void ATTRIBUTE_UNUSED
10648 i386_output_dwarf_dtprel (FILE *file, int size, rtx x)
10650 fputs (ASM_LONG, file);
10651 output_addr_const (file, x);
10652 fputs ("@DTPOFF", file);
10653 switch (size)
10655 case 4:
10656 break;
10657 case 8:
10658 fputs (", 0", file);
10659 break;
10660 default:
10661 gcc_unreachable ();
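/* A hedged example of the output above (assuming ASM_LONG expands to the
   ".long" directive, which is typical but target-dependent): for a 4-byte
   request on symbol foo this emits roughly ".long foo@DTPOFF", and for an
   8-byte request ".long foo@DTPOFF, 0", i.e. an explicit zero upper half.  */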
10665 /* Return true if X is a representation of the PIC register. This copes
10666 with calls from ix86_find_base_term, where the register might have
10667 been replaced by a cselib value. */
10669 static bool
10670 ix86_pic_register_p (rtx x)
10672 if (GET_CODE (x) == VALUE)
10673 return (pic_offset_table_rtx
10674 && rtx_equal_for_cselib_p (x, pic_offset_table_rtx));
10675 else
10676 return REG_P (x) && REGNO (x) == PIC_OFFSET_TABLE_REGNUM;
10679 /* In the name of slightly smaller debug output, and to cater to
10680 general assembler lossage, recognize PIC+GOTOFF and turn it back
10681 into a direct symbol reference.
10683 On Darwin, this is necessary to avoid a crash, because Darwin
10684 has a different PIC label for each routine but the DWARF debugging
10685 information is not associated with any particular routine, so it's
10686 necessary to remove references to the PIC label from RTL stored by
10687 the DWARF output code. */
10689 static rtx
10690 ix86_delegitimize_address (rtx x)
10692 rtx orig_x = delegitimize_mem_from_attrs (x);
10693 /* reg_addend is NULL or a multiple of some register. */
10694 rtx reg_addend = NULL_RTX;
10695 /* const_addend is NULL or a const_int. */
10696 rtx const_addend = NULL_RTX;
10697 /* This is the result, or NULL. */
10698 rtx result = NULL_RTX;
10700 x = orig_x;
10702 if (MEM_P (x))
10703 x = XEXP (x, 0);
10705 if (TARGET_64BIT)
10707 if (GET_CODE (x) != CONST
10708 || GET_CODE (XEXP (x, 0)) != UNSPEC
10709 || XINT (XEXP (x, 0), 1) != UNSPEC_GOTPCREL
10710 || !MEM_P (orig_x))
10711 return orig_x;
10712 return XVECEXP (XEXP (x, 0), 0, 0);
10715 if (GET_CODE (x) != PLUS
10716 || GET_CODE (XEXP (x, 1)) != CONST)
10717 return orig_x;
10719 if (ix86_pic_register_p (XEXP (x, 0)))
10720 /* %ebx + GOT/GOTOFF */
10722 else if (GET_CODE (XEXP (x, 0)) == PLUS)
10724 /* %ebx + %reg * scale + GOT/GOTOFF */
10725 reg_addend = XEXP (x, 0);
10726 if (ix86_pic_register_p (XEXP (reg_addend, 0)))
10727 reg_addend = XEXP (reg_addend, 1);
10728 else if (ix86_pic_register_p (XEXP (reg_addend, 1)))
10729 reg_addend = XEXP (reg_addend, 0);
10730 else
10731 return orig_x;
10732 if (!REG_P (reg_addend)
10733 && GET_CODE (reg_addend) != MULT
10734 && GET_CODE (reg_addend) != ASHIFT)
10735 return orig_x;
10737 else
10738 return orig_x;
10740 x = XEXP (XEXP (x, 1), 0);
10741 if (GET_CODE (x) == PLUS
10742 && CONST_INT_P (XEXP (x, 1)))
10744 const_addend = XEXP (x, 1);
10745 x = XEXP (x, 0);
10748 if (GET_CODE (x) == UNSPEC
10749 && ((XINT (x, 1) == UNSPEC_GOT && MEM_P (orig_x))
10750 || (XINT (x, 1) == UNSPEC_GOTOFF && !MEM_P (orig_x))))
10751 result = XVECEXP (x, 0, 0);
10753 if (TARGET_MACHO && darwin_local_data_pic (x)
10754 && !MEM_P (orig_x))
10755 result = XVECEXP (x, 0, 0);
10757 if (! result)
10758 return orig_x;
10760 if (const_addend)
10761 result = gen_rtx_CONST (Pmode, gen_rtx_PLUS (Pmode, result, const_addend));
10762 if (reg_addend)
10763 result = gen_rtx_PLUS (Pmode, reg_addend, result);
10764 return result;
10767 /* If X is a machine specific address (i.e. a symbol or label being
10768 referenced as a displacement from the GOT implemented using an
10769 UNSPEC), then return the base term. Otherwise return X. */
10772 ix86_find_base_term (rtx x)
10774 rtx term;
10776 if (TARGET_64BIT)
10778 if (GET_CODE (x) != CONST)
10779 return x;
10780 term = XEXP (x, 0);
10781 if (GET_CODE (term) == PLUS
10782 && (CONST_INT_P (XEXP (term, 1))
10783 || GET_CODE (XEXP (term, 1)) == CONST_DOUBLE))
10784 term = XEXP (term, 0);
10785 if (GET_CODE (term) != UNSPEC
10786 || XINT (term, 1) != UNSPEC_GOTPCREL)
10787 return x;
10789 return XVECEXP (term, 0, 0);
10792 return ix86_delegitimize_address (x);
10795 static void
10796 put_condition_code (enum rtx_code code, enum machine_mode mode, int reverse,
10797 int fp, FILE *file)
10799 const char *suffix;
10801 if (mode == CCFPmode || mode == CCFPUmode)
10803 code = ix86_fp_compare_code_to_integer (code);
10804 mode = CCmode;
10806 if (reverse)
10807 code = reverse_condition (code);
10809 switch (code)
10811 case EQ:
10812 switch (mode)
10814 case CCAmode:
10815 suffix = "a";
10816 break;
10818 case CCCmode:
10819 suffix = "c";
10820 break;
10822 case CCOmode:
10823 suffix = "o";
10824 break;
10826 case CCSmode:
10827 suffix = "s";
10828 break;
10830 default:
10831 suffix = "e";
10833 break;
10834 case NE:
10835 switch (mode)
10837 case CCAmode:
10838 suffix = "na";
10839 break;
10841 case CCCmode:
10842 suffix = "nc";
10843 break;
10845 case CCOmode:
10846 suffix = "no";
10847 break;
10849 case CCSmode:
10850 suffix = "ns";
10851 break;
10853 default:
10854 suffix = "ne";
10856 break;
10857 case GT:
10858 gcc_assert (mode == CCmode || mode == CCNOmode || mode == CCGCmode);
10859 suffix = "g";
10860 break;
10861 case GTU:
10862 /* ??? Use "nbe" instead of "a" for fcmov lossage on some assemblers.
10863 Those same assemblers have the same but opposite lossage on cmov. */
10864 if (mode == CCmode)
10865 suffix = fp ? "nbe" : "a";
10866 else if (mode == CCCmode)
10867 suffix = "b";
10868 else
10869 gcc_unreachable ();
10870 break;
10871 case LT:
10872 switch (mode)
10874 case CCNOmode:
10875 case CCGOCmode:
10876 suffix = "s";
10877 break;
10879 case CCmode:
10880 case CCGCmode:
10881 suffix = "l";
10882 break;
10884 default:
10885 gcc_unreachable ();
10887 break;
10888 case LTU:
10889 gcc_assert (mode == CCmode || mode == CCCmode);
10890 suffix = "b";
10891 break;
10892 case GE:
10893 switch (mode)
10895 case CCNOmode:
10896 case CCGOCmode:
10897 suffix = "ns";
10898 break;
10900 case CCmode:
10901 case CCGCmode:
10902 suffix = "ge";
10903 break;
10905 default:
10906 gcc_unreachable ();
10908 break;
10909 case GEU:
10910 /* ??? As above. */
10911 gcc_assert (mode == CCmode || mode == CCCmode);
10912 suffix = fp ? "nb" : "ae";
10913 break;
10914 case LE:
10915 gcc_assert (mode == CCmode || mode == CCGCmode || mode == CCNOmode);
10916 suffix = "le";
10917 break;
10918 case LEU:
10919 /* ??? As above. */
10920 if (mode == CCmode)
10921 suffix = "be";
10922 else if (mode == CCCmode)
10923 suffix = fp ? "nb" : "ae";
10924 else
10925 gcc_unreachable ();
10926 break;
10927 case UNORDERED:
10928 suffix = fp ? "u" : "p";
10929 break;
10930 case ORDERED:
10931 suffix = fp ? "nu" : "np";
10932 break;
10933 default:
10934 gcc_unreachable ();
10936 fputs (suffix, file);
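/* A hedged worked example (illustrative only): for (LTU, CCmode) the suffix
   printed above is "b", so a conditional-jump template ends up as "jb",
   while (GT, CCGCmode) prints "g", giving e.g. "jg" or "setg" depending on
   the instruction the suffix completes.  */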
10939 /* Print the name of register X to FILE based on its machine mode and number.
10940 If CODE is 'w', pretend the mode is HImode.
10941 If CODE is 'b', pretend the mode is QImode.
10942 If CODE is 'k', pretend the mode is SImode.
10943 If CODE is 'q', pretend the mode is DImode.
10944 If CODE is 'x', pretend the mode is V4SFmode.
10945 If CODE is 't', pretend the mode is V8SFmode.
10946 If CODE is 'h', pretend the reg is the 'high' byte register.
10947 If CODE is 'y', print "st(0)" instead of "st" if the reg is a stack op.
10948 If CODE is 'd', duplicate the operand for an AVX instruction.
10951 void
10952 print_reg (rtx x, int code, FILE *file)
10954 const char *reg;
10955 bool duplicated = code == 'd' && TARGET_AVX;
10957 gcc_assert (x == pc_rtx
10958 || (REGNO (x) != ARG_POINTER_REGNUM
10959 && REGNO (x) != FRAME_POINTER_REGNUM
10960 && REGNO (x) != FLAGS_REG
10961 && REGNO (x) != FPSR_REG
10962 && REGNO (x) != FPCR_REG));
10964 if (ASSEMBLER_DIALECT == ASM_ATT)
10965 putc ('%', file);
10967 if (x == pc_rtx)
10969 gcc_assert (TARGET_64BIT);
10970 fputs ("rip", file);
10971 return;
10974 if (code == 'w' || MMX_REG_P (x))
10975 code = 2;
10976 else if (code == 'b')
10977 code = 1;
10978 else if (code == 'k')
10979 code = 4;
10980 else if (code == 'q')
10981 code = 8;
10982 else if (code == 'y')
10983 code = 3;
10984 else if (code == 'h')
10985 code = 0;
10986 else if (code == 'x')
10987 code = 16;
10988 else if (code == 't')
10989 code = 32;
10990 else
10991 code = GET_MODE_SIZE (GET_MODE (x));
10993 /* Irritatingly, AMD extended registers use a different naming convention
10994 from the normal registers. */
10995 if (REX_INT_REG_P (x))
10997 gcc_assert (TARGET_64BIT);
10998 switch (code)
11000 case 0:
11001 error ("extended registers have no high halves");
11002 break;
11003 case 1:
11004 fprintf (file, "r%ib", REGNO (x) - FIRST_REX_INT_REG + 8);
11005 break;
11006 case 2:
11007 fprintf (file, "r%iw", REGNO (x) - FIRST_REX_INT_REG + 8);
11008 break;
11009 case 4:
11010 fprintf (file, "r%id", REGNO (x) - FIRST_REX_INT_REG + 8);
11011 break;
11012 case 8:
11013 fprintf (file, "r%i", REGNO (x) - FIRST_REX_INT_REG + 8);
11014 break;
11015 default:
11016 error ("unsupported operand size for extended register");
11017 break;
11019 return;
11022 reg = NULL;
11023 switch (code)
11025 case 3:
11026 if (STACK_TOP_P (x))
11028 reg = "st(0)";
11029 break;
11031 /* FALLTHRU */
11032 case 8:
11033 case 4:
11034 case 12:
11035 if (! ANY_FP_REG_P (x))
11036 putc (code == 8 && TARGET_64BIT ? 'r' : 'e', file);
11037 /* FALLTHRU */
11038 case 16:
11039 case 2:
11040 normal:
11041 reg = hi_reg_name[REGNO (x)];
11042 break;
11043 case 1:
11044 if (REGNO (x) >= ARRAY_SIZE (qi_reg_name))
11045 goto normal;
11046 reg = qi_reg_name[REGNO (x)];
11047 break;
11048 case 0:
11049 if (REGNO (x) >= ARRAY_SIZE (qi_high_reg_name))
11050 goto normal;
11051 reg = qi_high_reg_name[REGNO (x)];
11052 break;
11053 case 32:
11054 if (SSE_REG_P (x))
11056 gcc_assert (!duplicated);
11057 putc ('y', file);
11058 fputs (hi_reg_name[REGNO (x)] + 1, file);
11059 return;
11061 break;
11062 default:
11063 gcc_unreachable ();
11066 fputs (reg, file);
11067 if (duplicated)
11069 if (ASSEMBLER_DIALECT == ASM_ATT)
11070 fprintf (file, ", %%%s", reg);
11071 else
11072 fprintf (file, ", %s", reg);
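/* A hedged worked example (AT&T dialect, illustrative only): for the first
   integer register, code 'b' prints "%al", 'h' prints "%ah", 'w' prints
   "%ax", 'k' prints "%eax" and, on a 64-bit target, 'q' prints "%rax"; the
   REX registers follow the r8..r15 scheme, e.g. "%r8b", "%r8w", "%r8d" and
   "%r8".  */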
11076 /* Locate some local-dynamic symbol still in use by this function
11077 so that we can print its name in some tls_local_dynamic_base
11078 pattern. */
11080 static int
11081 get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
11083 rtx x = *px;
11085 if (GET_CODE (x) == SYMBOL_REF
11086 && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
11088 cfun->machine->some_ld_name = XSTR (x, 0);
11089 return 1;
11092 return 0;
11095 static const char *
11096 get_some_local_dynamic_name (void)
11098 rtx insn;
11100 if (cfun->machine->some_ld_name)
11101 return cfun->machine->some_ld_name;
11103 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
11104 if (INSN_P (insn)
11105 && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
11106 return cfun->machine->some_ld_name;
11108 gcc_unreachable ();
11111 /* Meaning of CODE:
11112 L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
11113 C -- print opcode suffix for set/cmov insn.
11114 c -- like C, but print reversed condition
11115 E,e -- likewise, but for compare-and-branch fused insn.
11116 F,f -- likewise, but for floating-point.
11117 O -- if HAVE_AS_IX86_CMOV_SUN_SYNTAX, expand to "w.", "l." or "q.",
11118 otherwise nothing
11119 R -- print the prefix for register names.
11120 z -- print the opcode suffix for the size of the current operand.
11121 Z -- likewise, with special suffixes for x87 instructions.
11122 * -- print a star (in certain assembler syntax)
11123 A -- print an absolute memory reference.
11124 w -- print the operand as if it's a "word" (HImode) even if it isn't.
11125 s -- print a shift double count, followed by the assembler's argument
11126 delimiter.
11127 b -- print the QImode name of the register for the indicated operand.
11128 %b0 would print %al if operands[0] is reg 0.
11129 w -- likewise, print the HImode name of the register.
11130 k -- likewise, print the SImode name of the register.
11131 q -- likewise, print the DImode name of the register.
11132 x -- likewise, print the V4SFmode name of the register.
11133 t -- likewise, print the V8SFmode name of the register.
11134 h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
11135 y -- print "st(0)" instead of "st" as a register.
11136 d -- print duplicated register operand for AVX instruction.
11137 D -- print condition for SSE cmp instruction.
11138 P -- if PIC, print an @PLT suffix.
11139 X -- don't print any sort of PIC '@' suffix for a symbol.
11140 & -- print some in-use local-dynamic symbol name.
11141 H -- print a memory address offset by 8; used for sse high-parts
11142 + -- print a branch hint as 'cs' or 'ds' prefix
11143 ; -- print a semicolon (after prefixes, due to a bug in older gas).
11146 void
11147 print_operand (FILE *file, rtx x, int code)
11149 if (code)
11151 switch (code)
11153 case '*':
11154 if (ASSEMBLER_DIALECT == ASM_ATT)
11155 putc ('*', file);
11156 return;
11158 case '&':
11159 assemble_name (file, get_some_local_dynamic_name ());
11160 return;
11162 case 'A':
11163 switch (ASSEMBLER_DIALECT)
11165 case ASM_ATT:
11166 putc ('*', file);
11167 break;
11169 case ASM_INTEL:
11170 /* Intel syntax. For absolute addresses, registers should not
11171 be surrounded by brackets. */
11172 if (!REG_P (x))
11174 putc ('[', file);
11175 PRINT_OPERAND (file, x, 0);
11176 putc (']', file);
11177 return;
11179 break;
11181 default:
11182 gcc_unreachable ();
11185 PRINT_OPERAND (file, x, 0);
11186 return;
11189 case 'L':
11190 if (ASSEMBLER_DIALECT == ASM_ATT)
11191 putc ('l', file);
11192 return;
11194 case 'W':
11195 if (ASSEMBLER_DIALECT == ASM_ATT)
11196 putc ('w', file);
11197 return;
11199 case 'B':
11200 if (ASSEMBLER_DIALECT == ASM_ATT)
11201 putc ('b', file);
11202 return;
11204 case 'Q':
11205 if (ASSEMBLER_DIALECT == ASM_ATT)
11206 putc ('l', file);
11207 return;
11209 case 'S':
11210 if (ASSEMBLER_DIALECT == ASM_ATT)
11211 putc ('s', file);
11212 return;
11214 case 'T':
11215 if (ASSEMBLER_DIALECT == ASM_ATT)
11216 putc ('t', file);
11217 return;
11219 case 'z':
11220 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11222 	  /* Opcodes don't get size suffixes when using Intel syntax.  */
11223 if (ASSEMBLER_DIALECT == ASM_INTEL)
11224 return;
11226 switch (GET_MODE_SIZE (GET_MODE (x)))
11228 case 1:
11229 putc ('b', file);
11230 return;
11232 case 2:
11233 putc ('w', file);
11234 return;
11236 case 4:
11237 putc ('l', file);
11238 return;
11240 case 8:
11241 putc ('q', file);
11242 return;
11244 default:
11245 output_operand_lossage
11246 ("invalid operand size for operand code '%c'", code);
11247 return;
11251 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11252 warning
11253 (0, "non-integer operand used with operand code '%c'", code);
11254 /* FALLTHRU */
11256 case 'Z':
11257 	  /* 387 opcodes don't get size suffixes when using Intel syntax.  */
11258 if (ASSEMBLER_DIALECT == ASM_INTEL)
11259 return;
11261 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT)
11263 switch (GET_MODE_SIZE (GET_MODE (x)))
11265 case 2:
11266 #ifdef HAVE_AS_IX86_FILDS
11267 putc ('s', file);
11268 #endif
11269 return;
11271 case 4:
11272 putc ('l', file);
11273 return;
11275 case 8:
11276 #ifdef HAVE_AS_IX86_FILDQ
11277 putc ('q', file);
11278 #else
11279 fputs ("ll", file);
11280 #endif
11281 return;
11283 default:
11284 break;
11287 else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
11289 /* 387 opcodes don't get size suffixes
11290 if the operands are registers. */
11291 if (STACK_REG_P (x))
11292 return;
11294 switch (GET_MODE_SIZE (GET_MODE (x)))
11296 case 4:
11297 putc ('s', file);
11298 return;
11300 case 8:
11301 putc ('l', file);
11302 return;
11304 case 12:
11305 case 16:
11306 putc ('t', file);
11307 return;
11309 default:
11310 break;
11313 else
11315 output_operand_lossage
11316 ("invalid operand type used with operand code '%c'", code);
11317 return;
11320 output_operand_lossage
11321 ("invalid operand size for operand code '%c'", code);
11322 return;
11324 case 'd':
11325 case 'b':
11326 case 'w':
11327 case 'k':
11328 case 'q':
11329 case 'h':
11330 case 't':
11331 case 'y':
11332 case 'x':
11333 case 'X':
11334 case 'P':
11335 break;
11337 case 's':
11338 if (CONST_INT_P (x) || ! SHIFT_DOUBLE_OMITS_COUNT)
11340 PRINT_OPERAND (file, x, 0);
11341 fputs (", ", file);
11343 return;
11345 case 'D':
11346 	  /* A little bit of brain damage here.  The SSE compare instructions
11347 	     use completely different names for the comparisons than the
11348 	     fp conditional moves do.  */
11349 if (TARGET_AVX)
11351 switch (GET_CODE (x))
11353 case EQ:
11354 fputs ("eq", file);
11355 break;
11356 case UNEQ:
11357 fputs ("eq_us", file);
11358 break;
11359 case LT:
11360 fputs ("lt", file);
11361 break;
11362 case UNLT:
11363 fputs ("nge", file);
11364 break;
11365 case LE:
11366 fputs ("le", file);
11367 break;
11368 case UNLE:
11369 fputs ("ngt", file);
11370 break;
11371 case UNORDERED:
11372 fputs ("unord", file);
11373 break;
11374 case NE:
11375 fputs ("neq", file);
11376 break;
11377 case LTGT:
11378 fputs ("neq_oq", file);
11379 break;
11380 case GE:
11381 fputs ("ge", file);
11382 break;
11383 case UNGE:
11384 fputs ("nlt", file);
11385 break;
11386 case GT:
11387 fputs ("gt", file);
11388 break;
11389 case UNGT:
11390 fputs ("nle", file);
11391 break;
11392 case ORDERED:
11393 fputs ("ord", file);
11394 break;
11395 default:
11396 output_operand_lossage ("operand is not a condition code, invalid operand code 'D'");
11397 return;
11400 else
11402 switch (GET_CODE (x))
11404 case EQ:
11405 case UNEQ:
11406 fputs ("eq", file);
11407 break;
11408 case LT:
11409 case UNLT:
11410 fputs ("lt", file);
11411 break;
11412 case LE:
11413 case UNLE:
11414 fputs ("le", file);
11415 break;
11416 case UNORDERED:
11417 fputs ("unord", file);
11418 break;
11419 case NE:
11420 case LTGT:
11421 fputs ("neq", file);
11422 break;
11423 case UNGE:
11424 case GE:
11425 fputs ("nlt", file);
11426 break;
11427 case UNGT:
11428 case GT:
11429 fputs ("nle", file);
11430 break;
11431 case ORDERED:
11432 fputs ("ord", file);
11433 break;
11434 default:
11435 output_operand_lossage ("operand is not a condition code, invalid operand code 'D'");
11436 return;
11439 return;
11440 case 'O':
11441 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11442 if (ASSEMBLER_DIALECT == ASM_ATT)
11444 switch (GET_MODE (x))
11446 case HImode: putc ('w', file); break;
11447 case SImode:
11448 case SFmode: putc ('l', file); break;
11449 case DImode:
11450 case DFmode: putc ('q', file); break;
11451 default: gcc_unreachable ();
11453 putc ('.', file);
11455 #endif
11456 return;
11457 case 'C':
11458 if (!COMPARISON_P (x))
11460 output_operand_lossage ("operand is neither a constant nor a "
11461 "condition code, invalid operand code "
11462 "'C'");
11463 return;
11465 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 0, file);
11466 return;
11467 case 'F':
11468 if (!COMPARISON_P (x))
11470 output_operand_lossage ("operand is neither a constant nor a "
11471 "condition code, invalid operand code "
11472 "'F'");
11473 return;
11475 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11476 if (ASSEMBLER_DIALECT == ASM_ATT)
11477 putc ('.', file);
11478 #endif
11479 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 0, 1, file);
11480 return;
11482 /* Like above, but reverse condition */
11483 case 'c':
11484 /* Check to see if argument to %c is really a constant
11485 and not a condition code which needs to be reversed. */
11486 if (!COMPARISON_P (x))
11488 output_operand_lossage ("operand is neither a constant nor a "
11489 "condition code, invalid operand "
11490 "code 'c'");
11491 return;
11493 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 0, file);
11494 return;
11495 case 'f':
11496 if (!COMPARISON_P (x))
11498 output_operand_lossage ("operand is neither a constant nor a "
11499 "condition code, invalid operand "
11500 "code 'f'");
11501 return;
11503 #ifdef HAVE_AS_IX86_CMOV_SUN_SYNTAX
11504 if (ASSEMBLER_DIALECT == ASM_ATT)
11505 putc ('.', file);
11506 #endif
11507 put_condition_code (GET_CODE (x), GET_MODE (XEXP (x, 0)), 1, 1, file);
11508 return;
11510 case 'E':
11511 put_condition_code (GET_CODE (x), CCmode, 0, 0, file);
11512 return;
11514 case 'e':
11515 put_condition_code (GET_CODE (x), CCmode, 1, 0, file);
11516 return;
11518 case 'H':
11519 /* It doesn't actually matter what mode we use here, as we're
11520 only going to use this for printing. */
11521 x = adjust_address_nv (x, DImode, 8);
11522 break;
11524 case '+':
11526 rtx x;
11528 if (!optimize
11529 || optimize_function_for_size_p (cfun) || !TARGET_BRANCH_PREDICTION_HINTS)
11530 return;
11532 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
11533 if (x)
11535 int pred_val = INTVAL (XEXP (x, 0));
11537 if (pred_val < REG_BR_PROB_BASE * 45 / 100
11538 || pred_val > REG_BR_PROB_BASE * 55 / 100)
11540 int taken = pred_val > REG_BR_PROB_BASE / 2;
11541 int cputaken = final_forward_branch_p (current_output_insn) == 0;
11543 		/* Emit hints only in cases where the default branch prediction
11544 		   heuristics would fail.  */
11545 if (taken != cputaken)
11547 /* We use 3e (DS) prefix for taken branches and
11548 2e (CS) prefix for not taken branches. */
11549 if (taken)
11550 fputs ("ds ; ", file);
11551 else
11552 fputs ("cs ; ", file);
11556 return;
11559 case ';':
11560 #if TARGET_MACHO
11561 fputs (" ; ", file);
11562 #else
11563 putc (' ', file);
11564 #endif
11565 return;
11567 default:
11568 output_operand_lossage ("invalid operand code '%c'", code);
11572 if (REG_P (x))
11573 print_reg (x, code, file);
11575 else if (MEM_P (x))
11577 /* No `byte ptr' prefix for call instructions or BLKmode operands. */
11578 if (ASSEMBLER_DIALECT == ASM_INTEL && code != 'X' && code != 'P'
11579 && GET_MODE (x) != BLKmode)
11581 const char * size;
11582 switch (GET_MODE_SIZE (GET_MODE (x)))
11584 case 1: size = "BYTE"; break;
11585 case 2: size = "WORD"; break;
11586 case 4: size = "DWORD"; break;
11587 case 8: size = "QWORD"; break;
11588 case 12: size = "XWORD"; break;
11589 case 16:
11590 if (GET_MODE (x) == XFmode)
11591 size = "XWORD";
11592 else
11593 size = "XMMWORD";
11594 break;
11595 default:
11596 gcc_unreachable ();
11599 /* Check for explicit size override (codes 'b', 'w' and 'k') */
11600 if (code == 'b')
11601 size = "BYTE";
11602 else if (code == 'w')
11603 size = "WORD";
11604 else if (code == 'k')
11605 size = "DWORD";
11607 fputs (size, file);
11608 fputs (" PTR ", file);
11611 x = XEXP (x, 0);
11612 /* Avoid (%rip) for call operands. */
11613 if (CONSTANT_ADDRESS_P (x) && code == 'P'
11614 && !CONST_INT_P (x))
11615 output_addr_const (file, x);
11616 else if (this_is_asm_operands && ! address_operand (x, VOIDmode))
11617 output_operand_lossage ("invalid constraints for operand");
11618 else
11619 output_address (x);
11622 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
11624 REAL_VALUE_TYPE r;
11625 long l;
11627 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11628 REAL_VALUE_TO_TARGET_SINGLE (r, l);
11630 if (ASSEMBLER_DIALECT == ASM_ATT)
11631 putc ('$', file);
11632 fprintf (file, "0x%08lx", (long unsigned int) l);
11635 /* These float cases don't actually occur as immediate operands. */
11636 else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
11638 char dstr[30];
11640 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11641 fputs (dstr, file);
11644 else if (GET_CODE (x) == CONST_DOUBLE
11645 && GET_MODE (x) == XFmode)
11647 char dstr[30];
11649 real_to_decimal (dstr, CONST_DOUBLE_REAL_VALUE (x), sizeof (dstr), 0, 1);
11650 fputs (dstr, file);
11653 else
11655 /* We have patterns that allow zero sets of memory, for instance.
11656 In 64-bit mode, we should probably support all 8-byte vectors,
11657 since we can in fact encode that into an immediate. */
11658 if (GET_CODE (x) == CONST_VECTOR)
11660 gcc_assert (x == CONST0_RTX (GET_MODE (x)));
11661 x = const0_rtx;
11664 if (code != 'P')
11666 if (CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE)
11668 if (ASSEMBLER_DIALECT == ASM_ATT)
11669 putc ('$', file);
11671 else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
11672 || GET_CODE (x) == LABEL_REF)
11674 if (ASSEMBLER_DIALECT == ASM_ATT)
11675 putc ('$', file);
11676 else
11677 fputs ("OFFSET FLAT:", file);
11680 if (CONST_INT_P (x))
11681 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
11682 else if (flag_pic)
11683 output_pic_addr_const (file, x, code);
11684 else
11685 output_addr_const (file, x);
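      /* For illustration only (added, not part of the original source):
	 when printing a constant operand that is not a call target ('P'),
	 the code above prefixes immediates with '$' under AT&T syntax,
	 while symbolic constants get "OFFSET FLAT:" under Intel syntax;
	 e.g. the integer 42 prints as "$42" (AT&T) or "42" (Intel), and a
	 symbol `foo' prints as "$foo" (AT&T) or "OFFSET FLAT:foo" (Intel).  */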
11689 /* Print a memory operand whose address is ADDR. */
11691 void
11692 print_operand_address (FILE *file, rtx addr)
11694 struct ix86_address parts;
11695 rtx base, index, disp;
11696 int scale;
11697 int ok = ix86_decompose_address (addr, &parts);
11699 gcc_assert (ok);
11701 base = parts.base;
11702 index = parts.index;
11703 disp = parts.disp;
11704 scale = parts.scale;
11706 switch (parts.seg)
11708 case SEG_DEFAULT:
11709 break;
11710 case SEG_FS:
11711 case SEG_GS:
11712 if (ASSEMBLER_DIALECT == ASM_ATT)
11713 putc ('%', file);
11714 fputs ((parts.seg == SEG_FS ? "fs:" : "gs:"), file);
11715 break;
11716 default:
11717 gcc_unreachable ();
11720   /* Use the one-byte-shorter RIP-relative addressing for 64-bit mode.  */
11721 if (TARGET_64BIT && !base && !index)
11723 rtx symbol = disp;
11725 if (GET_CODE (disp) == CONST
11726 && GET_CODE (XEXP (disp, 0)) == PLUS
11727 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11728 symbol = XEXP (XEXP (disp, 0), 0);
11730 if (GET_CODE (symbol) == LABEL_REF
11731 || (GET_CODE (symbol) == SYMBOL_REF
11732 && SYMBOL_REF_TLS_MODEL (symbol) == 0))
11733 base = pc_rtx;
11735 if (!base && !index)
11737 /* Displacement only requires special attention. */
11739 if (CONST_INT_P (disp))
11741 if (ASSEMBLER_DIALECT == ASM_INTEL && parts.seg == SEG_DEFAULT)
11742 fputs ("ds:", file);
11743 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (disp));
11745 else if (flag_pic)
11746 output_pic_addr_const (file, disp, 0);
11747 else
11748 output_addr_const (file, disp);
11750 else
11752 if (ASSEMBLER_DIALECT == ASM_ATT)
11754 if (disp)
11756 if (flag_pic)
11757 output_pic_addr_const (file, disp, 0);
11758 else if (GET_CODE (disp) == LABEL_REF)
11759 output_asm_label (disp);
11760 else
11761 output_addr_const (file, disp);
11764 putc ('(', file);
11765 if (base)
11766 print_reg (base, 0, file);
11767 if (index)
11769 putc (',', file);
11770 print_reg (index, 0, file);
11771 if (scale != 1)
11772 fprintf (file, ",%d", scale);
11774 putc (')', file);
11776 else
11778 rtx offset = NULL_RTX;
11780 if (disp)
11782 /* Pull out the offset of a symbol; print any symbol itself. */
11783 if (GET_CODE (disp) == CONST
11784 && GET_CODE (XEXP (disp, 0)) == PLUS
11785 && CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
11787 offset = XEXP (XEXP (disp, 0), 1);
11788 disp = gen_rtx_CONST (VOIDmode,
11789 XEXP (XEXP (disp, 0), 0));
11792 if (flag_pic)
11793 output_pic_addr_const (file, disp, 0);
11794 else if (GET_CODE (disp) == LABEL_REF)
11795 output_asm_label (disp);
11796 else if (CONST_INT_P (disp))
11797 offset = disp;
11798 else
11799 output_addr_const (file, disp);
11802 putc ('[', file);
11803 if (base)
11805 print_reg (base, 0, file);
11806 if (offset)
11808 if (INTVAL (offset) >= 0)
11809 putc ('+', file);
11810 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11813 else if (offset)
11814 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (offset));
11815 else
11816 putc ('0', file);
11818 if (index)
11820 putc ('+', file);
11821 print_reg (index, 0, file);
11822 if (scale != 1)
11823 fprintf (file, "*%d", scale);
11825 putc (']', file);
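      /* Illustrative output (added for exposition): for an address with
	 base %ebx, index %ecx, scale 4 and displacement 8, the AT&T branch
	 above prints "8(%ebx,%ecx,4)" whereas the Intel branch prints
	 "[ebx+8+ecx*4]"; in 64-bit mode a bare label or non-TLS symbol
	 displacement is given %rip as the base, yielding e.g. "foo(%rip)".  */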
11830 bool
11831 output_addr_const_extra (FILE *file, rtx x)
11833 rtx op;
11835 if (GET_CODE (x) != UNSPEC)
11836 return false;
11838 op = XVECEXP (x, 0, 0);
11839 switch (XINT (x, 1))
11841 case UNSPEC_GOTTPOFF:
11842 output_addr_const (file, op);
11843 /* FIXME: This might be @TPOFF in Sun ld. */
11844 fputs ("@GOTTPOFF", file);
11845 break;
11846 case UNSPEC_TPOFF:
11847 output_addr_const (file, op);
11848 fputs ("@TPOFF", file);
11849 break;
11850 case UNSPEC_NTPOFF:
11851 output_addr_const (file, op);
11852 if (TARGET_64BIT)
11853 fputs ("@TPOFF", file);
11854 else
11855 fputs ("@NTPOFF", file);
11856 break;
11857 case UNSPEC_DTPOFF:
11858 output_addr_const (file, op);
11859 fputs ("@DTPOFF", file);
11860 break;
11861 case UNSPEC_GOTNTPOFF:
11862 output_addr_const (file, op);
11863 if (TARGET_64BIT)
11864 fputs (ASSEMBLER_DIALECT == ASM_ATT ?
11865 "@GOTTPOFF(%rip)" : "@GOTTPOFF[rip]", file);
11866 else
11867 fputs ("@GOTNTPOFF", file);
11868 break;
11869 case UNSPEC_INDNTPOFF:
11870 output_addr_const (file, op);
11871 fputs ("@INDNTPOFF", file);
11872 break;
11873 #if TARGET_MACHO
11874 case UNSPEC_MACHOPIC_OFFSET:
11875 output_addr_const (file, op);
11876 putc ('-', file);
11877 machopic_output_function_base_name (file);
11878 break;
11879 #endif
11881 default:
11882 return false;
11885 return true;
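  /* Example (for exposition only): a UNSPEC_GOTNTPOFF reference to a symbol
     `foo' is printed as "foo@GOTTPOFF(%rip)" under 64-bit AT&T syntax,
     "foo@GOTTPOFF[rip]" under Intel syntax, and "foo@GOTNTPOFF" in 32-bit
     code, matching the branches above.  */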
11888 /* Split one or more DImode RTL references into pairs of SImode
11889 references. The RTL can be REG, offsettable MEM, integer constant, or
11890 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
11891 split and "num" is its length. lo_half and hi_half are output arrays
11892 that parallel "operands". */
11894 void
11895 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11897 while (num--)
11899 rtx op = operands[num];
11901       /* simplify_subreg refuses to split volatile memory addresses,
11902          but we still have to handle them.  */
11903 if (MEM_P (op))
11905 lo_half[num] = adjust_address (op, SImode, 0);
11906 hi_half[num] = adjust_address (op, SImode, 4);
11908 else
11910 lo_half[num] = simplify_gen_subreg (SImode, op,
11911 GET_MODE (op) == VOIDmode
11912 ? DImode : GET_MODE (op), 0);
11913 hi_half[num] = simplify_gen_subreg (SImode, op,
11914 GET_MODE (op) == VOIDmode
11915 ? DImode : GET_MODE (op), 4);
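/* Usage sketch (added for exposition; the real callers live in the machine
   description, and the array sizes here are hypothetical):

     rtx lo[2], hi[2];
     split_di (operands, 2, lo, hi);

   after which lo[i]/hi[i] hold the SImode halves of operands[i]; e.g. a
   (mem:DI ...) becomes SImode memory references at offsets 0 and 4, and a
   DImode register becomes the corresponding SImode subregs.  */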
11919 /* Split one or more TImode RTL references into pairs of DImode
11920 references. The RTL can be REG, offsettable MEM, integer constant, or
11921    CONST_DOUBLE.  "operands" is a pointer to an array of TImode RTL to
11922 split and "num" is its length. lo_half and hi_half are output arrays
11923 that parallel "operands". */
11925 void
11926 split_ti (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
11928 while (num--)
11930 rtx op = operands[num];
11932       /* simplify_subreg refuses to split volatile memory addresses, but we
11933          still have to handle them.  */
11934 if (MEM_P (op))
11936 lo_half[num] = adjust_address (op, DImode, 0);
11937 hi_half[num] = adjust_address (op, DImode, 8);
11939 else
11941 lo_half[num] = simplify_gen_subreg (DImode, op, TImode, 0);
11942 hi_half[num] = simplify_gen_subreg (DImode, op, TImode, 8);
11947 /* Output code to perform a 387 binary operation in INSN, one of PLUS,
11948 MINUS, MULT or DIV. OPERANDS are the insn operands, where operands[3]
11949 is the expression of the binary operation. The output may either be
11950 emitted here, or returned to the caller, like all output_* functions.
11952 There is no guarantee that the operands are the same mode, as they
11953 might be within FLOAT or FLOAT_EXTEND expressions. */
11955 #ifndef SYSV386_COMPAT
11956 /* Set to 1 for compatibility with brain-damaged assemblers. No-one
11957 wants to fix the assemblers because that causes incompatibility
11958 with gcc. No-one wants to fix gcc because that causes
11959 incompatibility with assemblers... You can use the option of
11960 -DSYSV386_COMPAT=0 if you recompile both gcc and gas this way. */
11961 #define SYSV386_COMPAT 1
11962 #endif
11964 const char *
11965 output_387_binary_op (rtx insn, rtx *operands)
11967 static char buf[40];
11968 const char *p;
11969 const char *ssep;
11970 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]) || SSE_REG_P (operands[2]);
11972 #ifdef ENABLE_CHECKING
11973 /* Even if we do not want to check the inputs, this documents input
11974 constraints. Which helps in understanding the following code. */
11975 if (STACK_REG_P (operands[0])
11976 && ((REG_P (operands[1])
11977 && REGNO (operands[0]) == REGNO (operands[1])
11978 && (STACK_REG_P (operands[2]) || MEM_P (operands[2])))
11979 || (REG_P (operands[2])
11980 && REGNO (operands[0]) == REGNO (operands[2])
11981 && (STACK_REG_P (operands[1]) || MEM_P (operands[1]))))
11982 && (STACK_TOP_P (operands[1]) || STACK_TOP_P (operands[2])))
11983 ; /* ok */
11984 else
11985 gcc_assert (is_sse);
11986 #endif
11988 switch (GET_CODE (operands[3]))
11990 case PLUS:
11991 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
11992 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
11993 p = "fiadd";
11994 else
11995 p = "fadd";
11996 ssep = "vadd";
11997 break;
11999 case MINUS:
12000 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12001 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12002 p = "fisub";
12003 else
12004 p = "fsub";
12005 ssep = "vsub";
12006 break;
12008 case MULT:
12009 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12010 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12011 p = "fimul";
12012 else
12013 p = "fmul";
12014 ssep = "vmul";
12015 break;
12017 case DIV:
12018 if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_INT
12019 || GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT)
12020 p = "fidiv";
12021 else
12022 p = "fdiv";
12023 ssep = "vdiv";
12024 break;
12026 default:
12027 gcc_unreachable ();
12030 if (is_sse)
12032 if (TARGET_AVX)
12034 strcpy (buf, ssep);
12035 if (GET_MODE (operands[0]) == SFmode)
12036 strcat (buf, "ss\t{%2, %1, %0|%0, %1, %2}");
12037 else
12038 strcat (buf, "sd\t{%2, %1, %0|%0, %1, %2}");
12040 else
12042 strcpy (buf, ssep + 1);
12043 if (GET_MODE (operands[0]) == SFmode)
12044 strcat (buf, "ss\t{%2, %0|%0, %2}");
12045 else
12046 strcat (buf, "sd\t{%2, %0|%0, %2}");
12048 return buf;
12050 strcpy (buf, p);
12052 switch (GET_CODE (operands[3]))
12054 case MULT:
12055 case PLUS:
12056 if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
12058 rtx temp = operands[2];
12059 operands[2] = operands[1];
12060 operands[1] = temp;
12063       /* We know operands[0] == operands[1].  */
12065 if (MEM_P (operands[2]))
12067 p = "%Z2\t%2";
12068 break;
12071 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12073 if (STACK_TOP_P (operands[0]))
12074 /* How is it that we are storing to a dead operand[2]?
12075 Well, presumably operands[1] is dead too. We can't
12076 store the result to st(0) as st(0) gets popped on this
12077 instruction. Instead store to operands[2] (which I
12078 think has to be st(1)). st(1) will be popped later.
12079 gcc <= 2.8.1 didn't have this check and generated
12080 assembly code that the Unixware assembler rejected. */
12081 p = "p\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12082 else
12083 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12084 break;
12087 if (STACK_TOP_P (operands[0]))
12088 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12089 else
12090 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12091 break;
12093 case MINUS:
12094 case DIV:
12095 if (MEM_P (operands[1]))
12097 p = "r%Z1\t%1";
12098 break;
12101 if (MEM_P (operands[2]))
12103 p = "%Z2\t%2";
12104 break;
12107 if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
12109 #if SYSV386_COMPAT
12110 /* The SystemV/386 SVR3.2 assembler, and probably all AT&T
12111 derived assemblers, confusingly reverse the direction of
12112 the operation for fsub{r} and fdiv{r} when the
12113 destination register is not st(0). The Intel assembler
12114 doesn't have this brain damage. Read !SYSV386_COMPAT to
12115 figure out what the hardware really does. */
12116 if (STACK_TOP_P (operands[0]))
12117 p = "{p\t%0, %2|rp\t%2, %0}";
12118 else
12119 p = "{rp\t%2, %0|p\t%0, %2}";
12120 #else
12121 if (STACK_TOP_P (operands[0]))
12122 /* As above for fmul/fadd, we can't store to st(0). */
12123 p = "rp\t{%0, %2|%2, %0}"; /* st(1) = st(0) op st(1); pop */
12124 else
12125 p = "p\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0); pop */
12126 #endif
12127 break;
12130 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
12132 #if SYSV386_COMPAT
12133 if (STACK_TOP_P (operands[0]))
12134 p = "{rp\t%0, %1|p\t%1, %0}";
12135 else
12136 p = "{p\t%1, %0|rp\t%0, %1}";
12137 #else
12138 if (STACK_TOP_P (operands[0]))
12139 p = "p\t{%0, %1|%1, %0}"; /* st(1) = st(1) op st(0); pop */
12140 else
12141 p = "rp\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2); pop */
12142 #endif
12143 break;
12146 if (STACK_TOP_P (operands[0]))
12148 if (STACK_TOP_P (operands[1]))
12149 p = "\t{%y2, %0|%0, %y2}"; /* st(0) = st(0) op st(r2) */
12150 else
12151 p = "r\t{%y1, %0|%0, %y1}"; /* st(0) = st(r1) op st(0) */
12152 break;
12154 else if (STACK_TOP_P (operands[1]))
12156 #if SYSV386_COMPAT
12157 p = "{\t%1, %0|r\t%0, %1}";
12158 #else
12159 p = "r\t{%1, %0|%0, %1}"; /* st(r2) = st(0) op st(r2) */
12160 #endif
12162 else
12164 #if SYSV386_COMPAT
12165 p = "{r\t%2, %0|\t%0, %2}";
12166 #else
12167 p = "\t{%2, %0|%0, %2}"; /* st(r1) = st(r1) op st(0) */
12168 #endif
12170 break;
12172 default:
12173 gcc_unreachable ();
12176 strcat (buf, p);
12177 return buf;
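/* Example outputs (added for exposition): for a DFmode PLUS whose operands
   are SSE registers, the code above returns
   "vaddsd\t{%2, %1, %0|%0, %1, %2}" when TARGET_AVX and
   "addsd\t{%2, %0|%0, %2}" otherwise; for the x87 case with operands[2]
   in memory it returns e.g. "fadd%Z2\t%2", and the register-register
   forms append the "p"/"rp" popping variants chosen above.  */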
12180 /* Return needed mode for entity in optimize_mode_switching pass. */
12183 ix86_mode_needed (int entity, rtx insn)
12185 enum attr_i387_cw mode;
12187 /* The mode UNINITIALIZED is used to store control word after a
12188 function call or ASM pattern. The mode ANY specify that function
12189 has no requirements on the control word and make no changes in the
12190 bits we are interested in. */
12192 if (CALL_P (insn)
12193 || (NONJUMP_INSN_P (insn)
12194 && (asm_noperands (PATTERN (insn)) >= 0
12195 || GET_CODE (PATTERN (insn)) == ASM_INPUT)))
12196 return I387_CW_UNINITIALIZED;
12198 if (recog_memoized (insn) < 0)
12199 return I387_CW_ANY;
12201 mode = get_attr_i387_cw (insn);
12203 switch (entity)
12205 case I387_TRUNC:
12206 if (mode == I387_CW_TRUNC)
12207 return mode;
12208 break;
12210 case I387_FLOOR:
12211 if (mode == I387_CW_FLOOR)
12212 return mode;
12213 break;
12215 case I387_CEIL:
12216 if (mode == I387_CW_CEIL)
12217 return mode;
12218 break;
12220 case I387_MASK_PM:
12221 if (mode == I387_CW_MASK_PM)
12222 return mode;
12223 break;
12225 default:
12226 gcc_unreachable ();
12229 return I387_CW_ANY;
12232 /* Output code to initialize control word copies used by trunc?f?i and
12233 rounding patterns. CURRENT_MODE is set to current control word,
12234 while NEW_MODE is set to new control word. */
12236 void
12237 emit_i387_cw_initialization (int mode)
12239 rtx stored_mode = assign_386_stack_local (HImode, SLOT_CW_STORED);
12240 rtx new_mode;
12242 enum ix86_stack_slot slot;
12244 rtx reg = gen_reg_rtx (HImode);
12246 emit_insn (gen_x86_fnstcw_1 (stored_mode));
12247 emit_move_insn (reg, copy_rtx (stored_mode));
12249 if (TARGET_64BIT || TARGET_PARTIAL_REG_STALL
12250 || optimize_function_for_size_p (cfun))
12252 switch (mode)
12254 case I387_CW_TRUNC:
12255 /* round toward zero (truncate) */
12256 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0c00)));
12257 slot = SLOT_CW_TRUNC;
12258 break;
12260 case I387_CW_FLOOR:
12261 /* round down toward -oo */
12262 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12263 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0400)));
12264 slot = SLOT_CW_FLOOR;
12265 break;
12267 case I387_CW_CEIL:
12268 /* round up toward +oo */
12269 emit_insn (gen_andhi3 (reg, reg, GEN_INT (~0x0c00)));
12270 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0800)));
12271 slot = SLOT_CW_CEIL;
12272 break;
12274 case I387_CW_MASK_PM:
12275 /* mask precision exception for nearbyint() */
12276 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12277 slot = SLOT_CW_MASK_PM;
12278 break;
12280 default:
12281 gcc_unreachable ();
12284 else
12286 switch (mode)
12288 case I387_CW_TRUNC:
12289 /* round toward zero (truncate) */
12290 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0xc)));
12291 slot = SLOT_CW_TRUNC;
12292 break;
12294 case I387_CW_FLOOR:
12295 /* round down toward -oo */
12296 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x4)));
12297 slot = SLOT_CW_FLOOR;
12298 break;
12300 case I387_CW_CEIL:
12301 /* round up toward +oo */
12302 emit_insn (gen_movsi_insv_1 (reg, GEN_INT (0x8)));
12303 slot = SLOT_CW_CEIL;
12304 break;
12306 case I387_CW_MASK_PM:
12307 /* mask precision exception for nearbyint() */
12308 emit_insn (gen_iorhi3 (reg, reg, GEN_INT (0x0020)));
12309 slot = SLOT_CW_MASK_PM;
12310 break;
12312 default:
12313 gcc_unreachable ();
12317 gcc_assert (slot < MAX_386_STACK_LOCALS);
12319 new_mode = assign_386_stack_local (HImode, slot);
12320 emit_move_insn (new_mode, reg);
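  /* Background note (added for exposition): bits 10-11 of the x87 control
     word select the rounding mode, which is why the code above ORs in
     0x0c00 for truncation (11), 0x0400 for round-down (01) and 0x0800 for
     round-up (10) after clearing the field with ~0x0c00; 0x0020 masks the
     precision exception for the nearbyint case.  */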
12323 /* Output code for INSN to convert a float to a signed int. OPERANDS
12324 are the insn operands. The output may be [HSD]Imode and the input
12325 operand may be [SDX]Fmode. */
12327 const char *
12328 output_fix_trunc (rtx insn, rtx *operands, int fisttp)
12330 int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12331 int dimode_p = GET_MODE (operands[0]) == DImode;
12332 int round_mode = get_attr_i387_cw (insn);
12334 /* Jump through a hoop or two for DImode, since the hardware has no
12335 non-popping instruction. We used to do this a different way, but
12336 that was somewhat fragile and broke with post-reload splitters. */
12337 if ((dimode_p || fisttp) && !stack_top_dies)
12338 output_asm_insn ("fld\t%y1", operands);
12340 gcc_assert (STACK_TOP_P (operands[1]));
12341 gcc_assert (MEM_P (operands[0]));
12342 gcc_assert (GET_MODE (operands[1]) != TFmode);
12344 if (fisttp)
12345 output_asm_insn ("fisttp%Z0\t%0", operands);
12346 else
12348 if (round_mode != I387_CW_ANY)
12349 output_asm_insn ("fldcw\t%3", operands);
12350 if (stack_top_dies || dimode_p)
12351 output_asm_insn ("fistp%Z0\t%0", operands);
12352 else
12353 output_asm_insn ("fist%Z0\t%0", operands);
12354 if (round_mode != I387_CW_ANY)
12355 output_asm_insn ("fldcw\t%2", operands);
12358 return "";
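  /* Example sequence (added for exposition): converting an SFmode value in
     st(0) to a SImode integer in memory with a non-default rounding mode
     emits, per the code above, roughly

       fldcw	%3		(load the truncating control word)
       fistp%Z0	%0		(or fist%Z0 if st(0) must survive)
       fldcw	%2		(restore the original control word)

     while the fisttp path needs no control-word juggling.  */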
12361 /* Output code for x87 ffreep insn. The OPNO argument, which may only
12362 have the values zero or one, indicates the ffreep insn's operand
12363 from the OPERANDS array. */
12365 static const char *
12366 output_387_ffreep (rtx *operands ATTRIBUTE_UNUSED, int opno)
12368 if (TARGET_USE_FFREEP)
12369 #ifdef HAVE_AS_IX86_FFREEP
12370 return opno ? "ffreep\t%y1" : "ffreep\t%y0";
12371 #else
12373 static char retval[32];
12374 int regno = REGNO (operands[opno]);
12376 gcc_assert (FP_REGNO_P (regno));
12378 regno -= FIRST_STACK_REG;
12380 snprintf (retval, sizeof (retval), ASM_SHORT "0xc%ddf", regno);
12381 return retval;
12383 #endif
12385 return opno ? "fstp\t%y1" : "fstp\t%y0";
12389 /* Output code for INSN to compare OPERANDS. EFLAGS_P is 1 when fcomi
12390 should be used. UNORDERED_P is true when fucom should be used. */
12392 const char *
12393 output_fp_compare (rtx insn, rtx *operands, int eflags_p, int unordered_p)
12395 int stack_top_dies;
12396 rtx cmp_op0, cmp_op1;
12397 int is_sse = SSE_REG_P (operands[0]) || SSE_REG_P (operands[1]);
12399 if (eflags_p)
12401 cmp_op0 = operands[0];
12402 cmp_op1 = operands[1];
12404 else
12406 cmp_op0 = operands[1];
12407 cmp_op1 = operands[2];
12410 if (is_sse)
12412 static const char ucomiss[] = "vucomiss\t{%1, %0|%0, %1}";
12413 static const char ucomisd[] = "vucomisd\t{%1, %0|%0, %1}";
12414 static const char comiss[] = "vcomiss\t{%1, %0|%0, %1}";
12415 static const char comisd[] = "vcomisd\t{%1, %0|%0, %1}";
12417 if (GET_MODE (operands[0]) == SFmode)
12418 if (unordered_p)
12419 return &ucomiss[TARGET_AVX ? 0 : 1];
12420 else
12421 return &comiss[TARGET_AVX ? 0 : 1];
12422 else
12423 if (unordered_p)
12424 return &ucomisd[TARGET_AVX ? 0 : 1];
12425 else
12426 return &comisd[TARGET_AVX ? 0 : 1];
12429 gcc_assert (STACK_TOP_P (cmp_op0));
12431 stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
12433 if (cmp_op1 == CONST0_RTX (GET_MODE (cmp_op1)))
12435 if (stack_top_dies)
12437 output_asm_insn ("ftst\n\tfnstsw\t%0", operands);
12438 return output_387_ffreep (operands, 1);
12440 else
12441 return "ftst\n\tfnstsw\t%0";
12444 if (STACK_REG_P (cmp_op1)
12445 && stack_top_dies
12446 && find_regno_note (insn, REG_DEAD, REGNO (cmp_op1))
12447 && REGNO (cmp_op1) != FIRST_STACK_REG)
12449       /* If the top of the 387 stack dies, and the other operand
12450 	 is also a stack register that dies, then this must be an
12451 	 `fcompp' float compare.  */
12453 if (eflags_p)
12455 /* There is no double popping fcomi variant. Fortunately,
12456 eflags is immune from the fstp's cc clobbering. */
12457 if (unordered_p)
12458 output_asm_insn ("fucomip\t{%y1, %0|%0, %y1}", operands);
12459 else
12460 output_asm_insn ("fcomip\t{%y1, %0|%0, %y1}", operands);
12461 return output_387_ffreep (operands, 0);
12463 else
12465 if (unordered_p)
12466 return "fucompp\n\tfnstsw\t%0";
12467 else
12468 return "fcompp\n\tfnstsw\t%0";
12471 else
12473 /* Encoded here as eflags_p | intmode | unordered_p | stack_top_dies. */
12475 static const char * const alt[16] =
12477 "fcom%Z2\t%y2\n\tfnstsw\t%0",
12478 "fcomp%Z2\t%y2\n\tfnstsw\t%0",
12479 "fucom%Z2\t%y2\n\tfnstsw\t%0",
12480 "fucomp%Z2\t%y2\n\tfnstsw\t%0",
12482 "ficom%Z2\t%y2\n\tfnstsw\t%0",
12483 "ficomp%Z2\t%y2\n\tfnstsw\t%0",
12484 NULL,
12485 NULL,
12487 "fcomi\t{%y1, %0|%0, %y1}",
12488 "fcomip\t{%y1, %0|%0, %y1}",
12489 "fucomi\t{%y1, %0|%0, %y1}",
12490 "fucomip\t{%y1, %0|%0, %y1}",
12492 NULL,
12493 NULL,
12494 NULL,
12495 NULL
12498 int mask;
12499 const char *ret;
12501 mask = eflags_p << 3;
12502 mask |= (GET_MODE_CLASS (GET_MODE (cmp_op1)) == MODE_INT) << 2;
12503 mask |= unordered_p << 1;
12504 mask |= stack_top_dies;
12506 gcc_assert (mask < 16);
12507 ret = alt[mask];
12508 gcc_assert (ret);
12510 return ret;
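      /* Example (added for exposition): with eflags_p = 1, a floating-point
	 (non-integer) comparison, unordered_p = 1 and a dying stack top,
	 the mask computed above is (1<<3) | (0<<2) | (1<<1) | 1 = 11,
	 selecting "fucomip\t{%y1, %0|%0, %y1}" from the alt[] table.  */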
12514 void
12515 ix86_output_addr_vec_elt (FILE *file, int value)
12517 const char *directive = ASM_LONG;
12519 #ifdef ASM_QUAD
12520 if (TARGET_64BIT)
12521 directive = ASM_QUAD;
12522 #else
12523 gcc_assert (!TARGET_64BIT);
12524 #endif
12526 fprintf (file, "%s" LPREFIX "%d\n", directive, value);
12529 void
12530 ix86_output_addr_diff_elt (FILE *file, int value, int rel)
12532 const char *directive = ASM_LONG;
12534 #ifdef ASM_QUAD
12535 if (TARGET_64BIT && CASE_VECTOR_MODE == DImode)
12536 directive = ASM_QUAD;
12537 #else
12538 gcc_assert (!TARGET_64BIT);
12539 #endif
12540 /* We can't use @GOTOFF for text labels on VxWorks; see gotoff_operand. */
12541 if (TARGET_64BIT || TARGET_VXWORKS_RTP)
12542 fprintf (file, "%s" LPREFIX "%d-" LPREFIX "%d\n",
12543 directive, value, rel);
12544 else if (HAVE_AS_GOTOFF_IN_DATA)
12545 fprintf (file, ASM_LONG LPREFIX "%d@GOTOFF\n", value);
12546 #if TARGET_MACHO
12547 else if (TARGET_MACHO)
12549 fprintf (file, ASM_LONG LPREFIX "%d-", value);
12550 machopic_output_function_base_name (file);
12551 putc ('\n', file);
12553 #endif
12554 else
12555 asm_fprintf (file, ASM_LONG "%U%s+[.-" LPREFIX "%d]\n",
12556 GOT_SYMBOL_NAME, value);
12559 /* Generate either "mov $0, reg" or "xor reg, reg", as appropriate
12560 for the target. */
12562 void
12563 ix86_expand_clear (rtx dest)
12565 rtx tmp;
12567 /* We play register width games, which are only valid after reload. */
12568 gcc_assert (reload_completed);
12570 /* Avoid HImode and its attendant prefix byte. */
12571 if (GET_MODE_SIZE (GET_MODE (dest)) < 4)
12572 dest = gen_rtx_REG (SImode, REGNO (dest));
12573 tmp = gen_rtx_SET (VOIDmode, dest, const0_rtx);
12575 /* This predicate should match that for movsi_xor and movdi_xor_rex64. */
12576 if (reload_completed && (!TARGET_USE_MOV0 || optimize_insn_for_speed_p ()))
12578 rtx clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
12579 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, clob));
12582 emit_insn (tmp);
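  /* Illustrative result (added for exposition): clearing an SImode or
     narrower register normally becomes the parallel
       (set (reg) (const_int 0)) plus (clobber (reg:CC flags)),
     which assembles to "xor reg, reg"; only when TARGET_USE_MOV0 is set
     and the insn is not being optimized for speed does the plain,
     flag-preserving "mov $0, reg" form survive.  */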
12585 /* X is an unchanging MEM. If it is a constant pool reference, return
12586 the constant pool rtx, else NULL. */
12589 maybe_get_pool_constant (rtx x)
12591 x = ix86_delegitimize_address (XEXP (x, 0));
12593 if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
12594 return get_pool_constant (x);
12596 return NULL_RTX;
12599 void
12600 ix86_expand_move (enum machine_mode mode, rtx operands[])
12602 rtx op0, op1;
12603 enum tls_model model;
12605 op0 = operands[0];
12606 op1 = operands[1];
12608 if (GET_CODE (op1) == SYMBOL_REF)
12610 model = SYMBOL_REF_TLS_MODEL (op1);
12611 if (model)
12613 op1 = legitimize_tls_address (op1, model, true);
12614 op1 = force_operand (op1, op0);
12615 if (op1 == op0)
12616 return;
12618 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12619 && SYMBOL_REF_DLLIMPORT_P (op1))
12620 op1 = legitimize_dllimport_symbol (op1, false);
12622 else if (GET_CODE (op1) == CONST
12623 && GET_CODE (XEXP (op1, 0)) == PLUS
12624 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SYMBOL_REF)
12626 rtx addend = XEXP (XEXP (op1, 0), 1);
12627 rtx symbol = XEXP (XEXP (op1, 0), 0);
12628 rtx tmp = NULL;
12630 model = SYMBOL_REF_TLS_MODEL (symbol);
12631 if (model)
12632 tmp = legitimize_tls_address (symbol, model, true);
12633 else if (TARGET_DLLIMPORT_DECL_ATTRIBUTES
12634 && SYMBOL_REF_DLLIMPORT_P (symbol))
12635 tmp = legitimize_dllimport_symbol (symbol, true);
12637 if (tmp)
12639 tmp = force_operand (tmp, NULL);
12640 tmp = expand_simple_binop (Pmode, PLUS, tmp, addend,
12641 op0, 1, OPTAB_DIRECT);
12642 if (tmp == op0)
12643 return;
12647 if (flag_pic && mode == Pmode && symbolic_operand (op1, Pmode))
12649 if (TARGET_MACHO && !TARGET_64BIT)
12651 #if TARGET_MACHO
12652 if (MACHOPIC_PURE)
12654 rtx temp = ((reload_in_progress
12655 || ((op0 && REG_P (op0))
12656 && mode == Pmode))
12657 ? op0 : gen_reg_rtx (Pmode));
12658 op1 = machopic_indirect_data_reference (op1, temp);
12659 op1 = machopic_legitimize_pic_address (op1, mode,
12660 temp == op1 ? 0 : temp);
12662 else if (MACHOPIC_INDIRECT)
12663 op1 = machopic_indirect_data_reference (op1, 0);
12664 if (op0 == op1)
12665 return;
12666 #endif
12668 else
12670 if (MEM_P (op0))
12671 op1 = force_reg (Pmode, op1);
12672 else if (!TARGET_64BIT || !x86_64_movabs_operand (op1, Pmode))
12674 rtx reg = can_create_pseudo_p () ? NULL_RTX : op0;
12675 op1 = legitimize_pic_address (op1, reg);
12676 if (op0 == op1)
12677 return;
12681 else
12683 if (MEM_P (op0)
12684 && (PUSH_ROUNDING (GET_MODE_SIZE (mode)) != GET_MODE_SIZE (mode)
12685 || !push_operand (op0, mode))
12686 && MEM_P (op1))
12687 op1 = force_reg (mode, op1);
12689 if (push_operand (op0, mode)
12690 && ! general_no_elim_operand (op1, mode))
12691 op1 = copy_to_mode_reg (mode, op1);
12693       /* Force large constants in 64-bit compilation into a register
12694 	 so they get CSEed.  */
12695 if (can_create_pseudo_p ()
12696 && (mode == DImode) && TARGET_64BIT
12697 && immediate_operand (op1, mode)
12698 && !x86_64_zext_immediate_operand (op1, VOIDmode)
12699 && !register_operand (op0, mode)
12700 && optimize)
12701 op1 = copy_to_mode_reg (mode, op1);
12703 if (can_create_pseudo_p ()
12704 && FLOAT_MODE_P (mode)
12705 && GET_CODE (op1) == CONST_DOUBLE)
12707 /* If we are loading a floating point constant to a register,
12708 force the value to memory now, since we'll get better code
12709 	 out of the back end.  */
12711 op1 = validize_mem (force_const_mem (mode, op1));
12712 if (!register_operand (op0, mode))
12714 rtx temp = gen_reg_rtx (mode);
12715 emit_insn (gen_rtx_SET (VOIDmode, temp, op1));
12716 emit_move_insn (op0, temp);
12717 return;
12722 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12725 void
12726 ix86_expand_vector_move (enum machine_mode mode, rtx operands[])
12728 rtx op0 = operands[0], op1 = operands[1];
12729 unsigned int align = GET_MODE_ALIGNMENT (mode);
12731 /* Force constants other than zero into memory. We do not know how
12732 the instructions used to build constants modify the upper 64 bits
12733      of the register; once we have that information we may be able
12734 to handle some of them more efficiently. */
12735 if (can_create_pseudo_p ()
12736 && register_operand (op0, mode)
12737 && (CONSTANT_P (op1)
12738 || (GET_CODE (op1) == SUBREG
12739 && CONSTANT_P (SUBREG_REG (op1))))
12740 && !standard_sse_constant_p (op1))
12741 op1 = validize_mem (force_const_mem (mode, op1));
12743   /* We need to check memory alignment for SSE mode since the attribute
12744      can make operands unaligned.  */
12745 if (can_create_pseudo_p ()
12746 && SSE_REG_MODE_P (mode)
12747 && ((MEM_P (op0) && (MEM_ALIGN (op0) < align))
12748 || (MEM_P (op1) && (MEM_ALIGN (op1) < align))))
12750 rtx tmp[2];
12752 /* ix86_expand_vector_move_misalign() does not like constants ... */
12753 if (CONSTANT_P (op1)
12754 || (GET_CODE (op1) == SUBREG
12755 && CONSTANT_P (SUBREG_REG (op1))))
12756 op1 = validize_mem (force_const_mem (mode, op1));
12758 /* ... nor both arguments in memory. */
12759 if (!register_operand (op0, mode)
12760 && !register_operand (op1, mode))
12761 op1 = force_reg (mode, op1);
12763 tmp[0] = op0; tmp[1] = op1;
12764 ix86_expand_vector_move_misalign (mode, tmp);
12765 return;
12768 /* Make operand1 a register if it isn't already. */
12769 if (can_create_pseudo_p ()
12770 && !register_operand (op0, mode)
12771 && !register_operand (op1, mode))
12773 emit_move_insn (op0, force_reg (GET_MODE (op0), op1));
12774 return;
12777 emit_insn (gen_rtx_SET (VOIDmode, op0, op1));
12780 /* Implement the movmisalign patterns for SSE. Non-SSE modes go
12781 straight to ix86_expand_vector_move. */
12782 /* Code generation for scalar reg-reg moves of single and double precision data:
12783 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
12784 movaps reg, reg
12785 else
12786 movss reg, reg
12787 if (x86_sse_partial_reg_dependency == true)
12788 movapd reg, reg
12789 else
12790 movsd reg, reg
12792 Code generation for scalar loads of double precision data:
12793 if (x86_sse_split_regs == true)
12794 movlpd mem, reg (gas syntax)
12795 else
12796 movsd mem, reg
12798 Code generation for unaligned packed loads of single precision data
12799 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
12800 if (x86_sse_unaligned_move_optimal)
12801 movups mem, reg
12803 if (x86_sse_partial_reg_dependency == true)
12805 xorps reg, reg
12806 movlps mem, reg
12807 movhps mem+8, reg
12809 else
12811 movlps mem, reg
12812 movhps mem+8, reg
12815 Code generation for unaligned packed loads of double precision data
12816 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
12817 if (x86_sse_unaligned_move_optimal)
12818 movupd mem, reg
12820 if (x86_sse_split_regs == true)
12822 movlpd mem, reg
12823 movhpd mem+8, reg
12825 else
12827 movsd mem, reg
12828 movhpd mem+8, reg
12832 void
12833 ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
12835 rtx op0, op1, m;
12837 op0 = operands[0];
12838 op1 = operands[1];
12840 if (TARGET_AVX)
12842 switch (GET_MODE_CLASS (mode))
12844 case MODE_VECTOR_INT:
12845 case MODE_INT:
12846 switch (GET_MODE_SIZE (mode))
12848 case 16:
12849 op0 = gen_lowpart (V16QImode, op0);
12850 op1 = gen_lowpart (V16QImode, op1);
12851 emit_insn (gen_avx_movdqu (op0, op1));
12852 break;
12853 case 32:
12854 op0 = gen_lowpart (V32QImode, op0);
12855 op1 = gen_lowpart (V32QImode, op1);
12856 emit_insn (gen_avx_movdqu256 (op0, op1));
12857 break;
12858 default:
12859 gcc_unreachable ();
12861 break;
12862 case MODE_VECTOR_FLOAT:
12863 op0 = gen_lowpart (mode, op0);
12864 op1 = gen_lowpart (mode, op1);
12866 switch (mode)
12868 case V4SFmode:
12869 emit_insn (gen_avx_movups (op0, op1));
12870 break;
12871 case V8SFmode:
12872 emit_insn (gen_avx_movups256 (op0, op1));
12873 break;
12874 case V2DFmode:
12875 emit_insn (gen_avx_movupd (op0, op1));
12876 break;
12877 case V4DFmode:
12878 emit_insn (gen_avx_movupd256 (op0, op1));
12879 break;
12880 default:
12881 gcc_unreachable ();
12883 break;
12885 default:
12886 gcc_unreachable ();
12889 return;
12892 if (MEM_P (op1))
12894 /* If we're optimizing for size, movups is the smallest. */
12895 if (optimize_insn_for_size_p ())
12897 op0 = gen_lowpart (V4SFmode, op0);
12898 op1 = gen_lowpart (V4SFmode, op1);
12899 emit_insn (gen_sse_movups (op0, op1));
12900 return;
12903 /* ??? If we have typed data, then it would appear that using
12904 movdqu is the only way to get unaligned data loaded with
12905 integer type. */
12906 if (TARGET_SSE2 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12908 op0 = gen_lowpart (V16QImode, op0);
12909 op1 = gen_lowpart (V16QImode, op1);
12910 emit_insn (gen_sse2_movdqu (op0, op1));
12911 return;
12914 if (TARGET_SSE2 && mode == V2DFmode)
12916 rtx zero;
12918 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12920 op0 = gen_lowpart (V2DFmode, op0);
12921 op1 = gen_lowpart (V2DFmode, op1);
12922 emit_insn (gen_sse2_movupd (op0, op1));
12923 return;
12926 /* When SSE registers are split into halves, we can avoid
12927 writing to the top half twice. */
12928 if (TARGET_SSE_SPLIT_REGS)
12930 emit_clobber (op0);
12931 zero = op0;
12933 else
12935 /* ??? Not sure about the best option for the Intel chips.
12936 The following would seem to satisfy; the register is
12937 entirely cleared, breaking the dependency chain. We
12938 then store to the upper half, with a dependency depth
12939 of one. A rumor has it that Intel recommends two movsd
12940 followed by an unpacklpd, but this is unconfirmed. And
12941 given that the dependency depth of the unpacklpd would
12942 still be one, I'm not sure why this would be better. */
12943 zero = CONST0_RTX (V2DFmode);
12946 m = adjust_address (op1, DFmode, 0);
12947 emit_insn (gen_sse2_loadlpd (op0, zero, m));
12948 m = adjust_address (op1, DFmode, 8);
12949 emit_insn (gen_sse2_loadhpd (op0, op0, m));
12951 else
12953 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
12955 op0 = gen_lowpart (V4SFmode, op0);
12956 op1 = gen_lowpart (V4SFmode, op1);
12957 emit_insn (gen_sse_movups (op0, op1));
12958 return;
12961 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
12962 emit_move_insn (op0, CONST0_RTX (mode));
12963 else
12964 emit_clobber (op0);
12966 if (mode != V4SFmode)
12967 op0 = gen_lowpart (V4SFmode, op0);
12968 m = adjust_address (op1, V2SFmode, 0);
12969 emit_insn (gen_sse_loadlps (op0, op0, m));
12970 m = adjust_address (op1, V2SFmode, 8);
12971 emit_insn (gen_sse_loadhps (op0, op0, m));
12974 else if (MEM_P (op0))
12976 /* If we're optimizing for size, movups is the smallest. */
12977 if (optimize_insn_for_size_p ())
12979 op0 = gen_lowpart (V4SFmode, op0);
12980 op1 = gen_lowpart (V4SFmode, op1);
12981 emit_insn (gen_sse_movups (op0, op1));
12982 return;
12985 /* ??? Similar to above, only less clear because of quote
12986 typeless stores unquote. */
12987 if (TARGET_SSE2 && !TARGET_SSE_TYPELESS_STORES
12988 && GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
12990 op0 = gen_lowpart (V16QImode, op0);
12991 op1 = gen_lowpart (V16QImode, op1);
12992 emit_insn (gen_sse2_movdqu (op0, op1));
12993 return;
12996 if (TARGET_SSE2 && mode == V2DFmode)
12998 m = adjust_address (op0, DFmode, 0);
12999 emit_insn (gen_sse2_storelpd (m, op1));
13000 m = adjust_address (op0, DFmode, 8);
13001 emit_insn (gen_sse2_storehpd (m, op1));
13003 else
13005 if (mode != V4SFmode)
13006 op1 = gen_lowpart (V4SFmode, op1);
13007 m = adjust_address (op0, V2SFmode, 0);
13008 emit_insn (gen_sse_storelps (m, op1));
13009 m = adjust_address (op0, V2SFmode, 8);
13010 emit_insn (gen_sse_storehps (m, op1));
13013 else
13014 gcc_unreachable ();
13017 /* Expand a push in MODE. This is some mode for which we do not support
13018 proper push instructions, at least from the registers that we expect
13019 the value to live in. */
13021 void
13022 ix86_expand_push (enum machine_mode mode, rtx x)
13024 rtx tmp;
13026 tmp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
13027 GEN_INT (-GET_MODE_SIZE (mode)),
13028 stack_pointer_rtx, 1, OPTAB_DIRECT);
13029 if (tmp != stack_pointer_rtx)
13030 emit_move_insn (stack_pointer_rtx, tmp);
13032 tmp = gen_rtx_MEM (mode, stack_pointer_rtx);
13034   /* When we push an operand onto the stack, it has to be aligned at least
13035      at the function argument boundary.  However, since we don't have
13036 the argument type, we can't determine the actual argument
13037 boundary. */
13038 emit_move_insn (tmp, x);
13041 /* Helper function of ix86_fixup_binary_operands to canonicalize
13042 operand order. Returns true if the operands should be swapped. */
13044 static bool
13045 ix86_swap_binary_operands_p (enum rtx_code code, enum machine_mode mode,
13046 rtx operands[])
13048 rtx dst = operands[0];
13049 rtx src1 = operands[1];
13050 rtx src2 = operands[2];
13052 /* If the operation is not commutative, we can't do anything. */
13053 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH)
13054 return false;
13056 /* Highest priority is that src1 should match dst. */
13057 if (rtx_equal_p (dst, src1))
13058 return false;
13059 if (rtx_equal_p (dst, src2))
13060 return true;
13062 /* Next highest priority is that immediate constants come second. */
13063 if (immediate_operand (src2, mode))
13064 return false;
13065 if (immediate_operand (src1, mode))
13066 return true;
13068 /* Lowest priority is that memory references should come second. */
13069 if (MEM_P (src2))
13070 return false;
13071 if (MEM_P (src1))
13072 return true;
13074 return false;
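  /* Example (added for exposition): for a commutative operation such as
     (plus:SI (mem:SI A) (reg:SI B)) with a register destination distinct
     from both sources, the rules above request a swap so the memory
     reference ends up as src2, letting the insn use the reg-with-mem form
     and keeping src1 free to be matched with the destination.  */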
13078 /* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
13079 destination to use for the operation. If different from the true
13080 destination in operands[0], a copy operation will be required. */
13083 ix86_fixup_binary_operands (enum rtx_code code, enum machine_mode mode,
13084 rtx operands[])
13086 rtx dst = operands[0];
13087 rtx src1 = operands[1];
13088 rtx src2 = operands[2];
13090 /* Canonicalize operand order. */
13091 if (ix86_swap_binary_operands_p (code, mode, operands))
13093 rtx temp;
13095 /* It is invalid to swap operands of different modes. */
13096 gcc_assert (GET_MODE (src1) == GET_MODE (src2));
13098 temp = src1;
13099 src1 = src2;
13100 src2 = temp;
13103 /* Both source operands cannot be in memory. */
13104 if (MEM_P (src1) && MEM_P (src2))
13106 /* Optimization: Only read from memory once. */
13107 if (rtx_equal_p (src1, src2))
13109 src2 = force_reg (mode, src2);
13110 src1 = src2;
13112 else
13113 src2 = force_reg (mode, src2);
13116 /* If the destination is memory, and we do not have matching source
13117 operands, do things in registers. */
13118 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13119 dst = gen_reg_rtx (mode);
13121 /* Source 1 cannot be a constant. */
13122 if (CONSTANT_P (src1))
13123 src1 = force_reg (mode, src1);
13125 /* Source 1 cannot be a non-matching memory. */
13126 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13127 src1 = force_reg (mode, src1);
13129 operands[1] = src1;
13130 operands[2] = src2;
13131 return dst;
13134 /* Similarly, but assume that the destination has already been
13135 set up properly. */
13137 void
13138 ix86_fixup_binary_operands_no_copy (enum rtx_code code,
13139 enum machine_mode mode, rtx operands[])
13141 rtx dst = ix86_fixup_binary_operands (code, mode, operands);
13142 gcc_assert (dst == operands[0]);
13145 /* Attempt to expand a binary operator. Make the expansion closer to the
13146    actual machine, than just general_operand, which would allow 3 separate
13147    memory references (one output, two inputs) in a single insn.  */
13149 void
13150 ix86_expand_binary_operator (enum rtx_code code, enum machine_mode mode,
13151 rtx operands[])
13153 rtx src1, src2, dst, op, clob;
13155 dst = ix86_fixup_binary_operands (code, mode, operands);
13156 src1 = operands[1];
13157 src2 = operands[2];
13159 /* Emit the instruction. */
13161 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_ee (code, mode, src1, src2));
13162 if (reload_in_progress)
13164 /* Reload doesn't know about the flags register, and doesn't know that
13165 it doesn't want to clobber it. We can only do this with PLUS. */
13166 gcc_assert (code == PLUS);
13167 emit_insn (op);
13169 else
13171 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13172 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13175 /* Fix up the destination if needed. */
13176 if (dst != operands[0])
13177 emit_move_insn (operands[0], dst);
13180 /* Return TRUE or FALSE depending on whether the binary operator meets the
13181 appropriate constraints. */
13184 ix86_binary_operator_ok (enum rtx_code code, enum machine_mode mode,
13185 rtx operands[3])
13187 rtx dst = operands[0];
13188 rtx src1 = operands[1];
13189 rtx src2 = operands[2];
13191 /* Both source operands cannot be in memory. */
13192 if (MEM_P (src1) && MEM_P (src2))
13193 return 0;
13195 /* Canonicalize operand order for commutative operators. */
13196 if (ix86_swap_binary_operands_p (code, mode, operands))
13198 rtx temp = src1;
13199 src1 = src2;
13200 src2 = temp;
13203 /* If the destination is memory, we must have a matching source operand. */
13204 if (MEM_P (dst) && !rtx_equal_p (dst, src1))
13205 return 0;
13207 /* Source 1 cannot be a constant. */
13208 if (CONSTANT_P (src1))
13209 return 0;
13211 /* Source 1 cannot be a non-matching memory. */
13212 if (MEM_P (src1) && !rtx_equal_p (dst, src1))
13213 return 0;
13215 return 1;
13218 /* Attempt to expand a unary operator. Make the expansion closer to the
13219    actual machine, than just general_operand, which would allow 2 separate
13220 memory references (one output, one input) in a single insn. */
13222 void
13223 ix86_expand_unary_operator (enum rtx_code code, enum machine_mode mode,
13224 rtx operands[])
13226 int matching_memory;
13227 rtx src, dst, op, clob;
13229 dst = operands[0];
13230 src = operands[1];
13232 /* If the destination is memory, and we do not have matching source
13233 operands, do things in registers. */
13234 matching_memory = 0;
13235 if (MEM_P (dst))
13237 if (rtx_equal_p (dst, src))
13238 matching_memory = 1;
13239 else
13240 dst = gen_reg_rtx (mode);
13243 /* When source operand is memory, destination must match. */
13244 if (MEM_P (src) && !matching_memory)
13245 src = force_reg (mode, src);
13247 /* Emit the instruction. */
13249 op = gen_rtx_SET (VOIDmode, dst, gen_rtx_fmt_e (code, mode, src));
13250 if (reload_in_progress || code == NOT)
13252 /* Reload doesn't know about the flags register, and doesn't know that
13253 it doesn't want to clobber it. */
13254 gcc_assert (code == NOT);
13255 emit_insn (op);
13257 else
13259 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13260 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
13263 /* Fix up the destination if needed. */
13264 if (dst != operands[0])
13265 emit_move_insn (operands[0], dst);
13268 #define LEA_SEARCH_THRESHOLD 12
13270 /* Search backward for a non-agu definition of register number REGNO1
13271    or register number REGNO2 in INSN's basic block until we
13272    1. pass LEA_SEARCH_THRESHOLD instructions, or
13273    2. reach the BB boundary, or
13274    3. reach an agu definition.
13275 Returns the distance between the non-agu definition point and INSN.
13276 If no definition point, returns -1. */
13278 static int
13279 distance_non_agu_define (unsigned int regno1, unsigned int regno2,
13280 rtx insn)
13282 basic_block bb = BLOCK_FOR_INSN (insn);
13283 int distance = 0;
13284 df_ref *def_rec;
13285 enum attr_type insn_type;
13287 if (insn != BB_HEAD (bb))
13289 rtx prev = PREV_INSN (insn);
13290 while (prev && distance < LEA_SEARCH_THRESHOLD)
13292 if (INSN_P (prev))
13294 distance++;
13295 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13296 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13297 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13298 && (regno1 == DF_REF_REGNO (*def_rec)
13299 || regno2 == DF_REF_REGNO (*def_rec)))
13301 insn_type = get_attr_type (prev);
13302 if (insn_type != TYPE_LEA)
13303 goto done;
13306 if (prev == BB_HEAD (bb))
13307 break;
13308 prev = PREV_INSN (prev);
13312 if (distance < LEA_SEARCH_THRESHOLD)
13314 edge e;
13315 edge_iterator ei;
13316 bool simple_loop = false;
13318 FOR_EACH_EDGE (e, ei, bb->preds)
13319 if (e->src == bb)
13321 simple_loop = true;
13322 break;
13325 if (simple_loop)
13327 rtx prev = BB_END (bb);
13328 while (prev
13329 && prev != insn
13330 && distance < LEA_SEARCH_THRESHOLD)
13332 if (INSN_P (prev))
13334 distance++;
13335 for (def_rec = DF_INSN_DEFS (prev); *def_rec; def_rec++)
13336 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13337 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13338 && (regno1 == DF_REF_REGNO (*def_rec)
13339 || regno2 == DF_REF_REGNO (*def_rec)))
13341 insn_type = get_attr_type (prev);
13342 if (insn_type != TYPE_LEA)
13343 goto done;
13346 prev = PREV_INSN (prev);
13351 distance = -1;
13353 done:
13354 /* get_attr_type may modify recog data. We want to make sure
13355 that recog data is valid for instruction INSN, on which
13356 distance_non_agu_define is called. INSN is unchanged here. */
13357 extract_insn_cached (insn);
13358 return distance;
13361 /* Return the distance between INSN and the next insn that uses
13362    register number REGNO0 in a memory address.  Return -1 if no such
13363    use is found within LEA_SEARCH_THRESHOLD, or if REGNO0 is set.  */
13365 static int
13366 distance_agu_use (unsigned int regno0, rtx insn)
13368 basic_block bb = BLOCK_FOR_INSN (insn);
13369 int distance = 0;
13370 df_ref *def_rec;
13371 df_ref *use_rec;
13373 if (insn != BB_END (bb))
13375 rtx next = NEXT_INSN (insn);
13376 while (next && distance < LEA_SEARCH_THRESHOLD)
13378 if (INSN_P (next))
13380 distance++;
13382 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13383 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13384 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13385 && regno0 == DF_REF_REGNO (*use_rec))
13387 /* Return DISTANCE if OP0 is used in memory
13388 address in NEXT. */
13389 return distance;
13392 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13393 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13394 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13395 && regno0 == DF_REF_REGNO (*def_rec))
13397 /* Return -1 if OP0 is set in NEXT. */
13398 return -1;
13401 if (next == BB_END (bb))
13402 break;
13403 next = NEXT_INSN (next);
13407 if (distance < LEA_SEARCH_THRESHOLD)
13409 edge e;
13410 edge_iterator ei;
13411 bool simple_loop = false;
13413 FOR_EACH_EDGE (e, ei, bb->succs)
13414 if (e->dest == bb)
13416 simple_loop = true;
13417 break;
13420 if (simple_loop)
13422 rtx next = BB_HEAD (bb);
13423 while (next
13424 && next != insn
13425 && distance < LEA_SEARCH_THRESHOLD)
13427 if (INSN_P (next))
13429 distance++;
13431 for (use_rec = DF_INSN_USES (next); *use_rec; use_rec++)
13432 if ((DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_LOAD
13433 || DF_REF_TYPE (*use_rec) == DF_REF_REG_MEM_STORE)
13434 && regno0 == DF_REF_REGNO (*use_rec))
13436 /* Return DISTANCE if OP0 is used in memory
13437 address in NEXT. */
13438 return distance;
13441 for (def_rec = DF_INSN_DEFS (next); *def_rec; def_rec++)
13442 if (DF_REF_TYPE (*def_rec) == DF_REF_REG_DEF
13443 && !DF_REF_IS_ARTIFICIAL (*def_rec)
13444 && regno0 == DF_REF_REGNO (*def_rec))
13446 /* Return -1 if OP0 is set in NEXT. */
13447 return -1;
13451 next = NEXT_INSN (next);
13456 return -1;
13459 /* Define this macro to tune LEA priority vs ADD; it takes effect when
13460 there is a dilemma of choosing LEA or ADD.
13461 Negative value: ADD is preferred over LEA
13462 Zero: Neutral
13463 Positive value: LEA is preferred over ADD. */
13464 #define IX86_LEA_PRIORITY 2
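/* Illustrative reading of the heuristic below (the numbers are only
   examples): with a non-AGU definition 3 insns back (dist_define == 3)
   and an address use 4 insns ahead (dist_use == 4), 3 + 2 >= 4 keeps
   the LEA form; with dist_define == 1 and dist_use == 5, 1 + 2 < 5
   picks ADD.  */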
13466 /* Return true if it is ok to optimize an ADD operation to LEA
13467 operation to avoid flag register consumption.  For processors
13468 like ATOM, if the destination register of LEA holds an actual
13469 address which will be used soon, LEA is better; otherwise ADD
13470 is better. */
13472 bool
13473 ix86_lea_for_add_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13474 rtx insn, rtx operands[])
13476 unsigned int regno0 = true_regnum (operands[0]);
13477 unsigned int regno1 = true_regnum (operands[1]);
13478 unsigned int regno2;
13480 if (!TARGET_OPT_AGU || optimize_function_for_size_p (cfun))
13481 return regno0 != regno1;
13483 regno2 = true_regnum (operands[2]);
13485 /* If a = b + c, (a!=b && a!=c), must use lea form. */
13486 if (regno0 != regno1 && regno0 != regno2)
13487 return true;
13488 else
13490 int dist_define, dist_use;
13491 dist_define = distance_non_agu_define (regno1, regno2, insn);
13492 if (dist_define <= 0)
13493 return true;
13495 /* If this insn has both backward non-agu dependence and forward
13496 agu dependence, the one with the shorter distance takes effect. */
13497 dist_use = distance_agu_use (regno0, insn);
13498 if (dist_use <= 0
13499 || (dist_define + IX86_LEA_PRIORITY) < dist_use)
13500 return false;
13502 return true;
13506 /* Return true if destination reg of SET_BODY is shift count of
13507 USE_BODY. */
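/* Illustrative example: if SET_BODY is (set (reg:QI cx) ...) and
   USE_BODY is (set (reg:SI ax) (ashift (reg:SI ax) (reg:QI cx))),
   the destination of SET_BODY is the shift count of USE_BODY, so the
   function returns true.  */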
13509 static bool
13510 ix86_dep_by_shift_count_body (const_rtx set_body, const_rtx use_body)
13512 rtx set_dest;
13513 rtx shift_rtx;
13514 int i;
13516 /* Retrieve destination of SET_BODY. */
13517 switch (GET_CODE (set_body))
13519 case SET:
13520 set_dest = SET_DEST (set_body);
13521 if (!set_dest || !REG_P (set_dest))
13522 return false;
13523 break;
13524 case PARALLEL:
13525 for (i = XVECLEN (set_body, 0) - 1; i >= 0; i--)
13526 if (ix86_dep_by_shift_count_body (XVECEXP (set_body, 0, i),
13527 use_body))
13528 return true;
13529 default:
13530 return false;
13531 break;
13534 /* Retrieve shift count of USE_BODY. */
13535 switch (GET_CODE (use_body))
13537 case SET:
13538 shift_rtx = XEXP (use_body, 1);
13539 break;
13540 case PARALLEL:
13541 for (i = XVECLEN (use_body, 0) - 1; i >= 0; i--)
13542 if (ix86_dep_by_shift_count_body (set_body,
13543 XVECEXP (use_body, 0, i)))
13544 return true;
13545 default:
13546 return false;
13547 break;
13550 if (shift_rtx
13551 && (GET_CODE (shift_rtx) == ASHIFT
13552 || GET_CODE (shift_rtx) == LSHIFTRT
13553 || GET_CODE (shift_rtx) == ASHIFTRT
13554 || GET_CODE (shift_rtx) == ROTATE
13555 || GET_CODE (shift_rtx) == ROTATERT))
13557 rtx shift_count = XEXP (shift_rtx, 1);
13559 /* Return true if shift count is dest of SET_BODY. */
13560 if (REG_P (shift_count)
13561 && true_regnum (set_dest) == true_regnum (shift_count))
13562 return true;
13565 return false;
13568 /* Return true if destination reg of SET_INSN is shift count of
13569 USE_INSN. */
13571 bool
13572 ix86_dep_by_shift_count (const_rtx set_insn, const_rtx use_insn)
13574 return ix86_dep_by_shift_count_body (PATTERN (set_insn),
13575 PATTERN (use_insn));
13578 /* Return TRUE or FALSE depending on whether the unary operator meets the
13579 appropriate constraints. */
13582 ix86_unary_operator_ok (enum rtx_code code ATTRIBUTE_UNUSED,
13583 enum machine_mode mode ATTRIBUTE_UNUSED,
13584 rtx operands[2] ATTRIBUTE_UNUSED)
13586 /* If one of operands is memory, source and destination must match. */
13587 if ((MEM_P (operands[0])
13588 || MEM_P (operands[1]))
13589 && ! rtx_equal_p (operands[0], operands[1]))
13590 return FALSE;
13591 return TRUE;
13594 /* Post-reload splitter for converting an SF or DFmode value in an
13595 SSE register into an unsigned SImode. */
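/* Sketch of the idea (the number below is only an example): the input
   is compared against 2**31; values at or above it have 2**31
   subtracted before the signed conversion, and the sign bit is put
   back by XORing the integer result with the 0x80000000 mask obtained
   by shifting the comparison result left by 31.  E.g. 3000000000.0
   becomes 852516352 after the subtract and convert, and the final XOR
   yields 3000000000.  */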
13597 void
13598 ix86_split_convert_uns_si_sse (rtx operands[])
13600 enum machine_mode vecmode;
13601 rtx value, large, zero_or_two31, input, two31, x;
13603 large = operands[1];
13604 zero_or_two31 = operands[2];
13605 input = operands[3];
13606 two31 = operands[4];
13607 vecmode = GET_MODE (large);
13608 value = gen_rtx_REG (vecmode, REGNO (operands[0]));
13610 /* Load up the value into the low element. We must ensure that the other
13611 elements are valid floats -- zero is the easiest such value. */
13612 if (MEM_P (input))
13614 if (vecmode == V4SFmode)
13615 emit_insn (gen_vec_setv4sf_0 (value, CONST0_RTX (V4SFmode), input));
13616 else
13617 emit_insn (gen_sse2_loadlpd (value, CONST0_RTX (V2DFmode), input));
13619 else
13621 input = gen_rtx_REG (vecmode, REGNO (input));
13622 emit_move_insn (value, CONST0_RTX (vecmode));
13623 if (vecmode == V4SFmode)
13624 emit_insn (gen_sse_movss (value, value, input));
13625 else
13626 emit_insn (gen_sse2_movsd (value, value, input));
13629 emit_move_insn (large, two31);
13630 emit_move_insn (zero_or_two31, MEM_P (two31) ? large : two31);
13632 x = gen_rtx_fmt_ee (LE, vecmode, large, value);
13633 emit_insn (gen_rtx_SET (VOIDmode, large, x));
13635 x = gen_rtx_AND (vecmode, zero_or_two31, large);
13636 emit_insn (gen_rtx_SET (VOIDmode, zero_or_two31, x));
13638 x = gen_rtx_MINUS (vecmode, value, zero_or_two31);
13639 emit_insn (gen_rtx_SET (VOIDmode, value, x));
13641 large = gen_rtx_REG (V4SImode, REGNO (large));
13642 emit_insn (gen_ashlv4si3 (large, large, GEN_INT (31)));
13644 x = gen_rtx_REG (V4SImode, REGNO (value));
13645 if (vecmode == V4SFmode)
13646 emit_insn (gen_sse2_cvttps2dq (x, value));
13647 else
13648 emit_insn (gen_sse2_cvttpd2dq (x, value));
13649 value = x;
13651 emit_insn (gen_xorv4si3 (value, value, large));
13654 /* Convert an unsigned DImode value into a DFmode, using only SSE.
13655 Expects the 64-bit DImode to be supplied in a pair of integral
13656 registers. Requires SSE2; will use SSE3 if available. For x86_32,
13657 -mfpmath=sse, !optimize_size only. */
13659 void
13660 ix86_expand_convert_uns_didf_sse (rtx target, rtx input)
13662 REAL_VALUE_TYPE bias_lo_rvt, bias_hi_rvt;
13663 rtx int_xmm, fp_xmm;
13664 rtx biases, exponents;
13665 rtx x;
13667 int_xmm = gen_reg_rtx (V4SImode);
13668 if (TARGET_INTER_UNIT_MOVES)
13669 emit_insn (gen_movdi_to_sse (int_xmm, input));
13670 else if (TARGET_SSE_SPLIT_REGS)
13672 emit_clobber (int_xmm);
13673 emit_move_insn (gen_lowpart (DImode, int_xmm), input);
13675 else
13677 x = gen_reg_rtx (V2DImode);
13678 ix86_expand_vector_init_one_nonzero (false, V2DImode, x, input, 0);
13679 emit_move_insn (int_xmm, gen_lowpart (V4SImode, x));
13682 x = gen_rtx_CONST_VECTOR (V4SImode,
13683 gen_rtvec (4, GEN_INT (0x43300000UL),
13684 GEN_INT (0x45300000UL),
13685 const0_rtx, const0_rtx));
13686 exponents = validize_mem (force_const_mem (V4SImode, x));
13688 /* int_xmm = {0x45300000UL, fp_xmm/hi, 0x43300000, fp_xmm/lo } */
13689 emit_insn (gen_sse2_punpckldq (int_xmm, int_xmm, exponents));
13691 /* Concatenating (juxtaposing) (0x43300000UL ## fp_value_low_xmm)
13692 yields a valid DF value equal to (0x1.0p52 + double(fp_value_lo_xmm)).
13693 Similarly (0x45300000UL ## fp_value_hi_xmm) yields
13694 (0x1.0p84 + double(fp_value_hi_xmm)).
13695 Note these exponents differ by 32. */
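/* Illustrative example: for the 64-bit input 0x0000000500000003 the
   low half packs to the double 0x1.0p52 + 3 and the high half to
   0x1.0p84 + 5 * 0x1.0p32; after the bias subtraction below the two
   lanes hold exactly 3.0 and 5 * 2**32, and their sum is the desired
   value 0x500000003 as a double.  */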
13697 fp_xmm = copy_to_mode_reg (V2DFmode, gen_lowpart (V2DFmode, int_xmm));
13699 /* Subtract off those 0x1.0p52 and 0x1.0p84 biases, to produce values
13700 in [0,2**32-1] and [0]+[2**32,2**64-1] respectively. */
13701 real_ldexp (&bias_lo_rvt, &dconst1, 52);
13702 real_ldexp (&bias_hi_rvt, &dconst1, 84);
13703 biases = const_double_from_real_value (bias_lo_rvt, DFmode);
13704 x = const_double_from_real_value (bias_hi_rvt, DFmode);
13705 biases = gen_rtx_CONST_VECTOR (V2DFmode, gen_rtvec (2, biases, x));
13706 biases = validize_mem (force_const_mem (V2DFmode, biases));
13707 emit_insn (gen_subv2df3 (fp_xmm, fp_xmm, biases));
13709 /* Add the upper and lower DFmode values together. */
13710 if (TARGET_SSE3)
13711 emit_insn (gen_sse3_haddv2df3 (fp_xmm, fp_xmm, fp_xmm));
13712 else
13714 x = copy_to_mode_reg (V2DFmode, fp_xmm);
13715 emit_insn (gen_sse2_unpckhpd (fp_xmm, fp_xmm, fp_xmm));
13716 emit_insn (gen_addv2df3 (fp_xmm, fp_xmm, x));
13719 ix86_expand_vector_extract (false, target, fp_xmm, 0);
13722 /* Not used, but eases macroization of patterns. */
13723 void
13724 ix86_expand_convert_uns_sixf_sse (rtx target ATTRIBUTE_UNUSED,
13725 rtx input ATTRIBUTE_UNUSED)
13727 gcc_unreachable ();
13730 /* Convert an unsigned SImode value into a DFmode. Only currently used
13731 for SSE, but applicable anywhere. */
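/* A minimal sketch of the trick used here: the unsigned input is
   biased by -2**31 with a plain wrapping add, converted with the
   signed SImode->DFmode conversion, and the bias is added back as the
   DFmode constant 2**31.  E.g. 4294967295 wraps to 2147483647,
   converts to 2147483647.0, and adding 2147483648.0 restores
   4294967295.0.  */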
13733 void
13734 ix86_expand_convert_uns_sidf_sse (rtx target, rtx input)
13736 REAL_VALUE_TYPE TWO31r;
13737 rtx x, fp;
13739 x = expand_simple_binop (SImode, PLUS, input, GEN_INT (-2147483647 - 1),
13740 NULL, 1, OPTAB_DIRECT);
13742 fp = gen_reg_rtx (DFmode);
13743 emit_insn (gen_floatsidf2 (fp, x));
13745 real_ldexp (&TWO31r, &dconst1, 31);
13746 x = const_double_from_real_value (TWO31r, DFmode);
13748 x = expand_simple_binop (DFmode, PLUS, fp, x, target, 0, OPTAB_DIRECT);
13749 if (x != target)
13750 emit_move_insn (target, x);
13753 /* Convert a signed DImode value into a DFmode. Only used for SSE in
13754 32-bit mode; otherwise we have a direct convert instruction. */
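/* Sketch: the signed 64-bit value hi:lo is rebuilt as
   double (hi) * 2**32 + unsigned_double (lo), where the high half uses
   the ordinary signed SImode conversion (carrying the sign) and the
   low half uses ix86_expand_convert_uns_sidf_sse above.  For example
   -6 (hi == -1, lo == 0xfffffffa) gives -1 * 2**32 + 4294967290 = -6.  */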
13756 void
13757 ix86_expand_convert_sign_didf_sse (rtx target, rtx input)
13759 REAL_VALUE_TYPE TWO32r;
13760 rtx fp_lo, fp_hi, x;
13762 fp_lo = gen_reg_rtx (DFmode);
13763 fp_hi = gen_reg_rtx (DFmode);
13765 emit_insn (gen_floatsidf2 (fp_hi, gen_highpart (SImode, input)));
13767 real_ldexp (&TWO32r, &dconst1, 32);
13768 x = const_double_from_real_value (TWO32r, DFmode);
13769 fp_hi = expand_simple_binop (DFmode, MULT, fp_hi, x, fp_hi, 0, OPTAB_DIRECT);
13771 ix86_expand_convert_uns_sidf_sse (fp_lo, gen_lowpart (SImode, input));
13773 x = expand_simple_binop (DFmode, PLUS, fp_hi, fp_lo, target,
13774 0, OPTAB_DIRECT);
13775 if (x != target)
13776 emit_move_insn (target, x);
13779 /* Convert an unsigned SImode value into a SFmode, using only SSE.
13780 For x86_32, -mfpmath=sse, !optimize_size only. */
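/* Sketch: the 32-bit input is split into 16-bit halves so that each
   half converts exactly; the result is float (hi16) * 65536.0
   + float (lo16).  E.g. 1000000 splits into hi16 == 15 and
   lo16 == 16960, and 15 * 65536.0 + 16960.0 == 1000000.0.  */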
13781 void
13782 ix86_expand_convert_uns_sisf_sse (rtx target, rtx input)
13784 REAL_VALUE_TYPE ONE16r;
13785 rtx fp_hi, fp_lo, int_hi, int_lo, x;
13787 real_ldexp (&ONE16r, &dconst1, 16);
13788 x = const_double_from_real_value (ONE16r, SFmode);
13789 int_lo = expand_simple_binop (SImode, AND, input, GEN_INT(0xffff),
13790 NULL, 0, OPTAB_DIRECT);
13791 int_hi = expand_simple_binop (SImode, LSHIFTRT, input, GEN_INT(16),
13792 NULL, 0, OPTAB_DIRECT);
13793 fp_hi = gen_reg_rtx (SFmode);
13794 fp_lo = gen_reg_rtx (SFmode);
13795 emit_insn (gen_floatsisf2 (fp_hi, int_hi));
13796 emit_insn (gen_floatsisf2 (fp_lo, int_lo));
13797 fp_hi = expand_simple_binop (SFmode, MULT, fp_hi, x, fp_hi,
13798 0, OPTAB_DIRECT);
13799 fp_hi = expand_simple_binop (SFmode, PLUS, fp_hi, fp_lo, target,
13800 0, OPTAB_DIRECT);
13801 if (!rtx_equal_p (target, fp_hi))
13802 emit_move_insn (target, fp_hi);
13805 /* A subroutine of ix86_build_signbit_mask. If VECT is true,
13806 then replicate the value for all elements of the vector
13807 register. */
13810 ix86_build_const_vector (enum machine_mode mode, bool vect, rtx value)
13812 rtvec v;
13813 switch (mode)
13815 case SImode:
13816 gcc_assert (vect);
13817 v = gen_rtvec (4, value, value, value, value);
13818 return gen_rtx_CONST_VECTOR (V4SImode, v);
13820 case DImode:
13821 gcc_assert (vect);
13822 v = gen_rtvec (2, value, value);
13823 return gen_rtx_CONST_VECTOR (V2DImode, v);
13825 case SFmode:
13826 if (vect)
13827 v = gen_rtvec (4, value, value, value, value);
13828 else
13829 v = gen_rtvec (4, value, CONST0_RTX (SFmode),
13830 CONST0_RTX (SFmode), CONST0_RTX (SFmode));
13831 return gen_rtx_CONST_VECTOR (V4SFmode, v);
13833 case DFmode:
13834 if (vect)
13835 v = gen_rtvec (2, value, value);
13836 else
13837 v = gen_rtvec (2, value, CONST0_RTX (DFmode));
13838 return gen_rtx_CONST_VECTOR (V2DFmode, v);
13840 default:
13841 gcc_unreachable ();
13845 /* A subroutine of ix86_expand_fp_absneg_operator, copysign expanders
13846 and ix86_expand_int_vcond. Create a mask for the sign bit in MODE
13847 for an SSE register. If VECT is true, then replicate the mask for
13848 all elements of the vector register. If INVERT is true, then create
13849 a mask excluding the sign bit. */
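/* For example, for SFmode the mask word is 0x80000000 (0x7fffffff when
   INVERT is true), replicated across all elements when VECT is set;
   negation is then a single XOR with the sign-bit mask and fabs a
   single AND with the inverted mask, as done in
   ix86_expand_fp_absneg_operator below.  */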
13852 ix86_build_signbit_mask (enum machine_mode mode, bool vect, bool invert)
13854 enum machine_mode vec_mode, imode;
13855 HOST_WIDE_INT hi, lo;
13856 int shift = 63;
13857 rtx v;
13858 rtx mask;
13860 /* Find the sign bit, sign extended to 2*HWI. */
13861 switch (mode)
13863 case SImode:
13864 case SFmode:
13865 imode = SImode;
13866 vec_mode = (mode == SImode) ? V4SImode : V4SFmode;
13867 lo = 0x80000000, hi = lo < 0;
13868 break;
13870 case DImode:
13871 case DFmode:
13872 imode = DImode;
13873 vec_mode = (mode == DImode) ? V2DImode : V2DFmode;
13874 if (HOST_BITS_PER_WIDE_INT >= 64)
13875 lo = (HOST_WIDE_INT)1 << shift, hi = -1;
13876 else
13877 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
13878 break;
13880 case TImode:
13881 case TFmode:
13882 vec_mode = VOIDmode;
13883 if (HOST_BITS_PER_WIDE_INT >= 64)
13885 imode = TImode;
13886 lo = 0, hi = (HOST_WIDE_INT)1 << shift;
13888 else
13890 rtvec vec;
13892 imode = DImode;
13893 lo = 0, hi = (HOST_WIDE_INT)1 << (shift - HOST_BITS_PER_WIDE_INT);
13895 if (invert)
13897 lo = ~lo, hi = ~hi;
13898 v = constm1_rtx;
13900 else
13901 v = const0_rtx;
13903 mask = immed_double_const (lo, hi, imode);
13905 vec = gen_rtvec (2, v, mask);
13906 v = gen_rtx_CONST_VECTOR (V2DImode, vec);
13907 v = copy_to_mode_reg (mode, gen_lowpart (mode, v));
13909 return v;
13911 break;
13913 default:
13914 gcc_unreachable ();
13917 if (invert)
13918 lo = ~lo, hi = ~hi;
13920 /* Force this value into the low part of a fp vector constant. */
13921 mask = immed_double_const (lo, hi, imode);
13922 mask = gen_lowpart (mode, mask);
13924 if (vec_mode == VOIDmode)
13925 return force_reg (mode, mask);
13927 v = ix86_build_const_vector (mode, vect, mask);
13928 return force_reg (vec_mode, v);
13931 /* Generate code for floating point ABS or NEG. */
13933 void
13934 ix86_expand_fp_absneg_operator (enum rtx_code code, enum machine_mode mode,
13935 rtx operands[])
13937 rtx mask, set, use, clob, dst, src;
13938 bool use_sse = false;
13939 bool vector_mode = VECTOR_MODE_P (mode);
13940 enum machine_mode elt_mode = mode;
13942 if (vector_mode)
13944 elt_mode = GET_MODE_INNER (mode);
13945 use_sse = true;
13947 else if (mode == TFmode)
13948 use_sse = true;
13949 else if (TARGET_SSE_MATH)
13950 use_sse = SSE_FLOAT_MODE_P (mode);
13952 /* NEG and ABS performed with SSE use bitwise mask operations.
13953 Create the appropriate mask now. */
13954 if (use_sse)
13955 mask = ix86_build_signbit_mask (elt_mode, vector_mode, code == ABS);
13956 else
13957 mask = NULL_RTX;
13959 dst = operands[0];
13960 src = operands[1];
13962 if (vector_mode)
13964 set = gen_rtx_fmt_ee (code == NEG ? XOR : AND, mode, src, mask);
13965 set = gen_rtx_SET (VOIDmode, dst, set);
13966 emit_insn (set);
13968 else
13970 set = gen_rtx_fmt_e (code, mode, src);
13971 set = gen_rtx_SET (VOIDmode, dst, set);
13972 if (mask)
13974 use = gen_rtx_USE (VOIDmode, mask);
13975 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
13976 emit_insn (gen_rtx_PARALLEL (VOIDmode,
13977 gen_rtvec (3, set, use, clob)));
13979 else
13980 emit_insn (set);
13984 /* Expand a copysign operation. Special case operand 0 being a constant. */
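/* The expansion boils down to (x & ~sign_mask) | (y & sign_mask),
   where x supplies the magnitude and y the sign; when x is a constant
   it is pre-masked (made non-negative) at expand time.  E.g.
   copysign (1.0, -0.0) keeps the magnitude bits of 1.0 and takes the
   sign bit of -0.0, giving -1.0.  */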
13986 void
13987 ix86_expand_copysign (rtx operands[])
13989 enum machine_mode mode;
13990 rtx dest, op0, op1, mask, nmask;
13992 dest = operands[0];
13993 op0 = operands[1];
13994 op1 = operands[2];
13996 mode = GET_MODE (dest);
13998 if (GET_CODE (op0) == CONST_DOUBLE)
14000 rtx (*copysign_insn)(rtx, rtx, rtx, rtx);
14002 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
14003 op0 = simplify_unary_operation (ABS, mode, op0, mode);
14005 if (mode == SFmode || mode == DFmode)
14007 enum machine_mode vmode;
14009 vmode = mode == SFmode ? V4SFmode : V2DFmode;
14011 if (op0 == CONST0_RTX (mode))
14012 op0 = CONST0_RTX (vmode);
14013 else
14015 rtx v = ix86_build_const_vector (mode, false, op0);
14017 op0 = force_reg (vmode, v);
14020 else if (op0 != CONST0_RTX (mode))
14021 op0 = force_reg (mode, op0);
14023 mask = ix86_build_signbit_mask (mode, 0, 0);
14025 if (mode == SFmode)
14026 copysign_insn = gen_copysignsf3_const;
14027 else if (mode == DFmode)
14028 copysign_insn = gen_copysigndf3_const;
14029 else
14030 copysign_insn = gen_copysigntf3_const;
14032 emit_insn (copysign_insn (dest, op0, op1, mask));
14034 else
14036 rtx (*copysign_insn)(rtx, rtx, rtx, rtx, rtx, rtx);
14038 nmask = ix86_build_signbit_mask (mode, 0, 1);
14039 mask = ix86_build_signbit_mask (mode, 0, 0);
14041 if (mode == SFmode)
14042 copysign_insn = gen_copysignsf3_var;
14043 else if (mode == DFmode)
14044 copysign_insn = gen_copysigndf3_var;
14045 else
14046 copysign_insn = gen_copysigntf3_var;
14048 emit_insn (copysign_insn (dest, NULL_RTX, op0, op1, nmask, mask));
14052 /* Deconstruct a copysign operation into bit masks. Operand 0 is known to
14053 be a constant, and so has already been expanded into a vector constant. */
14055 void
14056 ix86_split_copysign_const (rtx operands[])
14058 enum machine_mode mode, vmode;
14059 rtx dest, op0, mask, x;
14061 dest = operands[0];
14062 op0 = operands[1];
14063 mask = operands[3];
14065 mode = GET_MODE (dest);
14066 vmode = GET_MODE (mask);
14068 dest = simplify_gen_subreg (vmode, dest, mode, 0);
14069 x = gen_rtx_AND (vmode, dest, mask);
14070 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14072 if (op0 != CONST0_RTX (vmode))
14074 x = gen_rtx_IOR (vmode, dest, op0);
14075 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14079 /* Deconstruct a copysign operation into bit masks. Operand 0 is variable,
14080 so we have to do two masks. */
14082 void
14083 ix86_split_copysign_var (rtx operands[])
14085 enum machine_mode mode, vmode;
14086 rtx dest, scratch, op0, op1, mask, nmask, x;
14088 dest = operands[0];
14089 scratch = operands[1];
14090 op0 = operands[2];
14091 op1 = operands[3];
14092 nmask = operands[4];
14093 mask = operands[5];
14095 mode = GET_MODE (dest);
14096 vmode = GET_MODE (mask);
14098 if (rtx_equal_p (op0, op1))
14100 /* Shouldn't happen often (it's useless, obviously), but when it does
14101 we'd generate incorrect code if we continue below. */
14102 emit_move_insn (dest, op0);
14103 return;
14106 if (REG_P (mask) && REGNO (dest) == REGNO (mask)) /* alternative 0 */
14108 gcc_assert (REGNO (op1) == REGNO (scratch));
14110 x = gen_rtx_AND (vmode, scratch, mask);
14111 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14113 dest = mask;
14114 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14115 x = gen_rtx_NOT (vmode, dest);
14116 x = gen_rtx_AND (vmode, x, op0);
14117 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14119 else
14121 if (REGNO (op1) == REGNO (scratch)) /* alternative 1,3 */
14123 x = gen_rtx_AND (vmode, scratch, mask);
14125 else /* alternative 2,4 */
14127 gcc_assert (REGNO (mask) == REGNO (scratch));
14128 op1 = simplify_gen_subreg (vmode, op1, mode, 0);
14129 x = gen_rtx_AND (vmode, scratch, op1);
14131 emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
14133 if (REGNO (op0) == REGNO (dest)) /* alternative 1,2 */
14135 dest = simplify_gen_subreg (vmode, op0, mode, 0);
14136 x = gen_rtx_AND (vmode, dest, nmask);
14138 else /* alternative 3,4 */
14140 gcc_assert (REGNO (nmask) == REGNO (dest));
14141 dest = nmask;
14142 op0 = simplify_gen_subreg (vmode, op0, mode, 0);
14143 x = gen_rtx_AND (vmode, dest, op0);
14145 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14148 x = gen_rtx_IOR (vmode, dest, scratch);
14149 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
14152 /* Return TRUE or FALSE depending on whether the first SET in INSN
14153 has source and destination with matching CC modes, and that the
14154 CC mode is at least as constrained as REQ_MODE. */
14157 ix86_match_ccmode (rtx insn, enum machine_mode req_mode)
14159 rtx set;
14160 enum machine_mode set_mode;
14162 set = PATTERN (insn);
14163 if (GET_CODE (set) == PARALLEL)
14164 set = XVECEXP (set, 0, 0);
14165 gcc_assert (GET_CODE (set) == SET);
14166 gcc_assert (GET_CODE (SET_SRC (set)) == COMPARE);
14168 set_mode = GET_MODE (SET_DEST (set));
14169 switch (set_mode)
14171 case CCNOmode:
14172 if (req_mode != CCNOmode
14173 && (req_mode != CCmode
14174 || XEXP (SET_SRC (set), 1) != const0_rtx))
14175 return 0;
14176 break;
14177 case CCmode:
14178 if (req_mode == CCGCmode)
14179 return 0;
14180 /* FALLTHRU */
14181 case CCGCmode:
14182 if (req_mode == CCGOCmode || req_mode == CCNOmode)
14183 return 0;
14184 /* FALLTHRU */
14185 case CCGOCmode:
14186 if (req_mode == CCZmode)
14187 return 0;
14188 /* FALLTHRU */
14189 case CCAmode:
14190 case CCCmode:
14191 case CCOmode:
14192 case CCSmode:
14193 case CCZmode:
14194 break;
14196 default:
14197 gcc_unreachable ();
14200 return (GET_MODE (SET_SRC (set)) == set_mode);
14203 /* Generate insn patterns to do an integer compare of OPERANDS. */
14205 static rtx
14206 ix86_expand_int_compare (enum rtx_code code, rtx op0, rtx op1)
14208 enum machine_mode cmpmode;
14209 rtx tmp, flags;
14211 cmpmode = SELECT_CC_MODE (code, op0, op1);
14212 flags = gen_rtx_REG (cmpmode, FLAGS_REG);
14214 /* This is very simple, but making the interface the same as in the
14215 FP case makes the rest of the code easier. */
14216 tmp = gen_rtx_COMPARE (cmpmode, op0, op1);
14217 emit_insn (gen_rtx_SET (VOIDmode, flags, tmp));
14219 /* Return the test that should be put into the flags user, i.e.
14220 the bcc, scc, or cmov instruction. */
14221 return gen_rtx_fmt_ee (code, VOIDmode, flags, const0_rtx);
14224 /* Figure out whether to use ordered or unordered fp comparisons.
14225 Return the appropriate mode to use. */
14227 enum machine_mode
14228 ix86_fp_compare_mode (enum rtx_code code ATTRIBUTE_UNUSED)
14230 /* ??? In order to make all comparisons reversible, we do all comparisons
14231 non-trapping when compiling for IEEE. Once gcc is able to distinguish
14232 all forms trapping and nontrapping comparisons, we can make inequality
14233 comparisons trapping again, since it results in better code when using
14234 FCOM based compares. */
14235 return TARGET_IEEE_FP ? CCFPUmode : CCFPmode;
14238 enum machine_mode
14239 ix86_cc_mode (enum rtx_code code, rtx op0, rtx op1)
14241 enum machine_mode mode = GET_MODE (op0);
14243 if (SCALAR_FLOAT_MODE_P (mode))
14245 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14246 return ix86_fp_compare_mode (code);
14249 switch (code)
14251 /* Only zero flag is needed. */
14252 case EQ: /* ZF=0 */
14253 case NE: /* ZF!=0 */
14254 return CCZmode;
14255 /* Codes needing carry flag. */
14256 case GEU: /* CF=0 */
14257 case LTU: /* CF=1 */
14258 /* Detect overflow checks. They need just the carry flag. */
14259 if (GET_CODE (op0) == PLUS
14260 && rtx_equal_p (op1, XEXP (op0, 0)))
14261 return CCCmode;
14262 else
14263 return CCmode;
14264 case GTU: /* CF=0 & ZF=0 */
14265 case LEU: /* CF=1 | ZF=1 */
14266 /* Detect overflow checks. They need just the carry flag. */
14267 if (GET_CODE (op0) == MINUS
14268 && rtx_equal_p (op1, XEXP (op0, 0)))
14269 return CCCmode;
14270 else
14271 return CCmode;
14272 /* Codes possibly doable only with sign flag when
14273 comparing against zero. */
14274 case GE: /* SF=OF or SF=0 */
14275 case LT: /* SF<>OF or SF=1 */
14276 if (op1 == const0_rtx)
14277 return CCGOCmode;
14278 else
14279 /* For other cases Carry flag is not required. */
14280 return CCGCmode;
14281 /* Codes doable only with the sign flag when comparing
14282 against zero, but we lack a jump instruction for it,
14283 so we need to use relational tests against overflow,
14284 which thus needs to be zero. */
14285 case GT: /* ZF=0 & SF=OF */
14286 case LE: /* ZF=1 | SF<>OF */
14287 if (op1 == const0_rtx)
14288 return CCNOmode;
14289 else
14290 return CCGCmode;
14291 /* The strcmp pattern does (use flags) and combine may ask us for the proper
14292 mode. */
14293 case USE:
14294 return CCmode;
14295 default:
14296 gcc_unreachable ();
14300 /* Return the fixed registers used for condition codes. */
14302 static bool
14303 ix86_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
14305 *p1 = FLAGS_REG;
14306 *p2 = FPSR_REG;
14307 return true;
14310 /* If two condition code modes are compatible, return a condition code
14311 mode which is compatible with both. Otherwise, return
14312 VOIDmode. */
14314 static enum machine_mode
14315 ix86_cc_modes_compatible (enum machine_mode m1, enum machine_mode m2)
14317 if (m1 == m2)
14318 return m1;
14320 if (GET_MODE_CLASS (m1) != MODE_CC || GET_MODE_CLASS (m2) != MODE_CC)
14321 return VOIDmode;
14323 if ((m1 == CCGCmode && m2 == CCGOCmode)
14324 || (m1 == CCGOCmode && m2 == CCGCmode))
14325 return CCGCmode;
14327 switch (m1)
14329 default:
14330 gcc_unreachable ();
14332 case CCmode:
14333 case CCGCmode:
14334 case CCGOCmode:
14335 case CCNOmode:
14336 case CCAmode:
14337 case CCCmode:
14338 case CCOmode:
14339 case CCSmode:
14340 case CCZmode:
14341 switch (m2)
14343 default:
14344 return VOIDmode;
14346 case CCmode:
14347 case CCGCmode:
14348 case CCGOCmode:
14349 case CCNOmode:
14350 case CCAmode:
14351 case CCCmode:
14352 case CCOmode:
14353 case CCSmode:
14354 case CCZmode:
14355 return CCmode;
14358 case CCFPmode:
14359 case CCFPUmode:
14360 /* These are only compatible with themselves, which we already
14361 checked above. */
14362 return VOIDmode;
14367 /* Return a comparison we can do that is equivalent to
14368 swap_condition (code), apart possibly from orderedness.
14369 Never change orderedness if TARGET_IEEE_FP, returning
14370 UNKNOWN in that case if necessary. */
14372 static enum rtx_code
14373 ix86_fp_swap_condition (enum rtx_code code)
14375 switch (code)
14377 case GT: /* GTU - CF=0 & ZF=0 */
14378 return TARGET_IEEE_FP ? UNKNOWN : UNLT;
14379 case GE: /* GEU - CF=0 */
14380 return TARGET_IEEE_FP ? UNKNOWN : UNLE;
14381 case UNLT: /* LTU - CF=1 */
14382 return TARGET_IEEE_FP ? UNKNOWN : GT;
14383 case UNLE: /* LEU - CF=1 | ZF=1 */
14384 return TARGET_IEEE_FP ? UNKNOWN : GE;
14385 default:
14386 return swap_condition (code);
14390 /* Return the cost of comparison CODE using the best strategy for performance.
14391 All following functions use the number of instructions as the cost metric.
14392 In the future this should be tweaked to compute bytes for optimize_size and
14393 take into account the performance of various instructions on various CPUs. */
14395 static int
14396 ix86_fp_comparison_cost (enum rtx_code code)
14398 int arith_cost;
14400 /* The cost of code using bit-twiddling on %ah. */
14401 switch (code)
14403 case UNLE:
14404 case UNLT:
14405 case LTGT:
14406 case GT:
14407 case GE:
14408 case UNORDERED:
14409 case ORDERED:
14410 case UNEQ:
14411 arith_cost = 4;
14412 break;
14413 case LT:
14414 case NE:
14415 case EQ:
14416 case UNGE:
14417 arith_cost = TARGET_IEEE_FP ? 5 : 4;
14418 break;
14419 case LE:
14420 case UNGT:
14421 arith_cost = TARGET_IEEE_FP ? 6 : 4;
14422 break;
14423 default:
14424 gcc_unreachable ();
14427 switch (ix86_fp_comparison_strategy (code))
14429 case IX86_FPCMP_COMI:
14430 return arith_cost > 4 ? 3 : 2;
14431 case IX86_FPCMP_SAHF:
14432 return arith_cost > 4 ? 4 : 3;
14433 default:
14434 return arith_cost;
14438 /* Return the strategy to use for a floating-point compare.  We assume that fcomi
14439 is always preferable where available, since that is also true when looking at size
14440 (2 bytes, vs. 3 for fnstsw+sahf and at least 5 for fnstsw+test). */
14442 enum ix86_fpcmp_strategy
14443 ix86_fp_comparison_strategy (enum rtx_code code ATTRIBUTE_UNUSED)
14445 /* Do fcomi/sahf based test when profitable. */
14447 if (TARGET_CMOVE)
14448 return IX86_FPCMP_COMI;
14450 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_function_for_size_p (cfun)))
14451 return IX86_FPCMP_SAHF;
14453 return IX86_FPCMP_ARITH;
14456 /* Swap, force into registers, or otherwise massage the two operands
14457 to a fp comparison. The operands are updated in place; the new
14458 comparison code is returned. */
14460 static enum rtx_code
14461 ix86_prepare_fp_compare_args (enum rtx_code code, rtx *pop0, rtx *pop1)
14463 enum machine_mode fpcmp_mode = ix86_fp_compare_mode (code);
14464 rtx op0 = *pop0, op1 = *pop1;
14465 enum machine_mode op_mode = GET_MODE (op0);
14466 int is_sse = TARGET_SSE_MATH && SSE_FLOAT_MODE_P (op_mode);
14468 /* All of the unordered compare instructions only work on registers.
14469 The same is true of the fcomi compare instructions. The XFmode
14470 compare instructions require registers except when comparing
14471 against zero or when converting operand 1 from fixed point to
14472 floating point. */
14474 if (!is_sse
14475 && (fpcmp_mode == CCFPUmode
14476 || (op_mode == XFmode
14477 && ! (standard_80387_constant_p (op0) == 1
14478 || standard_80387_constant_p (op1) == 1)
14479 && GET_CODE (op1) != FLOAT)
14480 || ix86_fp_comparison_strategy (code) == IX86_FPCMP_COMI))
14482 op0 = force_reg (op_mode, op0);
14483 op1 = force_reg (op_mode, op1);
14485 else
14487 /* %%% We only allow op1 in memory; op0 must be st(0). So swap
14488 things around if they appear profitable, otherwise force op0
14489 into a register. */
14491 if (standard_80387_constant_p (op0) == 0
14492 || (MEM_P (op0)
14493 && ! (standard_80387_constant_p (op1) == 0
14494 || MEM_P (op1))))
14496 enum rtx_code new_code = ix86_fp_swap_condition (code);
14497 if (new_code != UNKNOWN)
14499 rtx tmp;
14500 tmp = op0, op0 = op1, op1 = tmp;
14501 code = new_code;
14505 if (!REG_P (op0))
14506 op0 = force_reg (op_mode, op0);
14508 if (CONSTANT_P (op1))
14510 int tmp = standard_80387_constant_p (op1);
14511 if (tmp == 0)
14512 op1 = validize_mem (force_const_mem (op_mode, op1));
14513 else if (tmp == 1)
14515 if (TARGET_CMOVE)
14516 op1 = force_reg (op_mode, op1);
14518 else
14519 op1 = force_reg (op_mode, op1);
14523 /* Try to rearrange the comparison to make it cheaper. */
14524 if (ix86_fp_comparison_cost (code)
14525 > ix86_fp_comparison_cost (swap_condition (code))
14526 && (REG_P (op1) || can_create_pseudo_p ()))
14528 rtx tmp;
14529 tmp = op0, op0 = op1, op1 = tmp;
14530 code = swap_condition (code);
14531 if (!REG_P (op0))
14532 op0 = force_reg (op_mode, op0);
14535 *pop0 = op0;
14536 *pop1 = op1;
14537 return code;
14540 /* Convert comparison codes we use to represent FP comparison to integer
14541 code that will result in proper branch. Return UNKNOWN if no such code
14542 is available. */
14544 enum rtx_code
14545 ix86_fp_compare_code_to_integer (enum rtx_code code)
14547 switch (code)
14549 case GT:
14550 return GTU;
14551 case GE:
14552 return GEU;
14553 case ORDERED:
14554 case UNORDERED:
14555 return code;
14556 break;
14557 case UNEQ:
14558 return EQ;
14559 break;
14560 case UNLT:
14561 return LTU;
14562 break;
14563 case UNLE:
14564 return LEU;
14565 break;
14566 case LTGT:
14567 return NE;
14568 break;
14569 default:
14570 return UNKNOWN;
14574 /* Generate insn patterns to do a floating point compare of OPERANDS. */
14576 static rtx
14577 ix86_expand_fp_compare (enum rtx_code code, rtx op0, rtx op1, rtx scratch)
14579 enum machine_mode fpcmp_mode, intcmp_mode;
14580 rtx tmp, tmp2;
14582 fpcmp_mode = ix86_fp_compare_mode (code);
14583 code = ix86_prepare_fp_compare_args (code, &op0, &op1);
14585 /* Do fcomi/sahf based test when profitable. */
14586 switch (ix86_fp_comparison_strategy (code))
14588 case IX86_FPCMP_COMI:
14589 intcmp_mode = fpcmp_mode;
14590 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14591 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14592 tmp);
14593 emit_insn (tmp);
14594 break;
14596 case IX86_FPCMP_SAHF:
14597 intcmp_mode = fpcmp_mode;
14598 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14599 tmp = gen_rtx_SET (VOIDmode, gen_rtx_REG (fpcmp_mode, FLAGS_REG),
14600 tmp);
14602 if (!scratch)
14603 scratch = gen_reg_rtx (HImode);
14604 tmp2 = gen_rtx_CLOBBER (VOIDmode, scratch);
14605 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, tmp, tmp2)));
14606 break;
14608 case IX86_FPCMP_ARITH:
14609 /* Sadness wrt reg-stack pops killing fpsr -- gotta get fnstsw first. */
14610 tmp = gen_rtx_COMPARE (fpcmp_mode, op0, op1);
14611 tmp2 = gen_rtx_UNSPEC (HImode, gen_rtvec (1, tmp), UNSPEC_FNSTSW);
14612 if (!scratch)
14613 scratch = gen_reg_rtx (HImode);
14614 emit_insn (gen_rtx_SET (VOIDmode, scratch, tmp2));
14616 /* In the unordered case, we have to check C2 for NaNs, which
14617 doesn't happen to work out to anything nice combination-wise.
14618 So do some bit twiddling on the value we've got in AH to come
14619 up with an appropriate set of condition codes. */
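/* Reference for the masks used below (illustrative, not exhaustive):
   after fnstsw the AH byte holds C0 in bit 0, C2 in bit 2 and C3 in
   bit 6, so 0x45 selects C0|C2|C3, 0x44 is C2|C3, 0x40 is C3, 0x05 is
   C0|C2, 0x04 is C2 (the "unordered" bit) and 0x01 is C0.  For an
   ordered fcom result C3=1 means equal and C0=1 means op0 < op1;
   an unordered result sets all three bits.  */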
14621 intcmp_mode = CCNOmode;
14622 switch (code)
14624 case GT:
14625 case UNGT:
14626 if (code == GT || !TARGET_IEEE_FP)
14628 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14629 code = EQ;
14631 else
14633 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14634 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14635 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x44)));
14636 intcmp_mode = CCmode;
14637 code = GEU;
14639 break;
14640 case LT:
14641 case UNLT:
14642 if (code == LT && TARGET_IEEE_FP)
14644 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14645 emit_insn (gen_cmpqi_ext_3 (scratch, const1_rtx));
14646 intcmp_mode = CCmode;
14647 code = EQ;
14649 else
14651 emit_insn (gen_testqi_ext_ccno_0 (scratch, const1_rtx));
14652 code = NE;
14654 break;
14655 case GE:
14656 case UNGE:
14657 if (code == GE || !TARGET_IEEE_FP)
14659 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x05)));
14660 code = EQ;
14662 else
14664 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14665 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch, const1_rtx));
14666 code = NE;
14668 break;
14669 case LE:
14670 case UNLE:
14671 if (code == LE && TARGET_IEEE_FP)
14673 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14674 emit_insn (gen_addqi_ext_1 (scratch, scratch, constm1_rtx));
14675 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14676 intcmp_mode = CCmode;
14677 code = LTU;
14679 else
14681 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x45)));
14682 code = NE;
14684 break;
14685 case EQ:
14686 case UNEQ:
14687 if (code == EQ && TARGET_IEEE_FP)
14689 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14690 emit_insn (gen_cmpqi_ext_3 (scratch, GEN_INT (0x40)));
14691 intcmp_mode = CCmode;
14692 code = EQ;
14694 else
14696 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14697 code = NE;
14699 break;
14700 case NE:
14701 case LTGT:
14702 if (code == NE && TARGET_IEEE_FP)
14704 emit_insn (gen_andqi_ext_0 (scratch, scratch, GEN_INT (0x45)));
14705 emit_insn (gen_xorqi_cc_ext_1 (scratch, scratch,
14706 GEN_INT (0x40)));
14707 code = NE;
14709 else
14711 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x40)));
14712 code = EQ;
14714 break;
14716 case UNORDERED:
14717 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14718 code = NE;
14719 break;
14720 case ORDERED:
14721 emit_insn (gen_testqi_ext_ccno_0 (scratch, GEN_INT (0x04)));
14722 code = EQ;
14723 break;
14725 default:
14726 gcc_unreachable ();
14728 break;
14730 default:
14731 gcc_unreachable();
14734 /* Return the test that should be put into the flags user, i.e.
14735 the bcc, scc, or cmov instruction. */
14736 return gen_rtx_fmt_ee (code, VOIDmode,
14737 gen_rtx_REG (intcmp_mode, FLAGS_REG),
14738 const0_rtx);
14742 ix86_expand_compare (enum rtx_code code)
14744 rtx op0, op1, ret;
14745 op0 = ix86_compare_op0;
14746 op1 = ix86_compare_op1;
14748 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC)
14749 ret = gen_rtx_fmt_ee (code, VOIDmode, ix86_compare_op0, ix86_compare_op1);
14751 else if (SCALAR_FLOAT_MODE_P (GET_MODE (op0)))
14753 gcc_assert (!DECIMAL_FLOAT_MODE_P (GET_MODE (op0)));
14754 ret = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
14756 else
14757 ret = ix86_expand_int_compare (code, op0, op1);
14759 return ret;
14762 void
14763 ix86_expand_branch (enum rtx_code code, rtx label)
14765 rtx tmp;
14767 switch (GET_MODE (ix86_compare_op0))
14769 case SFmode:
14770 case DFmode:
14771 case XFmode:
14772 case QImode:
14773 case HImode:
14774 case SImode:
14775 simple:
14776 tmp = ix86_expand_compare (code);
14777 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
14778 gen_rtx_LABEL_REF (VOIDmode, label),
14779 pc_rtx);
14780 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
14781 return;
14783 case DImode:
14784 if (TARGET_64BIT)
14785 goto simple;
14786 case TImode:
14787 /* Expand DImode branch into multiple compare+branch. */
14789 rtx lo[2], hi[2], label2;
14790 enum rtx_code code1, code2, code3;
14791 enum machine_mode submode;
14793 if (CONSTANT_P (ix86_compare_op0) && ! CONSTANT_P (ix86_compare_op1))
14795 tmp = ix86_compare_op0;
14796 ix86_compare_op0 = ix86_compare_op1;
14797 ix86_compare_op1 = tmp;
14798 code = swap_condition (code);
14800 if (GET_MODE (ix86_compare_op0) == DImode)
14802 split_di (&ix86_compare_op0, 1, lo+0, hi+0);
14803 split_di (&ix86_compare_op1, 1, lo+1, hi+1);
14804 submode = SImode;
14806 else
14808 split_ti (&ix86_compare_op0, 1, lo+0, hi+0);
14809 split_ti (&ix86_compare_op1, 1, lo+1, hi+1);
14810 submode = DImode;
14813 /* When comparing for equality, we can use (hi0^hi1)|(lo0^lo1) to
14814 avoid two branches. This costs one extra insn, so disable when
14815 optimizing for size. */
14817 if ((code == EQ || code == NE)
14818 && (!optimize_insn_for_size_p ()
14819 || hi[1] == const0_rtx || lo[1] == const0_rtx))
14821 rtx xor0, xor1;
14823 xor1 = hi[0];
14824 if (hi[1] != const0_rtx)
14825 xor1 = expand_binop (submode, xor_optab, xor1, hi[1],
14826 NULL_RTX, 0, OPTAB_WIDEN);
14828 xor0 = lo[0];
14829 if (lo[1] != const0_rtx)
14830 xor0 = expand_binop (submode, xor_optab, xor0, lo[1],
14831 NULL_RTX, 0, OPTAB_WIDEN);
14833 tmp = expand_binop (submode, ior_optab, xor1, xor0,
14834 NULL_RTX, 0, OPTAB_WIDEN);
14836 ix86_compare_op0 = tmp;
14837 ix86_compare_op1 = const0_rtx;
14838 ix86_expand_branch (code, label);
14839 return;
14842 /* Otherwise, if we are doing a less-than or greater-or-equal-than
14843 comparison, op1 is a constant, and the low word is zero, then we can
14844 just examine the high word. Similarly for low word -1 and
14845 less-or-equal-than or greater-than. */
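/* For example, (unsigned DImode) a < 0x500000000 has a constant whose
   low word is zero, so the branch reduces to comparing only the high
   words: hi (a) < 5.  */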
14847 if (CONST_INT_P (hi[1]))
14848 switch (code)
14850 case LT: case LTU: case GE: case GEU:
14851 if (lo[1] == const0_rtx)
14853 ix86_compare_op0 = hi[0];
14854 ix86_compare_op1 = hi[1];
14855 ix86_expand_branch (code, label);
14856 return;
14858 break;
14859 case LE: case LEU: case GT: case GTU:
14860 if (lo[1] == constm1_rtx)
14862 ix86_compare_op0 = hi[0];
14863 ix86_compare_op1 = hi[1];
14864 ix86_expand_branch (code, label);
14865 return;
14867 break;
14868 default:
14869 break;
14872 /* Otherwise, we need two or three jumps. */
14874 label2 = gen_label_rtx ();
14876 code1 = code;
14877 code2 = swap_condition (code);
14878 code3 = unsigned_condition (code);
14880 switch (code)
14882 case LT: case GT: case LTU: case GTU:
14883 break;
14885 case LE: code1 = LT; code2 = GT; break;
14886 case GE: code1 = GT; code2 = LT; break;
14887 case LEU: code1 = LTU; code2 = GTU; break;
14888 case GEU: code1 = GTU; code2 = LTU; break;
14890 case EQ: code1 = UNKNOWN; code2 = NE; break;
14891 case NE: code2 = UNKNOWN; break;
14893 default:
14894 gcc_unreachable ();
14898 * a < b =>
14899 * if (hi(a) < hi(b)) goto true;
14900 * if (hi(a) > hi(b)) goto false;
14901 * if (lo(a) < lo(b)) goto true;
14902 * false:
14905 ix86_compare_op0 = hi[0];
14906 ix86_compare_op1 = hi[1];
14908 if (code1 != UNKNOWN)
14909 ix86_expand_branch (code1, label);
14910 if (code2 != UNKNOWN)
14911 ix86_expand_branch (code2, label2);
14913 ix86_compare_op0 = lo[0];
14914 ix86_compare_op1 = lo[1];
14915 ix86_expand_branch (code3, label);
14917 if (code2 != UNKNOWN)
14918 emit_label (label2);
14919 return;
14922 default:
14923 /* If we have already emitted a compare insn, go straight to simple.
14924 ix86_expand_compare won't emit anything if ix86_compare_emitted
14925 is non-NULL. */
14926 gcc_assert (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_CC);
14927 goto simple;
14931 /* Split branch based on floating point condition. */
14932 void
14933 ix86_split_fp_branch (enum rtx_code code, rtx op1, rtx op2,
14934 rtx target1, rtx target2, rtx tmp, rtx pushed)
14936 rtx condition;
14937 rtx i;
14939 if (target2 != pc_rtx)
14941 rtx tmp = target2;
14942 code = reverse_condition_maybe_unordered (code);
14943 target2 = target1;
14944 target1 = tmp;
14947 condition = ix86_expand_fp_compare (code, op1, op2,
14948 tmp);
14950 /* Remove pushed operand from stack. */
14951 if (pushed)
14952 ix86_free_from_memory (GET_MODE (pushed));
14954 i = emit_jump_insn (gen_rtx_SET
14955 (VOIDmode, pc_rtx,
14956 gen_rtx_IF_THEN_ELSE (VOIDmode,
14957 condition, target1, target2)));
14958 if (split_branch_probability >= 0)
14959 add_reg_note (i, REG_BR_PROB, GEN_INT (split_branch_probability));
14962 void
14963 ix86_expand_setcc (enum rtx_code code, rtx dest)
14965 rtx ret;
14967 gcc_assert (GET_MODE (dest) == QImode);
14969 ret = ix86_expand_compare (code);
14970 PUT_MODE (ret, QImode);
14971 emit_insn (gen_rtx_SET (VOIDmode, dest, ret));
14974 /* Expand comparison setting or clearing carry flag. Return true when
14975 successful and set pop for the operation. */
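/* A comparison that reduces to a bare carry-flag test (LTU or GEU)
   lets the caller use sbb/adc style sequences, such as the
   "sbbl dest,dest" idiom in ix86_expand_int_movcc below, instead of a
   setcc or a branch.  */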
14976 static bool
14977 ix86_expand_carry_flag_compare (enum rtx_code code, rtx op0, rtx op1, rtx *pop)
14979 enum machine_mode mode =
14980 GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
14982 /* Do not handle DImode compares that go through the special path. */
14983 if (mode == (TARGET_64BIT ? TImode : DImode))
14984 return false;
14986 if (SCALAR_FLOAT_MODE_P (mode))
14988 rtx compare_op, compare_seq;
14990 gcc_assert (!DECIMAL_FLOAT_MODE_P (mode));
14992 /* Shortcut: the following common codes never translate
14993 into carry flag compares. */
14994 if (code == EQ || code == NE || code == UNEQ || code == LTGT
14995 || code == ORDERED || code == UNORDERED)
14996 return false;
14998 /* These comparisons require zero flag; swap operands so they won't. */
14999 if ((code == GT || code == UNLE || code == LE || code == UNGT)
15000 && !TARGET_IEEE_FP)
15002 rtx tmp = op0;
15003 op0 = op1;
15004 op1 = tmp;
15005 code = swap_condition (code);
15008 /* Try to expand the comparison and verify that we end up with
15009 a carry flag based comparison. This fails to be true only when
15010 we decide to expand the comparison using arithmetic, which is not
15011 a common scenario. */
15012 start_sequence ();
15013 compare_op = ix86_expand_fp_compare (code, op0, op1, NULL_RTX);
15014 compare_seq = get_insns ();
15015 end_sequence ();
15017 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15018 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15019 code = ix86_fp_compare_code_to_integer (GET_CODE (compare_op));
15020 else
15021 code = GET_CODE (compare_op);
15023 if (code != LTU && code != GEU)
15024 return false;
15026 emit_insn (compare_seq);
15027 *pop = compare_op;
15028 return true;
15031 if (!INTEGRAL_MODE_P (mode))
15032 return false;
15034 switch (code)
15036 case LTU:
15037 case GEU:
15038 break;
15040 /* Convert a==0 into (unsigned)a<1. */
15041 case EQ:
15042 case NE:
15043 if (op1 != const0_rtx)
15044 return false;
15045 op1 = const1_rtx;
15046 code = (code == EQ ? LTU : GEU);
15047 break;
15049 /* Convert a>b into b<a or a>=b+1. */
15050 case GTU:
15051 case LEU:
15052 if (CONST_INT_P (op1))
15054 op1 = gen_int_mode (INTVAL (op1) + 1, GET_MODE (op0));
15055 /* Bail out on overflow. We still can swap operands but that
15056 would force loading of the constant into a register. */
15057 if (op1 == const0_rtx
15058 || !x86_64_immediate_operand (op1, GET_MODE (op1)))
15059 return false;
15060 code = (code == GTU ? GEU : LTU);
15062 else
15064 rtx tmp = op1;
15065 op1 = op0;
15066 op0 = tmp;
15067 code = (code == GTU ? LTU : GEU);
15069 break;
15071 /* Convert a>=0 into (unsigned)a<0x80000000. */
15072 case LT:
15073 case GE:
15074 if (mode == DImode || op1 != const0_rtx)
15075 return false;
15076 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15077 code = (code == LT ? GEU : LTU);
15078 break;
15079 case LE:
15080 case GT:
15081 if (mode == DImode || op1 != constm1_rtx)
15082 return false;
15083 op1 = gen_int_mode (1 << (GET_MODE_BITSIZE (mode) - 1), mode);
15084 code = (code == LE ? GEU : LTU);
15085 break;
15087 default:
15088 return false;
15090 /* Swapping operands may cause a constant to appear as the first operand. */
15091 if (!nonimmediate_operand (op0, VOIDmode))
15093 if (!can_create_pseudo_p ())
15094 return false;
15095 op0 = force_reg (mode, op0);
15097 ix86_compare_op0 = op0;
15098 ix86_compare_op1 = op1;
15099 *pop = ix86_expand_compare (code);
15100 gcc_assert (GET_CODE (*pop) == LTU || GET_CODE (*pop) == GEU);
15101 return true;
15105 ix86_expand_int_movcc (rtx operands[])
15107 enum rtx_code code = GET_CODE (operands[1]), compare_code;
15108 rtx compare_seq, compare_op;
15109 enum machine_mode mode = GET_MODE (operands[0]);
15110 bool sign_bit_compare_p = false;
15112 start_sequence ();
15113 ix86_compare_op0 = XEXP (operands[1], 0);
15114 ix86_compare_op1 = XEXP (operands[1], 1);
15115 compare_op = ix86_expand_compare (code);
15116 compare_seq = get_insns ();
15117 end_sequence ();
15119 compare_code = GET_CODE (compare_op);
15121 if ((ix86_compare_op1 == const0_rtx && (code == GE || code == LT))
15122 || (ix86_compare_op1 == constm1_rtx && (code == GT || code == LE)))
15123 sign_bit_compare_p = true;
15125 /* Don't attempt mode expansion here -- if we had to expand 5 or 6
15126 HImode insns, we'd be swallowed in word prefix ops. */
15128 if ((mode != HImode || TARGET_FAST_PREFIX)
15129 && (mode != (TARGET_64BIT ? TImode : DImode))
15130 && CONST_INT_P (operands[2])
15131 && CONST_INT_P (operands[3]))
15133 rtx out = operands[0];
15134 HOST_WIDE_INT ct = INTVAL (operands[2]);
15135 HOST_WIDE_INT cf = INTVAL (operands[3]);
15136 HOST_WIDE_INT diff;
15138 diff = ct - cf;
15139 /* Sign bit compares are better done using shifts than by using
15140 sbb. */
15141 if (sign_bit_compare_p
15142 || ix86_expand_carry_flag_compare (code, ix86_compare_op0,
15143 ix86_compare_op1, &compare_op))
15145 /* Detect overlap between destination and compare sources. */
15146 rtx tmp = out;
15148 if (!sign_bit_compare_p)
15150 bool fpcmp = false;
15152 compare_code = GET_CODE (compare_op);
15154 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
15155 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
15157 fpcmp = true;
15158 compare_code = ix86_fp_compare_code_to_integer (compare_code);
15161 /* To simplify the rest of the code, restrict to the GEU case. */
15162 if (compare_code == LTU)
15164 HOST_WIDE_INT tmp = ct;
15165 ct = cf;
15166 cf = tmp;
15167 compare_code = reverse_condition (compare_code);
15168 code = reverse_condition (code);
15170 else
15172 if (fpcmp)
15173 PUT_CODE (compare_op,
15174 reverse_condition_maybe_unordered
15175 (GET_CODE (compare_op)));
15176 else
15177 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
15179 diff = ct - cf;
15181 if (reg_overlap_mentioned_p (out, ix86_compare_op0)
15182 || reg_overlap_mentioned_p (out, ix86_compare_op1))
15183 tmp = gen_reg_rtx (mode);
15185 if (mode == DImode)
15186 emit_insn (gen_x86_movdicc_0_m1_rex64 (tmp, compare_op));
15187 else
15188 emit_insn (gen_x86_movsicc_0_m1 (gen_lowpart (SImode, tmp), compare_op));
15190 else
15192 if (code == GT || code == GE)
15193 code = reverse_condition (code);
15194 else
15196 HOST_WIDE_INT tmp = ct;
15197 ct = cf;
15198 cf = tmp;
15199 diff = ct - cf;
15201 tmp = emit_store_flag (tmp, code, ix86_compare_op0,
15202 ix86_compare_op1, VOIDmode, 0, -1);
15205 if (diff == 1)
15208 * cmpl op0,op1
15209 * sbbl dest,dest
15210 * [addl dest, ct]
15212 * Size 5 - 8.
15214 if (ct)
15215 tmp = expand_simple_binop (mode, PLUS,
15216 tmp, GEN_INT (ct),
15217 copy_rtx (tmp), 1, OPTAB_DIRECT);
15219 else if (cf == -1)
15222 * cmpl op0,op1
15223 * sbbl dest,dest
15224 * orl $ct, dest
15226 * Size 8.
15228 tmp = expand_simple_binop (mode, IOR,
15229 tmp, GEN_INT (ct),
15230 copy_rtx (tmp), 1, OPTAB_DIRECT);
15232 else if (diff == -1 && ct)
15235 * cmpl op0,op1
15236 * sbbl dest,dest
15237 * notl dest
15238 * [addl dest, cf]
15240 * Size 8 - 11.
15242 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15243 if (cf)
15244 tmp = expand_simple_binop (mode, PLUS,
15245 copy_rtx (tmp), GEN_INT (cf),
15246 copy_rtx (tmp), 1, OPTAB_DIRECT);
15248 else
15251 * cmpl op0,op1
15252 * sbbl dest,dest
15253 * [notl dest]
15254 * andl cf - ct, dest
15255 * [addl dest, ct]
15257 * Size 8 - 11.
15260 if (cf == 0)
15262 cf = ct;
15263 ct = 0;
15264 tmp = expand_simple_unop (mode, NOT, tmp, copy_rtx (tmp), 1);
15267 tmp = expand_simple_binop (mode, AND,
15268 copy_rtx (tmp),
15269 gen_int_mode (cf - ct, mode),
15270 copy_rtx (tmp), 1, OPTAB_DIRECT);
15271 if (ct)
15272 tmp = expand_simple_binop (mode, PLUS,
15273 copy_rtx (tmp), GEN_INT (ct),
15274 copy_rtx (tmp), 1, OPTAB_DIRECT);
15277 if (!rtx_equal_p (tmp, out))
15278 emit_move_insn (copy_rtx (out), copy_rtx (tmp));
15280 return 1; /* DONE */
15283 if (diff < 0)
15285 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15287 HOST_WIDE_INT tmp;
15288 tmp = ct, ct = cf, cf = tmp;
15289 diff = -diff;
15291 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15293 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15295 /* We may be reversing an unordered compare to a normal compare, which
15296 is not valid in general (we may convert a non-trapping condition
15297 to a trapping one); however, on i386 we currently emit all
15298 comparisons unordered. */
15299 compare_code = reverse_condition_maybe_unordered (compare_code);
15300 code = reverse_condition_maybe_unordered (code);
15302 else
15304 compare_code = reverse_condition (compare_code);
15305 code = reverse_condition (code);
15309 compare_code = UNKNOWN;
15310 if (GET_MODE_CLASS (GET_MODE (ix86_compare_op0)) == MODE_INT
15311 && CONST_INT_P (ix86_compare_op1))
15313 if (ix86_compare_op1 == const0_rtx
15314 && (code == LT || code == GE))
15315 compare_code = code;
15316 else if (ix86_compare_op1 == constm1_rtx)
15318 if (code == LE)
15319 compare_code = LT;
15320 else if (code == GT)
15321 compare_code = GE;
15325 /* Optimize dest = (op0 < 0) ? -1 : cf. */
15326 if (compare_code != UNKNOWN
15327 && GET_MODE (ix86_compare_op0) == GET_MODE (out)
15328 && (cf == -1 || ct == -1))
15330 /* If the lea code below could be used, only optimize
15331 if it results in a 2-insn sequence. */
15333 if (! (diff == 1 || diff == 2 || diff == 4 || diff == 8
15334 || diff == 3 || diff == 5 || diff == 9)
15335 || (compare_code == LT && ct == -1)
15336 || (compare_code == GE && cf == -1))
15339 * notl op1 (if necessary)
15340 * sarl $31, op1
15341 * orl cf, op1
15343 if (ct != -1)
15345 cf = ct;
15346 ct = -1;
15347 code = reverse_condition (code);
15350 out = emit_store_flag (out, code, ix86_compare_op0,
15351 ix86_compare_op1, VOIDmode, 0, -1);
15353 out = expand_simple_binop (mode, IOR,
15354 out, GEN_INT (cf),
15355 out, 1, OPTAB_DIRECT);
15356 if (out != operands[0])
15357 emit_move_insn (operands[0], out);
15359 return 1; /* DONE */
15364 if ((diff == 1 || diff == 2 || diff == 4 || diff == 8
15365 || diff == 3 || diff == 5 || diff == 9)
15366 && ((mode != QImode && mode != HImode) || !TARGET_PARTIAL_REG_STALL)
15367 && (mode != DImode
15368 || x86_64_immediate_operand (GEN_INT (cf), VOIDmode)))
15371 * xorl dest,dest
15372 * cmpl op1,op2
15373 * setcc dest
15374 * lea cf(dest*(ct-cf)),dest
15376 * Size 14.
15378 * This also catches the degenerate setcc-only case.
15381 rtx tmp;
15382 int nops;
15384 out = emit_store_flag (out, code, ix86_compare_op0,
15385 ix86_compare_op1, VOIDmode, 0, 1);
15387 nops = 0;
15388 /* On x86_64 the lea instruction operates on Pmode, so we need
15389 to get the arithmetic done in the proper mode to match. */
15390 if (diff == 1)
15391 tmp = copy_rtx (out);
15392 else
15394 rtx out1;
15395 out1 = copy_rtx (out);
15396 tmp = gen_rtx_MULT (mode, out1, GEN_INT (diff & ~1));
15397 nops++;
15398 if (diff & 1)
15400 tmp = gen_rtx_PLUS (mode, tmp, out1);
15401 nops++;
15404 if (cf != 0)
15406 tmp = gen_rtx_PLUS (mode, tmp, GEN_INT (cf));
15407 nops++;
15409 if (!rtx_equal_p (tmp, out))
15411 if (nops == 1)
15412 out = force_operand (tmp, copy_rtx (out));
15413 else
15414 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (out), copy_rtx (tmp)));
15416 if (!rtx_equal_p (out, operands[0]))
15417 emit_move_insn (operands[0], copy_rtx (out));
15419 return 1; /* DONE */
15423 * General case: Jumpful:
15424 * xorl dest,dest cmpl op1, op2
15425 * cmpl op1, op2 movl ct, dest
15426 * setcc dest jcc 1f
15427 * decl dest movl cf, dest
15428 * andl (cf-ct),dest 1:
15429 * addl ct,dest
15431 * Size 20. Size 14.
15433 * This is reasonably steep, but branch mispredict costs are
15434 * high on modern CPUs, so consider failing only if optimizing
15435 * for space.
15438 if ((!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15439 && BRANCH_COST (optimize_insn_for_speed_p (),
15440 false) >= 2)
15442 if (cf == 0)
15444 enum machine_mode cmp_mode = GET_MODE (ix86_compare_op0);
15446 cf = ct;
15447 ct = 0;
15449 if (SCALAR_FLOAT_MODE_P (cmp_mode))
15451 gcc_assert (!DECIMAL_FLOAT_MODE_P (cmp_mode));
15453 /* We may be reversing an unordered compare to a normal compare,
15454 which is not valid in general (we may convert a non-trapping
15455 condition to a trapping one); however, on i386 we currently
15456 emit all comparisons unordered. */
15457 code = reverse_condition_maybe_unordered (code);
15459 else
15461 code = reverse_condition (code);
15462 if (compare_code != UNKNOWN)
15463 compare_code = reverse_condition (compare_code);
15467 if (compare_code != UNKNOWN)
15469 /* notl op1 (if needed)
15470 sarl $31, op1
15471 andl (cf-ct), op1
15472 addl ct, op1
15474 For x < 0 (resp. x <= -1) there will be no notl,
15475 so if possible swap the constants to get rid of the
15476 complement.
15477 True/false will be -1/0 while code below (store flag
15478 followed by decrement) is 0/-1, so the constants need
15479 to be exchanged once more. */
15481 if (compare_code == GE || !cf)
15483 code = reverse_condition (code);
15484 compare_code = LT;
15486 else
15488 HOST_WIDE_INT tmp = cf;
15489 cf = ct;
15490 ct = tmp;
15493 out = emit_store_flag (out, code, ix86_compare_op0,
15494 ix86_compare_op1, VOIDmode, 0, -1);
15496 else
15498 out = emit_store_flag (out, code, ix86_compare_op0,
15499 ix86_compare_op1, VOIDmode, 0, 1);
15501 out = expand_simple_binop (mode, PLUS, copy_rtx (out), constm1_rtx,
15502 copy_rtx (out), 1, OPTAB_DIRECT);
15505 out = expand_simple_binop (mode, AND, copy_rtx (out),
15506 gen_int_mode (cf - ct, mode),
15507 copy_rtx (out), 1, OPTAB_DIRECT);
15508 if (ct)
15509 out = expand_simple_binop (mode, PLUS, copy_rtx (out), GEN_INT (ct),
15510 copy_rtx (out), 1, OPTAB_DIRECT);
15511 if (!rtx_equal_p (out, operands[0]))
15512 emit_move_insn (operands[0], copy_rtx (out));
15514 return 1; /* DONE */
15518 if (!TARGET_CMOVE || (mode == QImode && TARGET_PARTIAL_REG_STALL))
15520 /* Try a few more things with specific constants and a variable. */
15522 optab op;
15523 rtx var, orig_out, out, tmp;
15525 if (BRANCH_COST (optimize_insn_for_speed_p (), false) <= 2)
15526 return 0; /* FAIL */
15528 /* If one of the two operands is an interesting constant, load a
15529 constant with the above and mask it in with a logical operation. */
15531 if (CONST_INT_P (operands[2]))
15533 var = operands[3];
15534 if (INTVAL (operands[2]) == 0 && operands[3] != constm1_rtx)
15535 operands[3] = constm1_rtx, op = and_optab;
15536 else if (INTVAL (operands[2]) == -1 && operands[3] != const0_rtx)
15537 operands[3] = const0_rtx, op = ior_optab;
15538 else
15539 return 0; /* FAIL */
15541 else if (CONST_INT_P (operands[3]))
15543 var = operands[2];
15544 if (INTVAL (operands[3]) == 0 && operands[2] != constm1_rtx)
15545 operands[2] = constm1_rtx, op = and_optab;
15546 else if (INTVAL (operands[3]) == -1 && operands[2] != const0_rtx)
15547 operands[2] = const0_rtx, op = ior_optab;
15548 else
15549 return 0; /* FAIL */
15551 else
15552 return 0; /* FAIL */
15554 orig_out = operands[0];
15555 tmp = gen_reg_rtx (mode);
15556 operands[0] = tmp;
15558 /* Recurse to get the constant loaded. */
15559 if (ix86_expand_int_movcc (operands) == 0)
15560 return 0; /* FAIL */
15562 /* Mask in the interesting variable. */
15563 out = expand_binop (mode, op, var, tmp, orig_out, 0,
15564 OPTAB_WIDEN);
15565 if (!rtx_equal_p (out, orig_out))
15566 emit_move_insn (copy_rtx (orig_out), copy_rtx (out));
15568 return 1; /* DONE */
15572 * For comparison with above,
15574 * movl cf,dest
15575 * movl ct,tmp
15576 * cmpl op1,op2
15577 * cmovcc tmp,dest
15579 * Size 15.
15582 if (! nonimmediate_operand (operands[2], mode))
15583 operands[2] = force_reg (mode, operands[2]);
15584 if (! nonimmediate_operand (operands[3], mode))
15585 operands[3] = force_reg (mode, operands[3]);
15587 if (! register_operand (operands[2], VOIDmode)
15588 && (mode == QImode
15589 || ! register_operand (operands[3], VOIDmode)))
15590 operands[2] = force_reg (mode, operands[2]);
15592 if (mode == QImode
15593 && ! register_operand (operands[3], VOIDmode))
15594 operands[3] = force_reg (mode, operands[3]);
15596 emit_insn (compare_seq);
15597 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15598 gen_rtx_IF_THEN_ELSE (mode,
15599 compare_op, operands[2],
15600 operands[3])));
15602 return 1; /* DONE */
15605 /* Swap, force into registers, or otherwise massage the two operands
15606 to an sse comparison with a mask result. Thus we differ a bit from
15607 ix86_prepare_fp_compare_args which expects to produce a flags result.
15609 The DEST operand exists to help determine whether to commute commutative
15610 operators. The POP0/POP1 operands are updated in place. The new
15611 comparison code is returned, or UNKNOWN if not implementable. */
15613 static enum rtx_code
15614 ix86_prepare_sse_fp_compare_args (rtx dest, enum rtx_code code,
15615 rtx *pop0, rtx *pop1)
15617 rtx tmp;
15619 switch (code)
15621 case LTGT:
15622 case UNEQ:
15623 /* We have no LTGT as an operator. We could implement it with
15624 NE & ORDERED, but this requires an extra temporary. It's
15625 not clear that it's worth it. */
15626 return UNKNOWN;
15628 case LT:
15629 case LE:
15630 case UNGT:
15631 case UNGE:
15632 /* These are supported directly. */
15633 break;
15635 case EQ:
15636 case NE:
15637 case UNORDERED:
15638 case ORDERED:
15639 /* For commutative operators, try to canonicalize the destination
15640 operand to be first in the comparison - this helps reload to
15641 avoid extra moves. */
15642 if (!dest || !rtx_equal_p (dest, *pop1))
15643 break;
15644 /* FALLTHRU */
15646 case GE:
15647 case GT:
15648 case UNLE:
15649 case UNLT:
15650 /* These are not supported directly. Swap the comparison operands
15651 to transform into something that is supported. */
15652 tmp = *pop0;
15653 *pop0 = *pop1;
15654 *pop1 = tmp;
15655 code = swap_condition (code);
15656 break;
15658 default:
15659 gcc_unreachable ();
15662 return code;
15665 /* Detect conditional moves that exactly match min/max operational
15666 semantics. Note that this is IEEE safe, as long as we don't
15667 interchange the operands.
15669 Returns FALSE if this conditional move doesn't match a MIN/MAX,
15670 and TRUE if the operation is successful and instructions are emitted. */
15672 static bool
15673 ix86_expand_sse_fp_minmax (rtx dest, enum rtx_code code, rtx cmp_op0,
15674 rtx cmp_op1, rtx if_true, rtx if_false)
15676 enum machine_mode mode;
15677 bool is_min;
15678 rtx tmp;
15680 if (code == LT)
15682 else if (code == UNGE)
15684 tmp = if_true;
15685 if_true = if_false;
15686 if_false = tmp;
15688 else
15689 return false;
15691 if (rtx_equal_p (cmp_op0, if_true) && rtx_equal_p (cmp_op1, if_false))
15692 is_min = true;
15693 else if (rtx_equal_p (cmp_op1, if_true) && rtx_equal_p (cmp_op0, if_false))
15694 is_min = false;
15695 else
15696 return false;
15698 mode = GET_MODE (dest);
15700 /* We want to check HONOR_NANS and HONOR_SIGNED_ZEROS here,
15701 but MODE may be a vector mode and thus not appropriate. */
15702 if (!flag_finite_math_only || !flag_unsafe_math_optimizations)
15704 int u = is_min ? UNSPEC_IEEE_MIN : UNSPEC_IEEE_MAX;
15705 rtvec v;
15707 if_true = force_reg (mode, if_true);
15708 v = gen_rtvec (2, if_true, if_false);
15709 tmp = gen_rtx_UNSPEC (mode, v, u);
15711 else
15713 code = is_min ? SMIN : SMAX;
15714 tmp = gen_rtx_fmt_ee (code, mode, if_true, if_false);
15717 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
15718 return true;
15721 /* Expand an sse vector comparison. Return the register with the result. */
15723 static rtx
15724 ix86_expand_sse_cmp (rtx dest, enum rtx_code code, rtx cmp_op0, rtx cmp_op1,
15725 rtx op_true, rtx op_false)
15727 enum machine_mode mode = GET_MODE (dest);
15728 rtx x;
15730 cmp_op0 = force_reg (mode, cmp_op0);
15731 if (!nonimmediate_operand (cmp_op1, mode))
15732 cmp_op1 = force_reg (mode, cmp_op1);
15734 if (optimize
15735 || reg_overlap_mentioned_p (dest, op_true)
15736 || reg_overlap_mentioned_p (dest, op_false))
15737 dest = gen_reg_rtx (mode);
15739 x = gen_rtx_fmt_ee (code, mode, cmp_op0, cmp_op1);
15740 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15742 return dest;
15745 /* Expand DEST = CMP ? OP_TRUE : OP_FALSE into a sequence of logical
15746 operations. This is used for both scalar and vector conditional moves. */
15748 static void
15749 ix86_expand_sse_movcc (rtx dest, rtx cmp, rtx op_true, rtx op_false)
15751 enum machine_mode mode = GET_MODE (dest);
15752 rtx t2, t3, x;
15754 if (op_false == CONST0_RTX (mode))
15756 op_true = force_reg (mode, op_true);
15757 x = gen_rtx_AND (mode, cmp, op_true);
15758 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15760 else if (op_true == CONST0_RTX (mode))
15762 op_false = force_reg (mode, op_false);
15763 x = gen_rtx_NOT (mode, cmp);
15764 x = gen_rtx_AND (mode, x, op_false);
15765 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15767 else
15769 op_true = force_reg (mode, op_true);
15770 op_false = force_reg (mode, op_false);
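/* General case: compute DEST = (OP_TRUE & CMP) | (OP_FALSE & ~CMP)
using two temporaries combined with IOR. */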
15772 t2 = gen_reg_rtx (mode);
15773 if (optimize)
15774 t3 = gen_reg_rtx (mode);
15775 else
15776 t3 = dest;
15778 x = gen_rtx_AND (mode, op_true, cmp);
15779 emit_insn (gen_rtx_SET (VOIDmode, t2, x));
15781 x = gen_rtx_NOT (mode, cmp);
15782 x = gen_rtx_AND (mode, x, op_false);
15783 emit_insn (gen_rtx_SET (VOIDmode, t3, x));
15785 x = gen_rtx_IOR (mode, t3, t2);
15786 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
15790 /* Expand a floating-point conditional move. Return true if successful. */
15793 ix86_expand_fp_movcc (rtx operands[])
15795 enum machine_mode mode = GET_MODE (operands[0]);
15796 enum rtx_code code = GET_CODE (operands[1]);
15797 rtx tmp, compare_op;
15799 ix86_compare_op0 = XEXP (operands[1], 0);
15800 ix86_compare_op1 = XEXP (operands[1], 1);
15801 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
15803 enum machine_mode cmode;
15805 /* Since we've no cmove for sse registers, don't force bad register
15806 allocation just to gain access to it. Deny movcc when the
15807 comparison mode doesn't match the move mode. */
15808 cmode = GET_MODE (ix86_compare_op0);
15809 if (cmode == VOIDmode)
15810 cmode = GET_MODE (ix86_compare_op1);
15811 if (cmode != mode)
15812 return 0;
15814 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15815 &ix86_compare_op0,
15816 &ix86_compare_op1);
15817 if (code == UNKNOWN)
15818 return 0;
15820 if (ix86_expand_sse_fp_minmax (operands[0], code, ix86_compare_op0,
15821 ix86_compare_op1, operands[2],
15822 operands[3]))
15823 return 1;
15825 tmp = ix86_expand_sse_cmp (operands[0], code, ix86_compare_op0,
15826 ix86_compare_op1, operands[2], operands[3]);
15827 ix86_expand_sse_movcc (operands[0], tmp, operands[2], operands[3]);
15828 return 1;
15831 /* The floating point conditional move instructions don't directly
15832 support conditions resulting from a signed integer comparison. */
15834 compare_op = ix86_expand_compare (code);
15835 if (!fcmov_comparison_operator (compare_op, VOIDmode))
15837 tmp = gen_reg_rtx (QImode);
15838 ix86_expand_setcc (code, tmp);
15839 code = NE;
15840 ix86_compare_op0 = tmp;
15841 ix86_compare_op1 = const0_rtx;
15842 compare_op = ix86_expand_compare (code);
15845 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
15846 gen_rtx_IF_THEN_ELSE (mode, compare_op,
15847 operands[2], operands[3])));
15849 return 1;
15852 /* Expand a floating-point vector conditional move; a vcond operation
15853 rather than a movcc operation. */
15855 bool
15856 ix86_expand_fp_vcond (rtx operands[])
15858 enum rtx_code code = GET_CODE (operands[3]);
15859 rtx cmp;
15861 code = ix86_prepare_sse_fp_compare_args (operands[0], code,
15862 &operands[4], &operands[5]);
15863 if (code == UNKNOWN)
15864 return false;
15866 if (ix86_expand_sse_fp_minmax (operands[0], code, operands[4],
15867 operands[5], operands[1], operands[2]))
15868 return true;
15870 cmp = ix86_expand_sse_cmp (operands[0], code, operands[4], operands[5],
15871 operands[1], operands[2]);
15872 ix86_expand_sse_movcc (operands[0], cmp, operands[1], operands[2]);
15873 return true;
15876 /* Expand a signed/unsigned integral vector conditional move. */
15878 bool
15879 ix86_expand_int_vcond (rtx operands[])
15881 enum machine_mode mode = GET_MODE (operands[0]);
15882 enum rtx_code code = GET_CODE (operands[3]);
15883 bool negate = false;
15884 rtx x, cop0, cop1;
15886 cop0 = operands[4];
15887 cop1 = operands[5];
15889 /* Canonicalize the comparison to EQ, GT, GTU. */
15890 switch (code)
15892 case EQ:
15893 case GT:
15894 case GTU:
15895 break;
15897 case NE:
15898 case LE:
15899 case LEU:
15900 code = reverse_condition (code);
15901 negate = true;
15902 break;
15904 case GE:
15905 case GEU:
15906 code = reverse_condition (code);
15907 negate = true;
15908 /* FALLTHRU */
15910 case LT:
15911 case LTU:
15912 code = swap_condition (code);
15913 x = cop0, cop0 = cop1, cop1 = x;
15914 break;
15916 default:
15917 gcc_unreachable ();
15920 /* Only SSE4.1/SSE4.2 supports V2DImode. */
15921 if (mode == V2DImode)
15923 switch (code)
15925 case EQ:
15926 /* SSE4.1 supports EQ. */
15927 if (!TARGET_SSE4_1)
15928 return false;
15929 break;
15931 case GT:
15932 case GTU:
15933 /* SSE4.2 supports GT/GTU. */
15934 if (!TARGET_SSE4_2)
15935 return false;
15936 break;
15938 default:
15939 gcc_unreachable ();
15943 /* Unsigned parallel compare is not supported by the hardware. Play some
15944 tricks to turn this into a signed comparison against 0. */
15945 if (code == GTU)
15947 cop0 = force_reg (mode, cop0);
15949 switch (mode)
15951 case V4SImode:
15952 case V2DImode:
15954 rtx t1, t2, mask;
15956 /* Perform a parallel modulo subtraction. */
15957 t1 = gen_reg_rtx (mode);
15958 emit_insn ((mode == V4SImode
15959 ? gen_subv4si3
15960 : gen_subv2di3) (t1, cop0, cop1));
15962 /* Extract the original sign bit of op0. */
15963 mask = ix86_build_signbit_mask (GET_MODE_INNER (mode),
15964 true, false);
15965 t2 = gen_reg_rtx (mode);
15966 emit_insn ((mode == V4SImode
15967 ? gen_andv4si3
15968 : gen_andv2di3) (t2, cop0, mask));
15970 /* XOR it back into the result of the subtraction. This results
15971 in the sign bit set iff we saw unsigned underflow. */
15972 x = gen_reg_rtx (mode);
15973 emit_insn ((mode == V4SImode
15974 ? gen_xorv4si3
15975 : gen_xorv2di3) (x, t1, t2));
15977 code = GT;
15979 break;
15981 case V16QImode:
15982 case V8HImode:
15983 /* Perform a parallel unsigned saturating subtraction. */
15984 x = gen_reg_rtx (mode);
15985 emit_insn (gen_rtx_SET (VOIDmode, x,
15986 gen_rtx_US_MINUS (mode, cop0, cop1)));
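/* The saturating difference is zero exactly when cop0 <=u cop1, so GTU
reduces to an equality test against zero with the sense of the selection
inverted. */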
15988 code = EQ;
15989 negate = !negate;
15990 break;
15992 default:
15993 gcc_unreachable ();
15996 cop0 = x;
15997 cop1 = CONST0_RTX (mode);
16000 x = ix86_expand_sse_cmp (operands[0], code, cop0, cop1,
16001 operands[1+negate], operands[2-negate]);
16003 ix86_expand_sse_movcc (operands[0], x, operands[1+negate],
16004 operands[2-negate]);
16005 return true;
16008 /* Unpack OP[1] into the next wider integer vector type. UNSIGNED_P is
16009 true if we should do zero extension, else sign extension. HIGH_P is
16010 true if we want the N/2 high elements, else the low elements. */
16012 void
16013 ix86_expand_sse_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16015 enum machine_mode imode = GET_MODE (operands[1]);
16016 rtx (*unpack)(rtx, rtx, rtx);
16017 rtx se, dest;
16019 switch (imode)
16021 case V16QImode:
16022 if (high_p)
16023 unpack = gen_vec_interleave_highv16qi;
16024 else
16025 unpack = gen_vec_interleave_lowv16qi;
16026 break;
16027 case V8HImode:
16028 if (high_p)
16029 unpack = gen_vec_interleave_highv8hi;
16030 else
16031 unpack = gen_vec_interleave_lowv8hi;
16032 break;
16033 case V4SImode:
16034 if (high_p)
16035 unpack = gen_vec_interleave_highv4si;
16036 else
16037 unpack = gen_vec_interleave_lowv4si;
16038 break;
16039 default:
16040 gcc_unreachable ();
16043 dest = gen_lowpart (imode, operands[0]);
16045 if (unsigned_p)
16046 se = force_reg (imode, CONST0_RTX (imode));
16047 else
16048 se = ix86_expand_sse_cmp (gen_reg_rtx (imode), GT, CONST0_RTX (imode),
16049 operands[1], pc_rtx, pc_rtx);
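/* SE is zero for zero extension, or all-ones wherever the corresponding
element of operands[1] is negative; interleaving it with operands[1]
therefore sign-extends each element. */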
16051 emit_insn (unpack (dest, operands[1], se));
16054 /* This function performs the same task as ix86_expand_sse_unpack,
16055 but with SSE4.1 instructions. */
16057 void
16058 ix86_expand_sse4_unpack (rtx operands[2], bool unsigned_p, bool high_p)
16060 enum machine_mode imode = GET_MODE (operands[1]);
16061 rtx (*unpack)(rtx, rtx);
16062 rtx src, dest;
16064 switch (imode)
16066 case V16QImode:
16067 if (unsigned_p)
16068 unpack = gen_sse4_1_zero_extendv8qiv8hi2;
16069 else
16070 unpack = gen_sse4_1_extendv8qiv8hi2;
16071 break;
16072 case V8HImode:
16073 if (unsigned_p)
16074 unpack = gen_sse4_1_zero_extendv4hiv4si2;
16075 else
16076 unpack = gen_sse4_1_extendv4hiv4si2;
16077 break;
16078 case V4SImode:
16079 if (unsigned_p)
16080 unpack = gen_sse4_1_zero_extendv2siv2di2;
16081 else
16082 unpack = gen_sse4_1_extendv2siv2di2;
16083 break;
16084 default:
16085 gcc_unreachable ();
16088 dest = operands[0];
16089 if (high_p)
16091 /* Shift higher 8 bytes to lower 8 bytes. */
16092 src = gen_reg_rtx (imode);
16093 emit_insn (gen_sse2_lshrti3 (gen_lowpart (TImode, src),
16094 gen_lowpart (TImode, operands[1]),
16095 GEN_INT (64)));
16097 else
16098 src = operands[1];
16100 emit_insn (unpack (dest, src));
16103 /* Expand conditional increment or decrement using adc/sbb instructions.
16104 The default case using setcc followed by the conditional move can be
16105 done by generic code. */
16107 ix86_expand_int_addcc (rtx operands[])
16109 enum rtx_code code = GET_CODE (operands[1]);
16110 rtx compare_op;
16111 rtx val = const0_rtx;
16112 bool fpcmp = false;
16113 enum machine_mode mode = GET_MODE (operands[0]);
16115 ix86_compare_op0 = XEXP (operands[1], 0);
16116 ix86_compare_op1 = XEXP (operands[1], 1);
16117 if (operands[3] != const1_rtx
16118 && operands[3] != constm1_rtx)
16119 return 0;
16120 if (!ix86_expand_carry_flag_compare (code, ix86_compare_op0,
16121 ix86_compare_op1, &compare_op))
16122 return 0;
16123 code = GET_CODE (compare_op);
16125 if (GET_MODE (XEXP (compare_op, 0)) == CCFPmode
16126 || GET_MODE (XEXP (compare_op, 0)) == CCFPUmode)
16128 fpcmp = true;
16129 code = ix86_fp_compare_code_to_integer (code);
16132 if (code != LTU)
16134 val = constm1_rtx;
16135 if (fpcmp)
16136 PUT_CODE (compare_op,
16137 reverse_condition_maybe_unordered
16138 (GET_CODE (compare_op)));
16139 else
16140 PUT_CODE (compare_op, reverse_condition (GET_CODE (compare_op)));
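/* With the condition reversed, VAL = -1 compensates: x + (-1) + CF ==
x - !CF and x - (-1) - CF == x + !CF, so the adc/sbb below still form
the required conditional increment or decrement. */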
16142 PUT_MODE (compare_op, mode);
16144 /* Construct either adc or sbb insn. */
16145 if ((code == LTU) == (operands[3] == constm1_rtx))
16147 switch (GET_MODE (operands[0]))
16149 case QImode:
16150 emit_insn (gen_subqi3_carry (operands[0], operands[2], val, compare_op));
16151 break;
16152 case HImode:
16153 emit_insn (gen_subhi3_carry (operands[0], operands[2], val, compare_op));
16154 break;
16155 case SImode:
16156 emit_insn (gen_subsi3_carry (operands[0], operands[2], val, compare_op));
16157 break;
16158 case DImode:
16159 emit_insn (gen_subdi3_carry_rex64 (operands[0], operands[2], val, compare_op));
16160 break;
16161 default:
16162 gcc_unreachable ();
16165 else
16167 switch (GET_MODE (operands[0]))
16169 case QImode:
16170 emit_insn (gen_addqi3_carry (operands[0], operands[2], val, compare_op));
16171 break;
16172 case HImode:
16173 emit_insn (gen_addhi3_carry (operands[0], operands[2], val, compare_op));
16174 break;
16175 case SImode:
16176 emit_insn (gen_addsi3_carry (operands[0], operands[2], val, compare_op));
16177 break;
16178 case DImode:
16179 emit_insn (gen_adddi3_carry_rex64 (operands[0], operands[2], val, compare_op));
16180 break;
16181 default:
16182 gcc_unreachable ();
16185 return 1; /* DONE */
16189 /* Split operands 0 and 1 into SImode parts. Similar to split_di, but
16190 works for floating point parameters and non-offsettable memories.
16191 For pushes, it returns just stack offsets; the values will be saved
16192 in the right order. Maximally four parts are generated. */
16194 static int
16195 ix86_split_to_parts (rtx operand, rtx *parts, enum machine_mode mode)
16197 int size;
16199 if (!TARGET_64BIT)
16200 size = mode == XFmode ? 3 : GET_MODE_SIZE (mode) / 4;
16201 else
16202 size = (GET_MODE_SIZE (mode) + 4) / 8;
16204 gcc_assert (!REG_P (operand) || !MMX_REGNO_P (REGNO (operand)));
16205 gcc_assert (size >= 2 && size <= 4);
16207 /* Optimize constant pool references to immediates. This is used by fp
16208 moves, which force all constants to memory to allow combining. */
16209 if (MEM_P (operand) && MEM_READONLY_P (operand))
16211 rtx tmp = maybe_get_pool_constant (operand);
16212 if (tmp)
16213 operand = tmp;
16216 if (MEM_P (operand) && !offsettable_memref_p (operand))
16218 /* The only non-offsettable memories we handle are pushes. */
16219 int ok = push_operand (operand, VOIDmode);
16221 gcc_assert (ok);
16223 operand = copy_rtx (operand);
16224 PUT_MODE (operand, Pmode);
16225 parts[0] = parts[1] = parts[2] = parts[3] = operand;
16226 return size;
16229 if (GET_CODE (operand) == CONST_VECTOR)
16231 enum machine_mode imode = int_mode_for_mode (mode);
16232 /* Caution: if we looked through a constant pool memory above,
16233 the operand may actually have a different mode now. That's
16234 ok, since we want to pun this all the way back to an integer. */
16235 operand = simplify_subreg (imode, operand, GET_MODE (operand), 0);
16236 gcc_assert (operand != NULL);
16237 mode = imode;
16240 if (!TARGET_64BIT)
16242 if (mode == DImode)
16243 split_di (&operand, 1, &parts[0], &parts[1]);
16244 else
16246 int i;
16248 if (REG_P (operand))
16250 gcc_assert (reload_completed);
16251 for (i = 0; i < size; i++)
16252 parts[i] = gen_rtx_REG (SImode, REGNO (operand) + i);
16254 else if (offsettable_memref_p (operand))
16256 operand = adjust_address (operand, SImode, 0);
16257 parts[0] = operand;
16258 for (i = 1; i < size; i++)
16259 parts[i] = adjust_address (operand, SImode, 4 * i);
16261 else if (GET_CODE (operand) == CONST_DOUBLE)
16263 REAL_VALUE_TYPE r;
16264 long l[4];
16266 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16267 switch (mode)
16269 case TFmode:
16270 real_to_target (l, &r, mode);
16271 parts[3] = gen_int_mode (l[3], SImode);
16272 parts[2] = gen_int_mode (l[2], SImode);
16273 break;
16274 case XFmode:
16275 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
16276 parts[2] = gen_int_mode (l[2], SImode);
16277 break;
16278 case DFmode:
16279 REAL_VALUE_TO_TARGET_DOUBLE (r, l);
16280 break;
16281 default:
16282 gcc_unreachable ();
16284 parts[1] = gen_int_mode (l[1], SImode);
16285 parts[0] = gen_int_mode (l[0], SImode);
16287 else
16288 gcc_unreachable ();
16291 else
16293 if (mode == TImode)
16294 split_ti (&operand, 1, &parts[0], &parts[1]);
16295 if (mode == XFmode || mode == TFmode)
16297 enum machine_mode upper_mode = mode == XFmode ? SImode : DImode;
16298 if (REG_P (operand))
16300 gcc_assert (reload_completed);
16301 parts[0] = gen_rtx_REG (DImode, REGNO (operand) + 0);
16302 parts[1] = gen_rtx_REG (upper_mode, REGNO (operand) + 1);
16304 else if (offsettable_memref_p (operand))
16306 operand = adjust_address (operand, DImode, 0);
16307 parts[0] = operand;
16308 parts[1] = adjust_address (operand, upper_mode, 8);
16310 else if (GET_CODE (operand) == CONST_DOUBLE)
16312 REAL_VALUE_TYPE r;
16313 long l[4];
16315 REAL_VALUE_FROM_CONST_DOUBLE (r, operand);
16316 real_to_target (l, &r, mode);
16318 /* Do not use shift by 32 to avoid warning on 32bit systems. */
16319 if (HOST_BITS_PER_WIDE_INT >= 64)
16320 parts[0]
16321 = gen_int_mode
16322 ((l[0] & (((HOST_WIDE_INT) 2 << 31) - 1))
16323 + ((((HOST_WIDE_INT) l[1]) << 31) << 1),
16324 DImode);
16325 else
16326 parts[0] = immed_double_const (l[0], l[1], DImode);
16328 if (upper_mode == SImode)
16329 parts[1] = gen_int_mode (l[2], SImode);
16330 else if (HOST_BITS_PER_WIDE_INT >= 64)
16331 parts[1]
16332 = gen_int_mode
16333 ((l[2] & (((HOST_WIDE_INT) 2 << 31) - 1))
16334 + ((((HOST_WIDE_INT) l[3]) << 31) << 1),
16335 DImode);
16336 else
16337 parts[1] = immed_double_const (l[2], l[3], DImode);
16339 else
16340 gcc_unreachable ();
16344 return size;
16347 /* Emit insns to perform a move or push of DI, DF, XF, and TF values.
16348 All required insns are emitted here; the caller need not emit any
16349 further moves. Operands 2-5 are filled with the destination parts
16350 in the correct order; operands 6-9 with the source parts. */
16352 void
16353 ix86_split_long_move (rtx operands[])
16355 rtx part[2][4];
16356 int nparts, i, j;
16357 int push = 0;
16358 int collisions = 0;
16359 enum machine_mode mode = GET_MODE (operands[0]);
16360 bool collisionparts[4];
16362 /* The DFmode expanders may ask us to move a double.
16363 For a 64-bit target this is a single move. By hiding the fact
16364 here we simplify the i386.md splitters. */
16365 if (GET_MODE_SIZE (GET_MODE (operands[0])) == 8 && TARGET_64BIT)
16367 /* Optimize constant pool references to immediates. This is used by
16368 fp moves, which force all constants to memory to allow combining. */
16370 if (MEM_P (operands[1])
16371 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
16372 && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
16373 operands[1] = get_pool_constant (XEXP (operands[1], 0));
16374 if (push_operand (operands[0], VOIDmode))
16376 operands[0] = copy_rtx (operands[0]);
16377 PUT_MODE (operands[0], Pmode);
16379 else
16380 operands[0] = gen_lowpart (DImode, operands[0]);
16381 operands[1] = gen_lowpart (DImode, operands[1]);
16382 emit_move_insn (operands[0], operands[1]);
16383 return;
16386 /* The only non-offsettable memory we handle is push. */
16387 if (push_operand (operands[0], VOIDmode))
16388 push = 1;
16389 else
16390 gcc_assert (!MEM_P (operands[0])
16391 || offsettable_memref_p (operands[0]));
16393 nparts = ix86_split_to_parts (operands[1], part[1], GET_MODE (operands[0]));
16394 ix86_split_to_parts (operands[0], part[0], GET_MODE (operands[0]));
16396 /* When emitting push, take care for source operands on the stack. */
16397 if (push && MEM_P (operands[1])
16398 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
16400 rtx src_base = XEXP (part[1][nparts - 1], 0);
16402 /* Compensate for the stack decrement by 4. */
16403 if (!TARGET_64BIT && nparts == 3
16404 && mode == XFmode && TARGET_128BIT_LONG_DOUBLE)
16405 src_base = plus_constant (src_base, 4);
16407 /* src_base refers to the stack pointer and is
16408 automatically decreased by emitted push. */
16409 for (i = 0; i < nparts; i++)
16410 part[1][i] = change_address (part[1][i],
16411 GET_MODE (part[1][i]), src_base);
16414 /* We need to do the copy in the right order in case an address register
16415 of the source overlaps the destination. */
16416 if (REG_P (part[0][0]) && MEM_P (part[1][0]))
16418 rtx tmp;
16420 for (i = 0; i < nparts; i++)
16422 collisionparts[i]
16423 = reg_overlap_mentioned_p (part[0][i], XEXP (part[1][0], 0));
16424 if (collisionparts[i])
16425 collisions++;
16428 /* Collision in the middle part can be handled by reordering. */
16429 if (collisions == 1 && nparts == 3 && collisionparts [1])
16431 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16432 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16434 else if (collisions == 1
16435 && nparts == 4
16436 && (collisionparts [1] || collisionparts [2]))
16438 if (collisionparts [1])
16440 tmp = part[0][1]; part[0][1] = part[0][2]; part[0][2] = tmp;
16441 tmp = part[1][1]; part[1][1] = part[1][2]; part[1][2] = tmp;
16443 else
16445 tmp = part[0][2]; part[0][2] = part[0][3]; part[0][3] = tmp;
16446 tmp = part[1][2]; part[1][2] = part[1][3]; part[1][3] = tmp;
16450 /* If there are more collisions, we can't handle it by reordering.
16451 Do an lea to the last part and use only one colliding move. */
16452 else if (collisions > 1)
16454 rtx base;
16456 collisions = 1;
16458 base = part[0][nparts - 1];
16460 /* Handle the case when the last part isn't valid for lea.
16461 Happens in 64-bit mode storing the 12-byte XFmode. */
16462 if (GET_MODE (base) != Pmode)
16463 base = gen_rtx_REG (Pmode, REGNO (base));
16465 emit_insn (gen_rtx_SET (VOIDmode, base, XEXP (part[1][0], 0)));
16466 part[1][0] = replace_equiv_address (part[1][0], base);
16467 for (i = 1; i < nparts; i++)
16469 tmp = plus_constant (base, UNITS_PER_WORD * i);
16470 part[1][i] = replace_equiv_address (part[1][i], tmp);
16475 if (push)
16477 if (!TARGET_64BIT)
16479 if (nparts == 3)
16481 if (TARGET_128BIT_LONG_DOUBLE && mode == XFmode)
16482 emit_insn (gen_addsi3 (stack_pointer_rtx,
16483 stack_pointer_rtx, GEN_INT (-4)));
16484 emit_move_insn (part[0][2], part[1][2]);
16486 else if (nparts == 4)
16488 emit_move_insn (part[0][3], part[1][3]);
16489 emit_move_insn (part[0][2], part[1][2]);
16492 else
16494 /* In 64-bit mode we don't have a 32-bit push available. If this is a
16495 register, it is OK - we will just use the larger counterpart. We also
16496 retype memory - these come from an attempt to avoid a REX prefix on
16497 moving the second half of a TFmode value. */
16498 if (GET_MODE (part[1][1]) == SImode)
16500 switch (GET_CODE (part[1][1]))
16502 case MEM:
16503 part[1][1] = adjust_address (part[1][1], DImode, 0);
16504 break;
16506 case REG:
16507 part[1][1] = gen_rtx_REG (DImode, REGNO (part[1][1]));
16508 break;
16510 default:
16511 gcc_unreachable ();
16514 if (GET_MODE (part[1][0]) == SImode)
16515 part[1][0] = part[1][1];
16518 emit_move_insn (part[0][1], part[1][1]);
16519 emit_move_insn (part[0][0], part[1][0]);
16520 return;
16523 /* Choose correct order to not overwrite the source before it is copied. */
16524 if ((REG_P (part[0][0])
16525 && REG_P (part[1][1])
16526 && (REGNO (part[0][0]) == REGNO (part[1][1])
16527 || (nparts == 3
16528 && REGNO (part[0][0]) == REGNO (part[1][2]))
16529 || (nparts == 4
16530 && REGNO (part[0][0]) == REGNO (part[1][3]))))
16531 || (collisions > 0
16532 && reg_overlap_mentioned_p (part[0][0], XEXP (part[1][0], 0))))
16534 for (i = 0, j = nparts - 1; i < nparts; i++, j--)
16536 operands[2 + i] = part[0][j];
16537 operands[6 + i] = part[1][j];
16540 else
16542 for (i = 0; i < nparts; i++)
16544 operands[2 + i] = part[0][i];
16545 operands[6 + i] = part[1][i];
16549 /* If optimizing for size, attempt to locally unCSE nonzero constants. */
16550 if (optimize_insn_for_size_p ())
16552 for (j = 0; j < nparts - 1; j++)
16553 if (CONST_INT_P (operands[6 + j])
16554 && operands[6 + j] != const0_rtx
16555 && REG_P (operands[2 + j]))
16556 for (i = j; i < nparts - 1; i++)
16557 if (CONST_INT_P (operands[7 + i])
16558 && INTVAL (operands[7 + i]) == INTVAL (operands[6 + j]))
16559 operands[7 + i] = operands[2 + j];
16562 for (i = 0; i < nparts; i++)
16563 emit_move_insn (operands[2 + i], operands[6 + i]);
16565 return;
16568 /* Helper function of ix86_split_ashl used to generate an SImode/DImode
16569 left shift by a constant, either using a single shift or
16570 a sequence of add instructions. */
16572 static void
16573 ix86_expand_ashl_const (rtx operand, int count, enum machine_mode mode)
16575 if (count == 1)
16577 emit_insn ((mode == DImode
16578 ? gen_addsi3
16579 : gen_adddi3) (operand, operand, operand));
16581 else if (!optimize_insn_for_size_p ()
16582 && count * ix86_cost->add <= ix86_cost->shift_const)
16584 int i;
16585 for (i = 0; i < count; i++)
16587 emit_insn ((mode == DImode
16588 ? gen_addsi3
16589 : gen_adddi3) (operand, operand, operand));
16592 else
16593 emit_insn ((mode == DImode
16594 ? gen_ashlsi3
16595 : gen_ashldi3) (operand, operand, GEN_INT (count)));
16598 void
16599 ix86_split_ashl (rtx *operands, rtx scratch, enum machine_mode mode)
16601 rtx low[2], high[2];
16602 int count;
16603 const int single_width = mode == DImode ? 32 : 64;
16605 if (CONST_INT_P (operands[2]))
16607 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16608 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16610 if (count >= single_width)
16612 emit_move_insn (high[0], low[1]);
16613 emit_move_insn (low[0], const0_rtx);
16615 if (count > single_width)
16616 ix86_expand_ashl_const (high[0], count - single_width, mode);
16618 else
16620 if (!rtx_equal_p (operands[0], operands[1]))
16621 emit_move_insn (operands[0], operands[1]);
16622 emit_insn ((mode == DImode
16623 ? gen_x86_shld
16624 : gen_x86_64_shld) (high[0], low[0], GEN_INT (count)));
16625 ix86_expand_ashl_const (low[0], count, mode);
16627 return;
16630 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16632 if (operands[1] == const1_rtx)
16634 /* Assuming we've chosen QImode-capable registers, 1 << N
16635 can be done with two 32/64-bit shifts, no branches, no cmoves. */
16636 if (ANY_QI_REG_P (low[0]) && ANY_QI_REG_P (high[0]))
16638 rtx s, d, flags = gen_rtx_REG (CCZmode, FLAGS_REG);
16640 ix86_expand_clear (low[0]);
16641 ix86_expand_clear (high[0]);
16642 emit_insn (gen_testqi_ccz_1 (operands[2], GEN_INT (single_width)));
16644 d = gen_lowpart (QImode, low[0]);
16645 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16646 s = gen_rtx_EQ (QImode, flags, const0_rtx);
16647 emit_insn (gen_rtx_SET (VOIDmode, d, s));
16649 d = gen_lowpart (QImode, high[0]);
16650 d = gen_rtx_STRICT_LOW_PART (VOIDmode, d);
16651 s = gen_rtx_NE (QImode, flags, const0_rtx);
16652 emit_insn (gen_rtx_SET (VOIDmode, d, s));
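/* Now LOW:HIGH is 1:0 when the shift count is below single_width and
0:1 otherwise; the two ordinary shifts emitted below complete 1 << N. */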
16655 /* Otherwise, we can get the same results by manually performing
16656 a bit extract operation on bit 5/6, and then performing the two
16657 shifts. The two methods of getting 0/1 into low/high are exactly
16658 the same size. Avoiding the shift in the bit extract case helps
16659 pentium4 a bit; no one else seems to care much either way. */
16660 else
16662 rtx x;
16664 if (TARGET_PARTIAL_REG_STALL && !optimize_insn_for_size_p ())
16665 x = gen_rtx_ZERO_EXTEND (mode == DImode ? SImode : DImode, operands[2]);
16666 else
16667 x = gen_lowpart (mode == DImode ? SImode : DImode, operands[2]);
16668 emit_insn (gen_rtx_SET (VOIDmode, high[0], x));
16670 emit_insn ((mode == DImode
16671 ? gen_lshrsi3
16672 : gen_lshrdi3) (high[0], high[0],
16673 GEN_INT (mode == DImode ? 5 : 6)));
16674 emit_insn ((mode == DImode
16675 ? gen_andsi3
16676 : gen_anddi3) (high[0], high[0], const1_rtx));
16677 emit_move_insn (low[0], high[0]);
16678 emit_insn ((mode == DImode
16679 ? gen_xorsi3
16680 : gen_xordi3) (low[0], low[0], const1_rtx));
16683 emit_insn ((mode == DImode
16684 ? gen_ashlsi3
16685 : gen_ashldi3) (low[0], low[0], operands[2]));
16686 emit_insn ((mode == DImode
16687 ? gen_ashlsi3
16688 : gen_ashldi3) (high[0], high[0], operands[2]));
16689 return;
16692 if (operands[1] == constm1_rtx)
16694 /* For -1 << N, we can avoid the shld instruction, because we
16695 know that we're shifting 0...31/63 ones into a -1. */
16696 emit_move_insn (low[0], constm1_rtx);
16697 if (optimize_insn_for_size_p ())
16698 emit_move_insn (high[0], low[0]);
16699 else
16700 emit_move_insn (high[0], constm1_rtx);
16702 else
16704 if (!rtx_equal_p (operands[0], operands[1]))
16705 emit_move_insn (operands[0], operands[1]);
16707 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16708 emit_insn ((mode == DImode
16709 ? gen_x86_shld
16710 : gen_x86_64_shld) (high[0], low[0], operands[2]));
16713 emit_insn ((mode == DImode ? gen_ashlsi3 : gen_ashldi3) (low[0], low[0], operands[2]));
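/* For a runtime count of single_width or more, the halves computed above
are wrong; the shift_adj patterns fix them up, using cmov when a scratch
register is available and a conditional jump otherwise. */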
16715 if (TARGET_CMOVE && scratch)
16717 ix86_expand_clear (scratch);
16718 emit_insn ((mode == DImode
16719 ? gen_x86_shift_adj_1
16720 : gen_x86_64_shift_adj_1) (high[0], low[0], operands[2],
16721 scratch));
16723 else
16724 emit_insn ((mode == DImode
16725 ? gen_x86_shift_adj_2
16726 : gen_x86_64_shift_adj_2) (high[0], low[0], operands[2]));
16729 void
16730 ix86_split_ashr (rtx *operands, rtx scratch, enum machine_mode mode)
16732 rtx low[2], high[2];
16733 int count;
16734 const int single_width = mode == DImode ? 32 : 64;
16736 if (CONST_INT_P (operands[2]))
16738 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16739 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16741 if (count == single_width * 2 - 1)
16743 emit_move_insn (high[0], high[1]);
16744 emit_insn ((mode == DImode
16745 ? gen_ashrsi3
16746 : gen_ashrdi3) (high[0], high[0],
16747 GEN_INT (single_width - 1)));
16748 emit_move_insn (low[0], high[0]);
16751 else if (count >= single_width)
16753 emit_move_insn (low[0], high[1]);
16754 emit_move_insn (high[0], low[0]);
16755 emit_insn ((mode == DImode
16756 ? gen_ashrsi3
16757 : gen_ashrdi3) (high[0], high[0],
16758 GEN_INT (single_width - 1)));
16759 if (count > single_width)
16760 emit_insn ((mode == DImode
16761 ? gen_ashrsi3
16762 : gen_ashrdi3) (low[0], low[0],
16763 GEN_INT (count - single_width)));
16765 else
16767 if (!rtx_equal_p (operands[0], operands[1]))
16768 emit_move_insn (operands[0], operands[1]);
16769 emit_insn ((mode == DImode
16770 ? gen_x86_shrd
16771 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16772 emit_insn ((mode == DImode
16773 ? gen_ashrsi3
16774 : gen_ashrdi3) (high[0], high[0], GEN_INT (count)));
16777 else
16779 if (!rtx_equal_p (operands[0], operands[1]))
16780 emit_move_insn (operands[0], operands[1]);
16782 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16784 emit_insn ((mode == DImode
16785 ? gen_x86_shrd
16786 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16787 emit_insn ((mode == DImode
16788 ? gen_ashrsi3
16789 : gen_ashrdi3) (high[0], high[0], operands[2]));
16791 if (TARGET_CMOVE && scratch)
16793 emit_move_insn (scratch, high[0]);
16794 emit_insn ((mode == DImode
16795 ? gen_ashrsi3
16796 : gen_ashrdi3) (scratch, scratch,
16797 GEN_INT (single_width - 1)));
16798 emit_insn ((mode == DImode
16799 ? gen_x86_shift_adj_1
16800 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16801 scratch));
16803 else
16804 emit_insn ((mode == DImode
16805 ? gen_x86_shift_adj_3
16806 : gen_x86_64_shift_adj_3) (low[0], high[0], operands[2]));
16810 void
16811 ix86_split_lshr (rtx *operands, rtx scratch, enum machine_mode mode)
16813 rtx low[2], high[2];
16814 int count;
16815 const int single_width = mode == DImode ? 32 : 64;
16817 if (CONST_INT_P (operands[2]))
16819 (mode == DImode ? split_di : split_ti) (operands, 2, low, high);
16820 count = INTVAL (operands[2]) & (single_width * 2 - 1);
16822 if (count >= single_width)
16824 emit_move_insn (low[0], high[1]);
16825 ix86_expand_clear (high[0]);
16827 if (count > single_width)
16828 emit_insn ((mode == DImode
16829 ? gen_lshrsi3
16830 : gen_lshrdi3) (low[0], low[0],
16831 GEN_INT (count - single_width)));
16833 else
16835 if (!rtx_equal_p (operands[0], operands[1]))
16836 emit_move_insn (operands[0], operands[1]);
16837 emit_insn ((mode == DImode
16838 ? gen_x86_shrd
16839 : gen_x86_64_shrd) (low[0], high[0], GEN_INT (count)));
16840 emit_insn ((mode == DImode
16841 ? gen_lshrsi3
16842 : gen_lshrdi3) (high[0], high[0], GEN_INT (count)));
16845 else
16847 if (!rtx_equal_p (operands[0], operands[1]))
16848 emit_move_insn (operands[0], operands[1]);
16850 (mode == DImode ? split_di : split_ti) (operands, 1, low, high);
16852 emit_insn ((mode == DImode
16853 ? gen_x86_shrd
16854 : gen_x86_64_shrd) (low[0], high[0], operands[2]));
16855 emit_insn ((mode == DImode
16856 ? gen_lshrsi3
16857 : gen_lshrdi3) (high[0], high[0], operands[2]));
16859 /* Heh. By reversing the arguments, we can reuse this pattern. */
16860 if (TARGET_CMOVE && scratch)
16862 ix86_expand_clear (scratch);
16863 emit_insn ((mode == DImode
16864 ? gen_x86_shift_adj_1
16865 : gen_x86_64_shift_adj_1) (low[0], high[0], operands[2],
16866 scratch));
16868 else
16869 emit_insn ((mode == DImode
16870 ? gen_x86_shift_adj_2
16871 : gen_x86_64_shift_adj_2) (low[0], high[0], operands[2]));
16875 /* Predict the just-emitted jump instruction to be taken with probability PROB. */
16876 static void
16877 predict_jump (int prob)
16879 rtx insn = get_last_insn ();
16880 gcc_assert (JUMP_P (insn));
16881 add_reg_note (insn, REG_BR_PROB, GEN_INT (prob));
16884 /* Helper function for the string operations below. Test whether VARIABLE
16885 is aligned to VALUE bytes. If it is, jump to the returned label. */
16886 static rtx
16887 ix86_expand_aligntest (rtx variable, int value, bool epilogue)
16889 rtx label = gen_label_rtx ();
16890 rtx tmpcount = gen_reg_rtx (GET_MODE (variable));
16891 if (GET_MODE (variable) == DImode)
16892 emit_insn (gen_anddi3 (tmpcount, variable, GEN_INT (value)));
16893 else
16894 emit_insn (gen_andsi3 (tmpcount, variable, GEN_INT (value)));
16895 emit_cmp_and_jump_insns (tmpcount, const0_rtx, EQ, 0, GET_MODE (variable),
16896 1, label);
16897 if (epilogue)
16898 predict_jump (REG_BR_PROB_BASE * 50 / 100);
16899 else
16900 predict_jump (REG_BR_PROB_BASE * 90 / 100);
16901 return label;
16904 /* Adjust COUNTER by the VALUE. */
16905 static void
16906 ix86_adjust_counter (rtx countreg, HOST_WIDE_INT value)
16908 if (GET_MODE (countreg) == DImode)
16909 emit_insn (gen_adddi3 (countreg, countreg, GEN_INT (-value)));
16910 else
16911 emit_insn (gen_addsi3 (countreg, countreg, GEN_INT (-value)));
16914 /* Zero extend possibly SImode EXP to Pmode register. */
16916 ix86_zero_extend_to_Pmode (rtx exp)
16918 rtx r;
16919 if (GET_MODE (exp) == VOIDmode)
16920 return force_reg (Pmode, exp);
16921 if (GET_MODE (exp) == Pmode)
16922 return copy_to_mode_reg (Pmode, exp);
16923 r = gen_reg_rtx (Pmode);
16924 emit_insn (gen_zero_extendsidi2 (r, exp));
16925 return r;
16928 /* Divide COUNTREG by SCALE. */
16929 static rtx
16930 scale_counter (rtx countreg, int scale)
16932 rtx sc;
16934 if (scale == 1)
16935 return countreg;
16936 if (CONST_INT_P (countreg))
16937 return GEN_INT (INTVAL (countreg) / scale);
16938 gcc_assert (REG_P (countreg));
16940 sc = expand_simple_binop (GET_MODE (countreg), LSHIFTRT, countreg,
16941 GEN_INT (exact_log2 (scale)),
16942 NULL, 1, OPTAB_DIRECT);
16943 return sc;
16946 /* Return mode for the memcpy/memset loop counter. Prefer SImode over
16947 DImode for constant loop counts. */
16949 static enum machine_mode
16950 counter_mode (rtx count_exp)
16952 if (GET_MODE (count_exp) != VOIDmode)
16953 return GET_MODE (count_exp);
16954 if (!CONST_INT_P (count_exp))
16955 return Pmode;
16956 if (TARGET_64BIT && (INTVAL (count_exp) & ~0xffffffff))
16957 return DImode;
16958 return SImode;
16961 /* When SRCPTR is non-NULL, output a simple loop to move the memory
16962 pointed to by SRCPTR to DESTPTR in chunks of MODE, unrolled UNROLL times;
16963 the overall size is COUNT bytes. When SRCPTR is NULL, output the
16964 equivalent loop to set memory to VALUE (assumed to be in MODE).
16966 The size is rounded down to a whole number of chunks moved at once.
16967 SRCMEM and DESTMEM provide MEM rtxes to feed proper aliasing info. */
16970 static void
16971 expand_set_or_movmem_via_loop (rtx destmem, rtx srcmem,
16972 rtx destptr, rtx srcptr, rtx value,
16973 rtx count, enum machine_mode mode, int unroll,
16974 int expected_size)
16976 rtx out_label, top_label, iter, tmp;
16977 enum machine_mode iter_mode = counter_mode (count);
16978 rtx piece_size = GEN_INT (GET_MODE_SIZE (mode) * unroll);
16979 rtx piece_size_mask = GEN_INT (~((GET_MODE_SIZE (mode) * unroll) - 1));
16980 rtx size;
16981 rtx x_addr;
16982 rtx y_addr;
16983 int i;
16985 top_label = gen_label_rtx ();
16986 out_label = gen_label_rtx ();
16987 iter = gen_reg_rtx (iter_mode);
16989 size = expand_simple_binop (iter_mode, AND, count, piece_size_mask,
16990 NULL, 1, OPTAB_DIRECT);
16991 /* Those two should combine. */
16992 if (piece_size == const1_rtx)
16994 emit_cmp_and_jump_insns (size, const0_rtx, EQ, NULL_RTX, iter_mode,
16995 true, out_label);
16996 predict_jump (REG_BR_PROB_BASE * 10 / 100);
16998 emit_move_insn (iter, const0_rtx);
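/* Main loop: each iteration copies or sets UNROLL chunks of MODE and
advances ITER by the unrolled piece size until ITER reaches SIZE. */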
17000 emit_label (top_label);
17002 tmp = convert_modes (Pmode, iter_mode, iter, true);
17003 x_addr = gen_rtx_PLUS (Pmode, destptr, tmp);
17004 destmem = change_address (destmem, mode, x_addr);
17006 if (srcmem)
17008 y_addr = gen_rtx_PLUS (Pmode, srcptr, copy_rtx (tmp));
17009 srcmem = change_address (srcmem, mode, y_addr);
17011 /* When unrolling for chips that reorder memory reads and writes,
17012 we can save registers by using a single temporary.
17013 Also, using 4 temporaries is overkill in 32-bit mode. */
17014 if (!TARGET_64BIT && 0)
17016 for (i = 0; i < unroll; i++)
17018 if (i)
17020 destmem =
17021 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17022 srcmem =
17023 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17025 emit_move_insn (destmem, srcmem);
17028 else
17030 rtx tmpreg[4];
17031 gcc_assert (unroll <= 4);
17032 for (i = 0; i < unroll; i++)
17034 tmpreg[i] = gen_reg_rtx (mode);
17035 if (i)
17037 srcmem =
17038 adjust_address (copy_rtx (srcmem), mode, GET_MODE_SIZE (mode));
17040 emit_move_insn (tmpreg[i], srcmem);
17042 for (i = 0; i < unroll; i++)
17044 if (i)
17046 destmem =
17047 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17049 emit_move_insn (destmem, tmpreg[i]);
17053 else
17054 for (i = 0; i < unroll; i++)
17056 if (i)
17057 destmem =
17058 adjust_address (copy_rtx (destmem), mode, GET_MODE_SIZE (mode));
17059 emit_move_insn (destmem, value);
17062 tmp = expand_simple_binop (iter_mode, PLUS, iter, piece_size, iter,
17063 true, OPTAB_LIB_WIDEN);
17064 if (tmp != iter)
17065 emit_move_insn (iter, tmp);
17067 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
17068 true, top_label);
17069 if (expected_size != -1)
17071 expected_size /= GET_MODE_SIZE (mode) * unroll;
17072 if (expected_size == 0)
17073 predict_jump (0);
17074 else if (expected_size > REG_BR_PROB_BASE)
17075 predict_jump (REG_BR_PROB_BASE - 1);
17076 else
17077 predict_jump (REG_BR_PROB_BASE - (REG_BR_PROB_BASE + expected_size / 2) / expected_size);
17079 else
17080 predict_jump (REG_BR_PROB_BASE * 80 / 100);
17081 iter = ix86_zero_extend_to_Pmode (iter);
17082 tmp = expand_simple_binop (Pmode, PLUS, destptr, iter, destptr,
17083 true, OPTAB_LIB_WIDEN);
17084 if (tmp != destptr)
17085 emit_move_insn (destptr, tmp);
17086 if (srcptr)
17088 tmp = expand_simple_binop (Pmode, PLUS, srcptr, iter, srcptr,
17089 true, OPTAB_LIB_WIDEN);
17090 if (tmp != srcptr)
17091 emit_move_insn (srcptr, tmp);
17093 emit_label (out_label);
17096 /* Output "rep; mov" instruction.
17097 Arguments have same meaning as for previous function */
17098 static void
17099 expand_movmem_via_rep_mov (rtx destmem, rtx srcmem,
17100 rtx destptr, rtx srcptr,
17101 rtx count,
17102 enum machine_mode mode)
17104 rtx destexp;
17105 rtx srcexp;
17106 rtx countreg;
17108 /* If the size is known, it is shorter to use rep movs. */
17109 if (mode == QImode && CONST_INT_P (count)
17110 && !(INTVAL (count) & 3))
17111 mode = SImode;
17113 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17114 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17115 if (srcptr != XEXP (srcmem, 0) || GET_MODE (srcmem) != BLKmode)
17116 srcmem = adjust_automodify_address_nv (srcmem, BLKmode, srcptr, 0);
17117 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17118 if (mode != QImode)
17120 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17121 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17122 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17123 srcexp = gen_rtx_ASHIFT (Pmode, countreg,
17124 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17125 srcexp = gen_rtx_PLUS (Pmode, srcexp, srcptr);
17127 else
17129 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17130 srcexp = gen_rtx_PLUS (Pmode, srcptr, countreg);
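/* DESTEXP and SRCEXP describe the final pointer values (pointer plus
bytes copied); the rep_mov pattern uses them to express the updates of
the pointer registers. */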
17132 if (CONST_INT_P (count))
17134 count = GEN_INT (INTVAL (count)
17135 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17136 destmem = shallow_copy_rtx (destmem);
17137 srcmem = shallow_copy_rtx (srcmem);
17138 set_mem_size (destmem, count);
17139 set_mem_size (srcmem, count);
17141 else
17143 if (MEM_SIZE (destmem))
17144 set_mem_size (destmem, NULL_RTX);
17145 if (MEM_SIZE (srcmem))
17146 set_mem_size (srcmem, NULL_RTX);
17148 emit_insn (gen_rep_mov (destptr, destmem, srcptr, srcmem, countreg,
17149 destexp, srcexp));
17152 /* Output "rep; stos" instruction.
17153 Arguments have same meaning as for previous function */
17154 static void
17155 expand_setmem_via_rep_stos (rtx destmem, rtx destptr, rtx value,
17156 rtx count, enum machine_mode mode,
17157 rtx orig_value)
17159 rtx destexp;
17160 rtx countreg;
17162 if (destptr != XEXP (destmem, 0) || GET_MODE (destmem) != BLKmode)
17163 destmem = adjust_automodify_address_nv (destmem, BLKmode, destptr, 0);
17164 value = force_reg (mode, gen_lowpart (mode, value));
17165 countreg = ix86_zero_extend_to_Pmode (scale_counter (count, GET_MODE_SIZE (mode)));
17166 if (mode != QImode)
17168 destexp = gen_rtx_ASHIFT (Pmode, countreg,
17169 GEN_INT (exact_log2 (GET_MODE_SIZE (mode))));
17170 destexp = gen_rtx_PLUS (Pmode, destexp, destptr);
17172 else
17173 destexp = gen_rtx_PLUS (Pmode, destptr, countreg);
17174 if (orig_value == const0_rtx && CONST_INT_P (count))
17176 count = GEN_INT (INTVAL (count)
17177 & ~((HOST_WIDE_INT) GET_MODE_SIZE (mode) - 1));
17178 destmem = shallow_copy_rtx (destmem);
17179 set_mem_size (destmem, count);
17181 else if (MEM_SIZE (destmem))
17182 set_mem_size (destmem, NULL_RTX);
17183 emit_insn (gen_rep_stos (destptr, countreg, destmem, value, destexp));
17186 static void
17187 emit_strmov (rtx destmem, rtx srcmem,
17188 rtx destptr, rtx srcptr, enum machine_mode mode, int offset)
17190 rtx src = adjust_automodify_address_nv (srcmem, mode, srcptr, offset);
17191 rtx dest = adjust_automodify_address_nv (destmem, mode, destptr, offset);
17192 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17195 /* Output code to copy at most count & (max_size - 1) bytes from SRC to DEST. */
17196 static void
17197 expand_movmem_epilogue (rtx destmem, rtx srcmem,
17198 rtx destptr, rtx srcptr, rtx count, int max_size)
17200 rtx src, dest;
17201 if (CONST_INT_P (count))
17203 HOST_WIDE_INT countval = INTVAL (count);
17204 int offset = 0;
17206 if ((countval & 0x10) && max_size > 16)
17208 if (TARGET_64BIT)
17210 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17211 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset + 8);
17213 else
17214 gcc_unreachable ();
17215 offset += 16;
17217 if ((countval & 0x08) && max_size > 8)
17219 if (TARGET_64BIT)
17220 emit_strmov (destmem, srcmem, destptr, srcptr, DImode, offset);
17221 else
17223 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17224 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset + 4);
17226 offset += 8;
17228 if ((countval & 0x04) && max_size > 4)
17230 emit_strmov (destmem, srcmem, destptr, srcptr, SImode, offset);
17231 offset += 4;
17233 if ((countval & 0x02) && max_size > 2)
17235 emit_strmov (destmem, srcmem, destptr, srcptr, HImode, offset);
17236 offset += 2;
17238 if ((countval & 0x01) && max_size > 1)
17240 emit_strmov (destmem, srcmem, destptr, srcptr, QImode, offset);
17241 offset += 1;
17243 return;
17245 if (max_size > 8)
17247 count = expand_simple_binop (GET_MODE (count), AND, count, GEN_INT (max_size - 1),
17248 count, 1, OPTAB_DIRECT);
17249 expand_set_or_movmem_via_loop (destmem, srcmem, destptr, srcptr, NULL,
17250 count, QImode, 1, 4);
17251 return;
17254 /* When there are stringops, we can cheaply increase dest and src pointers.
17255 Otherwise we save code size by maintaining an offset (zero is readily
17256 available from the preceding rep operation) and using x86 addressing modes.
17258 if (TARGET_SINGLE_STRINGOP)
17260 if (max_size > 4)
17262 rtx label = ix86_expand_aligntest (count, 4, true);
17263 src = change_address (srcmem, SImode, srcptr);
17264 dest = change_address (destmem, SImode, destptr);
17265 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17266 emit_label (label);
17267 LABEL_NUSES (label) = 1;
17269 if (max_size > 2)
17271 rtx label = ix86_expand_aligntest (count, 2, true);
17272 src = change_address (srcmem, HImode, srcptr);
17273 dest = change_address (destmem, HImode, destptr);
17274 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17275 emit_label (label);
17276 LABEL_NUSES (label) = 1;
17278 if (max_size > 1)
17280 rtx label = ix86_expand_aligntest (count, 1, true);
17281 src = change_address (srcmem, QImode, srcptr);
17282 dest = change_address (destmem, QImode, destptr);
17283 emit_insn (gen_strmov (destptr, dest, srcptr, src));
17284 emit_label (label);
17285 LABEL_NUSES (label) = 1;
17288 else
17290 rtx offset = force_reg (Pmode, const0_rtx);
17291 rtx tmp;
17293 if (max_size > 4)
17295 rtx label = ix86_expand_aligntest (count, 4, true);
17296 src = change_address (srcmem, SImode, srcptr);
17297 dest = change_address (destmem, SImode, destptr);
17298 emit_move_insn (dest, src);
17299 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (4), NULL,
17300 true, OPTAB_LIB_WIDEN);
17301 if (tmp != offset)
17302 emit_move_insn (offset, tmp);
17303 emit_label (label);
17304 LABEL_NUSES (label) = 1;
17306 if (max_size > 2)
17308 rtx label = ix86_expand_aligntest (count, 2, true);
17309 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17310 src = change_address (srcmem, HImode, tmp);
17311 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17312 dest = change_address (destmem, HImode, tmp);
17313 emit_move_insn (dest, src);
17314 tmp = expand_simple_binop (Pmode, PLUS, offset, GEN_INT (2), tmp,
17315 true, OPTAB_LIB_WIDEN);
17316 if (tmp != offset)
17317 emit_move_insn (offset, tmp);
17318 emit_label (label);
17319 LABEL_NUSES (label) = 1;
17321 if (max_size > 1)
17323 rtx label = ix86_expand_aligntest (count, 1, true);
17324 tmp = gen_rtx_PLUS (Pmode, srcptr, offset);
17325 src = change_address (srcmem, QImode, tmp);
17326 tmp = gen_rtx_PLUS (Pmode, destptr, offset);
17327 dest = change_address (destmem, QImode, tmp);
17328 emit_move_insn (dest, src);
17329 emit_label (label);
17330 LABEL_NUSES (label) = 1;
17335 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17336 static void
17337 expand_setmem_epilogue_via_loop (rtx destmem, rtx destptr, rtx value,
17338 rtx count, int max_size)
17340 count =
17341 expand_simple_binop (counter_mode (count), AND, count,
17342 GEN_INT (max_size - 1), count, 1, OPTAB_DIRECT);
17343 expand_set_or_movmem_via_loop (destmem, NULL, destptr, NULL,
17344 gen_lowpart (QImode, value), count, QImode,
17345 1, max_size / 2);
17348 /* Output code to set at most count & (max_size - 1) bytes starting at DEST. */
17349 static void
17350 expand_setmem_epilogue (rtx destmem, rtx destptr, rtx value, rtx count, int max_size)
17352 rtx dest;
17354 if (CONST_INT_P (count))
17356 HOST_WIDE_INT countval = INTVAL (count);
17357 int offset = 0;
17359 if ((countval & 0x10) && max_size > 16)
17361 if (TARGET_64BIT)
17363 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17364 emit_insn (gen_strset (destptr, dest, value));
17365 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset + 8);
17366 emit_insn (gen_strset (destptr, dest, value));
17368 else
17369 gcc_unreachable ();
17370 offset += 16;
17372 if ((countval & 0x08) && max_size > 8)
17374 if (TARGET_64BIT)
17376 dest = adjust_automodify_address_nv (destmem, DImode, destptr, offset);
17377 emit_insn (gen_strset (destptr, dest, value));
17379 else
17381 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17382 emit_insn (gen_strset (destptr, dest, value));
17383 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset + 4);
17384 emit_insn (gen_strset (destptr, dest, value));
17386 offset += 8;
17388 if ((countval & 0x04) && max_size > 4)
17390 dest = adjust_automodify_address_nv (destmem, SImode, destptr, offset);
17391 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17392 offset += 4;
17394 if ((countval & 0x02) && max_size > 2)
17396 dest = adjust_automodify_address_nv (destmem, HImode, destptr, offset);
17397 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17398 offset += 2;
17400 if ((countval & 0x01) && max_size > 1)
17402 dest = adjust_automodify_address_nv (destmem, QImode, destptr, offset);
17403 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17404 offset += 1;
17406 return;
17408 if (max_size > 32)
17410 expand_setmem_epilogue_via_loop (destmem, destptr, value, count, max_size);
17411 return;
17413 if (max_size > 16)
17415 rtx label = ix86_expand_aligntest (count, 16, true);
17416 if (TARGET_64BIT)
17418 dest = change_address (destmem, DImode, destptr);
17419 emit_insn (gen_strset (destptr, dest, value));
17420 emit_insn (gen_strset (destptr, dest, value));
17422 else
17424 dest = change_address (destmem, SImode, destptr);
17425 emit_insn (gen_strset (destptr, dest, value));
17426 emit_insn (gen_strset (destptr, dest, value));
17427 emit_insn (gen_strset (destptr, dest, value));
17428 emit_insn (gen_strset (destptr, dest, value));
17430 emit_label (label);
17431 LABEL_NUSES (label) = 1;
17433 if (max_size > 8)
17435 rtx label = ix86_expand_aligntest (count, 8, true);
17436 if (TARGET_64BIT)
17438 dest = change_address (destmem, DImode, destptr);
17439 emit_insn (gen_strset (destptr, dest, value));
17441 else
17443 dest = change_address (destmem, SImode, destptr);
17444 emit_insn (gen_strset (destptr, dest, value));
17445 emit_insn (gen_strset (destptr, dest, value));
17447 emit_label (label);
17448 LABEL_NUSES (label) = 1;
17450 if (max_size > 4)
17452 rtx label = ix86_expand_aligntest (count, 4, true);
17453 dest = change_address (destmem, SImode, destptr);
17454 emit_insn (gen_strset (destptr, dest, gen_lowpart (SImode, value)));
17455 emit_label (label);
17456 LABEL_NUSES (label) = 1;
17458 if (max_size > 2)
17460 rtx label = ix86_expand_aligntest (count, 2, true);
17461 dest = change_address (destmem, HImode, destptr);
17462 emit_insn (gen_strset (destptr, dest, gen_lowpart (HImode, value)));
17463 emit_label (label);
17464 LABEL_NUSES (label) = 1;
17466 if (max_size > 1)
17468 rtx label = ix86_expand_aligntest (count, 1, true);
17469 dest = change_address (destmem, QImode, destptr);
17470 emit_insn (gen_strset (destptr, dest, gen_lowpart (QImode, value)));
17471 emit_label (label);
17472 LABEL_NUSES (label) = 1;
17476 /* Copy enough from SRC to DEST to align DEST, known to be aligned by ALIGN, to
17477 DESIRED_ALIGNMENT. */
17478 static void
17479 expand_movmem_prologue (rtx destmem, rtx srcmem,
17480 rtx destptr, rtx srcptr, rtx count,
17481 int align, int desired_alignment)
17483 if (align <= 1 && desired_alignment > 1)
17485 rtx label = ix86_expand_aligntest (destptr, 1, false);
17486 srcmem = change_address (srcmem, QImode, srcptr);
17487 destmem = change_address (destmem, QImode, destptr);
17488 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17489 ix86_adjust_counter (count, 1);
17490 emit_label (label);
17491 LABEL_NUSES (label) = 1;
17493 if (align <= 2 && desired_alignment > 2)
17495 rtx label = ix86_expand_aligntest (destptr, 2, false);
17496 srcmem = change_address (srcmem, HImode, srcptr);
17497 destmem = change_address (destmem, HImode, destptr);
17498 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17499 ix86_adjust_counter (count, 2);
17500 emit_label (label);
17501 LABEL_NUSES (label) = 1;
17503 if (align <= 4 && desired_alignment > 4)
17505 rtx label = ix86_expand_aligntest (destptr, 4, false);
17506 srcmem = change_address (srcmem, SImode, srcptr);
17507 destmem = change_address (destmem, SImode, destptr);
17508 emit_insn (gen_strmov (destptr, destmem, srcptr, srcmem));
17509 ix86_adjust_counter (count, 4);
17510 emit_label (label);
17511 LABEL_NUSES (label) = 1;
17513 gcc_assert (desired_alignment <= 8);
17516 /* Copy enough from SRC to DST to align DST to DESIRED_ALIGN.
17517 ALIGN_BYTES is how many bytes need to be copied. */
17518 static rtx
17519 expand_constant_movmem_prologue (rtx dst, rtx *srcp, rtx destreg, rtx srcreg,
17520 int desired_align, int align_bytes)
17522 rtx src = *srcp;
17523 rtx src_size, dst_size;
17524 int off = 0;
17525 int src_align_bytes = get_mem_align_offset (src, desired_align * BITS_PER_UNIT);
17526 if (src_align_bytes >= 0)
17527 src_align_bytes = desired_align - src_align_bytes;
17528 src_size = MEM_SIZE (src);
17529 dst_size = MEM_SIZE (dst);
17530 if (align_bytes & 1)
17532 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17533 src = adjust_automodify_address_nv (src, QImode, srcreg, 0);
17534 off = 1;
17535 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17537 if (align_bytes & 2)
17539 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17540 src = adjust_automodify_address_nv (src, HImode, srcreg, off);
17541 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17542 set_mem_align (dst, 2 * BITS_PER_UNIT);
17543 if (src_align_bytes >= 0
17544 && (src_align_bytes & 1) == (align_bytes & 1)
17545 && MEM_ALIGN (src) < 2 * BITS_PER_UNIT)
17546 set_mem_align (src, 2 * BITS_PER_UNIT);
17547 off = 2;
17548 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17550 if (align_bytes & 4)
17552 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17553 src = adjust_automodify_address_nv (src, SImode, srcreg, off);
17554 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17555 set_mem_align (dst, 4 * BITS_PER_UNIT);
17556 if (src_align_bytes >= 0)
17558 unsigned int src_align = 0;
17559 if ((src_align_bytes & 3) == (align_bytes & 3))
17560 src_align = 4;
17561 else if ((src_align_bytes & 1) == (align_bytes & 1))
17562 src_align = 2;
17563 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17564 set_mem_align (src, src_align * BITS_PER_UNIT);
17566 off = 4;
17567 emit_insn (gen_strmov (destreg, dst, srcreg, src));
17569 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17570 src = adjust_automodify_address_nv (src, BLKmode, srcreg, off);
17571 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17572 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17573 if (src_align_bytes >= 0)
17575 unsigned int src_align = 0;
17576 if ((src_align_bytes & 7) == (align_bytes & 7))
17577 src_align = 8;
17578 else if ((src_align_bytes & 3) == (align_bytes & 3))
17579 src_align = 4;
17580 else if ((src_align_bytes & 1) == (align_bytes & 1))
17581 src_align = 2;
17582 if (src_align > (unsigned int) desired_align)
17583 src_align = desired_align;
17584 if (MEM_ALIGN (src) < src_align * BITS_PER_UNIT)
17585 set_mem_align (src, src_align * BITS_PER_UNIT);
17587 if (dst_size)
17588 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17589 if (src_size)
17590 set_mem_size (src, GEN_INT (INTVAL (src_size) - align_bytes));
17591 *srcp = src;
17592 return dst;
17595 /* Store enough into DEST to align DEST, known to be aligned by ALIGN, to
17596 DESIRED_ALIGNMENT. */
17597 static void
17598 expand_setmem_prologue (rtx destmem, rtx destptr, rtx value, rtx count,
17599 int align, int desired_alignment)
17601 if (align <= 1 && desired_alignment > 1)
17603 rtx label = ix86_expand_aligntest (destptr, 1, false);
17604 destmem = change_address (destmem, QImode, destptr);
17605 emit_insn (gen_strset (destptr, destmem, gen_lowpart (QImode, value)));
17606 ix86_adjust_counter (count, 1);
17607 emit_label (label);
17608 LABEL_NUSES (label) = 1;
17610 if (align <= 2 && desired_alignment > 2)
17612 rtx label = ix86_expand_aligntest (destptr, 2, false);
17613 destmem = change_address (destmem, HImode, destptr);
17614 emit_insn (gen_strset (destptr, destmem, gen_lowpart (HImode, value)));
17615 ix86_adjust_counter (count, 2);
17616 emit_label (label);
17617 LABEL_NUSES (label) = 1;
17619 if (align <= 4 && desired_alignment > 4)
17621 rtx label = ix86_expand_aligntest (destptr, 4, false);
17622 destmem = change_address (destmem, SImode, destptr);
17623 emit_insn (gen_strset (destptr, destmem, gen_lowpart (SImode, value)));
17624 ix86_adjust_counter (count, 4);
17625 emit_label (label);
17626 LABEL_NUSES (label) = 1;
17628 gcc_assert (desired_alignment <= 8);
17631 /* Store enough into DST to align DST, known to be aligned by ALIGN, to
17632 DESIRED_ALIGN. ALIGN_BYTES is how many bytes need to be stored. */
17633 static rtx
17634 expand_constant_setmem_prologue (rtx dst, rtx destreg, rtx value,
17635 int desired_align, int align_bytes)
17637 int off = 0;
17638 rtx dst_size = MEM_SIZE (dst);
17639 if (align_bytes & 1)
17641 dst = adjust_automodify_address_nv (dst, QImode, destreg, 0);
17642 off = 1;
17643 emit_insn (gen_strset (destreg, dst,
17644 gen_lowpart (QImode, value)));
17646 if (align_bytes & 2)
17648 dst = adjust_automodify_address_nv (dst, HImode, destreg, off);
17649 if (MEM_ALIGN (dst) < 2 * BITS_PER_UNIT)
17650 set_mem_align (dst, 2 * BITS_PER_UNIT);
17651 off = 2;
17652 emit_insn (gen_strset (destreg, dst,
17653 gen_lowpart (HImode, value)));
17655 if (align_bytes & 4)
17657 dst = adjust_automodify_address_nv (dst, SImode, destreg, off);
17658 if (MEM_ALIGN (dst) < 4 * BITS_PER_UNIT)
17659 set_mem_align (dst, 4 * BITS_PER_UNIT);
17660 off = 4;
17661 emit_insn (gen_strset (destreg, dst,
17662 gen_lowpart (SImode, value)));
17664 dst = adjust_automodify_address_nv (dst, BLKmode, destreg, off);
17665 if (MEM_ALIGN (dst) < (unsigned int) desired_align * BITS_PER_UNIT)
17666 set_mem_align (dst, desired_align * BITS_PER_UNIT);
17667 if (dst_size)
17668 set_mem_size (dst, GEN_INT (INTVAL (dst_size) - align_bytes));
17669 return dst;
17672 /* Given COUNT and EXPECTED_SIZE, decide on codegen of string operation. */
17673 static enum stringop_alg
17674 decide_alg (HOST_WIDE_INT count, HOST_WIDE_INT expected_size, bool memset,
17675 int *dynamic_check)
17677 const struct stringop_algs * algs;
17678 bool optimize_for_speed;
17679 /* Algorithms using the rep prefix want at least edi and ecx;
17680 additionally, memset wants eax and memcpy wants esi. Don't
17681 consider such algorithms if the user has appropriated those
17682 registers for their own purposes. */
17683 bool rep_prefix_usable = !(fixed_regs[CX_REG] || fixed_regs[DI_REG]
17684 || (memset
17685 ? fixed_regs[AX_REG] : fixed_regs[SI_REG]));
17687 #define ALG_USABLE_P(alg) (rep_prefix_usable \
17688 || (alg != rep_prefix_1_byte \
17689 && alg != rep_prefix_4_byte \
17690 && alg != rep_prefix_8_byte))
17691 const struct processor_costs *cost;
17693 /* Even if the string operation call is cold, we still might spend a lot
17694 of time processing large blocks. */
17695 if (optimize_function_for_size_p (cfun)
17696 || (optimize_insn_for_size_p ()
17697 && expected_size != -1 && expected_size < 256))
17698 optimize_for_speed = false;
17699 else
17700 optimize_for_speed = true;
17702 cost = optimize_for_speed ? ix86_cost : &ix86_size_cost;
17704 *dynamic_check = -1;
17705 if (memset)
17706 algs = &cost->memset[TARGET_64BIT != 0];
17707 else
17708 algs = &cost->memcpy[TARGET_64BIT != 0];
17709 if (stringop_alg != no_stringop && ALG_USABLE_P (stringop_alg))
17710 return stringop_alg;
17711 /* rep; movq or rep; movl is the smallest variant. */
17712 else if (!optimize_for_speed)
17714 if (!count || (count & 3))
17715 return rep_prefix_usable ? rep_prefix_1_byte : loop_1_byte;
17716 else
17717 return rep_prefix_usable ? rep_prefix_4_byte : loop;
17719 /* Very tiny blocks are best handled via the loop; REP is expensive to set up. */
17721 else if (expected_size != -1 && expected_size < 4)
17722 return loop_1_byte;
17723 else if (expected_size != -1)
17725 unsigned int i;
17726 enum stringop_alg alg = libcall;
17727 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17729 /* We get here if the algorithms that were not libcall-based
17730 were rep-prefix based and we are unable to use rep prefixes
17731 based on global register usage. Break out of the loop and
17732 use the heuristic below. */
17733 if (algs->size[i].max == 0)
17734 break;
17735 if (algs->size[i].max >= expected_size || algs->size[i].max == -1)
17737 enum stringop_alg candidate = algs->size[i].alg;
17739 if (candidate != libcall && ALG_USABLE_P (candidate))
17740 alg = candidate;
17741 /* Honor TARGET_INLINE_ALL_STRINGOPS by picking
17742 last non-libcall inline algorithm. */
17743 if (TARGET_INLINE_ALL_STRINGOPS)
17745 /* When the current size is best copied by a libcall,
17746 but we are still forced to inline, run the heuristic below
17747 that will pick code for medium sized blocks. */
17748 if (alg != libcall)
17749 return alg;
17750 break;
17752 else if (ALG_USABLE_P (candidate))
17753 return candidate;
17756 gcc_assert (TARGET_INLINE_ALL_STRINGOPS || !rep_prefix_usable);
17758 /* When asked to inline the call anyway, try to pick a meaningful choice.
17759 We look for the maximal size of block that is faster to copy by hand and
17760 take blocks of at most that size, guessing that the average size will
17761 be roughly half of the block.
17763 If this turns out to be bad, we might simply specify the preferred
17764 choice in ix86_costs. */
17765 if ((TARGET_INLINE_ALL_STRINGOPS || TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17766 && (algs->unknown_size == libcall || !ALG_USABLE_P (algs->unknown_size)))
17768 int max = -1;
17769 enum stringop_alg alg;
17770 int i;
17771 bool any_alg_usable_p = true;
17773 for (i = 0; i < MAX_STRINGOP_ALGS; i++)
17775 enum stringop_alg candidate = algs->size[i].alg;
17776 any_alg_usable_p = any_alg_usable_p && ALG_USABLE_P (candidate);
17778 if (candidate != libcall && candidate
17779 && ALG_USABLE_P (candidate))
17780 max = algs->size[i].max;
17782 /* If there aren't any usable algorithms, then recursing on
17783 smaller sizes isn't going to find anything. Just return the
17784 simple byte-at-a-time copy loop. */
17785 if (!any_alg_usable_p)
17787 /* Pick something reasonable. */
17788 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17789 *dynamic_check = 128;
17790 return loop_1_byte;
17792 if (max == -1)
17793 max = 4096;
17794 alg = decide_alg (count, max / 2, memset, dynamic_check);
17795 gcc_assert (*dynamic_check == -1);
17796 gcc_assert (alg != libcall);
17797 if (TARGET_INLINE_STRINGOPS_DYNAMICALLY)
17798 *dynamic_check = max;
17799 return alg;
17801 return ALG_USABLE_P (algs->unknown_size) ? algs->unknown_size : libcall;
17802 #undef ALG_USABLE_P
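/* An illustrative sketch (not part of GCC) of how a table such as
   ix86_cost->memcpy[] is consulted by decide_alg above: each entry gives the
   largest block size its algorithm should handle, a max of -1 acts as a
   catch-all, and a max of 0 terminates the table.  All names below are
   hypothetical simplifications.  */
#if 0
enum example_alg { EX_LOOP_1_BYTE, EX_REP_1_BYTE, EX_REP_4_BYTE, EX_LIBCALL };

struct example_entry { long max; enum example_alg alg; };

static enum example_alg
example_decide (long expected_size, const struct example_entry *table)
{
  int i;
  for (i = 0; table[i].max != 0; i++)
    if (expected_size <= table[i].max || table[i].max == -1)
      return table[i].alg;
  return EX_LIBCALL;
}
#endif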
17805 /* Decide on alignment. We know that the operand is already aligned to ALIGN
17806 (ALIGN can be based on profile feedback and thus it is not 100% guaranteed). */
17807 static int
17808 decide_alignment (int align,
17809 enum stringop_alg alg,
17810 int expected_size)
17812 int desired_align = 0;
17813 switch (alg)
17815 case no_stringop:
17816 gcc_unreachable ();
17817 case loop:
17818 case unrolled_loop:
17819 desired_align = GET_MODE_SIZE (Pmode);
17820 break;
17821 case rep_prefix_8_byte:
17822 desired_align = 8;
17823 break;
17824 case rep_prefix_4_byte:
17825 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
17826 copying a whole cache line at once. */
17827 if (TARGET_PENTIUMPRO)
17828 desired_align = 8;
17829 else
17830 desired_align = 4;
17831 break;
17832 case rep_prefix_1_byte:
17833 /* PentiumPro has special logic triggering for 8 byte aligned blocks,
17834 copying a whole cache line at once. */
17835 if (TARGET_PENTIUMPRO)
17836 desired_align = 8;
17837 else
17838 desired_align = 1;
17839 break;
17840 case loop_1_byte:
17841 desired_align = 1;
17842 break;
17843 case libcall:
17844 return 0;
17847 if (optimize_size)
17848 desired_align = 1;
17849 if (desired_align < align)
17850 desired_align = align;
17851 if (expected_size != -1 && expected_size < 4)
17852 desired_align = align;
17853 return desired_align;
17856 /* Return the smallest power of 2 greater than VAL. */
17857 static int
17858 smallest_pow2_greater_than (int val)
17860 int ret = 1;
17861 while (ret <= val)
17862 ret <<= 1;
17863 return ret;
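/* Note (illustrative): the result is strictly greater than VAL, so a value
   that is already a power of two is rounded up to the next one:
     smallest_pow2_greater_than (0) == 1
     smallest_pow2_greater_than (7) == 8
     smallest_pow2_greater_than (8) == 16
   The callers below rely on this to turn EPILOGUE_SIZE_NEEDED into a power
   of two that covers the computed byte count.  */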
17866 /* Expand string move (memcpy) operation. Use i386 string operations when
17867 profitable. expand_setmem contains similar code. The code depends upon
17868 architecture, block size and alignment, but always has the same
17869 overall structure:
17871 1) Prologue guard: Conditional that jumps up to epilogues for small
17872 blocks that can be handled by epilogue alone. This is faster but
17873 also needed for correctness, since the prologue assumes the block is larger
17874 than the desired alignment.
17876 Optional dynamic check for size and libcall for large
17877 blocks is emitted here too, with -minline-stringops-dynamically.
17879 2) Prologue: copy first few bytes in order to get destination aligned
17880 to DESIRED_ALIGN. It is emitted only when ALIGN is less than
17881 DESIRED_ALIGN and up to DESIRED_ALIGN - ALIGN bytes can be copied.
17882 We emit either a jump tree on power of two sized blocks, or a byte loop.
17884 3) Main body: the copying loop itself, copying in SIZE_NEEDED chunks
17885 with specified algorithm.
17887 4) Epilogue: code copying tail of the block that is too small to be
17888 handled by main body (or up to size guarded by prologue guard). */
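/* An illustrative plain-C sketch (not part of GCC) of the four-step shape
   described above, using a hypothetical 8-byte main loop; the emitted RTL
   follows the same outline, with the prologue guard jumping straight to the
   epilogue for small blocks.  */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void
example_memcpy_shape (unsigned char *dst, const unsigned char *src, size_t n)
{
  /* 1) Prologue guard: small blocks are handled by the epilogue alone.  */
  if (n >= 16)
    {
      /* 2) Prologue: align the destination to 8 bytes.  */
      while ((uintptr_t) dst & 7)
        *dst++ = *src++, n--;
      /* 3) Main body: copy in 8-byte chunks.  */
      for (; n >= 8; n -= 8, dst += 8, src += 8)
        memcpy (dst, src, 8);
    }
  /* 4) Epilogue: copy the tail that is smaller than one chunk.  */
  while (n--)
    *dst++ = *src++;
}
#endif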
17891 ix86_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
17892 rtx expected_align_exp, rtx expected_size_exp)
17894 rtx destreg;
17895 rtx srcreg;
17896 rtx label = NULL;
17897 rtx tmp;
17898 rtx jump_around_label = NULL;
17899 HOST_WIDE_INT align = 1;
17900 unsigned HOST_WIDE_INT count = 0;
17901 HOST_WIDE_INT expected_size = -1;
17902 int size_needed = 0, epilogue_size_needed;
17903 int desired_align = 0, align_bytes = 0;
17904 enum stringop_alg alg;
17905 int dynamic_check;
17906 bool need_zero_guard = false;
17908 if (CONST_INT_P (align_exp))
17909 align = INTVAL (align_exp);
17910 /* i386 can do misaligned access at reasonably increased cost. */
17911 if (CONST_INT_P (expected_align_exp)
17912 && INTVAL (expected_align_exp) > align)
17913 align = INTVAL (expected_align_exp);
17914 /* ALIGN is the minimum of destination and source alignment, but we care here
17915 just about destination alignment. */
17916 else if (MEM_ALIGN (dst) > (unsigned HOST_WIDE_INT) align * BITS_PER_UNIT)
17917 align = MEM_ALIGN (dst) / BITS_PER_UNIT;
17919 if (CONST_INT_P (count_exp))
17920 count = expected_size = INTVAL (count_exp);
17921 if (CONST_INT_P (expected_size_exp) && count == 0)
17922 expected_size = INTVAL (expected_size_exp);
17924 /* Make sure we don't need to care about overflow later on. */
17925 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
17926 return 0;
17928 /* Step 0: Decide on preferred algorithm, desired alignment and
17929 size of chunks to be copied by main loop. */
17931 alg = decide_alg (count, expected_size, false, &dynamic_check);
17932 desired_align = decide_alignment (align, alg, expected_size);
17934 if (!TARGET_ALIGN_STRINGOPS)
17935 align = desired_align;
17937 if (alg == libcall)
17938 return 0;
17939 gcc_assert (alg != no_stringop);
17940 if (!count)
17941 count_exp = copy_to_mode_reg (GET_MODE (count_exp), count_exp);
17942 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
17943 srcreg = copy_to_mode_reg (Pmode, XEXP (src, 0));
17944 switch (alg)
17946 case libcall:
17947 case no_stringop:
17948 gcc_unreachable ();
17949 case loop:
17950 need_zero_guard = true;
17951 size_needed = GET_MODE_SIZE (Pmode);
17952 break;
17953 case unrolled_loop:
17954 need_zero_guard = true;
17955 size_needed = GET_MODE_SIZE (Pmode) * (TARGET_64BIT ? 4 : 2);
17956 break;
17957 case rep_prefix_8_byte:
17958 size_needed = 8;
17959 break;
17960 case rep_prefix_4_byte:
17961 size_needed = 4;
17962 break;
17963 case rep_prefix_1_byte:
17964 size_needed = 1;
17965 break;
17966 case loop_1_byte:
17967 need_zero_guard = true;
17968 size_needed = 1;
17969 break;
17972 epilogue_size_needed = size_needed;
17974 /* Step 1: Prologue guard. */
17976 /* Alignment code needs count to be in register. */
17977 if (CONST_INT_P (count_exp) && desired_align > align)
17979 if (INTVAL (count_exp) > desired_align
17980 && INTVAL (count_exp) > size_needed)
17982 align_bytes
17983 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
17984 if (align_bytes <= 0)
17985 align_bytes = 0;
17986 else
17987 align_bytes = desired_align - align_bytes;
17989 if (align_bytes == 0)
17990 count_exp = force_reg (counter_mode (count_exp), count_exp);
17992 gcc_assert (desired_align >= 1 && align >= 1);
17994 /* Ensure that alignment prologue won't copy past end of block. */
17995 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
17997 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
17998 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
17999 Make sure it is a power of 2. */
18000 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18002 if (count)
18004 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18006 /* If main algorithm works on QImode, no epilogue is needed.
18007 For small sizes just don't align anything. */
18008 if (size_needed == 1)
18009 desired_align = align;
18010 else
18011 goto epilogue;
18014 else
18016 label = gen_label_rtx ();
18017 emit_cmp_and_jump_insns (count_exp,
18018 GEN_INT (epilogue_size_needed),
18019 LTU, 0, counter_mode (count_exp), 1, label);
18020 if (expected_size == -1 || expected_size < epilogue_size_needed)
18021 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18022 else
18023 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18027 /* Emit code to decide on runtime whether library call or inline should be
18028 used. */
18029 if (dynamic_check != -1)
18031 if (CONST_INT_P (count_exp))
18033 if (UINTVAL (count_exp) >= (unsigned HOST_WIDE_INT)dynamic_check)
18035 emit_block_move_via_libcall (dst, src, count_exp, false);
18036 count_exp = const0_rtx;
18037 goto epilogue;
18040 else
18042 rtx hot_label = gen_label_rtx ();
18043 jump_around_label = gen_label_rtx ();
18044 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18045 LEU, 0, GET_MODE (count_exp), 1, hot_label);
18046 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18047 emit_block_move_via_libcall (dst, src, count_exp, false);
18048 emit_jump (jump_around_label);
18049 emit_label (hot_label);
18053 /* Step 2: Alignment prologue. */
18055 if (desired_align > align)
18057 if (align_bytes == 0)
18059 /* Except for the first move in epilogue, we no longer know
18060 the constant offset in aliasing info.  It doesn't seem worth
18061 the pain to maintain it for the first move, so throw away
18062 the info early. */
18063 src = change_address (src, BLKmode, srcreg);
18064 dst = change_address (dst, BLKmode, destreg);
18065 expand_movmem_prologue (dst, src, destreg, srcreg, count_exp, align,
18066 desired_align);
18068 else
18070 /* If we know how many bytes need to be stored before dst is
18071 sufficiently aligned, maintain aliasing info accurately. */
18072 dst = expand_constant_movmem_prologue (dst, &src, destreg, srcreg,
18073 desired_align, align_bytes);
18074 count_exp = plus_constant (count_exp, -align_bytes);
18075 count -= align_bytes;
18077 if (need_zero_guard
18078 && (count < (unsigned HOST_WIDE_INT) size_needed
18079 || (align_bytes == 0
18080 && count < ((unsigned HOST_WIDE_INT) size_needed
18081 + desired_align - align))))
18083 /* It is possible that we copied enough so the main loop will not
18084 execute. */
18085 gcc_assert (size_needed > 1);
18086 if (label == NULL_RTX)
18087 label = gen_label_rtx ();
18088 emit_cmp_and_jump_insns (count_exp,
18089 GEN_INT (size_needed),
18090 LTU, 0, counter_mode (count_exp), 1, label);
18091 if (expected_size == -1
18092 || expected_size < (desired_align - align) / 2 + size_needed)
18093 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18094 else
18095 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18098 if (label && size_needed == 1)
18100 emit_label (label);
18101 LABEL_NUSES (label) = 1;
18102 label = NULL;
18103 epilogue_size_needed = 1;
18105 else if (label == NULL_RTX)
18106 epilogue_size_needed = size_needed;
18108 /* Step 3: Main loop. */
18110 switch (alg)
18112 case libcall:
18113 case no_stringop:
18114 gcc_unreachable ();
18115 case loop_1_byte:
18116 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18117 count_exp, QImode, 1, expected_size);
18118 break;
18119 case loop:
18120 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18121 count_exp, Pmode, 1, expected_size);
18122 break;
18123 case unrolled_loop:
18124 /* Unroll only by factor of 2 in 32bit mode, since we don't have enough
18125 registers for 4 temporaries anyway. */
18126 expand_set_or_movmem_via_loop (dst, src, destreg, srcreg, NULL,
18127 count_exp, Pmode, TARGET_64BIT ? 4 : 2,
18128 expected_size);
18129 break;
18130 case rep_prefix_8_byte:
18131 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18132 DImode);
18133 break;
18134 case rep_prefix_4_byte:
18135 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18136 SImode);
18137 break;
18138 case rep_prefix_1_byte:
18139 expand_movmem_via_rep_mov (dst, src, destreg, srcreg, count_exp,
18140 QImode);
18141 break;
18143 /* Properly adjust the offsets of the src and dest memory for aliasing. */
18144 if (CONST_INT_P (count_exp))
18146 src = adjust_automodify_address_nv (src, BLKmode, srcreg,
18147 (count / size_needed) * size_needed);
18148 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18149 (count / size_needed) * size_needed);
18151 else
18153 src = change_address (src, BLKmode, srcreg);
18154 dst = change_address (dst, BLKmode, destreg);
18157 /* Step 4: Epilogue to copy the remaining bytes. */
18158 epilogue:
18159 if (label)
18161 /* When the main loop is done, COUNT_EXP might hold the original count,
18162 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18163 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18164 bytes.  Compensate if needed. */
18166 if (size_needed < epilogue_size_needed)
18168 tmp =
18169 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18170 GEN_INT (size_needed - 1), count_exp, 1,
18171 OPTAB_DIRECT);
18172 if (tmp != count_exp)
18173 emit_move_insn (count_exp, tmp);
18175 emit_label (label);
18176 LABEL_NUSES (label) = 1;
18179 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18180 expand_movmem_epilogue (dst, src, destreg, srcreg, count_exp,
18181 epilogue_size_needed);
18182 if (jump_around_label)
18183 emit_label (jump_around_label);
18184 return 1;
18187 /* Helper function for memset.  For a QImode value 0xXY produce
18188 0xXYXYXYXY of the width specified by MODE.  This is essentially
18189 a multiplication by 0x01010101 (0x0101010101010101 for DImode), but we can do slightly better than
18190 synth_mult by unwinding the sequence by hand on CPUs with
18191 slow multiply. */
18192 static rtx
18193 promote_duplicated_reg (enum machine_mode mode, rtx val)
18195 enum machine_mode valmode = GET_MODE (val);
18196 rtx tmp;
18197 int nops = mode == DImode ? 3 : 2;
18199 gcc_assert (mode == SImode || mode == DImode);
18200 if (val == const0_rtx)
18201 return copy_to_mode_reg (mode, const0_rtx);
18202 if (CONST_INT_P (val))
18204 HOST_WIDE_INT v = INTVAL (val) & 255;
18206 v |= v << 8;
18207 v |= v << 16;
18208 if (mode == DImode)
18209 v |= (v << 16) << 16;
18210 return copy_to_mode_reg (mode, gen_int_mode (v, mode));
18213 if (valmode == VOIDmode)
18214 valmode = QImode;
18215 if (valmode != QImode)
18216 val = gen_lowpart (QImode, val);
18217 if (mode == QImode)
18218 return val;
18219 if (!TARGET_PARTIAL_REG_STALL)
18220 nops--;
18221 if (ix86_cost->mult_init[mode == DImode ? 3 : 2]
18222 + ix86_cost->mult_bit * (mode == DImode ? 8 : 4)
18223 <= (ix86_cost->shift_const + ix86_cost->add) * nops
18224 + (COSTS_N_INSNS (TARGET_PARTIAL_REG_STALL == 0)))
18226 rtx reg = convert_modes (mode, QImode, val, true);
18227 tmp = promote_duplicated_reg (mode, const1_rtx);
18228 return expand_simple_binop (mode, MULT, reg, tmp, NULL, 1,
18229 OPTAB_DIRECT);
18231 else
18233 rtx reg = convert_modes (mode, QImode, val, true);
18235 if (!TARGET_PARTIAL_REG_STALL)
18236 if (mode == SImode)
18237 emit_insn (gen_movsi_insv_1 (reg, reg));
18238 else
18239 emit_insn (gen_movdi_insv_1_rex64 (reg, reg));
18240 else
18242 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (8),
18243 NULL, 1, OPTAB_DIRECT);
18244 reg =
18245 expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18247 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (16),
18248 NULL, 1, OPTAB_DIRECT);
18249 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18250 if (mode == SImode)
18251 return reg;
18252 tmp = expand_simple_binop (mode, ASHIFT, reg, GEN_INT (32),
18253 NULL, 1, OPTAB_DIRECT);
18254 reg = expand_simple_binop (mode, IOR, reg, tmp, reg, 1, OPTAB_DIRECT);
18255 return reg;
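/* An illustrative plain-C sketch (not part of GCC) of the shift-and-or
   unwinding performed above: replicating the byte 0xAB yields 0xABABABAB for
   SImode or 0xABABABABABABABAB for DImode, the same value as multiplying by
   0x01010101 (or its 64-bit counterpart).  The helper name is hypothetical.  */
#if 0
#include <stdint.h>

static uint64_t
replicate_byte (uint8_t val, int bytes)  /* bytes is 4 or 8 */
{
  uint64_t v = val;
  v |= v << 8;
  v |= v << 16;
  if (bytes == 8)
    v |= v << 32;
  return v;
}
#endif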
18259 /* Duplicate value VAL using promote_duplicated_reg into maximal size that will
18260 be needed by main loop copying SIZE_NEEDED chunks and prologue getting
18261 alignment from ALIGN to DESIRED_ALIGN. */
18262 static rtx
18263 promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align, int align)
18265 rtx promoted_val;
18267 if (TARGET_64BIT
18268 && (size_needed > 4 || (desired_align > align && desired_align > 4)))
18269 promoted_val = promote_duplicated_reg (DImode, val);
18270 else if (size_needed > 2 || (desired_align > align && desired_align > 2))
18271 promoted_val = promote_duplicated_reg (SImode, val);
18272 else if (size_needed > 1 || (desired_align > align && desired_align > 1))
18273 promoted_val = promote_duplicated_reg (HImode, val);
18274 else
18275 promoted_val = val;
18277 return promoted_val;
18280 /* Expand string clear operation (bzero). Use i386 string operations when
18281 profitable. See expand_movmem comment for explanation of individual
18282 steps performed. */
18284 ix86_expand_setmem (rtx dst, rtx count_exp, rtx val_exp, rtx align_exp,
18285 rtx expected_align_exp, rtx expected_size_exp)
18287 rtx destreg;
18288 rtx label = NULL;
18289 rtx tmp;
18290 rtx jump_around_label = NULL;
18291 HOST_WIDE_INT align = 1;
18292 unsigned HOST_WIDE_INT count = 0;
18293 HOST_WIDE_INT expected_size = -1;
18294 int size_needed = 0, epilogue_size_needed;
18295 int desired_align = 0, align_bytes = 0;
18296 enum stringop_alg alg;
18297 rtx promoted_val = NULL;
18298 bool force_loopy_epilogue = false;
18299 int dynamic_check;
18300 bool need_zero_guard = false;
18302 if (CONST_INT_P (align_exp))
18303 align = INTVAL (align_exp);
18304 /* i386 can do misaligned access at reasonably increased cost. */
18305 if (CONST_INT_P (expected_align_exp)
18306 && INTVAL (expected_align_exp) > align)
18307 align = INTVAL (expected_align_exp);
18308 if (CONST_INT_P (count_exp))
18309 count = expected_size = INTVAL (count_exp);
18310 if (CONST_INT_P (expected_size_exp) && count == 0)
18311 expected_size = INTVAL (expected_size_exp);
18313 /* Make sure we don't need to care about overflow later on. */
18314 if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
18315 return 0;
18317 /* Step 0: Decide on preferred algorithm, desired alignment and
18318 size of chunks to be copied by main loop. */
18320 alg = decide_alg (count, expected_size, true, &dynamic_check);
18321 desired_align = decide_alignment (align, alg, expected_size);
18323 if (!TARGET_ALIGN_STRINGOPS)
18324 align = desired_align;
18326 if (alg == libcall)
18327 return 0;
18328 gcc_assert (alg != no_stringop);
18329 if (!count)
18330 count_exp = copy_to_mode_reg (counter_mode (count_exp), count_exp);
18331 destreg = copy_to_mode_reg (Pmode, XEXP (dst, 0));
18332 switch (alg)
18334 case libcall:
18335 case no_stringop:
18336 gcc_unreachable ();
18337 case loop:
18338 need_zero_guard = true;
18339 size_needed = GET_MODE_SIZE (Pmode);
18340 break;
18341 case unrolled_loop:
18342 need_zero_guard = true;
18343 size_needed = GET_MODE_SIZE (Pmode) * 4;
18344 break;
18345 case rep_prefix_8_byte:
18346 size_needed = 8;
18347 break;
18348 case rep_prefix_4_byte:
18349 size_needed = 4;
18350 break;
18351 case rep_prefix_1_byte:
18352 size_needed = 1;
18353 break;
18354 case loop_1_byte:
18355 need_zero_guard = true;
18356 size_needed = 1;
18357 break;
18359 epilogue_size_needed = size_needed;
18361 /* Step 1: Prologue guard. */
18363 /* Alignment code needs count to be in register. */
18364 if (CONST_INT_P (count_exp) && desired_align > align)
18366 if (INTVAL (count_exp) > desired_align
18367 && INTVAL (count_exp) > size_needed)
18369 align_bytes
18370 = get_mem_align_offset (dst, desired_align * BITS_PER_UNIT);
18371 if (align_bytes <= 0)
18372 align_bytes = 0;
18373 else
18374 align_bytes = desired_align - align_bytes;
18376 if (align_bytes == 0)
18378 enum machine_mode mode = SImode;
18379 if (TARGET_64BIT && (count & ~0xffffffff))
18380 mode = DImode;
18381 count_exp = force_reg (mode, count_exp);
18384 /* Do the cheap promotion to allow better CSE across the
18385 main loop and epilogue (i.e. one load of the big constant in
18386 front of all code). */
18387 if (CONST_INT_P (val_exp))
18388 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18389 desired_align, align);
18390 /* Ensure that alignment prologue won't copy past end of block. */
18391 if (size_needed > 1 || (desired_align > 1 && desired_align > align))
18393 epilogue_size_needed = MAX (size_needed - 1, desired_align - align);
18394 /* Epilogue always copies COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1) bytes.
18395 Make sure it is a power of 2. */
18396 epilogue_size_needed = smallest_pow2_greater_than (epilogue_size_needed);
18398 /* To improve performance of small blocks, we jump around the VAL
18399 promoting code.  This means that if the promoted VAL is not constant,
18400 we might not use it in the epilogue and have to use the byte
18401 loop variant. */
18402 if (epilogue_size_needed > 2 && !promoted_val)
18403 force_loopy_epilogue = true;
18404 if (count)
18406 if (count < (unsigned HOST_WIDE_INT)epilogue_size_needed)
18408 /* If main algorithm works on QImode, no epilogue is needed.
18409 For small sizes just don't align anything. */
18410 if (size_needed == 1)
18411 desired_align = align;
18412 else
18413 goto epilogue;
18416 else
18418 label = gen_label_rtx ();
18419 emit_cmp_and_jump_insns (count_exp,
18420 GEN_INT (epilogue_size_needed),
18421 LTU, 0, counter_mode (count_exp), 1, label);
18422 if (expected_size == -1 || expected_size <= epilogue_size_needed)
18423 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18424 else
18425 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18428 if (dynamic_check != -1)
18430 rtx hot_label = gen_label_rtx ();
18431 jump_around_label = gen_label_rtx ();
18432 emit_cmp_and_jump_insns (count_exp, GEN_INT (dynamic_check - 1),
18433 LEU, 0, counter_mode (count_exp), 1, hot_label);
18434 predict_jump (REG_BR_PROB_BASE * 90 / 100);
18435 set_storage_via_libcall (dst, count_exp, val_exp, false);
18436 emit_jump (jump_around_label);
18437 emit_label (hot_label);
18440 /* Step 2: Alignment prologue. */
18442 /* Do the expensive promotion once we branched off the small blocks. */
18443 if (!promoted_val)
18444 promoted_val = promote_duplicated_reg_to_size (val_exp, size_needed,
18445 desired_align, align);
18446 gcc_assert (desired_align >= 1 && align >= 1);
18448 if (desired_align > align)
18450 if (align_bytes == 0)
18452 /* Except for the first move in epilogue, we no longer know
18453 the constant offset in aliasing info.  It doesn't seem worth
18454 the pain to maintain it for the first move, so throw away
18455 the info early. */
18456 dst = change_address (dst, BLKmode, destreg);
18457 expand_setmem_prologue (dst, destreg, promoted_val, count_exp, align,
18458 desired_align);
18460 else
18462 /* If we know how many bytes need to be stored before dst is
18463 sufficiently aligned, maintain aliasing info accurately. */
18464 dst = expand_constant_setmem_prologue (dst, destreg, promoted_val,
18465 desired_align, align_bytes);
18466 count_exp = plus_constant (count_exp, -align_bytes);
18467 count -= align_bytes;
18469 if (need_zero_guard
18470 && (count < (unsigned HOST_WIDE_INT) size_needed
18471 || (align_bytes == 0
18472 && count < ((unsigned HOST_WIDE_INT) size_needed
18473 + desired_align - align))))
18475 /* It is possible that we copied enough so the main loop will not
18476 execute. */
18477 gcc_assert (size_needed > 1);
18478 if (label == NULL_RTX)
18479 label = gen_label_rtx ();
18480 emit_cmp_and_jump_insns (count_exp,
18481 GEN_INT (size_needed),
18482 LTU, 0, counter_mode (count_exp), 1, label);
18483 if (expected_size == -1
18484 || expected_size < (desired_align - align) / 2 + size_needed)
18485 predict_jump (REG_BR_PROB_BASE * 20 / 100);
18486 else
18487 predict_jump (REG_BR_PROB_BASE * 60 / 100);
18490 if (label && size_needed == 1)
18492 emit_label (label);
18493 LABEL_NUSES (label) = 1;
18494 label = NULL;
18495 promoted_val = val_exp;
18496 epilogue_size_needed = 1;
18498 else if (label == NULL_RTX)
18499 epilogue_size_needed = size_needed;
18501 /* Step 3: Main loop. */
18503 switch (alg)
18505 case libcall:
18506 case no_stringop:
18507 gcc_unreachable ();
18508 case loop_1_byte:
18509 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18510 count_exp, QImode, 1, expected_size);
18511 break;
18512 case loop:
18513 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18514 count_exp, Pmode, 1, expected_size);
18515 break;
18516 case unrolled_loop:
18517 expand_set_or_movmem_via_loop (dst, NULL, destreg, NULL, promoted_val,
18518 count_exp, Pmode, 4, expected_size);
18519 break;
18520 case rep_prefix_8_byte:
18521 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18522 DImode, val_exp);
18523 break;
18524 case rep_prefix_4_byte:
18525 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18526 SImode, val_exp);
18527 break;
18528 case rep_prefix_1_byte:
18529 expand_setmem_via_rep_stos (dst, destreg, promoted_val, count_exp,
18530 QImode, val_exp);
18531 break;
18533 /* Properly adjust the offset of the dest memory for aliasing. */
18534 if (CONST_INT_P (count_exp))
18535 dst = adjust_automodify_address_nv (dst, BLKmode, destreg,
18536 (count / size_needed) * size_needed);
18537 else
18538 dst = change_address (dst, BLKmode, destreg);
18540 /* Step 4: Epilogue to copy the remaining bytes. */
18542 if (label)
18544 /* When the main loop is done, COUNT_EXP might hold the original count,
18545 while we want to copy only COUNT_EXP & (SIZE_NEEDED - 1) bytes.
18546 Epilogue code will actually copy COUNT_EXP & (EPILOGUE_SIZE_NEEDED - 1)
18547 bytes.  Compensate if needed. */
18549 if (size_needed < epilogue_size_needed)
18551 tmp =
18552 expand_simple_binop (counter_mode (count_exp), AND, count_exp,
18553 GEN_INT (size_needed - 1), count_exp, 1,
18554 OPTAB_DIRECT);
18555 if (tmp != count_exp)
18556 emit_move_insn (count_exp, tmp);
18558 emit_label (label);
18559 LABEL_NUSES (label) = 1;
18561 epilogue:
18562 if (count_exp != const0_rtx && epilogue_size_needed > 1)
18564 if (force_loopy_epilogue)
18565 expand_setmem_epilogue_via_loop (dst, destreg, val_exp, count_exp,
18566 epilogue_size_needed);
18567 else
18568 expand_setmem_epilogue (dst, destreg, promoted_val, count_exp,
18569 epilogue_size_needed);
18571 if (jump_around_label)
18572 emit_label (jump_around_label);
18573 return 1;
18576 /* Expand the appropriate insns for doing strlen if not just doing
18577 repnz; scasb
18579 out = result, initialized with the start address
18580 align_rtx = alignment of the address.
18581 scratch = scratch register, initialized with the start address when
18582 not aligned, otherwise undefined
18584 This is just the body. It needs the initializations mentioned above and
18585 some address computing at the end. These things are done in i386.md. */
18587 static void
18588 ix86_expand_strlensi_unroll_1 (rtx out, rtx src, rtx align_rtx)
18590 int align;
18591 rtx tmp;
18592 rtx align_2_label = NULL_RTX;
18593 rtx align_3_label = NULL_RTX;
18594 rtx align_4_label = gen_label_rtx ();
18595 rtx end_0_label = gen_label_rtx ();
18596 rtx mem;
18597 rtx tmpreg = gen_reg_rtx (SImode);
18598 rtx scratch = gen_reg_rtx (SImode);
18599 rtx cmp;
18601 align = 0;
18602 if (CONST_INT_P (align_rtx))
18603 align = INTVAL (align_rtx);
18605 /* Loop to check 1..3 bytes for null to get an aligned pointer. */
18607 /* Is there a known alignment and is it less than 4? */
18608 if (align < 4)
18610 rtx scratch1 = gen_reg_rtx (Pmode);
18611 emit_move_insn (scratch1, out);
18612 /* Is there a known alignment and is it not 2? */
18613 if (align != 2)
18615 align_3_label = gen_label_rtx (); /* Label when aligned to 3-byte */
18616 align_2_label = gen_label_rtx (); /* Label when aligned to 2-byte */
18618 /* Leave just the 3 lower bits. */
18619 align_rtx = expand_binop (Pmode, and_optab, scratch1, GEN_INT (3),
18620 NULL_RTX, 0, OPTAB_WIDEN);
18622 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18623 Pmode, 1, align_4_label);
18624 emit_cmp_and_jump_insns (align_rtx, const2_rtx, EQ, NULL,
18625 Pmode, 1, align_2_label);
18626 emit_cmp_and_jump_insns (align_rtx, const2_rtx, GTU, NULL,
18627 Pmode, 1, align_3_label);
18629 else
18631 /* Since the alignment is 2, we have to check 2 or 0 bytes;
18632 check if it is aligned to 4 bytes. */
18634 align_rtx = expand_binop (Pmode, and_optab, scratch1, const2_rtx,
18635 NULL_RTX, 0, OPTAB_WIDEN);
18637 emit_cmp_and_jump_insns (align_rtx, const0_rtx, EQ, NULL,
18638 Pmode, 1, align_4_label);
18641 mem = change_address (src, QImode, out);
18643 /* Now compare the bytes. */
18645 /* Compare the first n unaligned bytes on a byte-by-byte basis. */
18646 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL,
18647 QImode, 1, end_0_label);
18649 /* Increment the address. */
18650 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18652 /* Not needed with an alignment of 2 */
18653 if (align != 2)
18655 emit_label (align_2_label);
18657 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18658 end_0_label);
18660 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18662 emit_label (align_3_label);
18665 emit_cmp_and_jump_insns (mem, const0_rtx, EQ, NULL, QImode, 1,
18666 end_0_label);
18668 emit_insn ((*ix86_gen_add3) (out, out, const1_rtx));
18671 /* Generate a loop to check 4 bytes at a time.  It is not a good idea to
18672 align this loop; it only makes the program larger and does not help
18673 to speed it up. */
18674 emit_label (align_4_label);
18676 mem = change_address (src, SImode, out);
18677 emit_move_insn (scratch, mem);
18678 emit_insn ((*ix86_gen_add3) (out, out, GEN_INT (4)));
18680 /* This formula yields a nonzero result iff one of the bytes is zero.
18681 This saves three branches inside loop and many cycles. */
18683 emit_insn (gen_addsi3 (tmpreg, scratch, GEN_INT (-0x01010101)));
18684 emit_insn (gen_one_cmplsi2 (scratch, scratch));
18685 emit_insn (gen_andsi3 (tmpreg, tmpreg, scratch));
18686 emit_insn (gen_andsi3 (tmpreg, tmpreg,
18687 gen_int_mode (0x80808080, SImode)));
18688 emit_cmp_and_jump_insns (tmpreg, const0_rtx, EQ, 0, SImode, 1,
18689 align_4_label);
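/* A worked example of the formula above (illustrative, not from the GCC
   sources): for scratch = 0x12003456, whose third byte is zero,
       scratch - 0x01010101 = 0x10ff3355
       ~scratch             = 0xedffcba9
       AND of the two       = 0x00ff0301
       AND with 0x80808080  = 0x00800000   (nonzero: a zero byte exists)
   while scratch = 0x12343456, which has no zero byte, ends up as 0.
   No bit below the first zero byte can become set, so the code below can use
   the position of the lowest set bit to locate the terminator.  */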
18691 if (TARGET_CMOVE)
18693 rtx reg = gen_reg_rtx (SImode);
18694 rtx reg2 = gen_reg_rtx (Pmode);
18695 emit_move_insn (reg, tmpreg);
18696 emit_insn (gen_lshrsi3 (reg, reg, GEN_INT (16)));
18698 /* If zero is not in the first two bytes, move two bytes forward. */
18699 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
18700 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18701 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
18702 emit_insn (gen_rtx_SET (VOIDmode, tmpreg,
18703 gen_rtx_IF_THEN_ELSE (SImode, tmp,
18704 reg,
18705 tmpreg)));
18706 /* Emit lea manually to avoid clobbering of flags. */
18707 emit_insn (gen_rtx_SET (SImode, reg2,
18708 gen_rtx_PLUS (Pmode, out, const2_rtx)));
18710 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18711 tmp = gen_rtx_EQ (VOIDmode, tmp, const0_rtx);
18712 emit_insn (gen_rtx_SET (VOIDmode, out,
18713 gen_rtx_IF_THEN_ELSE (Pmode, tmp,
18714 reg2,
18715 out)));
18718 else
18720 rtx end_2_label = gen_label_rtx ();
18721 /* Is zero in the first two bytes? */
18723 emit_insn (gen_testsi_ccno_1 (tmpreg, GEN_INT (0x8080)));
18724 tmp = gen_rtx_REG (CCNOmode, FLAGS_REG);
18725 tmp = gen_rtx_NE (VOIDmode, tmp, const0_rtx);
18726 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
18727 gen_rtx_LABEL_REF (VOIDmode, end_2_label),
18728 pc_rtx);
18729 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
18730 JUMP_LABEL (tmp) = end_2_label;
18732 /* Not in the first two. Move two bytes forward. */
18733 emit_insn (gen_lshrsi3 (tmpreg, tmpreg, GEN_INT (16)));
18734 emit_insn ((*ix86_gen_add3) (out, out, const2_rtx));
18736 emit_label (end_2_label);
18740 /* Avoid branch in fixing the byte. */
18741 tmpreg = gen_lowpart (QImode, tmpreg);
18742 emit_insn (gen_addqi3_cc (tmpreg, tmpreg, tmpreg));
18743 cmp = gen_rtx_LTU (Pmode, gen_rtx_REG (CCmode, FLAGS_REG), const0_rtx);
18744 emit_insn ((*ix86_gen_sub3_carry) (out, out, GEN_INT (3), cmp));
18746 emit_label (end_0_label);
18749 /* Expand strlen. */
18752 ix86_expand_strlen (rtx out, rtx src, rtx eoschar, rtx align)
18754 rtx addr, scratch1, scratch2, scratch3, scratch4;
18756 /* The generic case of the strlen expander is long.  Avoid its
18757 expansion unless TARGET_INLINE_ALL_STRINGOPS. */
18759 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18760 && !TARGET_INLINE_ALL_STRINGOPS
18761 && !optimize_insn_for_size_p ()
18762 && (!CONST_INT_P (align) || INTVAL (align) < 4))
18763 return 0;
18765 addr = force_reg (Pmode, XEXP (src, 0));
18766 scratch1 = gen_reg_rtx (Pmode);
18768 if (TARGET_UNROLL_STRLEN && eoschar == const0_rtx && optimize > 1
18769 && !optimize_insn_for_size_p ())
18771 /* Well, it seems that some optimizers do not combine a call like
18772 foo (strlen (bar), strlen (bar));
18773 when the move and the subtraction are done here.  They do calculate
18774 the length just once when these instructions are done inside of
18775 output_strlen_unroll ().  But I think that since &bar[strlen (bar)] is
18776 often used and I use one fewer register for the lifetime of
18777 output_strlen_unroll (), this is better. */
18779 emit_move_insn (out, addr);
18781 ix86_expand_strlensi_unroll_1 (out, src, align);
18783 /* strlensi_unroll_1 returns the address of the zero at the end of
18784 the string, like memchr(), so compute the length by subtracting
18785 the start address. */
18786 emit_insn ((*ix86_gen_sub3) (out, out, addr));
18788 else
18790 rtx unspec;
18792 /* Can't use this if the user has appropriated eax, ecx, or edi. */
18793 if (fixed_regs[AX_REG] || fixed_regs[CX_REG] || fixed_regs[DI_REG])
18794 return false;
18796 scratch2 = gen_reg_rtx (Pmode);
18797 scratch3 = gen_reg_rtx (Pmode);
18798 scratch4 = force_reg (Pmode, constm1_rtx);
18800 emit_move_insn (scratch3, addr);
18801 eoschar = force_reg (QImode, eoschar);
18803 src = replace_equiv_address_nv (src, scratch3);
18805 /* If .md starts supporting :P, this can be done in .md. */
18806 unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, src, eoschar, align,
18807 scratch4), UNSPEC_SCAS);
18808 emit_insn (gen_strlenqi_1 (scratch1, scratch3, unspec));
18809 emit_insn ((*ix86_gen_one_cmpl2) (scratch2, scratch1));
18810 emit_insn ((*ix86_gen_add3) (out, scratch2, constm1_rtx));
18812 return 1;
18815 /* For a given symbol (function) construct code to compute the address of its PLT
18816 entry in the large x86-64 PIC model. */
18818 construct_plt_address (rtx symbol)
18820 rtx tmp = gen_reg_rtx (Pmode);
18821 rtx unspec = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_PLTOFF);
18823 gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
18824 gcc_assert (ix86_cmodel == CM_LARGE_PIC);
18826 emit_move_insn (tmp, gen_rtx_CONST (Pmode, unspec));
18827 emit_insn (gen_adddi3 (tmp, tmp, pic_offset_table_rtx));
18828 return tmp;
18831 void
18832 ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
18833 rtx callarg2,
18834 rtx pop, int sibcall)
18836 rtx use = NULL, call;
18838 if (pop == const0_rtx)
18839 pop = NULL;
18840 gcc_assert (!TARGET_64BIT || !pop);
18842 if (TARGET_MACHO && !TARGET_64BIT)
18844 #if TARGET_MACHO
18845 if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
18846 fnaddr = machopic_indirect_call_target (fnaddr);
18847 #endif
18849 else
18851 /* Static functions and indirect calls don't need the pic register. */
18852 if (flag_pic && (!TARGET_64BIT || ix86_cmodel == CM_LARGE_PIC)
18853 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18854 && ! SYMBOL_REF_LOCAL_P (XEXP (fnaddr, 0)))
18855 use_reg (&use, pic_offset_table_rtx);
18858 if (TARGET_64BIT && INTVAL (callarg2) >= 0)
18860 rtx al = gen_rtx_REG (QImode, AX_REG);
18861 emit_move_insn (al, callarg2);
18862 use_reg (&use, al);
18865 if (ix86_cmodel == CM_LARGE_PIC
18866 && MEM_P (fnaddr)
18867 && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
18868 && !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
18869 fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
18870 else if (sibcall
18871 ? !sibcall_insn_operand (XEXP (fnaddr, 0), Pmode)
18872 : !call_insn_operand (XEXP (fnaddr, 0), Pmode))
18874 fnaddr = copy_to_mode_reg (Pmode, XEXP (fnaddr, 0));
18875 fnaddr = gen_rtx_MEM (QImode, fnaddr);
18878 call = gen_rtx_CALL (VOIDmode, fnaddr, callarg1);
18879 if (retval)
18880 call = gen_rtx_SET (VOIDmode, retval, call);
18881 if (pop)
18883 pop = gen_rtx_PLUS (Pmode, stack_pointer_rtx, pop);
18884 pop = gen_rtx_SET (VOIDmode, stack_pointer_rtx, pop);
18885 call = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, call, pop));
18887 if (TARGET_64BIT
18888 && ix86_cfun_abi () == MS_ABI
18889 && (!callarg2 || INTVAL (callarg2) != -2))
18891 /* We need to represent that SI and DI registers are clobbered
18892 by SYSV calls. */
18893 static int clobbered_registers[] = {
18894 XMM6_REG, XMM7_REG, XMM8_REG,
18895 XMM9_REG, XMM10_REG, XMM11_REG,
18896 XMM12_REG, XMM13_REG, XMM14_REG,
18897 XMM15_REG, SI_REG, DI_REG
18899 unsigned int i;
18900 rtx vec[ARRAY_SIZE (clobbered_registers) + 2];
18901 rtx unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx),
18902 UNSPEC_MS_TO_SYSV_CALL);
18904 vec[0] = call;
18905 vec[1] = unspec;
18906 for (i = 0; i < ARRAY_SIZE (clobbered_registers); i++)
18907 vec[i + 2] = gen_rtx_CLOBBER (SSE_REGNO_P (clobbered_registers[i])
18908 ? TImode : DImode,
18909 gen_rtx_REG
18910 (SSE_REGNO_P (clobbered_registers[i])
18911 ? TImode : DImode,
18912 clobbered_registers[i]));
18914 call = gen_rtx_PARALLEL (VOIDmode,
18915 gen_rtvec_v (ARRAY_SIZE (clobbered_registers)
18916 + 2, vec));
18919 call = emit_call_insn (call);
18920 if (use)
18921 CALL_INSN_FUNCTION_USAGE (call) = use;
18925 /* Clear stack slot assignments remembered from previous functions.
18926 This is called from INIT_EXPANDERS once before RTL is emitted for each
18927 function. */
18929 static struct machine_function *
18930 ix86_init_machine_status (void)
18932 struct machine_function *f;
18934 f = GGC_CNEW (struct machine_function);
18935 f->use_fast_prologue_epilogue_nregs = -1;
18936 f->tls_descriptor_call_expanded_p = 0;
18937 f->call_abi = ix86_abi;
18939 return f;
18942 /* Return a MEM corresponding to a stack slot with mode MODE.
18943 Allocate a new slot if necessary.
18945 The RTL for a function can have several slots available: N is
18946 which slot to use. */
18949 assign_386_stack_local (enum machine_mode mode, enum ix86_stack_slot n)
18951 struct stack_local_entry *s;
18953 gcc_assert (n < MAX_386_STACK_LOCALS);
18955 /* Virtual slot is valid only before vregs are instantiated. */
18956 gcc_assert ((n == SLOT_VIRTUAL) == !virtuals_instantiated);
18958 for (s = ix86_stack_locals; s; s = s->next)
18959 if (s->mode == mode && s->n == n)
18960 return copy_rtx (s->rtl);
18962 s = (struct stack_local_entry *)
18963 ggc_alloc (sizeof (struct stack_local_entry));
18964 s->n = n;
18965 s->mode = mode;
18966 s->rtl = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
18968 s->next = ix86_stack_locals;
18969 ix86_stack_locals = s;
18970 return s->rtl;
18973 /* Construct the SYMBOL_REF for the tls_get_addr function. */
18975 static GTY(()) rtx ix86_tls_symbol;
18977 ix86_tls_get_addr (void)
18980 if (!ix86_tls_symbol)
18982 ix86_tls_symbol = gen_rtx_SYMBOL_REF (Pmode,
18983 (TARGET_ANY_GNU_TLS
18984 && !TARGET_64BIT)
18985 ? "___tls_get_addr"
18986 : "__tls_get_addr");
18989 return ix86_tls_symbol;
18992 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
18994 static GTY(()) rtx ix86_tls_module_base_symbol;
18996 ix86_tls_module_base (void)
18999 if (!ix86_tls_module_base_symbol)
19001 ix86_tls_module_base_symbol = gen_rtx_SYMBOL_REF (Pmode,
19002 "_TLS_MODULE_BASE_");
19003 SYMBOL_REF_FLAGS (ix86_tls_module_base_symbol)
19004 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
19007 return ix86_tls_module_base_symbol;
19010 /* Calculate the length of the memory address in the instruction
19011 encoding. Does not include the one-byte modrm, opcode, or prefix. */
19014 memory_address_length (rtx addr)
19016 struct ix86_address parts;
19017 rtx base, index, disp;
19018 int len;
19019 int ok;
19021 if (GET_CODE (addr) == PRE_DEC
19022 || GET_CODE (addr) == POST_INC
19023 || GET_CODE (addr) == PRE_MODIFY
19024 || GET_CODE (addr) == POST_MODIFY)
19025 return 0;
19027 ok = ix86_decompose_address (addr, &parts);
19028 gcc_assert (ok);
19030 if (parts.base && GET_CODE (parts.base) == SUBREG)
19031 parts.base = SUBREG_REG (parts.base);
19032 if (parts.index && GET_CODE (parts.index) == SUBREG)
19033 parts.index = SUBREG_REG (parts.index);
19035 base = parts.base;
19036 index = parts.index;
19037 disp = parts.disp;
19038 len = 0;
19040 /* Rule of thumb:
19041 - esp as the base always wants an index,
19042 - ebp as the base always wants a displacement,
19043 - r12 as the base always wants an index,
19044 - r13 as the base always wants a displacement. */
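/* Illustrative examples (not from the GCC sources) of what the rules below
   compute, counting only the bytes beyond the mandatory modrm byte:
       (%eax)         -> 0   plain register indirect
       (%esp)         -> 1   SIB byte required
       (%ebp)         -> 1   a zero disp8 is required
       4(%eax)        -> 1   disp8
       1024(%eax)     -> 4   disp32
       4(%eax,%ebx)   -> 2   SIB byte plus disp8  */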
19046 /* Register Indirect. */
19047 if (base && !index && !disp)
19049 /* esp (for its index) and ebp (for its displacement) need
19050 the two-byte modrm form. Similarly for r12 and r13 in 64-bit
19051 code. */
19052 if (REG_P (addr)
19053 && (addr == arg_pointer_rtx
19054 || addr == frame_pointer_rtx
19055 || REGNO (addr) == SP_REG
19056 || REGNO (addr) == BP_REG
19057 || REGNO (addr) == R12_REG
19058 || REGNO (addr) == R13_REG))
19059 len = 1;
19062 /* Direct Addressing. In 64-bit mode mod 00 r/m 5
19063 is not disp32, but disp32(%rip), so for disp32
19064 SIB byte is needed, unless print_operand_address
19065 optimizes it into disp32(%rip) or (%rip) is implied
19066 by UNSPEC. */
19067 else if (disp && !base && !index)
19069 len = 4;
19070 if (TARGET_64BIT)
19072 rtx symbol = disp;
19074 if (GET_CODE (disp) == CONST)
19075 symbol = XEXP (disp, 0);
19076 if (GET_CODE (symbol) == PLUS
19077 && CONST_INT_P (XEXP (symbol, 1)))
19078 symbol = XEXP (symbol, 0);
19080 if (GET_CODE (symbol) != LABEL_REF
19081 && (GET_CODE (symbol) != SYMBOL_REF
19082 || SYMBOL_REF_TLS_MODEL (symbol) != 0)
19083 && (GET_CODE (symbol) != UNSPEC
19084 || (XINT (symbol, 1) != UNSPEC_GOTPCREL
19085 && XINT (symbol, 1) != UNSPEC_GOTNTPOFF)))
19086 len += 1;
19090 else
19092 /* Find the length of the displacement constant. */
19093 if (disp)
19095 if (base && satisfies_constraint_K (disp))
19096 len = 1;
19097 else
19098 len = 4;
19100 /* ebp always wants a displacement. Similarly r13. */
19101 else if (base && REG_P (base)
19102 && (REGNO (base) == BP_REG || REGNO (base) == R13_REG))
19103 len = 1;
19105 /* An index requires the two-byte modrm form.... */
19106 if (index
19107 /* ...like esp (or r12), which always wants an index. */
19108 || base == arg_pointer_rtx
19109 || base == frame_pointer_rtx
19110 || (base && REG_P (base)
19111 && (REGNO (base) == SP_REG || REGNO (base) == R12_REG)))
19112 len += 1;
19115 switch (parts.seg)
19117 case SEG_FS:
19118 case SEG_GS:
19119 len += 1;
19120 break;
19121 default:
19122 break;
19125 return len;
19128 /* Compute default value for "length_immediate" attribute. When SHORTFORM
19129 is set, expect that the insn has an 8-bit immediate alternative. */
19131 ix86_attr_length_immediate_default (rtx insn, int shortform)
19133 int len = 0;
19134 int i;
19135 extract_insn_cached (insn);
19136 for (i = recog_data.n_operands - 1; i >= 0; --i)
19137 if (CONSTANT_P (recog_data.operand[i]))
19139 enum attr_mode mode = get_attr_mode (insn);
19141 gcc_assert (!len);
19142 if (shortform && CONST_INT_P (recog_data.operand[i]))
19144 HOST_WIDE_INT ival = INTVAL (recog_data.operand[i]);
19145 switch (mode)
19147 case MODE_QI:
19148 len = 1;
19149 continue;
19150 case MODE_HI:
19151 ival = trunc_int_for_mode (ival, HImode);
19152 break;
19153 case MODE_SI:
19154 ival = trunc_int_for_mode (ival, SImode);
19155 break;
19156 default:
19157 break;
19159 if (IN_RANGE (ival, -128, 127))
19161 len = 1;
19162 continue;
19165 switch (mode)
19167 case MODE_QI:
19168 len = 1;
19169 break;
19170 case MODE_HI:
19171 len = 2;
19172 break;
19173 case MODE_SI:
19174 len = 4;
19175 break;
19176 /* Immediates for DImode instructions are encoded as 32-bit sign-extended values. */
19177 case MODE_DI:
19178 len = 4;
19179 break;
19180 default:
19181 fatal_insn ("unknown insn mode", insn);
19184 return len;
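/* For example (illustrative, not from the GCC sources): with SHORTFORM set,
   an SImode insn with the immediate 3 reports length 1, since 3 fits in a
   sign-extended 8-bit immediate, while the immediate 300 reports length 4.
   Without SHORTFORM the length is determined by the operand mode alone.  */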
19186 /* Compute default value for "length_address" attribute. */
19188 ix86_attr_length_address_default (rtx insn)
19190 int i;
19192 if (get_attr_type (insn) == TYPE_LEA)
19194 rtx set = PATTERN (insn), addr;
19196 if (GET_CODE (set) == PARALLEL)
19197 set = XVECEXP (set, 0, 0);
19199 gcc_assert (GET_CODE (set) == SET);
19201 addr = SET_SRC (set);
19202 if (TARGET_64BIT && get_attr_mode (insn) == MODE_SI)
19204 if (GET_CODE (addr) == ZERO_EXTEND)
19205 addr = XEXP (addr, 0);
19206 if (GET_CODE (addr) == SUBREG)
19207 addr = SUBREG_REG (addr);
19210 return memory_address_length (addr);
19213 extract_insn_cached (insn);
19214 for (i = recog_data.n_operands - 1; i >= 0; --i)
19215 if (MEM_P (recog_data.operand[i]))
19217 constrain_operands_cached (reload_completed);
19218 if (which_alternative != -1)
19220 const char *constraints = recog_data.constraints[i];
19221 int alt = which_alternative;
19223 while (*constraints == '=' || *constraints == '+')
19224 constraints++;
19225 while (alt-- > 0)
19226 while (*constraints++ != ',')
19228 /* Skip ignored operands. */
19229 if (*constraints == 'X')
19230 continue;
19232 return memory_address_length (XEXP (recog_data.operand[i], 0));
19234 return 0;
19237 /* Compute default value for "length_vex" attribute. It includes
19238 2 or 3 byte VEX prefix and 1 opcode byte. */
19241 ix86_attr_length_vex_default (rtx insn, int has_0f_opcode,
19242 int has_vex_w)
19244 int i;
19246 /* Only the 0f opcode map can use the 2 byte VEX prefix, and the VEX W bit
19247 needs the 3 byte VEX prefix. */
19248 if (!has_0f_opcode || has_vex_w)
19249 return 3 + 1;
19251 /* We can always use 2 byte VEX prefix in 32bit. */
19252 if (!TARGET_64BIT)
19253 return 2 + 1;
19255 extract_insn_cached (insn);
19257 for (i = recog_data.n_operands - 1; i >= 0; --i)
19258 if (REG_P (recog_data.operand[i]))
19260 /* REX.W bit uses 3 byte VEX prefix. */
19261 if (GET_MODE (recog_data.operand[i]) == DImode
19262 && GENERAL_REG_P (recog_data.operand[i]))
19263 return 3 + 1;
19265 else
19267 /* REX.X or REX.B bits use 3 byte VEX prefix. */
19268 if (MEM_P (recog_data.operand[i])
19269 && x86_extended_reg_mentioned_p (recog_data.operand[i]))
19270 return 3 + 1;
19273 return 2 + 1;
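/* For example (illustrative, not from the GCC sources): vaddps
   %xmm1, %xmm2, %xmm3 lives in the 0f opcode map, does not need VEX.W and
   uses no extended registers in a memory operand, so it can take the 2-byte
   (C5) VEX prefix and the function reports 2 + 1; an insn needing VEX.W,
   another opcode map, or REX.X/REX.B must use the 3-byte (C4) form and
   reports 3 + 1.  */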
19276 /* Return the maximum number of instructions a cpu can issue. */
19278 static int
19279 ix86_issue_rate (void)
19281 switch (ix86_tune)
19283 case PROCESSOR_PENTIUM:
19284 case PROCESSOR_ATOM:
19285 case PROCESSOR_K6:
19286 return 2;
19288 case PROCESSOR_PENTIUMPRO:
19289 case PROCESSOR_PENTIUM4:
19290 case PROCESSOR_ATHLON:
19291 case PROCESSOR_K8:
19292 case PROCESSOR_AMDFAM10:
19293 case PROCESSOR_NOCONA:
19294 case PROCESSOR_GENERIC32:
19295 case PROCESSOR_GENERIC64:
19296 return 3;
19298 case PROCESSOR_CORE2:
19299 return 4;
19301 default:
19302 return 1;
19306 /* A subroutine of ix86_adjust_cost -- return true iff INSN reads flags set
19307 by DEP_INSN and nothing else set by DEP_INSN. */
19309 static int
19310 ix86_flags_dependent (rtx insn, rtx dep_insn, enum attr_type insn_type)
19312 rtx set, set2;
19314 /* Simplify the test for uninteresting insns. */
19315 if (insn_type != TYPE_SETCC
19316 && insn_type != TYPE_ICMOV
19317 && insn_type != TYPE_FCMOV
19318 && insn_type != TYPE_IBR)
19319 return 0;
19321 if ((set = single_set (dep_insn)) != 0)
19323 set = SET_DEST (set);
19324 set2 = NULL_RTX;
19326 else if (GET_CODE (PATTERN (dep_insn)) == PARALLEL
19327 && XVECLEN (PATTERN (dep_insn), 0) == 2
19328 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 0)) == SET
19329 && GET_CODE (XVECEXP (PATTERN (dep_insn), 0, 1)) == SET)
19331 set = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 0));
19332 set2 = SET_DEST (XVECEXP (PATTERN (dep_insn), 0, 1));
19334 else
19335 return 0;
19337 if (!REG_P (set) || REGNO (set) != FLAGS_REG)
19338 return 0;
19340 /* This test is true if the dependent insn reads the flags but
19341 not any other potentially set register. */
19342 if (!reg_overlap_mentioned_p (set, PATTERN (insn)))
19343 return 0;
19345 if (set2 && reg_overlap_mentioned_p (set2, PATTERN (insn)))
19346 return 0;
19348 return 1;
19351 /* Return true iff USE_INSN has a memory address with operands set by
19352 SET_INSN. */
19354 bool
19355 ix86_agi_dependent (rtx set_insn, rtx use_insn)
19357 int i;
19358 extract_insn_cached (use_insn);
19359 for (i = recog_data.n_operands - 1; i >= 0; --i)
19360 if (MEM_P (recog_data.operand[i]))
19362 rtx addr = XEXP (recog_data.operand[i], 0);
19363 return modified_in_p (addr, set_insn) != 0;
19365 return false;
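/* Adjust the scheduling cost COST of the dependence described by LINK
   between INSN and DEP_INSN, according to the processor being tuned for.
   The per-CPU cases below model address generation interlocks, flag
   forwarding and the reorder buffer's ability to hide load latency. */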
19368 static int
19369 ix86_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
19371 enum attr_type insn_type, dep_insn_type;
19372 enum attr_memory memory;
19373 rtx set, set2;
19374 int dep_insn_code_number;
19376 /* Anti and output dependencies have zero cost on all CPUs. */
19377 if (REG_NOTE_KIND (link) != 0)
19378 return 0;
19380 dep_insn_code_number = recog_memoized (dep_insn);
19382 /* If we can't recognize the insns, we can't really do anything. */
19383 if (dep_insn_code_number < 0 || recog_memoized (insn) < 0)
19384 return cost;
19386 insn_type = get_attr_type (insn);
19387 dep_insn_type = get_attr_type (dep_insn);
19389 switch (ix86_tune)
19391 case PROCESSOR_PENTIUM:
19392 /* Address Generation Interlock adds a cycle of latency. */
19393 if (insn_type == TYPE_LEA)
19395 rtx addr = PATTERN (insn);
19397 if (GET_CODE (addr) == PARALLEL)
19398 addr = XVECEXP (addr, 0, 0);
19400 gcc_assert (GET_CODE (addr) == SET);
19402 addr = SET_SRC (addr);
19403 if (modified_in_p (addr, dep_insn))
19404 cost += 1;
19406 else if (ix86_agi_dependent (dep_insn, insn))
19407 cost += 1;
19409 /* ??? Compares pair with jump/setcc. */
19410 if (ix86_flags_dependent (insn, dep_insn, insn_type))
19411 cost = 0;
19413 /* Floating point stores require the value to be ready one cycle earlier. */
19414 if (insn_type == TYPE_FMOV
19415 && get_attr_memory (insn) == MEMORY_STORE
19416 && !ix86_agi_dependent (dep_insn, insn))
19417 cost += 1;
19418 break;
19420 case PROCESSOR_PENTIUMPRO:
19421 memory = get_attr_memory (insn);
19423 /* INT->FP conversion is expensive. */
19424 if (get_attr_fp_int_src (dep_insn))
19425 cost += 5;
19427 /* There is one cycle extra latency between an FP op and a store. */
19428 if (insn_type == TYPE_FMOV
19429 && (set = single_set (dep_insn)) != NULL_RTX
19430 && (set2 = single_set (insn)) != NULL_RTX
19431 && rtx_equal_p (SET_DEST (set), SET_SRC (set2))
19432 && MEM_P (SET_DEST (set2)))
19433 cost += 1;
19435 /* Show the ability of the reorder buffer to hide the latency of a load
19436 by executing it in parallel with the previous instruction when the
19437 previous instruction is not needed to compute the address. */
19438 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19439 && !ix86_agi_dependent (dep_insn, insn))
19441 /* Claim that moves take one cycle, as the core can issue one load
19442 at a time and the next load can start a cycle later. */
19443 if (dep_insn_type == TYPE_IMOV
19444 || dep_insn_type == TYPE_FMOV)
19445 cost = 1;
19446 else if (cost > 1)
19447 cost--;
19449 break;
19451 case PROCESSOR_K6:
19452 memory = get_attr_memory (insn);
19454 /* The esp dependency is resolved before the instruction is really
19455 finished. */
19456 if ((insn_type == TYPE_PUSH || insn_type == TYPE_POP)
19457 && (dep_insn_type == TYPE_PUSH || dep_insn_type == TYPE_POP))
19458 return 1;
19460 /* INT->FP conversion is expensive. */
19461 if (get_attr_fp_int_src (dep_insn))
19462 cost += 5;
19464 /* Show the ability of the reorder buffer to hide the latency of a load
19465 by executing it in parallel with the previous instruction when the
19466 previous instruction is not needed to compute the address. */
19467 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19468 && !ix86_agi_dependent (dep_insn, insn))
19470 /* Claim that moves take one cycle, as the core can issue one load
19471 at a time and the next load can start a cycle later. */
19472 if (dep_insn_type == TYPE_IMOV
19473 || dep_insn_type == TYPE_FMOV)
19474 cost = 1;
19475 else if (cost > 2)
19476 cost -= 2;
19477 else
19478 cost = 1;
19480 break;
19482 case PROCESSOR_ATHLON:
19483 case PROCESSOR_K8:
19484 case PROCESSOR_AMDFAM10:
19485 case PROCESSOR_ATOM:
19486 case PROCESSOR_GENERIC32:
19487 case PROCESSOR_GENERIC64:
19488 memory = get_attr_memory (insn);
19490 /* Show the ability of the reorder buffer to hide the latency of a load
19491 by executing it in parallel with the previous instruction when the
19492 previous instruction is not needed to compute the address. */
19493 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)
19494 && !ix86_agi_dependent (dep_insn, insn))
19496 enum attr_unit unit = get_attr_unit (insn);
19497 int loadcost = 3;
19499 /* Because of the difference between the length of integer and
19500 floating unit pipeline preparation stages, the memory operands
19501 for floating point are cheaper.
19503 ??? For Athlon the difference is most probably 2. */
19504 if (unit == UNIT_INTEGER || unit == UNIT_UNKNOWN)
19505 loadcost = 3;
19506 else
19507 loadcost = TARGET_ATHLON ? 2 : 0;
19509 if (cost >= loadcost)
19510 cost -= loadcost;
19511 else
19512 cost = 0;
19515 default:
19516 break;
19519 return cost;
19522 /* How many alternative schedules to try. This should be as wide as the
19523 scheduling freedom in the DFA, but no wider. Making this value too
19524 large results in extra work for the scheduler. */
19526 static int
19527 ia32_multipass_dfa_lookahead (void)
19529 switch (ix86_tune)
19531 case PROCESSOR_PENTIUM:
19532 return 2;
19534 case PROCESSOR_PENTIUMPRO:
19535 case PROCESSOR_K6:
19536 return 1;
19538 default:
19539 return 0;
19544 /* Compute the alignment given to a constant that is being placed in memory.
19545 EXP is the constant and ALIGN is the alignment that the object would
19546 ordinarily have.
19547 The value of this function is used instead of that alignment to align
19548 the object. */
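/* For illustration of the checks below: a DFmode (double) constant is
   aligned to 64 bits, a constant whose mode satisfies ALIGN_MODE_128 to
   128 bits, and, when not optimizing for size, a string constant of 31
   bytes or more is given word alignment so block moves of it are faster. */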
19551 ix86_constant_alignment (tree exp, int align)
19553 if (TREE_CODE (exp) == REAL_CST || TREE_CODE (exp) == VECTOR_CST
19554 || TREE_CODE (exp) == INTEGER_CST)
19556 if (TYPE_MODE (TREE_TYPE (exp)) == DFmode && align < 64)
19557 return 64;
19558 else if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (exp))) && align < 128)
19559 return 128;
19561 else if (!optimize_size && TREE_CODE (exp) == STRING_CST
19562 && TREE_STRING_LENGTH (exp) >= 31 && align < BITS_PER_WORD)
19563 return BITS_PER_WORD;
19565 return align;
19568 /* Compute the alignment for a static variable.
19569 TYPE is the data type, and ALIGN is the alignment that
19570 the object would ordinarily have. The value of this function is used
19571 instead of that alignment to align the object. */
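/* Illustration of the rules below: when not optimizing for size, any
   aggregate of at least max_align bits (256 by default) is raised to
   max_align, and on x86-64 any aggregate of 16 bytes or more is raised
   to 128 bit alignment; the scalar, array, complex and record cases then
   mirror ix86_constant_alignment. */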
19574 ix86_data_alignment (tree type, int align)
19576 int max_align = optimize_size ? BITS_PER_WORD : MIN (256, MAX_OFILE_ALIGNMENT);
19578 if (AGGREGATE_TYPE_P (type)
19579 && TYPE_SIZE (type)
19580 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19581 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
19582 || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
19583 && align < max_align)
19584 align = max_align;
19586 /* The x86-64 ABI requires arrays of at least 16 bytes to be aligned
19587 to a 16-byte boundary. */
19588 if (TARGET_64BIT)
19590 if (AGGREGATE_TYPE_P (type)
19591 && TYPE_SIZE (type)
19592 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19593 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
19594 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19595 return 128;
19598 if (TREE_CODE (type) == ARRAY_TYPE)
19600 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19601 return 64;
19602 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19603 return 128;
19605 else if (TREE_CODE (type) == COMPLEX_TYPE)
19608 if (TYPE_MODE (type) == DCmode && align < 64)
19609 return 64;
19610 if ((TYPE_MODE (type) == XCmode
19611 || TYPE_MODE (type) == TCmode) && align < 128)
19612 return 128;
19614 else if ((TREE_CODE (type) == RECORD_TYPE
19615 || TREE_CODE (type) == UNION_TYPE
19616 || TREE_CODE (type) == QUAL_UNION_TYPE)
19617 && TYPE_FIELDS (type))
19619 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19620 return 64;
19621 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19622 return 128;
19624 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19625 || TREE_CODE (type) == INTEGER_TYPE)
19627 if (TYPE_MODE (type) == DFmode && align < 64)
19628 return 64;
19629 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19630 return 128;
19633 return align;
19636 /* Compute the alignment for a local variable or a stack slot. EXP is
19637 the data type or decl itself, MODE is the widest mode available and
19638 ALIGN is the alignment that the object would ordinarily have. The
19639 value of this function is used instead of that alignment to align the
19640 object. */
19642 unsigned int
19643 ix86_local_alignment (tree exp, enum machine_mode mode,
19644 unsigned int align)
19646 tree type, decl;
19648 if (exp && DECL_P (exp))
19650 type = TREE_TYPE (exp);
19651 decl = exp;
19653 else
19655 type = exp;
19656 decl = NULL;
19659 /* Don't do dynamic stack realignment for long long objects with
19660 -mpreferred-stack-boundary=2. */
19661 if (!TARGET_64BIT
19662 && align == 64
19663 && ix86_preferred_stack_boundary < 64
19664 && (mode == DImode || (type && TYPE_MODE (type) == DImode))
19665 && (!type || !TYPE_USER_ALIGN (type))
19666 && (!decl || !DECL_USER_ALIGN (decl)))
19667 align = 32;
19669 /* If TYPE is NULL, we are allocating a stack slot for a caller-save
19670 register in MODE. We will return the largest alignment of XF
19671 and DF. */
19672 if (!type)
19674 if (mode == XFmode && align < GET_MODE_ALIGNMENT (DFmode))
19675 align = GET_MODE_ALIGNMENT (DFmode);
19676 return align;
19679 /* The x86-64 ABI requires arrays of at least 16 bytes to be aligned
19680 to a 16-byte boundary. */
19681 if (TARGET_64BIT)
19683 if (AGGREGATE_TYPE_P (type)
19684 && TYPE_SIZE (type)
19685 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
19686 && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
19687 || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
19688 return 128;
19690 if (TREE_CODE (type) == ARRAY_TYPE)
19692 if (TYPE_MODE (TREE_TYPE (type)) == DFmode && align < 64)
19693 return 64;
19694 if (ALIGN_MODE_128 (TYPE_MODE (TREE_TYPE (type))) && align < 128)
19695 return 128;
19697 else if (TREE_CODE (type) == COMPLEX_TYPE)
19699 if (TYPE_MODE (type) == DCmode && align < 64)
19700 return 64;
19701 if ((TYPE_MODE (type) == XCmode
19702 || TYPE_MODE (type) == TCmode) && align < 128)
19703 return 128;
19705 else if ((TREE_CODE (type) == RECORD_TYPE
19706 || TREE_CODE (type) == UNION_TYPE
19707 || TREE_CODE (type) == QUAL_UNION_TYPE)
19708 && TYPE_FIELDS (type))
19710 if (DECL_MODE (TYPE_FIELDS (type)) == DFmode && align < 64)
19711 return 64;
19712 if (ALIGN_MODE_128 (DECL_MODE (TYPE_FIELDS (type))) && align < 128)
19713 return 128;
19715 else if (TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == VECTOR_TYPE
19716 || TREE_CODE (type) == INTEGER_TYPE)
19719 if (TYPE_MODE (type) == DFmode && align < 64)
19720 return 64;
19721 if (ALIGN_MODE_128 (TYPE_MODE (type)) && align < 128)
19722 return 128;
19724 return align;
19727 /* Compute the minimum required alignment for dynamic stack realignment
19728 purposes for a local variable, parameter or a stack slot. EXP is
19729 the data type or decl itself, MODE is its mode and ALIGN is the
19730 alignment that the object would ordinarily have. */
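/* Note: this function only ever lowers the request -- in 32-bit mode with
   -mpreferred-stack-boundary=2, a DImode (long long) object without a
   user-specified alignment is reported as needing only 32 bit alignment,
   so it alone does not force dynamic stack realignment. */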
19732 unsigned int
19733 ix86_minimum_alignment (tree exp, enum machine_mode mode,
19734 unsigned int align)
19736 tree type, decl;
19738 if (TARGET_64BIT || align != 64 || ix86_preferred_stack_boundary >= 64)
19739 return align;
19741 if (exp && DECL_P (exp))
19743 type = TREE_TYPE (exp);
19744 decl = exp;
19746 else
19748 type = exp;
19749 decl = NULL;
19752 /* Don't do dynamic stack realignment for long long objects with
19753 -mpreferred-stack-boundary=2. */
19754 if ((mode == DImode || (type && TYPE_MODE (type) == DImode))
19755 && (!type || !TYPE_USER_ALIGN (type))
19756 && (!decl || !DECL_USER_ALIGN (decl)))
19757 return 32;
19759 return align;
19762 /* Emit RTL insns to initialize the variable parts of a trampoline.
19763 FNADDR is an RTX for the address of the function's pure code.
19764 CXT is an RTX for the static chain value for the function. */
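/* Sketch of the code emitted below, decoded from the byte constants used:
     32-bit:  b9 <imm32>       movl   $CXT, %ecx
              e9 <rel32>       jmp    FNADDR
     64-bit:  41 bb <imm32>    movl   $FNADDR, %r11d
              (or 49 bb <imm64>  movabs, when the zero-extended form
               cannot be used)
              49 ba <imm64>    movabs $CXT, %r10
              49 ff e3         jmp    *%r11  */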
19765 void
19766 x86_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
19768 if (!TARGET_64BIT)
19770 /* Compute offset from the end of the jmp to the target function. */
19771 rtx disp = expand_binop (SImode, sub_optab, fnaddr,
19772 plus_constant (tramp, 10),
19773 NULL_RTX, 1, OPTAB_DIRECT);
19774 emit_move_insn (gen_rtx_MEM (QImode, tramp),
19775 gen_int_mode (0xb9, QImode));
19776 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 1)), cxt);
19777 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, 5)),
19778 gen_int_mode (0xe9, QImode));
19779 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 6)), disp);
19781 else
19783 int offset = 0;
19784 /* Try to load the address using the shorter movl instead of movabs.
19785 We may want to support movq for kernel mode, but the kernel does not
19786 use trampolines at the moment. */
19787 if (x86_64_zext_immediate_operand (fnaddr, VOIDmode))
19789 fnaddr = copy_to_mode_reg (DImode, fnaddr);
19790 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19791 gen_int_mode (0xbb41, HImode));
19792 emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, offset + 2)),
19793 gen_lowpart (SImode, fnaddr));
19794 offset += 6;
19796 else
19798 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19799 gen_int_mode (0xbb49, HImode));
19800 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
19801 fnaddr);
19802 offset += 10;
19804 /* Load static chain using movabs to r10. */
19805 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19806 gen_int_mode (0xba49, HImode));
19807 emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, offset + 2)),
19808 cxt);
19809 offset += 10;
19810 /* Jump to r11. */
19811 emit_move_insn (gen_rtx_MEM (HImode, plus_constant (tramp, offset)),
19812 gen_int_mode (0xff49, HImode));
19813 emit_move_insn (gen_rtx_MEM (QImode, plus_constant (tramp, offset+2)),
19814 gen_int_mode (0xe3, QImode));
19815 offset += 3;
19816 gcc_assert (offset <= TRAMPOLINE_SIZE);
19819 #ifdef ENABLE_EXECUTE_STACK
19820 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
19821 LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
19822 #endif
19825 /* Codes for all the SSE/MMX builtins. */
19826 enum ix86_builtins
19828 IX86_BUILTIN_ADDPS,
19829 IX86_BUILTIN_ADDSS,
19830 IX86_BUILTIN_DIVPS,
19831 IX86_BUILTIN_DIVSS,
19832 IX86_BUILTIN_MULPS,
19833 IX86_BUILTIN_MULSS,
19834 IX86_BUILTIN_SUBPS,
19835 IX86_BUILTIN_SUBSS,
19837 IX86_BUILTIN_CMPEQPS,
19838 IX86_BUILTIN_CMPLTPS,
19839 IX86_BUILTIN_CMPLEPS,
19840 IX86_BUILTIN_CMPGTPS,
19841 IX86_BUILTIN_CMPGEPS,
19842 IX86_BUILTIN_CMPNEQPS,
19843 IX86_BUILTIN_CMPNLTPS,
19844 IX86_BUILTIN_CMPNLEPS,
19845 IX86_BUILTIN_CMPNGTPS,
19846 IX86_BUILTIN_CMPNGEPS,
19847 IX86_BUILTIN_CMPORDPS,
19848 IX86_BUILTIN_CMPUNORDPS,
19849 IX86_BUILTIN_CMPEQSS,
19850 IX86_BUILTIN_CMPLTSS,
19851 IX86_BUILTIN_CMPLESS,
19852 IX86_BUILTIN_CMPNEQSS,
19853 IX86_BUILTIN_CMPNLTSS,
19854 IX86_BUILTIN_CMPNLESS,
19855 IX86_BUILTIN_CMPNGTSS,
19856 IX86_BUILTIN_CMPNGESS,
19857 IX86_BUILTIN_CMPORDSS,
19858 IX86_BUILTIN_CMPUNORDSS,
19860 IX86_BUILTIN_COMIEQSS,
19861 IX86_BUILTIN_COMILTSS,
19862 IX86_BUILTIN_COMILESS,
19863 IX86_BUILTIN_COMIGTSS,
19864 IX86_BUILTIN_COMIGESS,
19865 IX86_BUILTIN_COMINEQSS,
19866 IX86_BUILTIN_UCOMIEQSS,
19867 IX86_BUILTIN_UCOMILTSS,
19868 IX86_BUILTIN_UCOMILESS,
19869 IX86_BUILTIN_UCOMIGTSS,
19870 IX86_BUILTIN_UCOMIGESS,
19871 IX86_BUILTIN_UCOMINEQSS,
19873 IX86_BUILTIN_CVTPI2PS,
19874 IX86_BUILTIN_CVTPS2PI,
19875 IX86_BUILTIN_CVTSI2SS,
19876 IX86_BUILTIN_CVTSI642SS,
19877 IX86_BUILTIN_CVTSS2SI,
19878 IX86_BUILTIN_CVTSS2SI64,
19879 IX86_BUILTIN_CVTTPS2PI,
19880 IX86_BUILTIN_CVTTSS2SI,
19881 IX86_BUILTIN_CVTTSS2SI64,
19883 IX86_BUILTIN_MAXPS,
19884 IX86_BUILTIN_MAXSS,
19885 IX86_BUILTIN_MINPS,
19886 IX86_BUILTIN_MINSS,
19888 IX86_BUILTIN_LOADUPS,
19889 IX86_BUILTIN_STOREUPS,
19890 IX86_BUILTIN_MOVSS,
19892 IX86_BUILTIN_MOVHLPS,
19893 IX86_BUILTIN_MOVLHPS,
19894 IX86_BUILTIN_LOADHPS,
19895 IX86_BUILTIN_LOADLPS,
19896 IX86_BUILTIN_STOREHPS,
19897 IX86_BUILTIN_STORELPS,
19899 IX86_BUILTIN_MASKMOVQ,
19900 IX86_BUILTIN_MOVMSKPS,
19901 IX86_BUILTIN_PMOVMSKB,
19903 IX86_BUILTIN_MOVNTPS,
19904 IX86_BUILTIN_MOVNTQ,
19906 IX86_BUILTIN_LOADDQU,
19907 IX86_BUILTIN_STOREDQU,
19909 IX86_BUILTIN_PACKSSWB,
19910 IX86_BUILTIN_PACKSSDW,
19911 IX86_BUILTIN_PACKUSWB,
19913 IX86_BUILTIN_PADDB,
19914 IX86_BUILTIN_PADDW,
19915 IX86_BUILTIN_PADDD,
19916 IX86_BUILTIN_PADDQ,
19917 IX86_BUILTIN_PADDSB,
19918 IX86_BUILTIN_PADDSW,
19919 IX86_BUILTIN_PADDUSB,
19920 IX86_BUILTIN_PADDUSW,
19921 IX86_BUILTIN_PSUBB,
19922 IX86_BUILTIN_PSUBW,
19923 IX86_BUILTIN_PSUBD,
19924 IX86_BUILTIN_PSUBQ,
19925 IX86_BUILTIN_PSUBSB,
19926 IX86_BUILTIN_PSUBSW,
19927 IX86_BUILTIN_PSUBUSB,
19928 IX86_BUILTIN_PSUBUSW,
19930 IX86_BUILTIN_PAND,
19931 IX86_BUILTIN_PANDN,
19932 IX86_BUILTIN_POR,
19933 IX86_BUILTIN_PXOR,
19935 IX86_BUILTIN_PAVGB,
19936 IX86_BUILTIN_PAVGW,
19938 IX86_BUILTIN_PCMPEQB,
19939 IX86_BUILTIN_PCMPEQW,
19940 IX86_BUILTIN_PCMPEQD,
19941 IX86_BUILTIN_PCMPGTB,
19942 IX86_BUILTIN_PCMPGTW,
19943 IX86_BUILTIN_PCMPGTD,
19945 IX86_BUILTIN_PMADDWD,
19947 IX86_BUILTIN_PMAXSW,
19948 IX86_BUILTIN_PMAXUB,
19949 IX86_BUILTIN_PMINSW,
19950 IX86_BUILTIN_PMINUB,
19952 IX86_BUILTIN_PMULHUW,
19953 IX86_BUILTIN_PMULHW,
19954 IX86_BUILTIN_PMULLW,
19956 IX86_BUILTIN_PSADBW,
19957 IX86_BUILTIN_PSHUFW,
19959 IX86_BUILTIN_PSLLW,
19960 IX86_BUILTIN_PSLLD,
19961 IX86_BUILTIN_PSLLQ,
19962 IX86_BUILTIN_PSRAW,
19963 IX86_BUILTIN_PSRAD,
19964 IX86_BUILTIN_PSRLW,
19965 IX86_BUILTIN_PSRLD,
19966 IX86_BUILTIN_PSRLQ,
19967 IX86_BUILTIN_PSLLWI,
19968 IX86_BUILTIN_PSLLDI,
19969 IX86_BUILTIN_PSLLQI,
19970 IX86_BUILTIN_PSRAWI,
19971 IX86_BUILTIN_PSRADI,
19972 IX86_BUILTIN_PSRLWI,
19973 IX86_BUILTIN_PSRLDI,
19974 IX86_BUILTIN_PSRLQI,
19976 IX86_BUILTIN_PUNPCKHBW,
19977 IX86_BUILTIN_PUNPCKHWD,
19978 IX86_BUILTIN_PUNPCKHDQ,
19979 IX86_BUILTIN_PUNPCKLBW,
19980 IX86_BUILTIN_PUNPCKLWD,
19981 IX86_BUILTIN_PUNPCKLDQ,
19983 IX86_BUILTIN_SHUFPS,
19985 IX86_BUILTIN_RCPPS,
19986 IX86_BUILTIN_RCPSS,
19987 IX86_BUILTIN_RSQRTPS,
19988 IX86_BUILTIN_RSQRTPS_NR,
19989 IX86_BUILTIN_RSQRTSS,
19990 IX86_BUILTIN_RSQRTF,
19991 IX86_BUILTIN_SQRTPS,
19992 IX86_BUILTIN_SQRTPS_NR,
19993 IX86_BUILTIN_SQRTSS,
19995 IX86_BUILTIN_UNPCKHPS,
19996 IX86_BUILTIN_UNPCKLPS,
19998 IX86_BUILTIN_ANDPS,
19999 IX86_BUILTIN_ANDNPS,
20000 IX86_BUILTIN_ORPS,
20001 IX86_BUILTIN_XORPS,
20003 IX86_BUILTIN_EMMS,
20004 IX86_BUILTIN_LDMXCSR,
20005 IX86_BUILTIN_STMXCSR,
20006 IX86_BUILTIN_SFENCE,
20008 /* 3DNow! Original */
20009 IX86_BUILTIN_FEMMS,
20010 IX86_BUILTIN_PAVGUSB,
20011 IX86_BUILTIN_PF2ID,
20012 IX86_BUILTIN_PFACC,
20013 IX86_BUILTIN_PFADD,
20014 IX86_BUILTIN_PFCMPEQ,
20015 IX86_BUILTIN_PFCMPGE,
20016 IX86_BUILTIN_PFCMPGT,
20017 IX86_BUILTIN_PFMAX,
20018 IX86_BUILTIN_PFMIN,
20019 IX86_BUILTIN_PFMUL,
20020 IX86_BUILTIN_PFRCP,
20021 IX86_BUILTIN_PFRCPIT1,
20022 IX86_BUILTIN_PFRCPIT2,
20023 IX86_BUILTIN_PFRSQIT1,
20024 IX86_BUILTIN_PFRSQRT,
20025 IX86_BUILTIN_PFSUB,
20026 IX86_BUILTIN_PFSUBR,
20027 IX86_BUILTIN_PI2FD,
20028 IX86_BUILTIN_PMULHRW,
20030 /* 3DNow! Athlon Extensions */
20031 IX86_BUILTIN_PF2IW,
20032 IX86_BUILTIN_PFNACC,
20033 IX86_BUILTIN_PFPNACC,
20034 IX86_BUILTIN_PI2FW,
20035 IX86_BUILTIN_PSWAPDSI,
20036 IX86_BUILTIN_PSWAPDSF,
20038 /* SSE2 */
20039 IX86_BUILTIN_ADDPD,
20040 IX86_BUILTIN_ADDSD,
20041 IX86_BUILTIN_DIVPD,
20042 IX86_BUILTIN_DIVSD,
20043 IX86_BUILTIN_MULPD,
20044 IX86_BUILTIN_MULSD,
20045 IX86_BUILTIN_SUBPD,
20046 IX86_BUILTIN_SUBSD,
20048 IX86_BUILTIN_CMPEQPD,
20049 IX86_BUILTIN_CMPLTPD,
20050 IX86_BUILTIN_CMPLEPD,
20051 IX86_BUILTIN_CMPGTPD,
20052 IX86_BUILTIN_CMPGEPD,
20053 IX86_BUILTIN_CMPNEQPD,
20054 IX86_BUILTIN_CMPNLTPD,
20055 IX86_BUILTIN_CMPNLEPD,
20056 IX86_BUILTIN_CMPNGTPD,
20057 IX86_BUILTIN_CMPNGEPD,
20058 IX86_BUILTIN_CMPORDPD,
20059 IX86_BUILTIN_CMPUNORDPD,
20060 IX86_BUILTIN_CMPEQSD,
20061 IX86_BUILTIN_CMPLTSD,
20062 IX86_BUILTIN_CMPLESD,
20063 IX86_BUILTIN_CMPNEQSD,
20064 IX86_BUILTIN_CMPNLTSD,
20065 IX86_BUILTIN_CMPNLESD,
20066 IX86_BUILTIN_CMPORDSD,
20067 IX86_BUILTIN_CMPUNORDSD,
20069 IX86_BUILTIN_COMIEQSD,
20070 IX86_BUILTIN_COMILTSD,
20071 IX86_BUILTIN_COMILESD,
20072 IX86_BUILTIN_COMIGTSD,
20073 IX86_BUILTIN_COMIGESD,
20074 IX86_BUILTIN_COMINEQSD,
20075 IX86_BUILTIN_UCOMIEQSD,
20076 IX86_BUILTIN_UCOMILTSD,
20077 IX86_BUILTIN_UCOMILESD,
20078 IX86_BUILTIN_UCOMIGTSD,
20079 IX86_BUILTIN_UCOMIGESD,
20080 IX86_BUILTIN_UCOMINEQSD,
20082 IX86_BUILTIN_MAXPD,
20083 IX86_BUILTIN_MAXSD,
20084 IX86_BUILTIN_MINPD,
20085 IX86_BUILTIN_MINSD,
20087 IX86_BUILTIN_ANDPD,
20088 IX86_BUILTIN_ANDNPD,
20089 IX86_BUILTIN_ORPD,
20090 IX86_BUILTIN_XORPD,
20092 IX86_BUILTIN_SQRTPD,
20093 IX86_BUILTIN_SQRTSD,
20095 IX86_BUILTIN_UNPCKHPD,
20096 IX86_BUILTIN_UNPCKLPD,
20098 IX86_BUILTIN_SHUFPD,
20100 IX86_BUILTIN_LOADUPD,
20101 IX86_BUILTIN_STOREUPD,
20102 IX86_BUILTIN_MOVSD,
20104 IX86_BUILTIN_LOADHPD,
20105 IX86_BUILTIN_LOADLPD,
20107 IX86_BUILTIN_CVTDQ2PD,
20108 IX86_BUILTIN_CVTDQ2PS,
20110 IX86_BUILTIN_CVTPD2DQ,
20111 IX86_BUILTIN_CVTPD2PI,
20112 IX86_BUILTIN_CVTPD2PS,
20113 IX86_BUILTIN_CVTTPD2DQ,
20114 IX86_BUILTIN_CVTTPD2PI,
20116 IX86_BUILTIN_CVTPI2PD,
20117 IX86_BUILTIN_CVTSI2SD,
20118 IX86_BUILTIN_CVTSI642SD,
20120 IX86_BUILTIN_CVTSD2SI,
20121 IX86_BUILTIN_CVTSD2SI64,
20122 IX86_BUILTIN_CVTSD2SS,
20123 IX86_BUILTIN_CVTSS2SD,
20124 IX86_BUILTIN_CVTTSD2SI,
20125 IX86_BUILTIN_CVTTSD2SI64,
20127 IX86_BUILTIN_CVTPS2DQ,
20128 IX86_BUILTIN_CVTPS2PD,
20129 IX86_BUILTIN_CVTTPS2DQ,
20131 IX86_BUILTIN_MOVNTI,
20132 IX86_BUILTIN_MOVNTPD,
20133 IX86_BUILTIN_MOVNTDQ,
20135 IX86_BUILTIN_MOVQ128,
20137 /* SSE2 MMX */
20138 IX86_BUILTIN_MASKMOVDQU,
20139 IX86_BUILTIN_MOVMSKPD,
20140 IX86_BUILTIN_PMOVMSKB128,
20142 IX86_BUILTIN_PACKSSWB128,
20143 IX86_BUILTIN_PACKSSDW128,
20144 IX86_BUILTIN_PACKUSWB128,
20146 IX86_BUILTIN_PADDB128,
20147 IX86_BUILTIN_PADDW128,
20148 IX86_BUILTIN_PADDD128,
20149 IX86_BUILTIN_PADDQ128,
20150 IX86_BUILTIN_PADDSB128,
20151 IX86_BUILTIN_PADDSW128,
20152 IX86_BUILTIN_PADDUSB128,
20153 IX86_BUILTIN_PADDUSW128,
20154 IX86_BUILTIN_PSUBB128,
20155 IX86_BUILTIN_PSUBW128,
20156 IX86_BUILTIN_PSUBD128,
20157 IX86_BUILTIN_PSUBQ128,
20158 IX86_BUILTIN_PSUBSB128,
20159 IX86_BUILTIN_PSUBSW128,
20160 IX86_BUILTIN_PSUBUSB128,
20161 IX86_BUILTIN_PSUBUSW128,
20163 IX86_BUILTIN_PAND128,
20164 IX86_BUILTIN_PANDN128,
20165 IX86_BUILTIN_POR128,
20166 IX86_BUILTIN_PXOR128,
20168 IX86_BUILTIN_PAVGB128,
20169 IX86_BUILTIN_PAVGW128,
20171 IX86_BUILTIN_PCMPEQB128,
20172 IX86_BUILTIN_PCMPEQW128,
20173 IX86_BUILTIN_PCMPEQD128,
20174 IX86_BUILTIN_PCMPGTB128,
20175 IX86_BUILTIN_PCMPGTW128,
20176 IX86_BUILTIN_PCMPGTD128,
20178 IX86_BUILTIN_PMADDWD128,
20180 IX86_BUILTIN_PMAXSW128,
20181 IX86_BUILTIN_PMAXUB128,
20182 IX86_BUILTIN_PMINSW128,
20183 IX86_BUILTIN_PMINUB128,
20185 IX86_BUILTIN_PMULUDQ,
20186 IX86_BUILTIN_PMULUDQ128,
20187 IX86_BUILTIN_PMULHUW128,
20188 IX86_BUILTIN_PMULHW128,
20189 IX86_BUILTIN_PMULLW128,
20191 IX86_BUILTIN_PSADBW128,
20192 IX86_BUILTIN_PSHUFHW,
20193 IX86_BUILTIN_PSHUFLW,
20194 IX86_BUILTIN_PSHUFD,
20196 IX86_BUILTIN_PSLLDQI128,
20197 IX86_BUILTIN_PSLLWI128,
20198 IX86_BUILTIN_PSLLDI128,
20199 IX86_BUILTIN_PSLLQI128,
20200 IX86_BUILTIN_PSRAWI128,
20201 IX86_BUILTIN_PSRADI128,
20202 IX86_BUILTIN_PSRLDQI128,
20203 IX86_BUILTIN_PSRLWI128,
20204 IX86_BUILTIN_PSRLDI128,
20205 IX86_BUILTIN_PSRLQI128,
20207 IX86_BUILTIN_PSLLDQ128,
20208 IX86_BUILTIN_PSLLW128,
20209 IX86_BUILTIN_PSLLD128,
20210 IX86_BUILTIN_PSLLQ128,
20211 IX86_BUILTIN_PSRAW128,
20212 IX86_BUILTIN_PSRAD128,
20213 IX86_BUILTIN_PSRLW128,
20214 IX86_BUILTIN_PSRLD128,
20215 IX86_BUILTIN_PSRLQ128,
20217 IX86_BUILTIN_PUNPCKHBW128,
20218 IX86_BUILTIN_PUNPCKHWD128,
20219 IX86_BUILTIN_PUNPCKHDQ128,
20220 IX86_BUILTIN_PUNPCKHQDQ128,
20221 IX86_BUILTIN_PUNPCKLBW128,
20222 IX86_BUILTIN_PUNPCKLWD128,
20223 IX86_BUILTIN_PUNPCKLDQ128,
20224 IX86_BUILTIN_PUNPCKLQDQ128,
20226 IX86_BUILTIN_CLFLUSH,
20227 IX86_BUILTIN_MFENCE,
20228 IX86_BUILTIN_LFENCE,
20230 IX86_BUILTIN_BSRSI,
20231 IX86_BUILTIN_BSRDI,
20232 IX86_BUILTIN_RDPMC,
20233 IX86_BUILTIN_RDTSC,
20234 IX86_BUILTIN_RDTSCP,
20235 IX86_BUILTIN_ROLQI,
20236 IX86_BUILTIN_ROLHI,
20237 IX86_BUILTIN_RORQI,
20238 IX86_BUILTIN_RORHI,
20240 /* SSE3. */
20241 IX86_BUILTIN_ADDSUBPS,
20242 IX86_BUILTIN_HADDPS,
20243 IX86_BUILTIN_HSUBPS,
20244 IX86_BUILTIN_MOVSHDUP,
20245 IX86_BUILTIN_MOVSLDUP,
20246 IX86_BUILTIN_ADDSUBPD,
20247 IX86_BUILTIN_HADDPD,
20248 IX86_BUILTIN_HSUBPD,
20249 IX86_BUILTIN_LDDQU,
20251 IX86_BUILTIN_MONITOR,
20252 IX86_BUILTIN_MWAIT,
20254 /* SSSE3. */
20255 IX86_BUILTIN_PHADDW,
20256 IX86_BUILTIN_PHADDD,
20257 IX86_BUILTIN_PHADDSW,
20258 IX86_BUILTIN_PHSUBW,
20259 IX86_BUILTIN_PHSUBD,
20260 IX86_BUILTIN_PHSUBSW,
20261 IX86_BUILTIN_PMADDUBSW,
20262 IX86_BUILTIN_PMULHRSW,
20263 IX86_BUILTIN_PSHUFB,
20264 IX86_BUILTIN_PSIGNB,
20265 IX86_BUILTIN_PSIGNW,
20266 IX86_BUILTIN_PSIGND,
20267 IX86_BUILTIN_PALIGNR,
20268 IX86_BUILTIN_PABSB,
20269 IX86_BUILTIN_PABSW,
20270 IX86_BUILTIN_PABSD,
20272 IX86_BUILTIN_PHADDW128,
20273 IX86_BUILTIN_PHADDD128,
20274 IX86_BUILTIN_PHADDSW128,
20275 IX86_BUILTIN_PHSUBW128,
20276 IX86_BUILTIN_PHSUBD128,
20277 IX86_BUILTIN_PHSUBSW128,
20278 IX86_BUILTIN_PMADDUBSW128,
20279 IX86_BUILTIN_PMULHRSW128,
20280 IX86_BUILTIN_PSHUFB128,
20281 IX86_BUILTIN_PSIGNB128,
20282 IX86_BUILTIN_PSIGNW128,
20283 IX86_BUILTIN_PSIGND128,
20284 IX86_BUILTIN_PALIGNR128,
20285 IX86_BUILTIN_PABSB128,
20286 IX86_BUILTIN_PABSW128,
20287 IX86_BUILTIN_PABSD128,
20289 /* AMDFAM10 - SSE4A New Instructions. */
20290 IX86_BUILTIN_MOVNTSD,
20291 IX86_BUILTIN_MOVNTSS,
20292 IX86_BUILTIN_EXTRQI,
20293 IX86_BUILTIN_EXTRQ,
20294 IX86_BUILTIN_INSERTQI,
20295 IX86_BUILTIN_INSERTQ,
20297 /* SSE4.1. */
20298 IX86_BUILTIN_BLENDPD,
20299 IX86_BUILTIN_BLENDPS,
20300 IX86_BUILTIN_BLENDVPD,
20301 IX86_BUILTIN_BLENDVPS,
20302 IX86_BUILTIN_PBLENDVB128,
20303 IX86_BUILTIN_PBLENDW128,
20305 IX86_BUILTIN_DPPD,
20306 IX86_BUILTIN_DPPS,
20308 IX86_BUILTIN_INSERTPS128,
20310 IX86_BUILTIN_MOVNTDQA,
20311 IX86_BUILTIN_MPSADBW128,
20312 IX86_BUILTIN_PACKUSDW128,
20313 IX86_BUILTIN_PCMPEQQ,
20314 IX86_BUILTIN_PHMINPOSUW128,
20316 IX86_BUILTIN_PMAXSB128,
20317 IX86_BUILTIN_PMAXSD128,
20318 IX86_BUILTIN_PMAXUD128,
20319 IX86_BUILTIN_PMAXUW128,
20321 IX86_BUILTIN_PMINSB128,
20322 IX86_BUILTIN_PMINSD128,
20323 IX86_BUILTIN_PMINUD128,
20324 IX86_BUILTIN_PMINUW128,
20326 IX86_BUILTIN_PMOVSXBW128,
20327 IX86_BUILTIN_PMOVSXBD128,
20328 IX86_BUILTIN_PMOVSXBQ128,
20329 IX86_BUILTIN_PMOVSXWD128,
20330 IX86_BUILTIN_PMOVSXWQ128,
20331 IX86_BUILTIN_PMOVSXDQ128,
20333 IX86_BUILTIN_PMOVZXBW128,
20334 IX86_BUILTIN_PMOVZXBD128,
20335 IX86_BUILTIN_PMOVZXBQ128,
20336 IX86_BUILTIN_PMOVZXWD128,
20337 IX86_BUILTIN_PMOVZXWQ128,
20338 IX86_BUILTIN_PMOVZXDQ128,
20340 IX86_BUILTIN_PMULDQ128,
20341 IX86_BUILTIN_PMULLD128,
20343 IX86_BUILTIN_ROUNDPD,
20344 IX86_BUILTIN_ROUNDPS,
20345 IX86_BUILTIN_ROUNDSD,
20346 IX86_BUILTIN_ROUNDSS,
20348 IX86_BUILTIN_PTESTZ,
20349 IX86_BUILTIN_PTESTC,
20350 IX86_BUILTIN_PTESTNZC,
20352 IX86_BUILTIN_VEC_INIT_V2SI,
20353 IX86_BUILTIN_VEC_INIT_V4HI,
20354 IX86_BUILTIN_VEC_INIT_V8QI,
20355 IX86_BUILTIN_VEC_EXT_V2DF,
20356 IX86_BUILTIN_VEC_EXT_V2DI,
20357 IX86_BUILTIN_VEC_EXT_V4SF,
20358 IX86_BUILTIN_VEC_EXT_V4SI,
20359 IX86_BUILTIN_VEC_EXT_V8HI,
20360 IX86_BUILTIN_VEC_EXT_V2SI,
20361 IX86_BUILTIN_VEC_EXT_V4HI,
20362 IX86_BUILTIN_VEC_EXT_V16QI,
20363 IX86_BUILTIN_VEC_SET_V2DI,
20364 IX86_BUILTIN_VEC_SET_V4SF,
20365 IX86_BUILTIN_VEC_SET_V4SI,
20366 IX86_BUILTIN_VEC_SET_V8HI,
20367 IX86_BUILTIN_VEC_SET_V4HI,
20368 IX86_BUILTIN_VEC_SET_V16QI,
20370 IX86_BUILTIN_VEC_PACK_SFIX,
20372 /* SSE4.2. */
20373 IX86_BUILTIN_CRC32QI,
20374 IX86_BUILTIN_CRC32HI,
20375 IX86_BUILTIN_CRC32SI,
20376 IX86_BUILTIN_CRC32DI,
20378 IX86_BUILTIN_PCMPESTRI128,
20379 IX86_BUILTIN_PCMPESTRM128,
20380 IX86_BUILTIN_PCMPESTRA128,
20381 IX86_BUILTIN_PCMPESTRC128,
20382 IX86_BUILTIN_PCMPESTRO128,
20383 IX86_BUILTIN_PCMPESTRS128,
20384 IX86_BUILTIN_PCMPESTRZ128,
20385 IX86_BUILTIN_PCMPISTRI128,
20386 IX86_BUILTIN_PCMPISTRM128,
20387 IX86_BUILTIN_PCMPISTRA128,
20388 IX86_BUILTIN_PCMPISTRC128,
20389 IX86_BUILTIN_PCMPISTRO128,
20390 IX86_BUILTIN_PCMPISTRS128,
20391 IX86_BUILTIN_PCMPISTRZ128,
20393 IX86_BUILTIN_PCMPGTQ,
20395 /* AES instructions */
20396 IX86_BUILTIN_AESENC128,
20397 IX86_BUILTIN_AESENCLAST128,
20398 IX86_BUILTIN_AESDEC128,
20399 IX86_BUILTIN_AESDECLAST128,
20400 IX86_BUILTIN_AESIMC128,
20401 IX86_BUILTIN_AESKEYGENASSIST128,
20403 /* PCLMUL instruction */
20404 IX86_BUILTIN_PCLMULQDQ128,
20406 /* AVX */
20407 IX86_BUILTIN_ADDPD256,
20408 IX86_BUILTIN_ADDPS256,
20409 IX86_BUILTIN_ADDSUBPD256,
20410 IX86_BUILTIN_ADDSUBPS256,
20411 IX86_BUILTIN_ANDPD256,
20412 IX86_BUILTIN_ANDPS256,
20413 IX86_BUILTIN_ANDNPD256,
20414 IX86_BUILTIN_ANDNPS256,
20415 IX86_BUILTIN_BLENDPD256,
20416 IX86_BUILTIN_BLENDPS256,
20417 IX86_BUILTIN_BLENDVPD256,
20418 IX86_BUILTIN_BLENDVPS256,
20419 IX86_BUILTIN_DIVPD256,
20420 IX86_BUILTIN_DIVPS256,
20421 IX86_BUILTIN_DPPS256,
20422 IX86_BUILTIN_HADDPD256,
20423 IX86_BUILTIN_HADDPS256,
20424 IX86_BUILTIN_HSUBPD256,
20425 IX86_BUILTIN_HSUBPS256,
20426 IX86_BUILTIN_MAXPD256,
20427 IX86_BUILTIN_MAXPS256,
20428 IX86_BUILTIN_MINPD256,
20429 IX86_BUILTIN_MINPS256,
20430 IX86_BUILTIN_MULPD256,
20431 IX86_BUILTIN_MULPS256,
20432 IX86_BUILTIN_ORPD256,
20433 IX86_BUILTIN_ORPS256,
20434 IX86_BUILTIN_SHUFPD256,
20435 IX86_BUILTIN_SHUFPS256,
20436 IX86_BUILTIN_SUBPD256,
20437 IX86_BUILTIN_SUBPS256,
20438 IX86_BUILTIN_XORPD256,
20439 IX86_BUILTIN_XORPS256,
20440 IX86_BUILTIN_CMPSD,
20441 IX86_BUILTIN_CMPSS,
20442 IX86_BUILTIN_CMPPD,
20443 IX86_BUILTIN_CMPPS,
20444 IX86_BUILTIN_CMPPD256,
20445 IX86_BUILTIN_CMPPS256,
20446 IX86_BUILTIN_CVTDQ2PD256,
20447 IX86_BUILTIN_CVTDQ2PS256,
20448 IX86_BUILTIN_CVTPD2PS256,
20449 IX86_BUILTIN_CVTPS2DQ256,
20450 IX86_BUILTIN_CVTPS2PD256,
20451 IX86_BUILTIN_CVTTPD2DQ256,
20452 IX86_BUILTIN_CVTPD2DQ256,
20453 IX86_BUILTIN_CVTTPS2DQ256,
20454 IX86_BUILTIN_EXTRACTF128PD256,
20455 IX86_BUILTIN_EXTRACTF128PS256,
20456 IX86_BUILTIN_EXTRACTF128SI256,
20457 IX86_BUILTIN_VZEROALL,
20458 IX86_BUILTIN_VZEROUPPER,
20459 IX86_BUILTIN_VZEROUPPER_REX64,
20460 IX86_BUILTIN_VPERMILVARPD,
20461 IX86_BUILTIN_VPERMILVARPS,
20462 IX86_BUILTIN_VPERMILVARPD256,
20463 IX86_BUILTIN_VPERMILVARPS256,
20464 IX86_BUILTIN_VPERMILPD,
20465 IX86_BUILTIN_VPERMILPS,
20466 IX86_BUILTIN_VPERMILPD256,
20467 IX86_BUILTIN_VPERMILPS256,
20468 IX86_BUILTIN_VPERM2F128PD256,
20469 IX86_BUILTIN_VPERM2F128PS256,
20470 IX86_BUILTIN_VPERM2F128SI256,
20471 IX86_BUILTIN_VBROADCASTSS,
20472 IX86_BUILTIN_VBROADCASTSD256,
20473 IX86_BUILTIN_VBROADCASTSS256,
20474 IX86_BUILTIN_VBROADCASTPD256,
20475 IX86_BUILTIN_VBROADCASTPS256,
20476 IX86_BUILTIN_VINSERTF128PD256,
20477 IX86_BUILTIN_VINSERTF128PS256,
20478 IX86_BUILTIN_VINSERTF128SI256,
20479 IX86_BUILTIN_LOADUPD256,
20480 IX86_BUILTIN_LOADUPS256,
20481 IX86_BUILTIN_STOREUPD256,
20482 IX86_BUILTIN_STOREUPS256,
20483 IX86_BUILTIN_LDDQU256,
20484 IX86_BUILTIN_MOVNTDQ256,
20485 IX86_BUILTIN_MOVNTPD256,
20486 IX86_BUILTIN_MOVNTPS256,
20487 IX86_BUILTIN_LOADDQU256,
20488 IX86_BUILTIN_STOREDQU256,
20489 IX86_BUILTIN_MASKLOADPD,
20490 IX86_BUILTIN_MASKLOADPS,
20491 IX86_BUILTIN_MASKSTOREPD,
20492 IX86_BUILTIN_MASKSTOREPS,
20493 IX86_BUILTIN_MASKLOADPD256,
20494 IX86_BUILTIN_MASKLOADPS256,
20495 IX86_BUILTIN_MASKSTOREPD256,
20496 IX86_BUILTIN_MASKSTOREPS256,
20497 IX86_BUILTIN_MOVSHDUP256,
20498 IX86_BUILTIN_MOVSLDUP256,
20499 IX86_BUILTIN_MOVDDUP256,
20501 IX86_BUILTIN_SQRTPD256,
20502 IX86_BUILTIN_SQRTPS256,
20503 IX86_BUILTIN_SQRTPS_NR256,
20504 IX86_BUILTIN_RSQRTPS256,
20505 IX86_BUILTIN_RSQRTPS_NR256,
20507 IX86_BUILTIN_RCPPS256,
20509 IX86_BUILTIN_ROUNDPD256,
20510 IX86_BUILTIN_ROUNDPS256,
20512 IX86_BUILTIN_UNPCKHPD256,
20513 IX86_BUILTIN_UNPCKLPD256,
20514 IX86_BUILTIN_UNPCKHPS256,
20515 IX86_BUILTIN_UNPCKLPS256,
20517 IX86_BUILTIN_SI256_SI,
20518 IX86_BUILTIN_PS256_PS,
20519 IX86_BUILTIN_PD256_PD,
20520 IX86_BUILTIN_SI_SI256,
20521 IX86_BUILTIN_PS_PS256,
20522 IX86_BUILTIN_PD_PD256,
20524 IX86_BUILTIN_VTESTZPD,
20525 IX86_BUILTIN_VTESTCPD,
20526 IX86_BUILTIN_VTESTNZCPD,
20527 IX86_BUILTIN_VTESTZPS,
20528 IX86_BUILTIN_VTESTCPS,
20529 IX86_BUILTIN_VTESTNZCPS,
20530 IX86_BUILTIN_VTESTZPD256,
20531 IX86_BUILTIN_VTESTCPD256,
20532 IX86_BUILTIN_VTESTNZCPD256,
20533 IX86_BUILTIN_VTESTZPS256,
20534 IX86_BUILTIN_VTESTCPS256,
20535 IX86_BUILTIN_VTESTNZCPS256,
20536 IX86_BUILTIN_PTESTZ256,
20537 IX86_BUILTIN_PTESTC256,
20538 IX86_BUILTIN_PTESTNZC256,
20540 IX86_BUILTIN_MOVMSKPD256,
20541 IX86_BUILTIN_MOVMSKPS256,
20543 /* TFmode support builtins. */
20544 IX86_BUILTIN_INFQ,
20545 IX86_BUILTIN_HUGE_VALQ,
20546 IX86_BUILTIN_FABSQ,
20547 IX86_BUILTIN_COPYSIGNQ,
20549 /* Vectorizer support builtins. */
20550 IX86_BUILTIN_CPYSGNPS,
20551 IX86_BUILTIN_CPYSGNPD,
20553 IX86_BUILTIN_CVTUDQ2PS,
20555 IX86_BUILTIN_MAX
20558 /* Table for the ix86 builtin decls. */
20559 static GTY(()) tree ix86_builtins[(int) IX86_BUILTIN_MAX];
20561 /* Table of all the builtin functions that are possible with different ISAs
20562 but are waiting to be built until a function is declared to use that
20563 ISA. */
20564 struct GTY(()) builtin_isa {
20565 tree type; /* builtin type to use in the declaration */
20566 const char *name; /* function name */
20567 int isa; /* isa_flags this builtin is defined for */
20568 bool const_p; /* true if the declaration is constant */
20571 static GTY(()) struct builtin_isa ix86_builtins_isa[(int) IX86_BUILTIN_MAX];
20574 /* Add an ix86 target builtin function with CODE, NAME and TYPE. Save the MASK
20575 * of which isa_flags to use in the ix86_builtins_isa array. Stores the
20576 * function decl in the ix86_builtins array. Returns the function decl or
20577 * NULL_TREE if the builtin was not added.
20579 * If the front end has a special hook for builtin functions, delay adding
20580 * builtin functions that aren't in the current ISA until the ISA is changed
20581 * with function specific optimization. Doing so can save about 300K for the
20582 * default compiler. When the builtin is expanded, check at that time whether
20583 * it is valid.
20585 * If the front end doesn't have a special hook, record all builtins, even if
20586 * they aren't in the current ISA, in case the user uses function specific
20587 * options for a different ISA, so that we don't get scope errors if a
20588 * builtin is added in the middle of a function scope. */
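/* Illustrative, hypothetical call (the type variable name is made up for
   the example):

     decl = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_loadupd",
                         v2df_ftype_pcdouble, IX86_BUILTIN_LOADUPD);

   If SSE2 is already enabled, or the front end's builtin_function hook is
   simply the ext-scope hook, the builtin is created right away and its
   decl returned; otherwise only the name, type and ISA mask are recorded
   in ix86_builtins_isa so that ix86_add_new_builtins can create the decl
   later. */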
20590 static inline tree
20591 def_builtin (int mask, const char *name, tree type, enum ix86_builtins code)
20593 tree decl = NULL_TREE;
20595 if (!(mask & OPTION_MASK_ISA_64BIT) || TARGET_64BIT)
20597 ix86_builtins_isa[(int) code].isa = mask;
20599 if ((mask & ix86_isa_flags) != 0
20600 || (lang_hooks.builtin_function
20601 == lang_hooks.builtin_function_ext_scope))
20604 decl = add_builtin_function (name, type, code, BUILT_IN_MD, NULL,
20605 NULL_TREE);
20606 ix86_builtins[(int) code] = decl;
20607 ix86_builtins_isa[(int) code].type = NULL_TREE;
20609 else
20611 ix86_builtins[(int) code] = NULL_TREE;
20612 ix86_builtins_isa[(int) code].const_p = false;
20613 ix86_builtins_isa[(int) code].type = type;
20614 ix86_builtins_isa[(int) code].name = name;
20618 return decl;
20621 /* Like def_builtin, but also marks the function decl "const". */
20623 static inline tree
20624 def_builtin_const (int mask, const char *name, tree type,
20625 enum ix86_builtins code)
20627 tree decl = def_builtin (mask, name, type, code);
20628 if (decl)
20629 TREE_READONLY (decl) = 1;
20630 else
20631 ix86_builtins_isa[(int) code].const_p = true;
20633 return decl;
20636 /* Add any new builtin functions for a given ISA that may not have been
20637 declared. This saves a bit of space compared to adding all of the
20638 declarations to the tree up front, even those we never use. */
20640 static void
20641 ix86_add_new_builtins (int isa)
20643 int i;
20644 tree decl;
20646 for (i = 0; i < (int)IX86_BUILTIN_MAX; i++)
20648 if ((ix86_builtins_isa[i].isa & isa) != 0
20649 && ix86_builtins_isa[i].type != NULL_TREE)
20651 decl = add_builtin_function_ext_scope (ix86_builtins_isa[i].name,
20652 ix86_builtins_isa[i].type,
20653 i, BUILT_IN_MD, NULL,
20654 NULL_TREE);
20656 ix86_builtins[i] = decl;
20657 ix86_builtins_isa[i].type = NULL_TREE;
20658 if (ix86_builtins_isa[i].const_p)
20659 TREE_READONLY (decl) = 1;
20664 /* Bits for builtin_description.flag. */
20666 /* Set when we don't support the comparison natively, and should
20667 swap_comparison in order to support it. */
20668 #define BUILTIN_DESC_SWAP_OPERANDS 1
20670 struct builtin_description
20672 const unsigned int mask;
20673 const enum insn_code icode;
20674 const char *const name;
20675 const enum ix86_builtins code;
20676 const enum rtx_code comparison;
20677 const int flag;
20680 static const struct builtin_description bdesc_comi[] =
20682 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comieq", IX86_BUILTIN_COMIEQSS, UNEQ, 0 },
20683 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comilt", IX86_BUILTIN_COMILTSS, UNLT, 0 },
20684 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comile", IX86_BUILTIN_COMILESS, UNLE, 0 },
20685 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comigt", IX86_BUILTIN_COMIGTSS, GT, 0 },
20686 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comige", IX86_BUILTIN_COMIGESS, GE, 0 },
20687 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_comi, "__builtin_ia32_comineq", IX86_BUILTIN_COMINEQSS, LTGT, 0 },
20688 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomieq", IX86_BUILTIN_UCOMIEQSS, UNEQ, 0 },
20689 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomilt", IX86_BUILTIN_UCOMILTSS, UNLT, 0 },
20690 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomile", IX86_BUILTIN_UCOMILESS, UNLE, 0 },
20691 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomigt", IX86_BUILTIN_UCOMIGTSS, GT, 0 },
20692 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomige", IX86_BUILTIN_UCOMIGESS, GE, 0 },
20693 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_ucomi, "__builtin_ia32_ucomineq", IX86_BUILTIN_UCOMINEQSS, LTGT, 0 },
20694 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdeq", IX86_BUILTIN_COMIEQSD, UNEQ, 0 },
20695 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdlt", IX86_BUILTIN_COMILTSD, UNLT, 0 },
20696 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdle", IX86_BUILTIN_COMILESD, UNLE, 0 },
20697 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdgt", IX86_BUILTIN_COMIGTSD, GT, 0 },
20698 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdge", IX86_BUILTIN_COMIGESD, GE, 0 },
20699 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_comi, "__builtin_ia32_comisdneq", IX86_BUILTIN_COMINEQSD, LTGT, 0 },
20700 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdeq", IX86_BUILTIN_UCOMIEQSD, UNEQ, 0 },
20701 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdlt", IX86_BUILTIN_UCOMILTSD, UNLT, 0 },
20702 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdle", IX86_BUILTIN_UCOMILESD, UNLE, 0 },
20703 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdgt", IX86_BUILTIN_UCOMIGTSD, GT, 0 },
20704 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdge", IX86_BUILTIN_UCOMIGESD, GE, 0 },
20705 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ucomi, "__builtin_ia32_ucomisdneq", IX86_BUILTIN_UCOMINEQSD, LTGT, 0 },
20708 static const struct builtin_description bdesc_pcmpestr[] =
20710 /* SSE4.2 */
20711 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestri128", IX86_BUILTIN_PCMPESTRI128, UNKNOWN, 0 },
20712 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrm128", IX86_BUILTIN_PCMPESTRM128, UNKNOWN, 0 },
20713 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestria128", IX86_BUILTIN_PCMPESTRA128, UNKNOWN, (int) CCAmode },
20714 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestric128", IX86_BUILTIN_PCMPESTRC128, UNKNOWN, (int) CCCmode },
20715 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestrio128", IX86_BUILTIN_PCMPESTRO128, UNKNOWN, (int) CCOmode },
20716 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestris128", IX86_BUILTIN_PCMPESTRS128, UNKNOWN, (int) CCSmode },
20717 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpestr, "__builtin_ia32_pcmpestriz128", IX86_BUILTIN_PCMPESTRZ128, UNKNOWN, (int) CCZmode },
20720 static const struct builtin_description bdesc_pcmpistr[] =
20722 /* SSE4.2 */
20723 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistri128", IX86_BUILTIN_PCMPISTRI128, UNKNOWN, 0 },
20724 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrm128", IX86_BUILTIN_PCMPISTRM128, UNKNOWN, 0 },
20725 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistria128", IX86_BUILTIN_PCMPISTRA128, UNKNOWN, (int) CCAmode },
20726 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistric128", IX86_BUILTIN_PCMPISTRC128, UNKNOWN, (int) CCCmode },
20727 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistrio128", IX86_BUILTIN_PCMPISTRO128, UNKNOWN, (int) CCOmode },
20728 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistris128", IX86_BUILTIN_PCMPISTRS128, UNKNOWN, (int) CCSmode },
20729 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_pcmpistr, "__builtin_ia32_pcmpistriz128", IX86_BUILTIN_PCMPISTRZ128, UNKNOWN, (int) CCZmode },
20732 /* Special builtin types */
20733 enum ix86_special_builtin_type
20735 SPECIAL_FTYPE_UNKNOWN,
20736 VOID_FTYPE_VOID,
20737 UINT64_FTYPE_VOID,
20738 UINT64_FTYPE_PUNSIGNED,
20739 V32QI_FTYPE_PCCHAR,
20740 V16QI_FTYPE_PCCHAR,
20741 V8SF_FTYPE_PCV4SF,
20742 V8SF_FTYPE_PCFLOAT,
20743 V4DF_FTYPE_PCV2DF,
20744 V4DF_FTYPE_PCDOUBLE,
20745 V4SF_FTYPE_PCFLOAT,
20746 V2DF_FTYPE_PCDOUBLE,
20747 V8SF_FTYPE_PCV8SF_V8SF,
20748 V4DF_FTYPE_PCV4DF_V4DF,
20749 V4SF_FTYPE_V4SF_PCV2SF,
20750 V4SF_FTYPE_PCV4SF_V4SF,
20751 V2DF_FTYPE_V2DF_PCDOUBLE,
20752 V2DF_FTYPE_PCV2DF_V2DF,
20753 V2DI_FTYPE_PV2DI,
20754 VOID_FTYPE_PV2SF_V4SF,
20755 VOID_FTYPE_PV4DI_V4DI,
20756 VOID_FTYPE_PV2DI_V2DI,
20757 VOID_FTYPE_PCHAR_V32QI,
20758 VOID_FTYPE_PCHAR_V16QI,
20759 VOID_FTYPE_PFLOAT_V8SF,
20760 VOID_FTYPE_PFLOAT_V4SF,
20761 VOID_FTYPE_PDOUBLE_V4DF,
20762 VOID_FTYPE_PDOUBLE_V2DF,
20763 VOID_FTYPE_PDI_DI,
20764 VOID_FTYPE_PINT_INT,
20765 VOID_FTYPE_PV8SF_V8SF_V8SF,
20766 VOID_FTYPE_PV4DF_V4DF_V4DF,
20767 VOID_FTYPE_PV4SF_V4SF_V4SF,
20768 VOID_FTYPE_PV2DF_V2DF_V2DF
20771 /* Builtin types */
20772 enum ix86_builtin_type
20774 FTYPE_UNKNOWN,
20775 FLOAT128_FTYPE_FLOAT128,
20776 FLOAT_FTYPE_FLOAT,
20777 FLOAT128_FTYPE_FLOAT128_FLOAT128,
20778 INT_FTYPE_V8SF_V8SF_PTEST,
20779 INT_FTYPE_V4DI_V4DI_PTEST,
20780 INT_FTYPE_V4DF_V4DF_PTEST,
20781 INT_FTYPE_V4SF_V4SF_PTEST,
20782 INT_FTYPE_V2DI_V2DI_PTEST,
20783 INT_FTYPE_V2DF_V2DF_PTEST,
20784 INT_FTYPE_INT,
20785 UINT64_FTYPE_INT,
20786 INT64_FTYPE_INT64,
20787 INT64_FTYPE_V4SF,
20788 INT64_FTYPE_V2DF,
20789 INT_FTYPE_V16QI,
20790 INT_FTYPE_V8QI,
20791 INT_FTYPE_V8SF,
20792 INT_FTYPE_V4DF,
20793 INT_FTYPE_V4SF,
20794 INT_FTYPE_V2DF,
20795 V16QI_FTYPE_V16QI,
20796 V8SI_FTYPE_V8SF,
20797 V8SI_FTYPE_V4SI,
20798 V8HI_FTYPE_V8HI,
20799 V8HI_FTYPE_V16QI,
20800 V8QI_FTYPE_V8QI,
20801 V8SF_FTYPE_V8SF,
20802 V8SF_FTYPE_V8SI,
20803 V8SF_FTYPE_V4SF,
20804 V4SI_FTYPE_V4SI,
20805 V4SI_FTYPE_V16QI,
20806 V4SI_FTYPE_V8SI,
20807 V4SI_FTYPE_V8HI,
20808 V4SI_FTYPE_V4DF,
20809 V4SI_FTYPE_V4SF,
20810 V4SI_FTYPE_V2DF,
20811 V4HI_FTYPE_V4HI,
20812 V4DF_FTYPE_V4DF,
20813 V4DF_FTYPE_V4SI,
20814 V4DF_FTYPE_V4SF,
20815 V4DF_FTYPE_V2DF,
20816 V4SF_FTYPE_V4DF,
20817 V4SF_FTYPE_V4SF,
20818 V4SF_FTYPE_V4SF_VEC_MERGE,
20819 V4SF_FTYPE_V8SF,
20820 V4SF_FTYPE_V4SI,
20821 V4SF_FTYPE_V2DF,
20822 V2DI_FTYPE_V2DI,
20823 V2DI_FTYPE_V16QI,
20824 V2DI_FTYPE_V8HI,
20825 V2DI_FTYPE_V4SI,
20826 V2DF_FTYPE_V2DF,
20827 V2DF_FTYPE_V2DF_VEC_MERGE,
20828 V2DF_FTYPE_V4SI,
20829 V2DF_FTYPE_V4DF,
20830 V2DF_FTYPE_V4SF,
20831 V2DF_FTYPE_V2SI,
20832 V2SI_FTYPE_V2SI,
20833 V2SI_FTYPE_V4SF,
20834 V2SI_FTYPE_V2SF,
20835 V2SI_FTYPE_V2DF,
20836 V2SF_FTYPE_V2SF,
20837 V2SF_FTYPE_V2SI,
20838 V16QI_FTYPE_V16QI_V16QI,
20839 V16QI_FTYPE_V8HI_V8HI,
20840 V8QI_FTYPE_V8QI_V8QI,
20841 V8QI_FTYPE_V4HI_V4HI,
20842 V8HI_FTYPE_V8HI_V8HI,
20843 V8HI_FTYPE_V8HI_V8HI_COUNT,
20844 V8HI_FTYPE_V16QI_V16QI,
20845 V8HI_FTYPE_V4SI_V4SI,
20846 V8HI_FTYPE_V8HI_SI_COUNT,
20847 V8SF_FTYPE_V8SF_V8SF,
20848 V8SF_FTYPE_V8SF_V8SI,
20849 V4SI_FTYPE_V4SI_V4SI,
20850 V4SI_FTYPE_V4SI_V4SI_COUNT,
20851 V4SI_FTYPE_V8HI_V8HI,
20852 V4SI_FTYPE_V4SF_V4SF,
20853 V4SI_FTYPE_V2DF_V2DF,
20854 V4SI_FTYPE_V4SI_SI_COUNT,
20855 V4HI_FTYPE_V4HI_V4HI,
20856 V4HI_FTYPE_V4HI_V4HI_COUNT,
20857 V4HI_FTYPE_V8QI_V8QI,
20858 V4HI_FTYPE_V2SI_V2SI,
20859 V4HI_FTYPE_V4HI_SI_COUNT,
20860 V4DF_FTYPE_V4DF_V4DF,
20861 V4DF_FTYPE_V4DF_V4DI,
20862 V4SF_FTYPE_V4SF_V4SF,
20863 V4SF_FTYPE_V4SF_V4SF_SWAP,
20864 V4SF_FTYPE_V4SF_V4SI,
20865 V4SF_FTYPE_V4SF_V2SI,
20866 V4SF_FTYPE_V4SF_V2DF,
20867 V4SF_FTYPE_V4SF_DI,
20868 V4SF_FTYPE_V4SF_SI,
20869 V2DI_FTYPE_V2DI_V2DI,
20870 V2DI_FTYPE_V2DI_V2DI_COUNT,
20871 V2DI_FTYPE_V16QI_V16QI,
20872 V2DI_FTYPE_V4SI_V4SI,
20873 V2DI_FTYPE_V2DI_V16QI,
20874 V2DI_FTYPE_V2DF_V2DF,
20875 V2DI_FTYPE_V2DI_SI_COUNT,
20876 V2SI_FTYPE_V2SI_V2SI,
20877 V2SI_FTYPE_V2SI_V2SI_COUNT,
20878 V2SI_FTYPE_V4HI_V4HI,
20879 V2SI_FTYPE_V2SF_V2SF,
20880 V2SI_FTYPE_V2SI_SI_COUNT,
20881 V2DF_FTYPE_V2DF_V2DF,
20882 V2DF_FTYPE_V2DF_V2DF_SWAP,
20883 V2DF_FTYPE_V2DF_V4SF,
20884 V2DF_FTYPE_V2DF_V2DI,
20885 V2DF_FTYPE_V2DF_DI,
20886 V2DF_FTYPE_V2DF_SI,
20887 V2SF_FTYPE_V2SF_V2SF,
20888 V1DI_FTYPE_V1DI_V1DI,
20889 V1DI_FTYPE_V1DI_V1DI_COUNT,
20890 V1DI_FTYPE_V8QI_V8QI,
20891 V1DI_FTYPE_V2SI_V2SI,
20892 V1DI_FTYPE_V1DI_SI_COUNT,
20893 UINT64_FTYPE_UINT64_UINT64,
20894 UINT_FTYPE_UINT_UINT,
20895 UINT_FTYPE_UINT_USHORT,
20896 UINT_FTYPE_UINT_UCHAR,
20897 UINT16_FTYPE_UINT16_INT,
20898 UINT8_FTYPE_UINT8_INT,
20899 V8HI_FTYPE_V8HI_INT,
20900 V4SI_FTYPE_V4SI_INT,
20901 V4HI_FTYPE_V4HI_INT,
20902 V8SF_FTYPE_V8SF_INT,
20903 V4SI_FTYPE_V8SI_INT,
20904 V4SF_FTYPE_V8SF_INT,
20905 V2DF_FTYPE_V4DF_INT,
20906 V4DF_FTYPE_V4DF_INT,
20907 V4SF_FTYPE_V4SF_INT,
20908 V2DI_FTYPE_V2DI_INT,
20909 V2DI2TI_FTYPE_V2DI_INT,
20910 V2DF_FTYPE_V2DF_INT,
20911 V16QI_FTYPE_V16QI_V16QI_V16QI,
20912 V8SF_FTYPE_V8SF_V8SF_V8SF,
20913 V4DF_FTYPE_V4DF_V4DF_V4DF,
20914 V4SF_FTYPE_V4SF_V4SF_V4SF,
20915 V2DF_FTYPE_V2DF_V2DF_V2DF,
20916 V16QI_FTYPE_V16QI_V16QI_INT,
20917 V8SI_FTYPE_V8SI_V8SI_INT,
20918 V8SI_FTYPE_V8SI_V4SI_INT,
20919 V8HI_FTYPE_V8HI_V8HI_INT,
20920 V8SF_FTYPE_V8SF_V8SF_INT,
20921 V8SF_FTYPE_V8SF_V4SF_INT,
20922 V4SI_FTYPE_V4SI_V4SI_INT,
20923 V4DF_FTYPE_V4DF_V4DF_INT,
20924 V4DF_FTYPE_V4DF_V2DF_INT,
20925 V4SF_FTYPE_V4SF_V4SF_INT,
20926 V2DI_FTYPE_V2DI_V2DI_INT,
20927 V2DI2TI_FTYPE_V2DI_V2DI_INT,
20928 V1DI2DI_FTYPE_V1DI_V1DI_INT,
20929 V2DF_FTYPE_V2DF_V2DF_INT,
20930 V2DI_FTYPE_V2DI_UINT_UINT,
20931 V2DI_FTYPE_V2DI_V2DI_UINT_UINT
20934 /* Special builtins with variable number of arguments. */
20935 static const struct builtin_description bdesc_special_args[] =
20937 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtsc, "__builtin_ia32_rdtsc", IX86_BUILTIN_RDTSC, UNKNOWN, (int) UINT64_FTYPE_VOID },
20938 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdtscp, "__builtin_ia32_rdtscp", IX86_BUILTIN_RDTSCP, UNKNOWN, (int) UINT64_FTYPE_PUNSIGNED },
20940 /* MMX */
20941 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_emms, "__builtin_ia32_emms", IX86_BUILTIN_EMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20943 /* 3DNow! */
20944 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_femms, "__builtin_ia32_femms", IX86_BUILTIN_FEMMS, UNKNOWN, (int) VOID_FTYPE_VOID },
20946 /* SSE */
20947 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_storeups", IX86_BUILTIN_STOREUPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20948 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movntv4sf, "__builtin_ia32_movntps", IX86_BUILTIN_MOVNTPS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20949 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movups, "__builtin_ia32_loadups", IX86_BUILTIN_LOADUPS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20951 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadhps_exp, "__builtin_ia32_loadhps", IX86_BUILTIN_LOADHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20952 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_loadlps_exp, "__builtin_ia32_loadlps", IX86_BUILTIN_LOADLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_PCV2SF },
20953 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storehps, "__builtin_ia32_storehps", IX86_BUILTIN_STOREHPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20954 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_storelps, "__builtin_ia32_storelps", IX86_BUILTIN_STORELPS, UNKNOWN, (int) VOID_FTYPE_PV2SF_V4SF },
20956 /* SSE or 3DNow!A */
20957 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_sfence, "__builtin_ia32_sfence", IX86_BUILTIN_SFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20958 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_sse_movntdi, "__builtin_ia32_movntq", IX86_BUILTIN_MOVNTQ, UNKNOWN, (int) VOID_FTYPE_PDI_DI },
20960 /* SSE2 */
20961 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lfence, "__builtin_ia32_lfence", IX86_BUILTIN_LFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20962 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_mfence, 0, IX86_BUILTIN_MFENCE, UNKNOWN, (int) VOID_FTYPE_VOID },
20963 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_storeupd", IX86_BUILTIN_STOREUPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20964 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_storedqu", IX86_BUILTIN_STOREDQU, UNKNOWN, (int) VOID_FTYPE_PCHAR_V16QI },
20965 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2df, "__builtin_ia32_movntpd", IX86_BUILTIN_MOVNTPD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20966 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntv2di, "__builtin_ia32_movntdq", IX86_BUILTIN_MOVNTDQ, UNKNOWN, (int) VOID_FTYPE_PV2DI_V2DI },
20967 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movntsi, "__builtin_ia32_movnti", IX86_BUILTIN_MOVNTI, UNKNOWN, (int) VOID_FTYPE_PINT_INT },
20968 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movupd, "__builtin_ia32_loadupd", IX86_BUILTIN_LOADUPD, UNKNOWN, (int) V2DF_FTYPE_PCDOUBLE },
20969 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movdqu, "__builtin_ia32_loaddqu", IX86_BUILTIN_LOADDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20971 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadhpd_exp, "__builtin_ia32_loadhpd", IX86_BUILTIN_LOADHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20972 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_loadlpd_exp, "__builtin_ia32_loadlpd", IX86_BUILTIN_LOADLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_PCDOUBLE },
20974 /* SSE3 */
20975 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_lddqu, "__builtin_ia32_lddqu", IX86_BUILTIN_LDDQU, UNKNOWN, (int) V16QI_FTYPE_PCCHAR },
20977 /* SSE4.1 */
20978 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_movntdqa, "__builtin_ia32_movntdqa", IX86_BUILTIN_MOVNTDQA, UNKNOWN, (int) V2DI_FTYPE_PV2DI },
20980 /* SSE4A */
20981 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv2df, "__builtin_ia32_movntsd", IX86_BUILTIN_MOVNTSD, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V2DF },
20982 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_vmmovntv4sf, "__builtin_ia32_movntss", IX86_BUILTIN_MOVNTSS, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V4SF },
20984 /* AVX */
20985 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroall, "__builtin_ia32_vzeroall", IX86_BUILTIN_VZEROALL, UNKNOWN, (int) VOID_FTYPE_VOID },
20986 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vzeroupper, 0, IX86_BUILTIN_VZEROUPPER, UNKNOWN, (int) VOID_FTYPE_VOID },
20987 { OPTION_MASK_ISA_AVX | OPTION_MASK_ISA_64BIT, CODE_FOR_avx_vzeroupper_rex64, 0, IX86_BUILTIN_VZEROUPPER_REX64, UNKNOWN, (int) VOID_FTYPE_VOID },
20989 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss, "__builtin_ia32_vbroadcastss", IX86_BUILTIN_VBROADCASTSS, UNKNOWN, (int) V4SF_FTYPE_PCFLOAT },
20990 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastsd256, "__builtin_ia32_vbroadcastsd256", IX86_BUILTIN_VBROADCASTSD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20991 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastss256, "__builtin_ia32_vbroadcastss256", IX86_BUILTIN_VBROADCASTSS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20992 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_pd256, "__builtin_ia32_vbroadcastf128_pd256", IX86_BUILTIN_VBROADCASTPD256, UNKNOWN, (int) V4DF_FTYPE_PCV2DF },
20993 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vbroadcastf128_ps256, "__builtin_ia32_vbroadcastf128_ps256", IX86_BUILTIN_VBROADCASTPS256, UNKNOWN, (int) V8SF_FTYPE_PCV4SF },
20995 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_loadupd256", IX86_BUILTIN_LOADUPD256, UNKNOWN, (int) V4DF_FTYPE_PCDOUBLE },
20996 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_loadups256", IX86_BUILTIN_LOADUPS256, UNKNOWN, (int) V8SF_FTYPE_PCFLOAT },
20997 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movupd256, "__builtin_ia32_storeupd256", IX86_BUILTIN_STOREUPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
20998 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movups256, "__builtin_ia32_storeups256", IX86_BUILTIN_STOREUPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
20999 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_loaddqu256", IX86_BUILTIN_LOADDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21000 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movdqu256, "__builtin_ia32_storedqu256", IX86_BUILTIN_STOREDQU256, UNKNOWN, (int) VOID_FTYPE_PCHAR_V32QI },
21001 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_lddqu256, "__builtin_ia32_lddqu256", IX86_BUILTIN_LDDQU256, UNKNOWN, (int) V32QI_FTYPE_PCCHAR },
21003 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4di, "__builtin_ia32_movntdq256", IX86_BUILTIN_MOVNTDQ256, UNKNOWN, (int) VOID_FTYPE_PV4DI_V4DI },
21004 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv4df, "__builtin_ia32_movntpd256", IX86_BUILTIN_MOVNTPD256, UNKNOWN, (int) VOID_FTYPE_PDOUBLE_V4DF },
21005 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movntv8sf, "__builtin_ia32_movntps256", IX86_BUILTIN_MOVNTPS256, UNKNOWN, (int) VOID_FTYPE_PFLOAT_V8SF },
21007 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd, "__builtin_ia32_maskloadpd", IX86_BUILTIN_MASKLOADPD, UNKNOWN, (int) V2DF_FTYPE_PCV2DF_V2DF },
21008 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps, "__builtin_ia32_maskloadps", IX86_BUILTIN_MASKLOADPS, UNKNOWN, (int) V4SF_FTYPE_PCV4SF_V4SF },
21009 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadpd256, "__builtin_ia32_maskloadpd256", IX86_BUILTIN_MASKLOADPD256, UNKNOWN, (int) V4DF_FTYPE_PCV4DF_V4DF },
21010 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskloadps256, "__builtin_ia32_maskloadps256", IX86_BUILTIN_MASKLOADPS256, UNKNOWN, (int) V8SF_FTYPE_PCV8SF_V8SF },
21011 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd, "__builtin_ia32_maskstorepd", IX86_BUILTIN_MASKSTOREPD, UNKNOWN, (int) VOID_FTYPE_PV2DF_V2DF_V2DF },
21012 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps, "__builtin_ia32_maskstoreps", IX86_BUILTIN_MASKSTOREPS, UNKNOWN, (int) VOID_FTYPE_PV4SF_V4SF_V4SF },
21013 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstorepd256, "__builtin_ia32_maskstorepd256", IX86_BUILTIN_MASKSTOREPD256, UNKNOWN, (int) VOID_FTYPE_PV4DF_V4DF_V4DF },
21014 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_maskstoreps256, "__builtin_ia32_maskstoreps256", IX86_BUILTIN_MASKSTOREPS256, UNKNOWN, (int) VOID_FTYPE_PV8SF_V8SF_V8SF },
21017 /* Builtins with variable number of arguments. */
21018 static const struct builtin_description bdesc_args[] =
21020 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_bsr, "__builtin_ia32_bsrsi", IX86_BUILTIN_BSRSI, UNKNOWN, (int) INT_FTYPE_INT },
21021 { OPTION_MASK_ISA_64BIT, CODE_FOR_bsr_rex64, "__builtin_ia32_bsrdi", IX86_BUILTIN_BSRDI, UNKNOWN, (int) INT64_FTYPE_INT64 },
21022 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rdpmc, "__builtin_ia32_rdpmc", IX86_BUILTIN_RDPMC, UNKNOWN, (int) UINT64_FTYPE_INT },
21023 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlqi3, "__builtin_ia32_rolqi", IX86_BUILTIN_ROLQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21024 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotlhi3, "__builtin_ia32_rolhi", IX86_BUILTIN_ROLHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21025 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrqi3, "__builtin_ia32_rorqi", IX86_BUILTIN_RORQI, UNKNOWN, (int) UINT8_FTYPE_UINT8_INT },
21026 { ~OPTION_MASK_ISA_64BIT, CODE_FOR_rotrhi3, "__builtin_ia32_rorhi", IX86_BUILTIN_RORHI, UNKNOWN, (int) UINT16_FTYPE_UINT16_INT },
21028 /* MMX */
21029 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv8qi3, "__builtin_ia32_paddb", IX86_BUILTIN_PADDB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21030 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv4hi3, "__builtin_ia32_paddw", IX86_BUILTIN_PADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21031 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_addv2si3, "__builtin_ia32_paddd", IX86_BUILTIN_PADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21032 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv8qi3, "__builtin_ia32_psubb", IX86_BUILTIN_PSUBB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21033 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv4hi3, "__builtin_ia32_psubw", IX86_BUILTIN_PSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21034 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_subv2si3, "__builtin_ia32_psubd", IX86_BUILTIN_PSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21036 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv8qi3, "__builtin_ia32_paddsb", IX86_BUILTIN_PADDSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21037 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ssaddv4hi3, "__builtin_ia32_paddsw", IX86_BUILTIN_PADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21038 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv8qi3, "__builtin_ia32_psubsb", IX86_BUILTIN_PSUBSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21039 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_sssubv4hi3, "__builtin_ia32_psubsw", IX86_BUILTIN_PSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21040 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv8qi3, "__builtin_ia32_paddusb", IX86_BUILTIN_PADDUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21041 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_usaddv4hi3, "__builtin_ia32_paddusw", IX86_BUILTIN_PADDUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21042 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv8qi3, "__builtin_ia32_psubusb", IX86_BUILTIN_PSUBUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21043 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ussubv4hi3, "__builtin_ia32_psubusw", IX86_BUILTIN_PSUBUSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21045 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_mulv4hi3, "__builtin_ia32_pmullw", IX86_BUILTIN_PMULLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21046 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_smulv4hi3_highpart, "__builtin_ia32_pmulhw", IX86_BUILTIN_PMULHW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21048 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andv2si3, "__builtin_ia32_pand", IX86_BUILTIN_PAND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21049 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_andnotv2si3, "__builtin_ia32_pandn", IX86_BUILTIN_PANDN, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21050 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_iorv2si3, "__builtin_ia32_por", IX86_BUILTIN_POR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21051 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_xorv2si3, "__builtin_ia32_pxor", IX86_BUILTIN_PXOR, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21053 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv8qi3, "__builtin_ia32_pcmpeqb", IX86_BUILTIN_PCMPEQB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21054 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv4hi3, "__builtin_ia32_pcmpeqw", IX86_BUILTIN_PCMPEQW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21055 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_eqv2si3, "__builtin_ia32_pcmpeqd", IX86_BUILTIN_PCMPEQD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21056 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv8qi3, "__builtin_ia32_pcmpgtb", IX86_BUILTIN_PCMPGTB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21057 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv4hi3, "__builtin_ia32_pcmpgtw", IX86_BUILTIN_PCMPGTW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21058 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_gtv2si3, "__builtin_ia32_pcmpgtd", IX86_BUILTIN_PCMPGTD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21060 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhbw, "__builtin_ia32_punpckhbw", IX86_BUILTIN_PUNPCKHBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21061 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhwd, "__builtin_ia32_punpckhwd", IX86_BUILTIN_PUNPCKHWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21062 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckhdq, "__builtin_ia32_punpckhdq", IX86_BUILTIN_PUNPCKHDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21063 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklbw, "__builtin_ia32_punpcklbw", IX86_BUILTIN_PUNPCKLBW, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21064   { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpcklwd, "__builtin_ia32_punpcklwd", IX86_BUILTIN_PUNPCKLWD, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21065   { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_punpckldq, "__builtin_ia32_punpckldq", IX86_BUILTIN_PUNPCKLDQ, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21067 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packsswb, "__builtin_ia32_packsswb", IX86_BUILTIN_PACKSSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21068 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packssdw, "__builtin_ia32_packssdw", IX86_BUILTIN_PACKSSDW, UNKNOWN, (int) V4HI_FTYPE_V2SI_V2SI },
21069 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_packuswb, "__builtin_ia32_packuswb", IX86_BUILTIN_PACKUSWB, UNKNOWN, (int) V8QI_FTYPE_V4HI_V4HI },
21071 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_pmaddwd, "__builtin_ia32_pmaddwd", IX86_BUILTIN_PMADDWD, UNKNOWN, (int) V2SI_FTYPE_V4HI_V4HI },
21073 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllwi", IX86_BUILTIN_PSLLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21074 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslldi", IX86_BUILTIN_PSLLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21075 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllqi", IX86_BUILTIN_PSLLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21076 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv4hi3, "__builtin_ia32_psllw", IX86_BUILTIN_PSLLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21077 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv2si3, "__builtin_ia32_pslld", IX86_BUILTIN_PSLLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21078 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashlv1di3, "__builtin_ia32_psllq", IX86_BUILTIN_PSLLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21080 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlwi", IX86_BUILTIN_PSRLWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21081 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrldi", IX86_BUILTIN_PSRLDI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21082 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlqi", IX86_BUILTIN_PSRLQI, UNKNOWN, (int) V1DI_FTYPE_V1DI_SI_COUNT },
21083 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv4hi3, "__builtin_ia32_psrlw", IX86_BUILTIN_PSRLW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21084 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv2si3, "__builtin_ia32_psrld", IX86_BUILTIN_PSRLD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21085 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_lshrv1di3, "__builtin_ia32_psrlq", IX86_BUILTIN_PSRLQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI_COUNT },
21087 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psrawi", IX86_BUILTIN_PSRAWI, UNKNOWN, (int) V4HI_FTYPE_V4HI_SI_COUNT },
21088 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psradi", IX86_BUILTIN_PSRADI, UNKNOWN, (int) V2SI_FTYPE_V2SI_SI_COUNT },
21089 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv4hi3, "__builtin_ia32_psraw", IX86_BUILTIN_PSRAW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI_COUNT },
21090 { OPTION_MASK_ISA_MMX, CODE_FOR_mmx_ashrv2si3, "__builtin_ia32_psrad", IX86_BUILTIN_PSRAD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI_COUNT },
21092 /* 3DNow! */
21093 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pf2id, "__builtin_ia32_pf2id", IX86_BUILTIN_PF2ID, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21094 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_floatv2si2, "__builtin_ia32_pi2fd", IX86_BUILTIN_PI2FD, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21095 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpv2sf2, "__builtin_ia32_pfrcp", IX86_BUILTIN_PFRCP, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21096 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqrtv2sf2, "__builtin_ia32_pfrsqrt", IX86_BUILTIN_PFRSQRT, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21098 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgusb", IX86_BUILTIN_PAVGUSB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21099 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_haddv2sf3, "__builtin_ia32_pfacc", IX86_BUILTIN_PFACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21100 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_addv2sf3, "__builtin_ia32_pfadd", IX86_BUILTIN_PFADD, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21101 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_eqv2sf3, "__builtin_ia32_pfcmpeq", IX86_BUILTIN_PFCMPEQ, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21102 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gev2sf3, "__builtin_ia32_pfcmpge", IX86_BUILTIN_PFCMPGE, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21103 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_gtv2sf3, "__builtin_ia32_pfcmpgt", IX86_BUILTIN_PFCMPGT, UNKNOWN, (int) V2SI_FTYPE_V2SF_V2SF },
21104 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_smaxv2sf3, "__builtin_ia32_pfmax", IX86_BUILTIN_PFMAX, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21105 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_sminv2sf3, "__builtin_ia32_pfmin", IX86_BUILTIN_PFMIN, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21106 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_mulv2sf3, "__builtin_ia32_pfmul", IX86_BUILTIN_PFMUL, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21107 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit1v2sf3, "__builtin_ia32_pfrcpit1", IX86_BUILTIN_PFRCPIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21108 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rcpit2v2sf3, "__builtin_ia32_pfrcpit2", IX86_BUILTIN_PFRCPIT2, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21109 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_rsqit1v2sf3, "__builtin_ia32_pfrsqit1", IX86_BUILTIN_PFRSQIT1, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21110 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subv2sf3, "__builtin_ia32_pfsub", IX86_BUILTIN_PFSUB, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21111 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_subrv2sf3, "__builtin_ia32_pfsubr", IX86_BUILTIN_PFSUBR, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21112 { OPTION_MASK_ISA_3DNOW, CODE_FOR_mmx_pmulhrwv4hi3, "__builtin_ia32_pmulhrw", IX86_BUILTIN_PMULHRW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21114 /* 3DNow!A */
21115 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pf2iw, "__builtin_ia32_pf2iw", IX86_BUILTIN_PF2IW, UNKNOWN, (int) V2SI_FTYPE_V2SF },
21116 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pi2fw, "__builtin_ia32_pi2fw", IX86_BUILTIN_PI2FW, UNKNOWN, (int) V2SF_FTYPE_V2SI },
21117 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2si2, "__builtin_ia32_pswapdsi", IX86_BUILTIN_PSWAPDSI, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21118 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pswapdv2sf2, "__builtin_ia32_pswapdsf", IX86_BUILTIN_PSWAPDSF, UNKNOWN, (int) V2SF_FTYPE_V2SF },
21119 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_hsubv2sf3, "__builtin_ia32_pfnacc", IX86_BUILTIN_PFNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21120 { OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_addsubv2sf3, "__builtin_ia32_pfpnacc", IX86_BUILTIN_PFPNACC, UNKNOWN, (int) V2SF_FTYPE_V2SF_V2SF },
21122 /* SSE */
21123 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movmskps, "__builtin_ia32_movmskps", IX86_BUILTIN_MOVMSKPS, UNKNOWN, (int) INT_FTYPE_V4SF },
21124 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_sqrtv4sf2, "__builtin_ia32_sqrtps", IX86_BUILTIN_SQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21125 { OPTION_MASK_ISA_SSE, CODE_FOR_sqrtv4sf2, "__builtin_ia32_sqrtps_nr", IX86_BUILTIN_SQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21126 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rsqrtv4sf2, "__builtin_ia32_rsqrtps", IX86_BUILTIN_RSQRTPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21127 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtv4sf2, "__builtin_ia32_rsqrtps_nr", IX86_BUILTIN_RSQRTPS_NR, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21128 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_rcpv4sf2, "__builtin_ia32_rcpps", IX86_BUILTIN_RCPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21129 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtps2pi, "__builtin_ia32_cvtps2pi", IX86_BUILTIN_CVTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21130 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtss2si, "__builtin_ia32_cvtss2si", IX86_BUILTIN_CVTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21131 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtss2siq, "__builtin_ia32_cvtss2si64", IX86_BUILTIN_CVTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21132 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttps2pi, "__builtin_ia32_cvttps2pi", IX86_BUILTIN_CVTTPS2PI, UNKNOWN, (int) V2SI_FTYPE_V4SF },
21133 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvttss2si, "__builtin_ia32_cvttss2si", IX86_BUILTIN_CVTTSS2SI, UNKNOWN, (int) INT_FTYPE_V4SF },
21134 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvttss2siq, "__builtin_ia32_cvttss2si64", IX86_BUILTIN_CVTTSS2SI64, UNKNOWN, (int) INT64_FTYPE_V4SF },
21136 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_shufps, "__builtin_ia32_shufps", IX86_BUILTIN_SHUFPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21138 { OPTION_MASK_ISA_SSE, CODE_FOR_addv4sf3, "__builtin_ia32_addps", IX86_BUILTIN_ADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21139 { OPTION_MASK_ISA_SSE, CODE_FOR_subv4sf3, "__builtin_ia32_subps", IX86_BUILTIN_SUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21140 { OPTION_MASK_ISA_SSE, CODE_FOR_mulv4sf3, "__builtin_ia32_mulps", IX86_BUILTIN_MULPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21141 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_divv4sf3, "__builtin_ia32_divps", IX86_BUILTIN_DIVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21142 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmaddv4sf3, "__builtin_ia32_addss", IX86_BUILTIN_ADDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21143 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsubv4sf3, "__builtin_ia32_subss", IX86_BUILTIN_SUBSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21144 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmulv4sf3, "__builtin_ia32_mulss", IX86_BUILTIN_MULSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21145 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmdivv4sf3, "__builtin_ia32_divss", IX86_BUILTIN_DIVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21147 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpeqps", IX86_BUILTIN_CMPEQPS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21148 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpltps", IX86_BUILTIN_CMPLTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21149 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpleps", IX86_BUILTIN_CMPLEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21150 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgtps", IX86_BUILTIN_CMPGTPS, LT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21151 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpgeps", IX86_BUILTIN_CMPGEPS, LE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21152 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpunordps", IX86_BUILTIN_CMPUNORDPS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21153 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpneqps", IX86_BUILTIN_CMPNEQPS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21154 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnltps", IX86_BUILTIN_CMPNLTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21155 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpnleps", IX86_BUILTIN_CMPNLEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21156 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngtps", IX86_BUILTIN_CMPNGTPS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21157 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpngeps", IX86_BUILTIN_CMPNGEPS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP},
21158 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_maskcmpv4sf3, "__builtin_ia32_cmpordps", IX86_BUILTIN_CMPORDPS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21159 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpeqss", IX86_BUILTIN_CMPEQSS, EQ, (int) V4SF_FTYPE_V4SF_V4SF },
21160 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpltss", IX86_BUILTIN_CMPLTSS, LT, (int) V4SF_FTYPE_V4SF_V4SF },
21161 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpless", IX86_BUILTIN_CMPLESS, LE, (int) V4SF_FTYPE_V4SF_V4SF },
21162 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpunordss", IX86_BUILTIN_CMPUNORDSS, UNORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21163 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpneqss", IX86_BUILTIN_CMPNEQSS, NE, (int) V4SF_FTYPE_V4SF_V4SF },
21164 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnltss", IX86_BUILTIN_CMPNLTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF },
21165 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpnless", IX86_BUILTIN_CMPNLESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF },
21166 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngtss", IX86_BUILTIN_CMPNGTSS, UNGE, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21167 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpngess", IX86_BUILTIN_CMPNGESS, UNGT, (int) V4SF_FTYPE_V4SF_V4SF_SWAP },
21168 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmmaskcmpv4sf3, "__builtin_ia32_cmpordss", IX86_BUILTIN_CMPORDSS, ORDERED, (int) V4SF_FTYPE_V4SF_V4SF },
21170 { OPTION_MASK_ISA_SSE, CODE_FOR_sminv4sf3, "__builtin_ia32_minps", IX86_BUILTIN_MINPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21171 { OPTION_MASK_ISA_SSE, CODE_FOR_smaxv4sf3, "__builtin_ia32_maxps", IX86_BUILTIN_MAXPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21172 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsminv4sf3, "__builtin_ia32_minss", IX86_BUILTIN_MINSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21173 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsmaxv4sf3, "__builtin_ia32_maxss", IX86_BUILTIN_MAXSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21175 { OPTION_MASK_ISA_SSE, CODE_FOR_andv4sf3, "__builtin_ia32_andps", IX86_BUILTIN_ANDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21176 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_andnotv4sf3, "__builtin_ia32_andnps", IX86_BUILTIN_ANDNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21177 { OPTION_MASK_ISA_SSE, CODE_FOR_iorv4sf3, "__builtin_ia32_orps", IX86_BUILTIN_ORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21178 { OPTION_MASK_ISA_SSE, CODE_FOR_xorv4sf3, "__builtin_ia32_xorps", IX86_BUILTIN_XORPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21180 { OPTION_MASK_ISA_SSE, CODE_FOR_copysignv4sf3, "__builtin_ia32_copysignps", IX86_BUILTIN_CPYSGNPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21182 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movss, "__builtin_ia32_movss", IX86_BUILTIN_MOVSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21183 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movhlps_exp, "__builtin_ia32_movhlps", IX86_BUILTIN_MOVHLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21184 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_movlhps_exp, "__builtin_ia32_movlhps", IX86_BUILTIN_MOVLHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21185 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpckhps, "__builtin_ia32_unpckhps", IX86_BUILTIN_UNPCKHPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21186 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_unpcklps, "__builtin_ia32_unpcklps", IX86_BUILTIN_UNPCKLPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21188 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtpi2ps, "__builtin_ia32_cvtpi2ps", IX86_BUILTIN_CVTPI2PS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2SI },
21189 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_cvtsi2ss, "__builtin_ia32_cvtsi2ss", IX86_BUILTIN_CVTSI2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_SI },
21190   { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_64BIT, CODE_FOR_sse_cvtsi2ssq, "__builtin_ia32_cvtsi642ss", IX86_BUILTIN_CVTSI642SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_DI },
21192 { OPTION_MASK_ISA_SSE, CODE_FOR_rsqrtsf2, "__builtin_ia32_rsqrtf", IX86_BUILTIN_RSQRTF, UNKNOWN, (int) FLOAT_FTYPE_FLOAT },
21194 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmsqrtv4sf2, "__builtin_ia32_sqrtss", IX86_BUILTIN_SQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21195 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrsqrtv4sf2, "__builtin_ia32_rsqrtss", IX86_BUILTIN_RSQRTSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21196 { OPTION_MASK_ISA_SSE, CODE_FOR_sse_vmrcpv4sf2, "__builtin_ia32_rcpss", IX86_BUILTIN_RCPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_VEC_MERGE },
21198   /* SSE MMX or 3DNow!A */
21199 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv8qi3, "__builtin_ia32_pavgb", IX86_BUILTIN_PAVGB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21200 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uavgv4hi3, "__builtin_ia32_pavgw", IX86_BUILTIN_PAVGW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21201 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umulv4hi3_highpart, "__builtin_ia32_pmulhuw", IX86_BUILTIN_PMULHUW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21203 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_umaxv8qi3, "__builtin_ia32_pmaxub", IX86_BUILTIN_PMAXUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21204 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_smaxv4hi3, "__builtin_ia32_pmaxsw", IX86_BUILTIN_PMAXSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21205 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_uminv8qi3, "__builtin_ia32_pminub", IX86_BUILTIN_PMINUB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21206 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_sminv4hi3, "__builtin_ia32_pminsw", IX86_BUILTIN_PMINSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21208 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_psadbw, "__builtin_ia32_psadbw", IX86_BUILTIN_PSADBW, UNKNOWN, (int) V1DI_FTYPE_V8QI_V8QI },
21209 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pmovmskb, "__builtin_ia32_pmovmskb", IX86_BUILTIN_PMOVMSKB, UNKNOWN, (int) INT_FTYPE_V8QI },
21211 { OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, CODE_FOR_mmx_pshufw, "__builtin_ia32_pshufw", IX86_BUILTIN_PSHUFW, UNKNOWN, (int) V4HI_FTYPE_V4HI_INT },
21213 /* SSE2 */
21214 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_shufpd, "__builtin_ia32_shufpd", IX86_BUILTIN_SHUFPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21216 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movmskpd, "__builtin_ia32_movmskpd", IX86_BUILTIN_MOVMSKPD, UNKNOWN, (int) INT_FTYPE_V2DF },
21217 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmovmskb, "__builtin_ia32_pmovmskb128", IX86_BUILTIN_PMOVMSKB128, UNKNOWN, (int) INT_FTYPE_V16QI },
21218 { OPTION_MASK_ISA_SSE2, CODE_FOR_sqrtv2df2, "__builtin_ia32_sqrtpd", IX86_BUILTIN_SQRTPD, UNKNOWN, (int) V2DF_FTYPE_V2DF },
21219 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2pd, "__builtin_ia32_cvtdq2pd", IX86_BUILTIN_CVTDQ2PD, UNKNOWN, (int) V2DF_FTYPE_V4SI },
21220 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtdq2ps, "__builtin_ia32_cvtdq2ps", IX86_BUILTIN_CVTDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21221 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtudq2ps, "__builtin_ia32_cvtudq2ps", IX86_BUILTIN_CVTUDQ2PS, UNKNOWN, (int) V4SF_FTYPE_V4SI },
21223 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2dq, "__builtin_ia32_cvtpd2dq", IX86_BUILTIN_CVTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21224 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2pi, "__builtin_ia32_cvtpd2pi", IX86_BUILTIN_CVTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21225 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpd2ps, "__builtin_ia32_cvtpd2ps", IX86_BUILTIN_CVTPD2PS, UNKNOWN, (int) V4SF_FTYPE_V2DF },
21226 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2dq, "__builtin_ia32_cvttpd2dq", IX86_BUILTIN_CVTTPD2DQ, UNKNOWN, (int) V4SI_FTYPE_V2DF },
21227 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttpd2pi, "__builtin_ia32_cvttpd2pi", IX86_BUILTIN_CVTTPD2PI, UNKNOWN, (int) V2SI_FTYPE_V2DF },
21229 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtpi2pd, "__builtin_ia32_cvtpi2pd", IX86_BUILTIN_CVTPI2PD, UNKNOWN, (int) V2DF_FTYPE_V2SI },
21231 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2si, "__builtin_ia32_cvtsd2si", IX86_BUILTIN_CVTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21232 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttsd2si, "__builtin_ia32_cvttsd2si", IX86_BUILTIN_CVTTSD2SI, UNKNOWN, (int) INT_FTYPE_V2DF },
21233 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsd2siq, "__builtin_ia32_cvtsd2si64", IX86_BUILTIN_CVTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21234 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvttsd2siq, "__builtin_ia32_cvttsd2si64", IX86_BUILTIN_CVTTSD2SI64, UNKNOWN, (int) INT64_FTYPE_V2DF },
21236 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2dq, "__builtin_ia32_cvtps2dq", IX86_BUILTIN_CVTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21237 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtps2pd, "__builtin_ia32_cvtps2pd", IX86_BUILTIN_CVTPS2PD, UNKNOWN, (int) V2DF_FTYPE_V4SF },
21238 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvttps2dq, "__builtin_ia32_cvttps2dq", IX86_BUILTIN_CVTTPS2DQ, UNKNOWN, (int) V4SI_FTYPE_V4SF },
21240 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2df3, "__builtin_ia32_addpd", IX86_BUILTIN_ADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21241 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2df3, "__builtin_ia32_subpd", IX86_BUILTIN_SUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21242 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv2df3, "__builtin_ia32_mulpd", IX86_BUILTIN_MULPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21243 { OPTION_MASK_ISA_SSE2, CODE_FOR_divv2df3, "__builtin_ia32_divpd", IX86_BUILTIN_DIVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21244 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmaddv2df3, "__builtin_ia32_addsd", IX86_BUILTIN_ADDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21245 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsubv2df3, "__builtin_ia32_subsd", IX86_BUILTIN_SUBSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21246 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmulv2df3, "__builtin_ia32_mulsd", IX86_BUILTIN_MULSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21247 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmdivv2df3, "__builtin_ia32_divsd", IX86_BUILTIN_DIVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21249 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpeqpd", IX86_BUILTIN_CMPEQPD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21250 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpltpd", IX86_BUILTIN_CMPLTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21251 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmplepd", IX86_BUILTIN_CMPLEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21252 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgtpd", IX86_BUILTIN_CMPGTPD, LT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21253 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpgepd", IX86_BUILTIN_CMPGEPD, LE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP},
21254 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpunordpd", IX86_BUILTIN_CMPUNORDPD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21255 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpneqpd", IX86_BUILTIN_CMPNEQPD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21256 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnltpd", IX86_BUILTIN_CMPNLTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21257 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpnlepd", IX86_BUILTIN_CMPNLEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21258 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngtpd", IX86_BUILTIN_CMPNGTPD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21259 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpngepd", IX86_BUILTIN_CMPNGEPD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF_SWAP },
21260 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_maskcmpv2df3, "__builtin_ia32_cmpordpd", IX86_BUILTIN_CMPORDPD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21261 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpeqsd", IX86_BUILTIN_CMPEQSD, EQ, (int) V2DF_FTYPE_V2DF_V2DF },
21262 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpltsd", IX86_BUILTIN_CMPLTSD, LT, (int) V2DF_FTYPE_V2DF_V2DF },
21263 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmplesd", IX86_BUILTIN_CMPLESD, LE, (int) V2DF_FTYPE_V2DF_V2DF },
21264 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpunordsd", IX86_BUILTIN_CMPUNORDSD, UNORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21265 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpneqsd", IX86_BUILTIN_CMPNEQSD, NE, (int) V2DF_FTYPE_V2DF_V2DF },
21266 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnltsd", IX86_BUILTIN_CMPNLTSD, UNGE, (int) V2DF_FTYPE_V2DF_V2DF },
21267 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpnlesd", IX86_BUILTIN_CMPNLESD, UNGT, (int) V2DF_FTYPE_V2DF_V2DF },
21268 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmmaskcmpv2df3, "__builtin_ia32_cmpordsd", IX86_BUILTIN_CMPORDSD, ORDERED, (int) V2DF_FTYPE_V2DF_V2DF },
21270 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv2df3, "__builtin_ia32_minpd", IX86_BUILTIN_MINPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21271 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv2df3, "__builtin_ia32_maxpd", IX86_BUILTIN_MAXPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21272 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsminv2df3, "__builtin_ia32_minsd", IX86_BUILTIN_MINSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21273 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsmaxv2df3, "__builtin_ia32_maxsd", IX86_BUILTIN_MAXSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21275 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2df3, "__builtin_ia32_andpd", IX86_BUILTIN_ANDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21276 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2df3, "__builtin_ia32_andnpd", IX86_BUILTIN_ANDNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21277 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2df3, "__builtin_ia32_orpd", IX86_BUILTIN_ORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21278 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2df3, "__builtin_ia32_xorpd", IX86_BUILTIN_XORPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21280 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysignv2df3, "__builtin_ia32_copysignpd", IX86_BUILTIN_CPYSGNPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21282 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_movsd, "__builtin_ia32_movsd", IX86_BUILTIN_MOVSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21283 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpckhpd_exp, "__builtin_ia32_unpckhpd", IX86_BUILTIN_UNPCKHPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21284 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_unpcklpd_exp, "__builtin_ia32_unpcklpd", IX86_BUILTIN_UNPCKLPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21286 { OPTION_MASK_ISA_SSE2, CODE_FOR_vec_pack_sfix_v2df, "__builtin_ia32_vec_pack_sfix", IX86_BUILTIN_VEC_PACK_SFIX, UNKNOWN, (int) V4SI_FTYPE_V2DF_V2DF },
21288 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv16qi3, "__builtin_ia32_paddb128", IX86_BUILTIN_PADDB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21289 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv8hi3, "__builtin_ia32_paddw128", IX86_BUILTIN_PADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21290 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv4si3, "__builtin_ia32_paddd128", IX86_BUILTIN_PADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21291 { OPTION_MASK_ISA_SSE2, CODE_FOR_addv2di3, "__builtin_ia32_paddq128", IX86_BUILTIN_PADDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21292 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv16qi3, "__builtin_ia32_psubb128", IX86_BUILTIN_PSUBB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21293 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv8hi3, "__builtin_ia32_psubw128", IX86_BUILTIN_PSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21294 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv4si3, "__builtin_ia32_psubd128", IX86_BUILTIN_PSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21295 { OPTION_MASK_ISA_SSE2, CODE_FOR_subv2di3, "__builtin_ia32_psubq128", IX86_BUILTIN_PSUBQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21297 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv16qi3, "__builtin_ia32_paddsb128", IX86_BUILTIN_PADDSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21298 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ssaddv8hi3, "__builtin_ia32_paddsw128", IX86_BUILTIN_PADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21299 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv16qi3, "__builtin_ia32_psubsb128", IX86_BUILTIN_PSUBSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21300 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_sssubv8hi3, "__builtin_ia32_psubsw128", IX86_BUILTIN_PSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21301 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv16qi3, "__builtin_ia32_paddusb128", IX86_BUILTIN_PADDUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21302 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_usaddv8hi3, "__builtin_ia32_paddusw128", IX86_BUILTIN_PADDUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21303 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv16qi3, "__builtin_ia32_psubusb128", IX86_BUILTIN_PSUBUSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21304 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ussubv8hi3, "__builtin_ia32_psubusw128", IX86_BUILTIN_PSUBUSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21306 { OPTION_MASK_ISA_SSE2, CODE_FOR_mulv8hi3, "__builtin_ia32_pmullw128", IX86_BUILTIN_PMULLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21307   { OPTION_MASK_ISA_SSE2, CODE_FOR_smulv8hi3_highpart, "__builtin_ia32_pmulhw128", IX86_BUILTIN_PMULHW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21309 { OPTION_MASK_ISA_SSE2, CODE_FOR_andv2di3, "__builtin_ia32_pand128", IX86_BUILTIN_PAND128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21310 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_andnotv2di3, "__builtin_ia32_pandn128", IX86_BUILTIN_PANDN128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21311 { OPTION_MASK_ISA_SSE2, CODE_FOR_iorv2di3, "__builtin_ia32_por128", IX86_BUILTIN_POR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21312 { OPTION_MASK_ISA_SSE2, CODE_FOR_xorv2di3, "__builtin_ia32_pxor128", IX86_BUILTIN_PXOR128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21314 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv16qi3, "__builtin_ia32_pavgb128", IX86_BUILTIN_PAVGB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21315 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_uavgv8hi3, "__builtin_ia32_pavgw128", IX86_BUILTIN_PAVGW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21317 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv16qi3, "__builtin_ia32_pcmpeqb128", IX86_BUILTIN_PCMPEQB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21318 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv8hi3, "__builtin_ia32_pcmpeqw128", IX86_BUILTIN_PCMPEQW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21319 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_eqv4si3, "__builtin_ia32_pcmpeqd128", IX86_BUILTIN_PCMPEQD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21320 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv16qi3, "__builtin_ia32_pcmpgtb128", IX86_BUILTIN_PCMPGTB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21321 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv8hi3, "__builtin_ia32_pcmpgtw128", IX86_BUILTIN_PCMPGTW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21322 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_gtv4si3, "__builtin_ia32_pcmpgtd128", IX86_BUILTIN_PCMPGTD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21324 { OPTION_MASK_ISA_SSE2, CODE_FOR_umaxv16qi3, "__builtin_ia32_pmaxub128", IX86_BUILTIN_PMAXUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21325 { OPTION_MASK_ISA_SSE2, CODE_FOR_smaxv8hi3, "__builtin_ia32_pmaxsw128", IX86_BUILTIN_PMAXSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21326 { OPTION_MASK_ISA_SSE2, CODE_FOR_uminv16qi3, "__builtin_ia32_pminub128", IX86_BUILTIN_PMINUB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21327 { OPTION_MASK_ISA_SSE2, CODE_FOR_sminv8hi3, "__builtin_ia32_pminsw128", IX86_BUILTIN_PMINSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21329 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhbw, "__builtin_ia32_punpckhbw128", IX86_BUILTIN_PUNPCKHBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21330 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhwd, "__builtin_ia32_punpckhwd128", IX86_BUILTIN_PUNPCKHWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21331 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhdq, "__builtin_ia32_punpckhdq128", IX86_BUILTIN_PUNPCKHDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21332 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckhqdq, "__builtin_ia32_punpckhqdq128", IX86_BUILTIN_PUNPCKHQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21333 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklbw, "__builtin_ia32_punpcklbw128", IX86_BUILTIN_PUNPCKLBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21334 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklwd, "__builtin_ia32_punpcklwd128", IX86_BUILTIN_PUNPCKLWD128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21335 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpckldq, "__builtin_ia32_punpckldq128", IX86_BUILTIN_PUNPCKLDQ128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21336 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_punpcklqdq, "__builtin_ia32_punpcklqdq128", IX86_BUILTIN_PUNPCKLQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21338 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packsswb, "__builtin_ia32_packsswb128", IX86_BUILTIN_PACKSSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21339 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packssdw, "__builtin_ia32_packssdw128", IX86_BUILTIN_PACKSSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21340 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_packuswb, "__builtin_ia32_packuswb128", IX86_BUILTIN_PACKUSWB128, UNKNOWN, (int) V16QI_FTYPE_V8HI_V8HI },
21342 { OPTION_MASK_ISA_SSE2, CODE_FOR_umulv8hi3_highpart, "__builtin_ia32_pmulhuw128", IX86_BUILTIN_PMULHUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21343 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_psadbw, "__builtin_ia32_psadbw128", IX86_BUILTIN_PSADBW128, UNKNOWN, (int) V2DI_FTYPE_V16QI_V16QI },
21345 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv1siv1di3, "__builtin_ia32_pmuludq", IX86_BUILTIN_PMULUDQ, UNKNOWN, (int) V1DI_FTYPE_V2SI_V2SI },
21346 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_umulv2siv2di3, "__builtin_ia32_pmuludq128", IX86_BUILTIN_PMULUDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21348 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pmaddwd, "__builtin_ia32_pmaddwd128", IX86_BUILTIN_PMADDWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI_V8HI },
21350 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsi2sd, "__builtin_ia32_cvtsi2sd", IX86_BUILTIN_CVTSI2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_SI },
21351 { OPTION_MASK_ISA_SSE2 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse2_cvtsi2sdq, "__builtin_ia32_cvtsi642sd", IX86_BUILTIN_CVTSI642SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_DI },
21352 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtsd2ss, "__builtin_ia32_cvtsd2ss", IX86_BUILTIN_CVTSD2SS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V2DF },
21353 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_cvtss2sd, "__builtin_ia32_cvtss2sd", IX86_BUILTIN_CVTSS2SD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V4SF },
21355 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_ashlti3, "__builtin_ia32_pslldqi128", IX86_BUILTIN_PSLLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
21356 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllwi128", IX86_BUILTIN_PSLLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21357 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslldi128", IX86_BUILTIN_PSLLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21358 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllqi128", IX86_BUILTIN_PSLLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21359 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv8hi3, "__builtin_ia32_psllw128", IX86_BUILTIN_PSLLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21360 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv4si3, "__builtin_ia32_pslld128", IX86_BUILTIN_PSLLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21361 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashlv2di3, "__builtin_ia32_psllq128", IX86_BUILTIN_PSLLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21363 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_lshrti3, "__builtin_ia32_psrldqi128", IX86_BUILTIN_PSRLDQI128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_INT },
21364 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlwi128", IX86_BUILTIN_PSRLWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21365 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrldi128", IX86_BUILTIN_PSRLDI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21366 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlqi128", IX86_BUILTIN_PSRLQI128, UNKNOWN, (int) V2DI_FTYPE_V2DI_SI_COUNT },
21367 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv8hi3, "__builtin_ia32_psrlw128", IX86_BUILTIN_PSRLW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21368 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv4si3, "__builtin_ia32_psrld128", IX86_BUILTIN_PSRLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21369 { OPTION_MASK_ISA_SSE2, CODE_FOR_lshrv2di3, "__builtin_ia32_psrlq128", IX86_BUILTIN_PSRLQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_COUNT },
21371 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psrawi128", IX86_BUILTIN_PSRAWI128, UNKNOWN, (int) V8HI_FTYPE_V8HI_SI_COUNT },
21372 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psradi128", IX86_BUILTIN_PSRADI128, UNKNOWN, (int) V4SI_FTYPE_V4SI_SI_COUNT },
21373 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv8hi3, "__builtin_ia32_psraw128", IX86_BUILTIN_PSRAW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_COUNT },
21374 { OPTION_MASK_ISA_SSE2, CODE_FOR_ashrv4si3, "__builtin_ia32_psrad128", IX86_BUILTIN_PSRAD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI_COUNT },
21376 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufd, "__builtin_ia32_pshufd", IX86_BUILTIN_PSHUFD, UNKNOWN, (int) V4SI_FTYPE_V4SI_INT },
21377 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshuflw, "__builtin_ia32_pshuflw", IX86_BUILTIN_PSHUFLW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21378 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_pshufhw, "__builtin_ia32_pshufhw", IX86_BUILTIN_PSHUFHW, UNKNOWN, (int) V8HI_FTYPE_V8HI_INT },
21380 { OPTION_MASK_ISA_SSE2, CODE_FOR_sse2_vmsqrtv2df2, "__builtin_ia32_sqrtsd", IX86_BUILTIN_SQRTSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_VEC_MERGE },
21382 { OPTION_MASK_ISA_SSE2, CODE_FOR_abstf2, 0, IX86_BUILTIN_FABSQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128 },
21383 { OPTION_MASK_ISA_SSE2, CODE_FOR_copysigntf3, 0, IX86_BUILTIN_COPYSIGNQ, UNKNOWN, (int) FLOAT128_FTYPE_FLOAT128_FLOAT128 },
21385 { OPTION_MASK_ISA_SSE, CODE_FOR_sse2_movq128, "__builtin_ia32_movq128", IX86_BUILTIN_MOVQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21387 /* SSE2 MMX */
21388 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_addv1di3, "__builtin_ia32_paddq", IX86_BUILTIN_PADDQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21389 { OPTION_MASK_ISA_SSE2, CODE_FOR_mmx_subv1di3, "__builtin_ia32_psubq", IX86_BUILTIN_PSUBQ, UNKNOWN, (int) V1DI_FTYPE_V1DI_V1DI },
21391 /* SSE3 */
21392   { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movshdup, "__builtin_ia32_movshdup", IX86_BUILTIN_MOVSHDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21393 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_movsldup, "__builtin_ia32_movsldup", IX86_BUILTIN_MOVSLDUP, UNKNOWN, (int) V4SF_FTYPE_V4SF },
21395 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv4sf3, "__builtin_ia32_addsubps", IX86_BUILTIN_ADDSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21396 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_addsubv2df3, "__builtin_ia32_addsubpd", IX86_BUILTIN_ADDSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21397 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv4sf3, "__builtin_ia32_haddps", IX86_BUILTIN_HADDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21398 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_haddv2df3, "__builtin_ia32_haddpd", IX86_BUILTIN_HADDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21399 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv4sf3, "__builtin_ia32_hsubps", IX86_BUILTIN_HSUBPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF },
21400 { OPTION_MASK_ISA_SSE3, CODE_FOR_sse3_hsubv2df3, "__builtin_ia32_hsubpd", IX86_BUILTIN_HSUBPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF },
21402 /* SSSE3 */
21403 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv16qi2, "__builtin_ia32_pabsb128", IX86_BUILTIN_PABSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI },
21404 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8qi2, "__builtin_ia32_pabsb", IX86_BUILTIN_PABSB, UNKNOWN, (int) V8QI_FTYPE_V8QI },
21405 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv8hi2, "__builtin_ia32_pabsw128", IX86_BUILTIN_PABSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21406 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4hi2, "__builtin_ia32_pabsw", IX86_BUILTIN_PABSW, UNKNOWN, (int) V4HI_FTYPE_V4HI },
21407 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv4si2, "__builtin_ia32_pabsd128", IX86_BUILTIN_PABSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI },
21408 { OPTION_MASK_ISA_SSSE3, CODE_FOR_absv2si2, "__builtin_ia32_pabsd", IX86_BUILTIN_PABSD, UNKNOWN, (int) V2SI_FTYPE_V2SI },
21410 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv8hi3, "__builtin_ia32_phaddw128", IX86_BUILTIN_PHADDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21411 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddwv4hi3, "__builtin_ia32_phaddw", IX86_BUILTIN_PHADDW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21412 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv4si3, "__builtin_ia32_phaddd128", IX86_BUILTIN_PHADDD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21413 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phadddv2si3, "__builtin_ia32_phaddd", IX86_BUILTIN_PHADDD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21414 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv8hi3, "__builtin_ia32_phaddsw128", IX86_BUILTIN_PHADDSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21415 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phaddswv4hi3, "__builtin_ia32_phaddsw", IX86_BUILTIN_PHADDSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21416 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv8hi3, "__builtin_ia32_phsubw128", IX86_BUILTIN_PHSUBW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21417 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubwv4hi3, "__builtin_ia32_phsubw", IX86_BUILTIN_PHSUBW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21418 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv4si3, "__builtin_ia32_phsubd128", IX86_BUILTIN_PHSUBD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21419 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubdv2si3, "__builtin_ia32_phsubd", IX86_BUILTIN_PHSUBD, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21420 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv8hi3, "__builtin_ia32_phsubsw128", IX86_BUILTIN_PHSUBSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21421 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_phsubswv4hi3, "__builtin_ia32_phsubsw", IX86_BUILTIN_PHSUBSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21422 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw128, "__builtin_ia32_pmaddubsw128", IX86_BUILTIN_PMADDUBSW128, UNKNOWN, (int) V8HI_FTYPE_V16QI_V16QI },
21423 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmaddubsw, "__builtin_ia32_pmaddubsw", IX86_BUILTIN_PMADDUBSW, UNKNOWN, (int) V4HI_FTYPE_V8QI_V8QI },
21424 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv8hi3, "__builtin_ia32_pmulhrsw128", IX86_BUILTIN_PMULHRSW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21425 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pmulhrswv4hi3, "__builtin_ia32_pmulhrsw", IX86_BUILTIN_PMULHRSW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21426 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv16qi3, "__builtin_ia32_pshufb128", IX86_BUILTIN_PSHUFB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21427 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_pshufbv8qi3, "__builtin_ia32_pshufb", IX86_BUILTIN_PSHUFB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21428 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv16qi3, "__builtin_ia32_psignb128", IX86_BUILTIN_PSIGNB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21429 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8qi3, "__builtin_ia32_psignb", IX86_BUILTIN_PSIGNB, UNKNOWN, (int) V8QI_FTYPE_V8QI_V8QI },
21430 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv8hi3, "__builtin_ia32_psignw128", IX86_BUILTIN_PSIGNW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21431 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4hi3, "__builtin_ia32_psignw", IX86_BUILTIN_PSIGNW, UNKNOWN, (int) V4HI_FTYPE_V4HI_V4HI },
21432 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv4si3, "__builtin_ia32_psignd128", IX86_BUILTIN_PSIGND128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21433 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_psignv2si3, "__builtin_ia32_psignd", IX86_BUILTIN_PSIGND, UNKNOWN, (int) V2SI_FTYPE_V2SI_V2SI },
21435 /* SSSE3. */
21436 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrti, "__builtin_ia32_palignr128", IX86_BUILTIN_PALIGNR128, UNKNOWN, (int) V2DI2TI_FTYPE_V2DI_V2DI_INT },
21437 { OPTION_MASK_ISA_SSSE3, CODE_FOR_ssse3_palignrdi, "__builtin_ia32_palignr", IX86_BUILTIN_PALIGNR, UNKNOWN, (int) V1DI2DI_FTYPE_V1DI_V1DI_INT },
21439 /* SSE4.1 */
21440 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendpd, "__builtin_ia32_blendpd", IX86_BUILTIN_BLENDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21441 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendps, "__builtin_ia32_blendps", IX86_BUILTIN_BLENDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21442 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvpd, "__builtin_ia32_blendvpd", IX86_BUILTIN_BLENDVPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_V2DF },
21443 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_blendvps, "__builtin_ia32_blendvps", IX86_BUILTIN_BLENDVPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_V4SF },
21444 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dppd, "__builtin_ia32_dppd", IX86_BUILTIN_DPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21445 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_dpps, "__builtin_ia32_dpps", IX86_BUILTIN_DPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21446 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_insertps, "__builtin_ia32_insertps128", IX86_BUILTIN_INSERTPS128, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21447 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mpsadbw, "__builtin_ia32_mpsadbw128", IX86_BUILTIN_MPSADBW128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_INT },
21448 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendvb, "__builtin_ia32_pblendvb128", IX86_BUILTIN_PBLENDVB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI_V16QI },
21449 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_pblendw, "__builtin_ia32_pblendw128", IX86_BUILTIN_PBLENDW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI_INT },
21451 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv8qiv8hi2, "__builtin_ia32_pmovsxbw128", IX86_BUILTIN_PMOVSXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21452 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4qiv4si2, "__builtin_ia32_pmovsxbd128", IX86_BUILTIN_PMOVSXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21453 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2qiv2di2, "__builtin_ia32_pmovsxbq128", IX86_BUILTIN_PMOVSXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21454 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv4hiv4si2, "__builtin_ia32_pmovsxwd128", IX86_BUILTIN_PMOVSXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21455 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2hiv2di2, "__builtin_ia32_pmovsxwq128", IX86_BUILTIN_PMOVSXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21456 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_extendv2siv2di2, "__builtin_ia32_pmovsxdq128", IX86_BUILTIN_PMOVSXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21457 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv8qiv8hi2, "__builtin_ia32_pmovzxbw128", IX86_BUILTIN_PMOVZXBW128, UNKNOWN, (int) V8HI_FTYPE_V16QI },
21458 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4qiv4si2, "__builtin_ia32_pmovzxbd128", IX86_BUILTIN_PMOVZXBD128, UNKNOWN, (int) V4SI_FTYPE_V16QI },
21459 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2qiv2di2, "__builtin_ia32_pmovzxbq128", IX86_BUILTIN_PMOVZXBQ128, UNKNOWN, (int) V2DI_FTYPE_V16QI },
21460 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv4hiv4si2, "__builtin_ia32_pmovzxwd128", IX86_BUILTIN_PMOVZXWD128, UNKNOWN, (int) V4SI_FTYPE_V8HI },
21461 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2hiv2di2, "__builtin_ia32_pmovzxwq128", IX86_BUILTIN_PMOVZXWQ128, UNKNOWN, (int) V2DI_FTYPE_V8HI },
21462 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_zero_extendv2siv2di2, "__builtin_ia32_pmovzxdq128", IX86_BUILTIN_PMOVZXDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI },
21463 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_phminposuw, "__builtin_ia32_phminposuw128", IX86_BUILTIN_PHMINPOSUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI },
21465 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_packusdw, "__builtin_ia32_packusdw128", IX86_BUILTIN_PACKUSDW128, UNKNOWN, (int) V8HI_FTYPE_V4SI_V4SI },
21466 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_eqv2di3, "__builtin_ia32_pcmpeqq", IX86_BUILTIN_PCMPEQQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21467 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv16qi3, "__builtin_ia32_pmaxsb128", IX86_BUILTIN_PMAXSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21468 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_smaxv4si3, "__builtin_ia32_pmaxsd128", IX86_BUILTIN_PMAXSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21469 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv4si3, "__builtin_ia32_pmaxud128", IX86_BUILTIN_PMAXUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21470 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_umaxv8hi3, "__builtin_ia32_pmaxuw128", IX86_BUILTIN_PMAXUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21471 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv16qi3, "__builtin_ia32_pminsb128", IX86_BUILTIN_PMINSB128, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI },
21472 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sminv4si3, "__builtin_ia32_pminsd128", IX86_BUILTIN_PMINSD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21473 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv4si3, "__builtin_ia32_pminud128", IX86_BUILTIN_PMINUD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21474 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_uminv8hi3, "__builtin_ia32_pminuw128", IX86_BUILTIN_PMINUW128, UNKNOWN, (int) V8HI_FTYPE_V8HI_V8HI },
21475 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_sse4_1_mulv2siv2di3, "__builtin_ia32_pmuldq128", IX86_BUILTIN_PMULDQ128, UNKNOWN, (int) V2DI_FTYPE_V4SI_V4SI },
21476 { OPTION_MASK_ISA_SSE4_1, CODE_FOR_mulv4si3, "__builtin_ia32_pmulld128", IX86_BUILTIN_PMULLD128, UNKNOWN, (int) V4SI_FTYPE_V4SI_V4SI },
21478 /* SSE4.1 */
21479 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundpd, "__builtin_ia32_roundpd", IX86_BUILTIN_ROUNDPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
21480 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundps, "__builtin_ia32_roundps", IX86_BUILTIN_ROUNDPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
21481 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundsd, "__builtin_ia32_roundsd", IX86_BUILTIN_ROUNDSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21482 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_roundss, "__builtin_ia32_roundss", IX86_BUILTIN_ROUNDSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21484 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestz128", IX86_BUILTIN_PTESTZ, EQ, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21485 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestc128", IX86_BUILTIN_PTESTC, LTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21486 { OPTION_MASK_ISA_ROUND, CODE_FOR_sse4_1_ptest, "__builtin_ia32_ptestnzc128", IX86_BUILTIN_PTESTNZC, GTU, (int) INT_FTYPE_V2DI_V2DI_PTEST },
21488 /* SSE4.2 */
21489 { OPTION_MASK_ISA_SSE4_2, CODE_FOR_sse4_2_gtv2di3, "__builtin_ia32_pcmpgtq", IX86_BUILTIN_PCMPGTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21490 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32qi, "__builtin_ia32_crc32qi", IX86_BUILTIN_CRC32QI, UNKNOWN, (int) UINT_FTYPE_UINT_UCHAR },
21491 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32hi, "__builtin_ia32_crc32hi", IX86_BUILTIN_CRC32HI, UNKNOWN, (int) UINT_FTYPE_UINT_USHORT },
21492 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32, CODE_FOR_sse4_2_crc32si, "__builtin_ia32_crc32si", IX86_BUILTIN_CRC32SI, UNKNOWN, (int) UINT_FTYPE_UINT_UINT },
21493 { OPTION_MASK_ISA_SSE4_2 | OPTION_MASK_ISA_CRC32 | OPTION_MASK_ISA_64BIT, CODE_FOR_sse4_2_crc32di, "__builtin_ia32_crc32di", IX86_BUILTIN_CRC32DI, UNKNOWN, (int) UINT64_FTYPE_UINT64_UINT64 },
21495 /* SSE4A */
21496 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrqi, "__builtin_ia32_extrqi", IX86_BUILTIN_EXTRQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_UINT_UINT },
21497 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_extrq, "__builtin_ia32_extrq", IX86_BUILTIN_EXTRQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V16QI },
21498 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertqi, "__builtin_ia32_insertqi", IX86_BUILTIN_INSERTQI, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_UINT_UINT },
21499 { OPTION_MASK_ISA_SSE4A, CODE_FOR_sse4a_insertq, "__builtin_ia32_insertq", IX86_BUILTIN_INSERTQ, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21501 /* AES */
21502 { OPTION_MASK_ISA_SSE2, CODE_FOR_aeskeygenassist, 0, IX86_BUILTIN_AESKEYGENASSIST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_INT },
21503 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesimc, 0, IX86_BUILTIN_AESIMC128, UNKNOWN, (int) V2DI_FTYPE_V2DI },
21505 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenc, 0, IX86_BUILTIN_AESENC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21506 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesenclast, 0, IX86_BUILTIN_AESENCLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21507 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdec, 0, IX86_BUILTIN_AESDEC128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21508 { OPTION_MASK_ISA_SSE2, CODE_FOR_aesdeclast, 0, IX86_BUILTIN_AESDECLAST128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI },
21510 /* PCLMUL */
21511 { OPTION_MASK_ISA_SSE2, CODE_FOR_pclmulqdq, 0, IX86_BUILTIN_PCLMULQDQ128, UNKNOWN, (int) V2DI_FTYPE_V2DI_V2DI_INT },
21513 /* AVX */
21514 { OPTION_MASK_ISA_AVX, CODE_FOR_addv4df3, "__builtin_ia32_addpd256", IX86_BUILTIN_ADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21515 { OPTION_MASK_ISA_AVX, CODE_FOR_addv8sf3, "__builtin_ia32_addps256", IX86_BUILTIN_ADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21516 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv4df3, "__builtin_ia32_addsubpd256", IX86_BUILTIN_ADDSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21517 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_addsubv8sf3, "__builtin_ia32_addsubps256", IX86_BUILTIN_ADDSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21518 { OPTION_MASK_ISA_AVX, CODE_FOR_andv4df3, "__builtin_ia32_andpd256", IX86_BUILTIN_ANDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21519 { OPTION_MASK_ISA_AVX, CODE_FOR_andv8sf3, "__builtin_ia32_andps256", IX86_BUILTIN_ANDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21520 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv4df3, "__builtin_ia32_andnpd256", IX86_BUILTIN_ANDNPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21521 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_andnotv8sf3, "__builtin_ia32_andnps256", IX86_BUILTIN_ANDNPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21522 { OPTION_MASK_ISA_AVX, CODE_FOR_divv4df3, "__builtin_ia32_divpd256", IX86_BUILTIN_DIVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21523 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_divv8sf3, "__builtin_ia32_divps256", IX86_BUILTIN_DIVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21524 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv4df3, "__builtin_ia32_haddpd256", IX86_BUILTIN_HADDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21525 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv8sf3, "__builtin_ia32_hsubps256", IX86_BUILTIN_HSUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21526 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_hsubv4df3, "__builtin_ia32_hsubpd256", IX86_BUILTIN_HSUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21527 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_haddv8sf3, "__builtin_ia32_haddps256", IX86_BUILTIN_HADDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21528 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv4df3, "__builtin_ia32_maxpd256", IX86_BUILTIN_MAXPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21529 { OPTION_MASK_ISA_AVX, CODE_FOR_smaxv8sf3, "__builtin_ia32_maxps256", IX86_BUILTIN_MAXPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21530 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv4df3, "__builtin_ia32_minpd256", IX86_BUILTIN_MINPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21531 { OPTION_MASK_ISA_AVX, CODE_FOR_sminv8sf3, "__builtin_ia32_minps256", IX86_BUILTIN_MINPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21532 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv4df3, "__builtin_ia32_mulpd256", IX86_BUILTIN_MULPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21533 { OPTION_MASK_ISA_AVX, CODE_FOR_mulv8sf3, "__builtin_ia32_mulps256", IX86_BUILTIN_MULPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21534 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv4df3, "__builtin_ia32_orpd256", IX86_BUILTIN_ORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21535 { OPTION_MASK_ISA_AVX, CODE_FOR_iorv8sf3, "__builtin_ia32_orps256", IX86_BUILTIN_ORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21536 { OPTION_MASK_ISA_AVX, CODE_FOR_subv4df3, "__builtin_ia32_subpd256", IX86_BUILTIN_SUBPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21537 { OPTION_MASK_ISA_AVX, CODE_FOR_subv8sf3, "__builtin_ia32_subps256", IX86_BUILTIN_SUBPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21538 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv4df3, "__builtin_ia32_xorpd256", IX86_BUILTIN_XORPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21539 { OPTION_MASK_ISA_AVX, CODE_FOR_xorv8sf3, "__builtin_ia32_xorps256", IX86_BUILTIN_XORPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21541 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv2df3, "__builtin_ia32_vpermilvarpd", IX86_BUILTIN_VPERMILVARPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DI },
21542 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4sf3, "__builtin_ia32_vpermilvarps", IX86_BUILTIN_VPERMILVARPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SI },
21543 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv4df3, "__builtin_ia32_vpermilvarpd256", IX86_BUILTIN_VPERMILVARPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DI },
21544 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilvarv8sf3, "__builtin_ia32_vpermilvarps256", IX86_BUILTIN_VPERMILVARPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SI },
21546 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendpd256, "__builtin_ia32_blendpd256", IX86_BUILTIN_BLENDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21547 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendps256, "__builtin_ia32_blendps256", IX86_BUILTIN_BLENDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21548 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvpd256, "__builtin_ia32_blendvpd256", IX86_BUILTIN_BLENDVPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_V4DF },
21549 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_blendvps256, "__builtin_ia32_blendvps256", IX86_BUILTIN_BLENDVPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_V8SF },
21550 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_dpps256, "__builtin_ia32_dpps256", IX86_BUILTIN_DPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21551 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufpd256, "__builtin_ia32_shufpd256", IX86_BUILTIN_SHUFPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21552 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_shufps256, "__builtin_ia32_shufps256", IX86_BUILTIN_SHUFPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21553 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpsdv2df3, "__builtin_ia32_cmpsd", IX86_BUILTIN_CMPSD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21554 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmpssv4sf3, "__builtin_ia32_cmpss", IX86_BUILTIN_CMPSS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21555 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv2df3, "__builtin_ia32_cmppd", IX86_BUILTIN_CMPPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_V2DF_INT },
21556 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv4sf3, "__builtin_ia32_cmpps", IX86_BUILTIN_CMPPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_V4SF_INT },
21557 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppdv4df3, "__builtin_ia32_cmppd256", IX86_BUILTIN_CMPPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21558 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cmppsv8sf3, "__builtin_ia32_cmpps256", IX86_BUILTIN_CMPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21559 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v4df, "__builtin_ia32_vextractf128_pd256", IX86_BUILTIN_EXTRACTF128PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF_INT },
21560 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8sf, "__builtin_ia32_vextractf128_ps256", IX86_BUILTIN_EXTRACTF128PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF_INT },
21561 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vextractf128v8si, "__builtin_ia32_vextractf128_si256", IX86_BUILTIN_EXTRACTF128SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI_INT },
21562 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2pd256, "__builtin_ia32_cvtdq2pd256", IX86_BUILTIN_CVTDQ2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SI },
21563 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtdq2ps256, "__builtin_ia32_cvtdq2ps256", IX86_BUILTIN_CVTDQ2PS256, UNKNOWN, (int) V8SF_FTYPE_V8SI },
21564 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2ps256, "__builtin_ia32_cvtpd2ps256", IX86_BUILTIN_CVTPD2PS256, UNKNOWN, (int) V4SF_FTYPE_V4DF },
21565 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2dq256, "__builtin_ia32_cvtps2dq256", IX86_BUILTIN_CVTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
21566 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtps2pd256, "__builtin_ia32_cvtps2pd256", IX86_BUILTIN_CVTPS2PD256, UNKNOWN, (int) V4DF_FTYPE_V4SF },
21567 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttpd2dq256, "__builtin_ia32_cvttpd2dq256", IX86_BUILTIN_CVTTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
21568 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvtpd2dq256, "__builtin_ia32_cvtpd2dq256", IX86_BUILTIN_CVTPD2DQ256, UNKNOWN, (int) V4SI_FTYPE_V4DF },
21569 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_cvttps2dq256, "__builtin_ia32_cvttps2dq256", IX86_BUILTIN_CVTTPS2DQ256, UNKNOWN, (int) V8SI_FTYPE_V8SF },
21570 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v4df3, "__builtin_ia32_vperm2f128_pd256", IX86_BUILTIN_VPERM2F128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF_INT },
21571 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8sf3, "__builtin_ia32_vperm2f128_ps256", IX86_BUILTIN_VPERM2F128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF_INT },
21572 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vperm2f128v8si3, "__builtin_ia32_vperm2f128_si256", IX86_BUILTIN_VPERM2F128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V8SI_INT },
21573 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv2df, "__builtin_ia32_vpermilpd", IX86_BUILTIN_VPERMILPD, UNKNOWN, (int) V2DF_FTYPE_V2DF_INT },
21574 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4sf, "__builtin_ia32_vpermilps", IX86_BUILTIN_VPERMILPS, UNKNOWN, (int) V4SF_FTYPE_V4SF_INT },
21575 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv4df, "__builtin_ia32_vpermilpd256", IX86_BUILTIN_VPERMILPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
21576 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vpermilv8sf, "__builtin_ia32_vpermilps256", IX86_BUILTIN_VPERMILPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
21577 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v4df, "__builtin_ia32_vinsertf128_pd256", IX86_BUILTIN_VINSERTF128PD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V2DF_INT },
21578 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8sf, "__builtin_ia32_vinsertf128_ps256", IX86_BUILTIN_VINSERTF128PS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V4SF_INT },
21579 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vinsertf128v8si, "__builtin_ia32_vinsertf128_si256", IX86_BUILTIN_VINSERTF128SI256, UNKNOWN, (int) V8SI_FTYPE_V8SI_V4SI_INT },
21581 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movshdup256, "__builtin_ia32_movshdup256", IX86_BUILTIN_MOVSHDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21582 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movsldup256, "__builtin_ia32_movsldup256", IX86_BUILTIN_MOVSLDUP256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21583 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movddup256, "__builtin_ia32_movddup256", IX86_BUILTIN_MOVDDUP256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
21585 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv4df2, "__builtin_ia32_sqrtpd256", IX86_BUILTIN_SQRTPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF },
21586 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_sqrtv8sf2, "__builtin_ia32_sqrtps256", IX86_BUILTIN_SQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21587 { OPTION_MASK_ISA_AVX, CODE_FOR_sqrtv8sf2, "__builtin_ia32_sqrtps_nr256", IX86_BUILTIN_SQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21588 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rsqrtv8sf2, "__builtin_ia32_rsqrtps256", IX86_BUILTIN_RSQRTPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21589 { OPTION_MASK_ISA_AVX, CODE_FOR_rsqrtv8sf2, "__builtin_ia32_rsqrtps_nr256", IX86_BUILTIN_RSQRTPS_NR256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21591 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_rcpv8sf2, "__builtin_ia32_rcpps256", IX86_BUILTIN_RCPPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF },
21593 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundpd256, "__builtin_ia32_roundpd256", IX86_BUILTIN_ROUNDPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_INT },
21594 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_roundps256, "__builtin_ia32_roundps256", IX86_BUILTIN_ROUNDPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_INT },
21596 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhpd256, "__builtin_ia32_unpckhpd256", IX86_BUILTIN_UNPCKHPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21597 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklpd256, "__builtin_ia32_unpcklpd256", IX86_BUILTIN_UNPCKLPD256, UNKNOWN, (int) V4DF_FTYPE_V4DF_V4DF },
21598 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpckhps256, "__builtin_ia32_unpckhps256", IX86_BUILTIN_UNPCKHPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21599 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_unpcklps256, "__builtin_ia32_unpcklps256", IX86_BUILTIN_UNPCKLPS256, UNKNOWN, (int) V8SF_FTYPE_V8SF_V8SF },
21601 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si256_si, "__builtin_ia32_si256_si", IX86_BUILTIN_SI256_SI, UNKNOWN, (int) V8SI_FTYPE_V4SI },
21602 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps256_ps, "__builtin_ia32_ps256_ps", IX86_BUILTIN_PS256_PS, UNKNOWN, (int) V8SF_FTYPE_V4SF },
21603 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd256_pd, "__builtin_ia32_pd256_pd", IX86_BUILTIN_PD256_PD, UNKNOWN, (int) V4DF_FTYPE_V2DF },
21604 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_si_si256, "__builtin_ia32_si_si256", IX86_BUILTIN_SI_SI256, UNKNOWN, (int) V4SI_FTYPE_V8SI },
21605 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ps_ps256, "__builtin_ia32_ps_ps256", IX86_BUILTIN_PS_PS256, UNKNOWN, (int) V4SF_FTYPE_V8SF },
21606 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_pd_pd256, "__builtin_ia32_pd_pd256", IX86_BUILTIN_PD_PD256, UNKNOWN, (int) V2DF_FTYPE_V4DF },
21608 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestzpd", IX86_BUILTIN_VTESTZPD, EQ, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21609 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestcpd", IX86_BUILTIN_VTESTCPD, LTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21610 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd, "__builtin_ia32_vtestnzcpd", IX86_BUILTIN_VTESTNZCPD, GTU, (int) INT_FTYPE_V2DF_V2DF_PTEST },
21611 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestzps", IX86_BUILTIN_VTESTZPS, EQ, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21612 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestcps", IX86_BUILTIN_VTESTCPS, LTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21613 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps, "__builtin_ia32_vtestnzcps", IX86_BUILTIN_VTESTNZCPS, GTU, (int) INT_FTYPE_V4SF_V4SF_PTEST },
21614 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestzpd256", IX86_BUILTIN_VTESTZPD256, EQ, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21615 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestcpd256", IX86_BUILTIN_VTESTCPD256, LTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21616 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestpd256, "__builtin_ia32_vtestnzcpd256", IX86_BUILTIN_VTESTNZCPD256, GTU, (int) INT_FTYPE_V4DF_V4DF_PTEST },
21617 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestzps256", IX86_BUILTIN_VTESTZPS256, EQ, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21618 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestcps256", IX86_BUILTIN_VTESTCPS256, LTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21619 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_vtestps256, "__builtin_ia32_vtestnzcps256", IX86_BUILTIN_VTESTNZCPS256, GTU, (int) INT_FTYPE_V8SF_V8SF_PTEST },
21620 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestz256", IX86_BUILTIN_PTESTZ256, EQ, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21621 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestc256", IX86_BUILTIN_PTESTC256, LTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21622 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_ptest256, "__builtin_ia32_ptestnzc256", IX86_BUILTIN_PTESTNZC256, GTU, (int) INT_FTYPE_V4DI_V4DI_PTEST },
21624 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskpd256, "__builtin_ia32_movmskpd256", IX86_BUILTIN_MOVMSKPD256, UNKNOWN, (int) INT_FTYPE_V4DF },
21625 { OPTION_MASK_ISA_AVX, CODE_FOR_avx_movmskps256, "__builtin_ia32_movmskps256", IX86_BUILTIN_MOVMSKPS256, UNKNOWN, (int) INT_FTYPE_V8SF },
21626 };
21629 /* Set up all the MMX/SSE builtins, even builtins for instructions that are
21630 not in the current target ISA, so that the user can compile particular
21631 modules with target-specific options that differ from the command-line
21632 options. */
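/* Illustrative sketch, not part of this file: because every builtin below is
   registered even when its ISA is not enabled on the command line, a single
   module of a program can be built with -msse4.2 and use the matching
   builtin (here __builtin_ia32_pcmpgtq, registered above with type
   V2DI_FTYPE_V2DI_V2DI) while the rest of the program is not:

     typedef long long v2di __attribute__ ((vector_size (16)));

     v2di
     cmpgt_epi64 (v2di a, v2di b)
     {
       return __builtin_ia32_pcmpgtq (a, b);
     }

   The typedef and function names above are invented for the example.  */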
21633 static void
21634 ix86_init_mmx_sse_builtins (void)
21635 {
21636 const struct builtin_description * d;
21637 size_t i;
21639 tree V16QI_type_node = build_vector_type_for_mode (char_type_node, V16QImode);
21640 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
21641 tree V1DI_type_node
21642 = build_vector_type_for_mode (long_long_integer_type_node, V1DImode);
21643 tree V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
21644 tree V2DI_type_node
21645 = build_vector_type_for_mode (long_long_integer_type_node, V2DImode);
21646 tree V2DF_type_node = build_vector_type_for_mode (double_type_node, V2DFmode);
21647 tree V4SF_type_node = build_vector_type_for_mode (float_type_node, V4SFmode);
21648 tree V4SI_type_node = build_vector_type_for_mode (intSI_type_node, V4SImode);
21649 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
21650 tree V8QI_type_node = build_vector_type_for_mode (char_type_node, V8QImode);
21651 tree V8HI_type_node = build_vector_type_for_mode (intHI_type_node, V8HImode);
21653 tree pchar_type_node = build_pointer_type (char_type_node);
21654 tree pcchar_type_node
21655 = build_pointer_type (build_type_variant (char_type_node, 1, 0));
21656 tree pfloat_type_node = build_pointer_type (float_type_node);
21657 tree pcfloat_type_node
21658 = build_pointer_type (build_type_variant (float_type_node, 1, 0));
21659 tree pv2sf_type_node = build_pointer_type (V2SF_type_node);
21660 tree pcv2sf_type_node
21661 = build_pointer_type (build_type_variant (V2SF_type_node, 1, 0));
21662 tree pv2di_type_node = build_pointer_type (V2DI_type_node);
21663 tree pdi_type_node = build_pointer_type (long_long_unsigned_type_node);
21665 /* Comparisons. */
21666 tree int_ftype_v4sf_v4sf
21667 = build_function_type_list (integer_type_node,
21668 V4SF_type_node, V4SF_type_node, NULL_TREE);
21669 tree v4si_ftype_v4sf_v4sf
21670 = build_function_type_list (V4SI_type_node,
21671 V4SF_type_node, V4SF_type_node, NULL_TREE);
21672 /* MMX/SSE/integer conversions. */
21673 tree int_ftype_v4sf
21674 = build_function_type_list (integer_type_node,
21675 V4SF_type_node, NULL_TREE);
21676 tree int64_ftype_v4sf
21677 = build_function_type_list (long_long_integer_type_node,
21678 V4SF_type_node, NULL_TREE);
21679 tree int_ftype_v8qi
21680 = build_function_type_list (integer_type_node, V8QI_type_node, NULL_TREE);
21681 tree v4sf_ftype_v4sf_int
21682 = build_function_type_list (V4SF_type_node,
21683 V4SF_type_node, integer_type_node, NULL_TREE);
21684 tree v4sf_ftype_v4sf_int64
21685 = build_function_type_list (V4SF_type_node,
21686 V4SF_type_node, long_long_integer_type_node,
21687 NULL_TREE);
21688 tree v4sf_ftype_v4sf_v2si
21689 = build_function_type_list (V4SF_type_node,
21690 V4SF_type_node, V2SI_type_node, NULL_TREE);
21692 /* Miscellaneous. */
21693 tree v8qi_ftype_v4hi_v4hi
21694 = build_function_type_list (V8QI_type_node,
21695 V4HI_type_node, V4HI_type_node, NULL_TREE);
21696 tree v4hi_ftype_v2si_v2si
21697 = build_function_type_list (V4HI_type_node,
21698 V2SI_type_node, V2SI_type_node, NULL_TREE);
21699 tree v4sf_ftype_v4sf_v4sf_int
21700 = build_function_type_list (V4SF_type_node,
21701 V4SF_type_node, V4SF_type_node,
21702 integer_type_node, NULL_TREE);
21703 tree v2si_ftype_v4hi_v4hi
21704 = build_function_type_list (V2SI_type_node,
21705 V4HI_type_node, V4HI_type_node, NULL_TREE);
21706 tree v4hi_ftype_v4hi_int
21707 = build_function_type_list (V4HI_type_node,
21708 V4HI_type_node, integer_type_node, NULL_TREE);
21709 tree v2si_ftype_v2si_int
21710 = build_function_type_list (V2SI_type_node,
21711 V2SI_type_node, integer_type_node, NULL_TREE);
21712 tree v1di_ftype_v1di_int
21713 = build_function_type_list (V1DI_type_node,
21714 V1DI_type_node, integer_type_node, NULL_TREE);
21716 tree void_ftype_void
21717 = build_function_type (void_type_node, void_list_node);
21718 tree void_ftype_unsigned
21719 = build_function_type_list (void_type_node, unsigned_type_node, NULL_TREE);
21720 tree void_ftype_unsigned_unsigned
21721 = build_function_type_list (void_type_node, unsigned_type_node,
21722 unsigned_type_node, NULL_TREE);
21723 tree void_ftype_pcvoid_unsigned_unsigned
21724 = build_function_type_list (void_type_node, const_ptr_type_node,
21725 unsigned_type_node, unsigned_type_node,
21726 NULL_TREE);
21727 tree unsigned_ftype_void
21728 = build_function_type (unsigned_type_node, void_list_node);
21729 tree v2si_ftype_v4sf
21730 = build_function_type_list (V2SI_type_node, V4SF_type_node, NULL_TREE);
21731 /* Loads/stores. */
21732 tree void_ftype_v8qi_v8qi_pchar
21733 = build_function_type_list (void_type_node,
21734 V8QI_type_node, V8QI_type_node,
21735 pchar_type_node, NULL_TREE);
21736 tree v4sf_ftype_pcfloat
21737 = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
21738 tree v4sf_ftype_v4sf_pcv2sf
21739 = build_function_type_list (V4SF_type_node,
21740 V4SF_type_node, pcv2sf_type_node, NULL_TREE);
21741 tree void_ftype_pv2sf_v4sf
21742 = build_function_type_list (void_type_node,
21743 pv2sf_type_node, V4SF_type_node, NULL_TREE);
21744 tree void_ftype_pfloat_v4sf
21745 = build_function_type_list (void_type_node,
21746 pfloat_type_node, V4SF_type_node, NULL_TREE);
21747 tree void_ftype_pdi_di
21748 = build_function_type_list (void_type_node,
21749 pdi_type_node, long_long_unsigned_type_node,
21750 NULL_TREE);
21751 tree void_ftype_pv2di_v2di
21752 = build_function_type_list (void_type_node,
21753 pv2di_type_node, V2DI_type_node, NULL_TREE);
21754 /* Normal vector unops. */
21755 tree v4sf_ftype_v4sf
21756 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
21757 tree v16qi_ftype_v16qi
21758 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
21759 tree v8hi_ftype_v8hi
21760 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
21761 tree v4si_ftype_v4si
21762 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
21763 tree v8qi_ftype_v8qi
21764 = build_function_type_list (V8QI_type_node, V8QI_type_node, NULL_TREE);
21765 tree v4hi_ftype_v4hi
21766 = build_function_type_list (V4HI_type_node, V4HI_type_node, NULL_TREE);
21768 /* Normal vector binops. */
21769 tree v4sf_ftype_v4sf_v4sf
21770 = build_function_type_list (V4SF_type_node,
21771 V4SF_type_node, V4SF_type_node, NULL_TREE);
21772 tree v8qi_ftype_v8qi_v8qi
21773 = build_function_type_list (V8QI_type_node,
21774 V8QI_type_node, V8QI_type_node, NULL_TREE);
21775 tree v4hi_ftype_v4hi_v4hi
21776 = build_function_type_list (V4HI_type_node,
21777 V4HI_type_node, V4HI_type_node, NULL_TREE);
21778 tree v2si_ftype_v2si_v2si
21779 = build_function_type_list (V2SI_type_node,
21780 V2SI_type_node, V2SI_type_node, NULL_TREE);
21781 tree v1di_ftype_v1di_v1di
21782 = build_function_type_list (V1DI_type_node,
21783 V1DI_type_node, V1DI_type_node, NULL_TREE);
21784 tree v1di_ftype_v1di_v1di_int
21785 = build_function_type_list (V1DI_type_node,
21786 V1DI_type_node, V1DI_type_node,
21787 integer_type_node, NULL_TREE);
21788 tree v2si_ftype_v2sf
21789 = build_function_type_list (V2SI_type_node, V2SF_type_node, NULL_TREE);
21790 tree v2sf_ftype_v2si
21791 = build_function_type_list (V2SF_type_node, V2SI_type_node, NULL_TREE);
21792 tree v2si_ftype_v2si
21793 = build_function_type_list (V2SI_type_node, V2SI_type_node, NULL_TREE);
21794 tree v2sf_ftype_v2sf
21795 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
21796 tree v2sf_ftype_v2sf_v2sf
21797 = build_function_type_list (V2SF_type_node,
21798 V2SF_type_node, V2SF_type_node, NULL_TREE);
21799 tree v2si_ftype_v2sf_v2sf
21800 = build_function_type_list (V2SI_type_node,
21801 V2SF_type_node, V2SF_type_node, NULL_TREE);
21802 tree pint_type_node = build_pointer_type (integer_type_node);
21803 tree pdouble_type_node = build_pointer_type (double_type_node);
21804 tree pcdouble_type_node = build_pointer_type (
21805 build_type_variant (double_type_node, 1, 0));
21806 tree int_ftype_v2df_v2df
21807 = build_function_type_list (integer_type_node,
21808 V2DF_type_node, V2DF_type_node, NULL_TREE);
21810 tree void_ftype_pcvoid
21811 = build_function_type_list (void_type_node, const_ptr_type_node, NULL_TREE);
21812 tree v4sf_ftype_v4si
21813 = build_function_type_list (V4SF_type_node, V4SI_type_node, NULL_TREE);
21814 tree v4si_ftype_v4sf
21815 = build_function_type_list (V4SI_type_node, V4SF_type_node, NULL_TREE);
21816 tree v2df_ftype_v4si
21817 = build_function_type_list (V2DF_type_node, V4SI_type_node, NULL_TREE);
21818 tree v4si_ftype_v2df
21819 = build_function_type_list (V4SI_type_node, V2DF_type_node, NULL_TREE);
21820 tree v4si_ftype_v2df_v2df
21821 = build_function_type_list (V4SI_type_node,
21822 V2DF_type_node, V2DF_type_node, NULL_TREE);
21823 tree v2si_ftype_v2df
21824 = build_function_type_list (V2SI_type_node, V2DF_type_node, NULL_TREE);
21825 tree v4sf_ftype_v2df
21826 = build_function_type_list (V4SF_type_node, V2DF_type_node, NULL_TREE);
21827 tree v2df_ftype_v2si
21828 = build_function_type_list (V2DF_type_node, V2SI_type_node, NULL_TREE);
21829 tree v2df_ftype_v4sf
21830 = build_function_type_list (V2DF_type_node, V4SF_type_node, NULL_TREE);
21831 tree int_ftype_v2df
21832 = build_function_type_list (integer_type_node, V2DF_type_node, NULL_TREE);
21833 tree int64_ftype_v2df
21834 = build_function_type_list (long_long_integer_type_node,
21835 V2DF_type_node, NULL_TREE);
21836 tree v2df_ftype_v2df_int
21837 = build_function_type_list (V2DF_type_node,
21838 V2DF_type_node, integer_type_node, NULL_TREE);
21839 tree v2df_ftype_v2df_int64
21840 = build_function_type_list (V2DF_type_node,
21841 V2DF_type_node, long_long_integer_type_node,
21842 NULL_TREE);
21843 tree v4sf_ftype_v4sf_v2df
21844 = build_function_type_list (V4SF_type_node,
21845 V4SF_type_node, V2DF_type_node, NULL_TREE);
21846 tree v2df_ftype_v2df_v4sf
21847 = build_function_type_list (V2DF_type_node,
21848 V2DF_type_node, V4SF_type_node, NULL_TREE);
21849 tree v2df_ftype_v2df_v2df_int
21850 = build_function_type_list (V2DF_type_node,
21851 V2DF_type_node, V2DF_type_node,
21852 integer_type_node,
21853 NULL_TREE);
21854 tree v2df_ftype_v2df_pcdouble
21855 = build_function_type_list (V2DF_type_node,
21856 V2DF_type_node, pcdouble_type_node, NULL_TREE);
21857 tree void_ftype_pdouble_v2df
21858 = build_function_type_list (void_type_node,
21859 pdouble_type_node, V2DF_type_node, NULL_TREE);
21860 tree void_ftype_pint_int
21861 = build_function_type_list (void_type_node,
21862 pint_type_node, integer_type_node, NULL_TREE);
21863 tree void_ftype_v16qi_v16qi_pchar
21864 = build_function_type_list (void_type_node,
21865 V16QI_type_node, V16QI_type_node,
21866 pchar_type_node, NULL_TREE);
21867 tree v2df_ftype_pcdouble
21868 = build_function_type_list (V2DF_type_node, pcdouble_type_node, NULL_TREE);
21869 tree v2df_ftype_v2df_v2df
21870 = build_function_type_list (V2DF_type_node,
21871 V2DF_type_node, V2DF_type_node, NULL_TREE);
21872 tree v16qi_ftype_v16qi_v16qi
21873 = build_function_type_list (V16QI_type_node,
21874 V16QI_type_node, V16QI_type_node, NULL_TREE);
21875 tree v8hi_ftype_v8hi_v8hi
21876 = build_function_type_list (V8HI_type_node,
21877 V8HI_type_node, V8HI_type_node, NULL_TREE);
21878 tree v4si_ftype_v4si_v4si
21879 = build_function_type_list (V4SI_type_node,
21880 V4SI_type_node, V4SI_type_node, NULL_TREE);
21881 tree v2di_ftype_v2di_v2di
21882 = build_function_type_list (V2DI_type_node,
21883 V2DI_type_node, V2DI_type_node, NULL_TREE);
21884 tree v2di_ftype_v2df_v2df
21885 = build_function_type_list (V2DI_type_node,
21886 V2DF_type_node, V2DF_type_node, NULL_TREE);
21887 tree v2df_ftype_v2df
21888 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
21889 tree v2di_ftype_v2di_int
21890 = build_function_type_list (V2DI_type_node,
21891 V2DI_type_node, integer_type_node, NULL_TREE);
21892 tree v2di_ftype_v2di_v2di_int
21893 = build_function_type_list (V2DI_type_node, V2DI_type_node,
21894 V2DI_type_node, integer_type_node, NULL_TREE);
21895 tree v4si_ftype_v4si_int
21896 = build_function_type_list (V4SI_type_node,
21897 V4SI_type_node, integer_type_node, NULL_TREE);
21898 tree v8hi_ftype_v8hi_int
21899 = build_function_type_list (V8HI_type_node,
21900 V8HI_type_node, integer_type_node, NULL_TREE);
21901 tree v4si_ftype_v8hi_v8hi
21902 = build_function_type_list (V4SI_type_node,
21903 V8HI_type_node, V8HI_type_node, NULL_TREE);
21904 tree v1di_ftype_v8qi_v8qi
21905 = build_function_type_list (V1DI_type_node,
21906 V8QI_type_node, V8QI_type_node, NULL_TREE);
21907 tree v1di_ftype_v2si_v2si
21908 = build_function_type_list (V1DI_type_node,
21909 V2SI_type_node, V2SI_type_node, NULL_TREE);
21910 tree v2di_ftype_v16qi_v16qi
21911 = build_function_type_list (V2DI_type_node,
21912 V16QI_type_node, V16QI_type_node, NULL_TREE);
21913 tree v2di_ftype_v4si_v4si
21914 = build_function_type_list (V2DI_type_node,
21915 V4SI_type_node, V4SI_type_node, NULL_TREE);
21916 tree int_ftype_v16qi
21917 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
21918 tree v16qi_ftype_pcchar
21919 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
21920 tree void_ftype_pchar_v16qi
21921 = build_function_type_list (void_type_node,
21922 pchar_type_node, V16QI_type_node, NULL_TREE);
21924 tree v2di_ftype_v2di_unsigned_unsigned
21925 = build_function_type_list (V2DI_type_node, V2DI_type_node,
21926 unsigned_type_node, unsigned_type_node,
21927 NULL_TREE);
21928 tree v2di_ftype_v2di_v2di_unsigned_unsigned
21929 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
21930 unsigned_type_node, unsigned_type_node,
21931 NULL_TREE);
21932 tree v2di_ftype_v2di_v16qi
21933 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
21934 NULL_TREE);
21935 tree v2df_ftype_v2df_v2df_v2df
21936 = build_function_type_list (V2DF_type_node,
21937 V2DF_type_node, V2DF_type_node,
21938 V2DF_type_node, NULL_TREE);
21939 tree v4sf_ftype_v4sf_v4sf_v4sf
21940 = build_function_type_list (V4SF_type_node,
21941 V4SF_type_node, V4SF_type_node,
21942 V4SF_type_node, NULL_TREE);
21943 tree v8hi_ftype_v16qi
21944 = build_function_type_list (V8HI_type_node, V16QI_type_node,
21945 NULL_TREE);
21946 tree v4si_ftype_v16qi
21947 = build_function_type_list (V4SI_type_node, V16QI_type_node,
21948 NULL_TREE);
21949 tree v2di_ftype_v16qi
21950 = build_function_type_list (V2DI_type_node, V16QI_type_node,
21951 NULL_TREE);
21952 tree v4si_ftype_v8hi
21953 = build_function_type_list (V4SI_type_node, V8HI_type_node,
21954 NULL_TREE);
21955 tree v2di_ftype_v8hi
21956 = build_function_type_list (V2DI_type_node, V8HI_type_node,
21957 NULL_TREE);
21958 tree v2di_ftype_v4si
21959 = build_function_type_list (V2DI_type_node, V4SI_type_node,
21960 NULL_TREE);
21961 tree v2di_ftype_pv2di
21962 = build_function_type_list (V2DI_type_node, pv2di_type_node,
21963 NULL_TREE);
21964 tree v16qi_ftype_v16qi_v16qi_int
21965 = build_function_type_list (V16QI_type_node, V16QI_type_node,
21966 V16QI_type_node, integer_type_node,
21967 NULL_TREE);
21968 tree v16qi_ftype_v16qi_v16qi_v16qi
21969 = build_function_type_list (V16QI_type_node, V16QI_type_node,
21970 V16QI_type_node, V16QI_type_node,
21971 NULL_TREE);
21972 tree v8hi_ftype_v8hi_v8hi_int
21973 = build_function_type_list (V8HI_type_node, V8HI_type_node,
21974 V8HI_type_node, integer_type_node,
21975 NULL_TREE);
21976 tree v4si_ftype_v4si_v4si_int
21977 = build_function_type_list (V4SI_type_node, V4SI_type_node,
21978 V4SI_type_node, integer_type_node,
21979 NULL_TREE);
21980 tree int_ftype_v2di_v2di
21981 = build_function_type_list (integer_type_node,
21982 V2DI_type_node, V2DI_type_node,
21983 NULL_TREE);
21984 tree int_ftype_v16qi_int_v16qi_int_int
21985 = build_function_type_list (integer_type_node,
21986 V16QI_type_node,
21987 integer_type_node,
21988 V16QI_type_node,
21989 integer_type_node,
21990 integer_type_node,
21991 NULL_TREE);
21992 tree v16qi_ftype_v16qi_int_v16qi_int_int
21993 = build_function_type_list (V16QI_type_node,
21994 V16QI_type_node,
21995 integer_type_node,
21996 V16QI_type_node,
21997 integer_type_node,
21998 integer_type_node,
21999 NULL_TREE);
22000 tree int_ftype_v16qi_v16qi_int
22001 = build_function_type_list (integer_type_node,
22002 V16QI_type_node,
22003 V16QI_type_node,
22004 integer_type_node,
22005 NULL_TREE);
22008 tree v2di_ftype_v2di
22009 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
22011 tree v16qi_ftype_v8hi_v8hi
22012 = build_function_type_list (V16QI_type_node,
22013 V8HI_type_node, V8HI_type_node,
22014 NULL_TREE);
22015 tree v8hi_ftype_v4si_v4si
22016 = build_function_type_list (V8HI_type_node,
22017 V4SI_type_node, V4SI_type_node,
22018 NULL_TREE);
22019 tree v8hi_ftype_v16qi_v16qi
22020 = build_function_type_list (V8HI_type_node,
22021 V16QI_type_node, V16QI_type_node,
22022 NULL_TREE);
22023 tree v4hi_ftype_v8qi_v8qi
22024 = build_function_type_list (V4HI_type_node,
22025 V8QI_type_node, V8QI_type_node,
22026 NULL_TREE);
22027 tree unsigned_ftype_unsigned_uchar
22028 = build_function_type_list (unsigned_type_node,
22029 unsigned_type_node,
22030 unsigned_char_type_node,
22031 NULL_TREE);
22032 tree unsigned_ftype_unsigned_ushort
22033 = build_function_type_list (unsigned_type_node,
22034 unsigned_type_node,
22035 short_unsigned_type_node,
22036 NULL_TREE);
22037 tree unsigned_ftype_unsigned_unsigned
22038 = build_function_type_list (unsigned_type_node,
22039 unsigned_type_node,
22040 unsigned_type_node,
22041 NULL_TREE);
22042 tree uint64_ftype_uint64_uint64
22043 = build_function_type_list (long_long_unsigned_type_node,
22044 long_long_unsigned_type_node,
22045 long_long_unsigned_type_node,
22046 NULL_TREE);
22047 tree float_ftype_float
22048 = build_function_type_list (float_type_node,
22049 float_type_node,
22050 NULL_TREE);
22052 /* AVX builtins */
22053 tree V32QI_type_node = build_vector_type_for_mode (char_type_node,
22054 V32QImode);
22055 tree V8SI_type_node = build_vector_type_for_mode (intSI_type_node,
22056 V8SImode);
22057 tree V8SF_type_node = build_vector_type_for_mode (float_type_node,
22058 V8SFmode);
22059 tree V4DI_type_node = build_vector_type_for_mode (long_long_integer_type_node,
22060 V4DImode);
22061 tree V4DF_type_node = build_vector_type_for_mode (double_type_node,
22062 V4DFmode);
22063 tree v8sf_ftype_v8sf
22064 = build_function_type_list (V8SF_type_node,
22065 V8SF_type_node,
22066 NULL_TREE);
22067 tree v8si_ftype_v8sf
22068 = build_function_type_list (V8SI_type_node,
22069 V8SF_type_node,
22070 NULL_TREE);
22071 tree v8sf_ftype_v8si
22072 = build_function_type_list (V8SF_type_node,
22073 V8SI_type_node,
22074 NULL_TREE);
22075 tree v4si_ftype_v4df
22076 = build_function_type_list (V4SI_type_node,
22077 V4DF_type_node,
22078 NULL_TREE);
22079 tree v4df_ftype_v4df
22080 = build_function_type_list (V4DF_type_node,
22081 V4DF_type_node,
22082 NULL_TREE);
22083 tree v4df_ftype_v4si
22084 = build_function_type_list (V4DF_type_node,
22085 V4SI_type_node,
22086 NULL_TREE);
22087 tree v4df_ftype_v4sf
22088 = build_function_type_list (V4DF_type_node,
22089 V4SF_type_node,
22090 NULL_TREE);
22091 tree v4sf_ftype_v4df
22092 = build_function_type_list (V4SF_type_node,
22093 V4DF_type_node,
22094 NULL_TREE);
22095 tree v8sf_ftype_v8sf_v8sf
22096 = build_function_type_list (V8SF_type_node,
22097 V8SF_type_node, V8SF_type_node,
22098 NULL_TREE);
22099 tree v4df_ftype_v4df_v4df
22100 = build_function_type_list (V4DF_type_node,
22101 V4DF_type_node, V4DF_type_node,
22102 NULL_TREE);
22103 tree v8sf_ftype_v8sf_int
22104 = build_function_type_list (V8SF_type_node,
22105 V8SF_type_node, integer_type_node,
22106 NULL_TREE);
22107 tree v4si_ftype_v8si_int
22108 = build_function_type_list (V4SI_type_node,
22109 V8SI_type_node, integer_type_node,
22110 NULL_TREE);
22111 tree v4df_ftype_v4df_int
22112 = build_function_type_list (V4DF_type_node,
22113 V4DF_type_node, integer_type_node,
22114 NULL_TREE);
22115 tree v4sf_ftype_v8sf_int
22116 = build_function_type_list (V4SF_type_node,
22117 V8SF_type_node, integer_type_node,
22118 NULL_TREE);
22119 tree v2df_ftype_v4df_int
22120 = build_function_type_list (V2DF_type_node,
22121 V4DF_type_node, integer_type_node,
22122 NULL_TREE);
22123 tree v8sf_ftype_v8sf_v8sf_int
22124 = build_function_type_list (V8SF_type_node,
22125 V8SF_type_node, V8SF_type_node,
22126 integer_type_node,
22127 NULL_TREE);
22128 tree v8sf_ftype_v8sf_v8sf_v8sf
22129 = build_function_type_list (V8SF_type_node,
22130 V8SF_type_node, V8SF_type_node,
22131 V8SF_type_node,
22132 NULL_TREE);
22133 tree v4df_ftype_v4df_v4df_v4df
22134 = build_function_type_list (V4DF_type_node,
22135 V4DF_type_node, V4DF_type_node,
22136 V4DF_type_node,
22137 NULL_TREE);
22138 tree v8si_ftype_v8si_v8si_int
22139 = build_function_type_list (V8SI_type_node,
22140 V8SI_type_node, V8SI_type_node,
22141 integer_type_node,
22142 NULL_TREE);
22143 tree v4df_ftype_v4df_v4df_int
22144 = build_function_type_list (V4DF_type_node,
22145 V4DF_type_node, V4DF_type_node,
22146 integer_type_node,
22147 NULL_TREE);
22148 tree v8sf_ftype_pcfloat
22149 = build_function_type_list (V8SF_type_node,
22150 pcfloat_type_node,
22151 NULL_TREE);
22152 tree v4df_ftype_pcdouble
22153 = build_function_type_list (V4DF_type_node,
22154 pcdouble_type_node,
22155 NULL_TREE);
22156 tree pcv4sf_type_node
22157 = build_pointer_type (build_type_variant (V4SF_type_node, 1, 0));
22158 tree pcv2df_type_node
22159 = build_pointer_type (build_type_variant (V2DF_type_node, 1, 0));
22160 tree v8sf_ftype_pcv4sf
22161 = build_function_type_list (V8SF_type_node,
22162 pcv4sf_type_node,
22163 NULL_TREE);
22164 tree v4df_ftype_pcv2df
22165 = build_function_type_list (V4DF_type_node,
22166 pcv2df_type_node,
22167 NULL_TREE);
22168 tree v32qi_ftype_pcchar
22169 = build_function_type_list (V32QI_type_node,
22170 pcchar_type_node,
22171 NULL_TREE);
22172 tree void_ftype_pchar_v32qi
22173 = build_function_type_list (void_type_node,
22174 pchar_type_node, V32QI_type_node,
22175 NULL_TREE);
22176 tree v8si_ftype_v8si_v4si_int
22177 = build_function_type_list (V8SI_type_node,
22178 V8SI_type_node, V4SI_type_node,
22179 integer_type_node,
22180 NULL_TREE);
22181 tree pv4di_type_node = build_pointer_type (V4DI_type_node);
22182 tree void_ftype_pv4di_v4di
22183 = build_function_type_list (void_type_node,
22184 pv4di_type_node, V4DI_type_node,
22185 NULL_TREE);
22186 tree v8sf_ftype_v8sf_v4sf_int
22187 = build_function_type_list (V8SF_type_node,
22188 V8SF_type_node, V4SF_type_node,
22189 integer_type_node,
22190 NULL_TREE);
22191 tree v4df_ftype_v4df_v2df_int
22192 = build_function_type_list (V4DF_type_node,
22193 V4DF_type_node, V2DF_type_node,
22194 integer_type_node,
22195 NULL_TREE);
22196 tree void_ftype_pfloat_v8sf
22197 = build_function_type_list (void_type_node,
22198 pfloat_type_node, V8SF_type_node,
22199 NULL_TREE);
22200 tree void_ftype_pdouble_v4df
22201 = build_function_type_list (void_type_node,
22202 pdouble_type_node, V4DF_type_node,
22203 NULL_TREE);
22204 tree pv8sf_type_node = build_pointer_type (V8SF_type_node);
22205 tree pv4sf_type_node = build_pointer_type (V4SF_type_node);
22206 tree pv4df_type_node = build_pointer_type (V4DF_type_node);
22207 tree pv2df_type_node = build_pointer_type (V2DF_type_node);
22208 tree pcv8sf_type_node
22209 = build_pointer_type (build_type_variant (V8SF_type_node, 1, 0));
22210 tree pcv4df_type_node
22211 = build_pointer_type (build_type_variant (V4DF_type_node, 1, 0));
22212 tree v8sf_ftype_pcv8sf_v8sf
22213 = build_function_type_list (V8SF_type_node,
22214 pcv8sf_type_node, V8SF_type_node,
22215 NULL_TREE);
22216 tree v4df_ftype_pcv4df_v4df
22217 = build_function_type_list (V4DF_type_node,
22218 pcv4df_type_node, V4DF_type_node,
22219 NULL_TREE);
22220 tree v4sf_ftype_pcv4sf_v4sf
22221 = build_function_type_list (V4SF_type_node,
22222 pcv4sf_type_node, V4SF_type_node,
22223 NULL_TREE);
22224 tree v2df_ftype_pcv2df_v2df
22225 = build_function_type_list (V2DF_type_node,
22226 pcv2df_type_node, V2DF_type_node,
22227 NULL_TREE);
22228 tree void_ftype_pv8sf_v8sf_v8sf
22229 = build_function_type_list (void_type_node,
22230 pv8sf_type_node, V8SF_type_node,
22231 V8SF_type_node,
22232 NULL_TREE);
22233 tree void_ftype_pv4df_v4df_v4df
22234 = build_function_type_list (void_type_node,
22235 pv4df_type_node, V4DF_type_node,
22236 V4DF_type_node,
22237 NULL_TREE);
22238 tree void_ftype_pv4sf_v4sf_v4sf
22239 = build_function_type_list (void_type_node,
22240 pv4sf_type_node, V4SF_type_node,
22241 V4SF_type_node,
22242 NULL_TREE);
22243 tree void_ftype_pv2df_v2df_v2df
22244 = build_function_type_list (void_type_node,
22245 pv2df_type_node, V2DF_type_node,
22246 V2DF_type_node,
22247 NULL_TREE);
22248 tree v4df_ftype_v2df
22249 = build_function_type_list (V4DF_type_node,
22250 V2DF_type_node,
22251 NULL_TREE);
22252 tree v8sf_ftype_v4sf
22253 = build_function_type_list (V8SF_type_node,
22254 V4SF_type_node,
22255 NULL_TREE);
22256 tree v8si_ftype_v4si
22257 = build_function_type_list (V8SI_type_node,
22258 V4SI_type_node,
22259 NULL_TREE);
22260 tree v2df_ftype_v4df
22261 = build_function_type_list (V2DF_type_node,
22262 V4DF_type_node,
22263 NULL_TREE);
22264 tree v4sf_ftype_v8sf
22265 = build_function_type_list (V4SF_type_node,
22266 V8SF_type_node,
22267 NULL_TREE);
22268 tree v4si_ftype_v8si
22269 = build_function_type_list (V4SI_type_node,
22270 V8SI_type_node,
22271 NULL_TREE);
22272 tree int_ftype_v4df
22273 = build_function_type_list (integer_type_node,
22274 V4DF_type_node,
22275 NULL_TREE);
22276 tree int_ftype_v8sf
22277 = build_function_type_list (integer_type_node,
22278 V8SF_type_node,
22279 NULL_TREE);
22280 tree int_ftype_v8sf_v8sf
22281 = build_function_type_list (integer_type_node,
22282 V8SF_type_node, V8SF_type_node,
22283 NULL_TREE);
22284 tree int_ftype_v4di_v4di
22285 = build_function_type_list (integer_type_node,
22286 V4DI_type_node, V4DI_type_node,
22287 NULL_TREE);
22288 tree int_ftype_v4df_v4df
22289 = build_function_type_list (integer_type_node,
22290 V4DF_type_node, V4DF_type_node,
22291 NULL_TREE);
22292 tree v8sf_ftype_v8sf_v8si
22293 = build_function_type_list (V8SF_type_node,
22294 V8SF_type_node, V8SI_type_node,
22295 NULL_TREE);
22296 tree v4df_ftype_v4df_v4di
22297 = build_function_type_list (V4DF_type_node,
22298 V4DF_type_node, V4DI_type_node,
22299 NULL_TREE);
22300 tree v4sf_ftype_v4sf_v4si
22301 = build_function_type_list (V4SF_type_node,
22302 V4SF_type_node, V4SI_type_node, NULL_TREE);
22303 tree v2df_ftype_v2df_v2di
22304 = build_function_type_list (V2DF_type_node,
22305 V2DF_type_node, V2DI_type_node, NULL_TREE);
22307 /* Integer intrinsics. */
22308 tree uint64_ftype_void
22309 = build_function_type (long_long_unsigned_type_node,
22310 void_list_node);
22311 tree int_ftype_int
22312 = build_function_type_list (integer_type_node,
22313 integer_type_node, NULL_TREE);
22314 tree int64_ftype_int64
22315 = build_function_type_list (long_long_integer_type_node,
22316 long_long_integer_type_node,
22317 NULL_TREE);
22318 tree uint64_ftype_int
22319 = build_function_type_list (long_long_unsigned_type_node,
22320 integer_type_node, NULL_TREE);
22321 tree punsigned_type_node = build_pointer_type (unsigned_type_node);
22322 tree uint64_ftype_punsigned
22323 = build_function_type_list (long_long_unsigned_type_node,
22324 punsigned_type_node, NULL_TREE);
22325 tree ushort_ftype_ushort_int
22326 = build_function_type_list (short_unsigned_type_node,
22327 short_unsigned_type_node,
22328 integer_type_node,
22329 NULL_TREE);
22330 tree uchar_ftype_uchar_int
22331 = build_function_type_list (unsigned_char_type_node,
22332 unsigned_char_type_node,
22333 integer_type_node,
22334 NULL_TREE);
22336 tree ftype;
22338 /* Add all special builtins with variable number of operands. */
22339 for (i = 0, d = bdesc_special_args;
22340 i < ARRAY_SIZE (bdesc_special_args);
22341 i++, d++)
22342 {
22343 tree type;
22345 if (d->name == 0)
22346 continue;
22348 switch ((enum ix86_special_builtin_type) d->flag)
22349 {
22350 case VOID_FTYPE_VOID:
22351 type = void_ftype_void;
22352 break;
22353 case UINT64_FTYPE_VOID:
22354 type = uint64_ftype_void;
22355 break;
22356 case UINT64_FTYPE_PUNSIGNED:
22357 type = uint64_ftype_punsigned;
22358 break;
22359 case V32QI_FTYPE_PCCHAR:
22360 type = v32qi_ftype_pcchar;
22361 break;
22362 case V16QI_FTYPE_PCCHAR:
22363 type = v16qi_ftype_pcchar;
22364 break;
22365 case V8SF_FTYPE_PCV4SF:
22366 type = v8sf_ftype_pcv4sf;
22367 break;
22368 case V8SF_FTYPE_PCFLOAT:
22369 type = v8sf_ftype_pcfloat;
22370 break;
22371 case V4DF_FTYPE_PCV2DF:
22372 type = v4df_ftype_pcv2df;
22373 break;
22374 case V4DF_FTYPE_PCDOUBLE:
22375 type = v4df_ftype_pcdouble;
22376 break;
22377 case V4SF_FTYPE_PCFLOAT:
22378 type = v4sf_ftype_pcfloat;
22379 break;
22380 case V2DI_FTYPE_PV2DI:
22381 type = v2di_ftype_pv2di;
22382 break;
22383 case V2DF_FTYPE_PCDOUBLE:
22384 type = v2df_ftype_pcdouble;
22385 break;
22386 case V8SF_FTYPE_PCV8SF_V8SF:
22387 type = v8sf_ftype_pcv8sf_v8sf;
22388 break;
22389 case V4DF_FTYPE_PCV4DF_V4DF:
22390 type = v4df_ftype_pcv4df_v4df;
22391 break;
22392 case V4SF_FTYPE_V4SF_PCV2SF:
22393 type = v4sf_ftype_v4sf_pcv2sf;
22394 break;
22395 case V4SF_FTYPE_PCV4SF_V4SF:
22396 type = v4sf_ftype_pcv4sf_v4sf;
22397 break;
22398 case V2DF_FTYPE_V2DF_PCDOUBLE:
22399 type = v2df_ftype_v2df_pcdouble;
22400 break;
22401 case V2DF_FTYPE_PCV2DF_V2DF:
22402 type = v2df_ftype_pcv2df_v2df;
22403 break;
22404 case VOID_FTYPE_PV2SF_V4SF:
22405 type = void_ftype_pv2sf_v4sf;
22406 break;
22407 case VOID_FTYPE_PV4DI_V4DI:
22408 type = void_ftype_pv4di_v4di;
22409 break;
22410 case VOID_FTYPE_PV2DI_V2DI:
22411 type = void_ftype_pv2di_v2di;
22412 break;
22413 case VOID_FTYPE_PCHAR_V32QI:
22414 type = void_ftype_pchar_v32qi;
22415 break;
22416 case VOID_FTYPE_PCHAR_V16QI:
22417 type = void_ftype_pchar_v16qi;
22418 break;
22419 case VOID_FTYPE_PFLOAT_V8SF:
22420 type = void_ftype_pfloat_v8sf;
22421 break;
22422 case VOID_FTYPE_PFLOAT_V4SF:
22423 type = void_ftype_pfloat_v4sf;
22424 break;
22425 case VOID_FTYPE_PDOUBLE_V4DF:
22426 type = void_ftype_pdouble_v4df;
22427 break;
22428 case VOID_FTYPE_PDOUBLE_V2DF:
22429 type = void_ftype_pdouble_v2df;
22430 break;
22431 case VOID_FTYPE_PDI_DI:
22432 type = void_ftype_pdi_di;
22433 break;
22434 case VOID_FTYPE_PINT_INT:
22435 type = void_ftype_pint_int;
22436 break;
22437 case VOID_FTYPE_PV8SF_V8SF_V8SF:
22438 type = void_ftype_pv8sf_v8sf_v8sf;
22439 break;
22440 case VOID_FTYPE_PV4DF_V4DF_V4DF:
22441 type = void_ftype_pv4df_v4df_v4df;
22442 break;
22443 case VOID_FTYPE_PV4SF_V4SF_V4SF:
22444 type = void_ftype_pv4sf_v4sf_v4sf;
22445 break;
22446 case VOID_FTYPE_PV2DF_V2DF_V2DF:
22447 type = void_ftype_pv2df_v2df_v2df;
22448 break;
22449 default:
22450 gcc_unreachable ();
22451 }
22453 def_builtin (d->mask, d->name, type, d->code);
22454 }
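/* Usage sketch, for illustration only: the load/store builtins registered by
   the loop above take pointer operands.  For instance the V2DF_FTYPE_PCDOUBLE
   case corresponds to the SSE2 unaligned load (the builtin name
   __builtin_ia32_loadupd is assumed from the usual SSE2 naming):

     typedef double v2df __attribute__ ((vector_size (16)));

     v2df
     load_unaligned (const double *p)
     {
       return __builtin_ia32_loadupd (p);
     }
*/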
22456 /* Add all builtins with variable number of operands. */
22457 for (i = 0, d = bdesc_args;
22458 i < ARRAY_SIZE (bdesc_args);
22459 i++, d++)
22460 {
22461 tree type;
22463 if (d->name == 0)
22464 continue;
22466 switch ((enum ix86_builtin_type) d->flag)
22467 {
22468 case FLOAT_FTYPE_FLOAT:
22469 type = float_ftype_float;
22470 break;
22471 case INT_FTYPE_V8SF_V8SF_PTEST:
22472 type = int_ftype_v8sf_v8sf;
22473 break;
22474 case INT_FTYPE_V4DI_V4DI_PTEST:
22475 type = int_ftype_v4di_v4di;
22476 break;
22477 case INT_FTYPE_V4DF_V4DF_PTEST:
22478 type = int_ftype_v4df_v4df;
22479 break;
22480 case INT_FTYPE_V4SF_V4SF_PTEST:
22481 type = int_ftype_v4sf_v4sf;
22482 break;
22483 case INT_FTYPE_V2DI_V2DI_PTEST:
22484 type = int_ftype_v2di_v2di;
22485 break;
22486 case INT_FTYPE_V2DF_V2DF_PTEST:
22487 type = int_ftype_v2df_v2df;
22488 break;
22489 case INT_FTYPE_INT:
22490 type = int_ftype_int;
22491 break;
22492 case UINT64_FTYPE_INT:
22493 type = uint64_ftype_int;
22494 break;
22495 case INT64_FTYPE_INT64:
22496 type = int64_ftype_int64;
22497 break;
22498 case INT64_FTYPE_V4SF:
22499 type = int64_ftype_v4sf;
22500 break;
22501 case INT64_FTYPE_V2DF:
22502 type = int64_ftype_v2df;
22503 break;
22504 case INT_FTYPE_V16QI:
22505 type = int_ftype_v16qi;
22506 break;
22507 case INT_FTYPE_V8QI:
22508 type = int_ftype_v8qi;
22509 break;
22510 case INT_FTYPE_V8SF:
22511 type = int_ftype_v8sf;
22512 break;
22513 case INT_FTYPE_V4DF:
22514 type = int_ftype_v4df;
22515 break;
22516 case INT_FTYPE_V4SF:
22517 type = int_ftype_v4sf;
22518 break;
22519 case INT_FTYPE_V2DF:
22520 type = int_ftype_v2df;
22521 break;
22522 case V16QI_FTYPE_V16QI:
22523 type = v16qi_ftype_v16qi;
22524 break;
22525 case V8SI_FTYPE_V8SF:
22526 type = v8si_ftype_v8sf;
22527 break;
22528 case V8SI_FTYPE_V4SI:
22529 type = v8si_ftype_v4si;
22530 break;
22531 case V8HI_FTYPE_V8HI:
22532 type = v8hi_ftype_v8hi;
22533 break;
22534 case V8HI_FTYPE_V16QI:
22535 type = v8hi_ftype_v16qi;
22536 break;
22537 case V8QI_FTYPE_V8QI:
22538 type = v8qi_ftype_v8qi;
22539 break;
22540 case V8SF_FTYPE_V8SF:
22541 type = v8sf_ftype_v8sf;
22542 break;
22543 case V8SF_FTYPE_V8SI:
22544 type = v8sf_ftype_v8si;
22545 break;
22546 case V8SF_FTYPE_V4SF:
22547 type = v8sf_ftype_v4sf;
22548 break;
22549 case V4SI_FTYPE_V4DF:
22550 type = v4si_ftype_v4df;
22551 break;
22552 case V4SI_FTYPE_V4SI:
22553 type = v4si_ftype_v4si;
22554 break;
22555 case V4SI_FTYPE_V16QI:
22556 type = v4si_ftype_v16qi;
22557 break;
22558 case V4SI_FTYPE_V8SI:
22559 type = v4si_ftype_v8si;
22560 break;
22561 case V4SI_FTYPE_V8HI:
22562 type = v4si_ftype_v8hi;
22563 break;
22564 case V4SI_FTYPE_V4SF:
22565 type = v4si_ftype_v4sf;
22566 break;
22567 case V4SI_FTYPE_V2DF:
22568 type = v4si_ftype_v2df;
22569 break;
22570 case V4HI_FTYPE_V4HI:
22571 type = v4hi_ftype_v4hi;
22572 break;
22573 case V4DF_FTYPE_V4DF:
22574 type = v4df_ftype_v4df;
22575 break;
22576 case V4DF_FTYPE_V4SI:
22577 type = v4df_ftype_v4si;
22578 break;
22579 case V4DF_FTYPE_V4SF:
22580 type = v4df_ftype_v4sf;
22581 break;
22582 case V4DF_FTYPE_V2DF:
22583 type = v4df_ftype_v2df;
22584 break;
22585 case V4SF_FTYPE_V4SF:
22586 case V4SF_FTYPE_V4SF_VEC_MERGE:
22587 type = v4sf_ftype_v4sf;
22588 break;
22589 case V4SF_FTYPE_V8SF:
22590 type = v4sf_ftype_v8sf;
22591 break;
22592 case V4SF_FTYPE_V4SI:
22593 type = v4sf_ftype_v4si;
22594 break;
22595 case V4SF_FTYPE_V4DF:
22596 type = v4sf_ftype_v4df;
22597 break;
22598 case V4SF_FTYPE_V2DF:
22599 type = v4sf_ftype_v2df;
22600 break;
22601 case V2DI_FTYPE_V2DI:
22602 type = v2di_ftype_v2di;
22603 break;
22604 case V2DI_FTYPE_V16QI:
22605 type = v2di_ftype_v16qi;
22606 break;
22607 case V2DI_FTYPE_V8HI:
22608 type = v2di_ftype_v8hi;
22609 break;
22610 case V2DI_FTYPE_V4SI:
22611 type = v2di_ftype_v4si;
22612 break;
22613 case V2SI_FTYPE_V2SI:
22614 type = v2si_ftype_v2si;
22615 break;
22616 case V2SI_FTYPE_V4SF:
22617 type = v2si_ftype_v4sf;
22618 break;
22619 case V2SI_FTYPE_V2DF:
22620 type = v2si_ftype_v2df;
22621 break;
22622 case V2SI_FTYPE_V2SF:
22623 type = v2si_ftype_v2sf;
22624 break;
22625 case V2DF_FTYPE_V4DF:
22626 type = v2df_ftype_v4df;
22627 break;
22628 case V2DF_FTYPE_V4SF:
22629 type = v2df_ftype_v4sf;
22630 break;
22631 case V2DF_FTYPE_V2DF:
22632 case V2DF_FTYPE_V2DF_VEC_MERGE:
22633 type = v2df_ftype_v2df;
22634 break;
22635 case V2DF_FTYPE_V2SI:
22636 type = v2df_ftype_v2si;
22637 break;
22638 case V2DF_FTYPE_V4SI:
22639 type = v2df_ftype_v4si;
22640 break;
22641 case V2SF_FTYPE_V2SF:
22642 type = v2sf_ftype_v2sf;
22643 break;
22644 case V2SF_FTYPE_V2SI:
22645 type = v2sf_ftype_v2si;
22646 break;
22647 case V16QI_FTYPE_V16QI_V16QI:
22648 type = v16qi_ftype_v16qi_v16qi;
22649 break;
22650 case V16QI_FTYPE_V8HI_V8HI:
22651 type = v16qi_ftype_v8hi_v8hi;
22652 break;
22653 case V8QI_FTYPE_V8QI_V8QI:
22654 type = v8qi_ftype_v8qi_v8qi;
22655 break;
22656 case V8QI_FTYPE_V4HI_V4HI:
22657 type = v8qi_ftype_v4hi_v4hi;
22658 break;
22659 case V8HI_FTYPE_V8HI_V8HI:
22660 case V8HI_FTYPE_V8HI_V8HI_COUNT:
22661 type = v8hi_ftype_v8hi_v8hi;
22662 break;
22663 case V8HI_FTYPE_V16QI_V16QI:
22664 type = v8hi_ftype_v16qi_v16qi;
22665 break;
22666 case V8HI_FTYPE_V4SI_V4SI:
22667 type = v8hi_ftype_v4si_v4si;
22668 break;
22669 case V8HI_FTYPE_V8HI_SI_COUNT:
22670 type = v8hi_ftype_v8hi_int;
22671 break;
22672 case V8SF_FTYPE_V8SF_V8SF:
22673 type = v8sf_ftype_v8sf_v8sf;
22674 break;
22675 case V8SF_FTYPE_V8SF_V8SI:
22676 type = v8sf_ftype_v8sf_v8si;
22677 break;
22678 case V4SI_FTYPE_V4SI_V4SI:
22679 case V4SI_FTYPE_V4SI_V4SI_COUNT:
22680 type = v4si_ftype_v4si_v4si;
22681 break;
22682 case V4SI_FTYPE_V8HI_V8HI:
22683 type = v4si_ftype_v8hi_v8hi;
22684 break;
22685 case V4SI_FTYPE_V4SF_V4SF:
22686 type = v4si_ftype_v4sf_v4sf;
22687 break;
22688 case V4SI_FTYPE_V2DF_V2DF:
22689 type = v4si_ftype_v2df_v2df;
22690 break;
22691 case V4SI_FTYPE_V4SI_SI_COUNT:
22692 type = v4si_ftype_v4si_int;
22693 break;
22694 case V4HI_FTYPE_V4HI_V4HI:
22695 case V4HI_FTYPE_V4HI_V4HI_COUNT:
22696 type = v4hi_ftype_v4hi_v4hi;
22697 break;
22698 case V4HI_FTYPE_V8QI_V8QI:
22699 type = v4hi_ftype_v8qi_v8qi;
22700 break;
22701 case V4HI_FTYPE_V2SI_V2SI:
22702 type = v4hi_ftype_v2si_v2si;
22703 break;
22704 case V4HI_FTYPE_V4HI_SI_COUNT:
22705 type = v4hi_ftype_v4hi_int;
22706 break;
22707 case V4DF_FTYPE_V4DF_V4DF:
22708 type = v4df_ftype_v4df_v4df;
22709 break;
22710 case V4DF_FTYPE_V4DF_V4DI:
22711 type = v4df_ftype_v4df_v4di;
22712 break;
22713 case V4SF_FTYPE_V4SF_V4SF:
22714 case V4SF_FTYPE_V4SF_V4SF_SWAP:
22715 type = v4sf_ftype_v4sf_v4sf;
22716 break;
22717 case V4SF_FTYPE_V4SF_V4SI:
22718 type = v4sf_ftype_v4sf_v4si;
22719 break;
22720 case V4SF_FTYPE_V4SF_V2SI:
22721 type = v4sf_ftype_v4sf_v2si;
22722 break;
22723 case V4SF_FTYPE_V4SF_V2DF:
22724 type = v4sf_ftype_v4sf_v2df;
22725 break;
22726 case V4SF_FTYPE_V4SF_DI:
22727 type = v4sf_ftype_v4sf_int64;
22728 break;
22729 case V4SF_FTYPE_V4SF_SI:
22730 type = v4sf_ftype_v4sf_int;
22731 break;
22732 case V2DI_FTYPE_V2DI_V2DI:
22733 case V2DI_FTYPE_V2DI_V2DI_COUNT:
22734 type = v2di_ftype_v2di_v2di;
22735 break;
22736 case V2DI_FTYPE_V16QI_V16QI:
22737 type = v2di_ftype_v16qi_v16qi;
22738 break;
22739 case V2DI_FTYPE_V4SI_V4SI:
22740 type = v2di_ftype_v4si_v4si;
22741 break;
22742 case V2DI_FTYPE_V2DI_V16QI:
22743 type = v2di_ftype_v2di_v16qi;
22744 break;
22745 case V2DI_FTYPE_V2DF_V2DF:
22746 type = v2di_ftype_v2df_v2df;
22747 break;
22748 case V2DI_FTYPE_V2DI_SI_COUNT:
22749 type = v2di_ftype_v2di_int;
22750 break;
22751 case V2SI_FTYPE_V2SI_V2SI:
22752 case V2SI_FTYPE_V2SI_V2SI_COUNT:
22753 type = v2si_ftype_v2si_v2si;
22754 break;
22755 case V2SI_FTYPE_V4HI_V4HI:
22756 type = v2si_ftype_v4hi_v4hi;
22757 break;
22758 case V2SI_FTYPE_V2SF_V2SF:
22759 type = v2si_ftype_v2sf_v2sf;
22760 break;
22761 case V2SI_FTYPE_V2SI_SI_COUNT:
22762 type = v2si_ftype_v2si_int;
22763 break;
22764 case V2DF_FTYPE_V2DF_V2DF:
22765 case V2DF_FTYPE_V2DF_V2DF_SWAP:
22766 type = v2df_ftype_v2df_v2df;
22767 break;
22768 case V2DF_FTYPE_V2DF_V4SF:
22769 type = v2df_ftype_v2df_v4sf;
22770 break;
22771 case V2DF_FTYPE_V2DF_V2DI:
22772 type = v2df_ftype_v2df_v2di;
22773 break;
22774 case V2DF_FTYPE_V2DF_DI:
22775 type = v2df_ftype_v2df_int64;
22776 break;
22777 case V2DF_FTYPE_V2DF_SI:
22778 type = v2df_ftype_v2df_int;
22779 break;
22780 case V2SF_FTYPE_V2SF_V2SF:
22781 type = v2sf_ftype_v2sf_v2sf;
22782 break;
22783 case V1DI_FTYPE_V1DI_V1DI:
22784 case V1DI_FTYPE_V1DI_V1DI_COUNT:
22785 type = v1di_ftype_v1di_v1di;
22786 break;
22787 case V1DI_FTYPE_V8QI_V8QI:
22788 type = v1di_ftype_v8qi_v8qi;
22789 break;
22790 case V1DI_FTYPE_V2SI_V2SI:
22791 type = v1di_ftype_v2si_v2si;
22792 break;
22793 case V1DI_FTYPE_V1DI_SI_COUNT:
22794 type = v1di_ftype_v1di_int;
22795 break;
22796 case UINT64_FTYPE_UINT64_UINT64:
22797 type = uint64_ftype_uint64_uint64;
22798 break;
22799 case UINT_FTYPE_UINT_UINT:
22800 type = unsigned_ftype_unsigned_unsigned;
22801 break;
22802 case UINT_FTYPE_UINT_USHORT:
22803 type = unsigned_ftype_unsigned_ushort;
22804 break;
22805 case UINT_FTYPE_UINT_UCHAR:
22806 type = unsigned_ftype_unsigned_uchar;
22807 break;
22808 case UINT16_FTYPE_UINT16_INT:
22809 type = ushort_ftype_ushort_int;
22810 break;
22811 case UINT8_FTYPE_UINT8_INT:
22812 type = uchar_ftype_uchar_int;
22813 break;
22814 case V8HI_FTYPE_V8HI_INT:
22815 type = v8hi_ftype_v8hi_int;
22816 break;
22817 case V8SF_FTYPE_V8SF_INT:
22818 type = v8sf_ftype_v8sf_int;
22819 break;
22820 case V4SI_FTYPE_V4SI_INT:
22821 type = v4si_ftype_v4si_int;
22822 break;
22823 case V4SI_FTYPE_V8SI_INT:
22824 type = v4si_ftype_v8si_int;
22825 break;
22826 case V4HI_FTYPE_V4HI_INT:
22827 type = v4hi_ftype_v4hi_int;
22828 break;
22829 case V4DF_FTYPE_V4DF_INT:
22830 type = v4df_ftype_v4df_int;
22831 break;
22832 case V4SF_FTYPE_V4SF_INT:
22833 type = v4sf_ftype_v4sf_int;
22834 break;
22835 case V4SF_FTYPE_V8SF_INT:
22836 type = v4sf_ftype_v8sf_int;
22837 break;
22838 case V2DI_FTYPE_V2DI_INT:
22839 case V2DI2TI_FTYPE_V2DI_INT:
22840 type = v2di_ftype_v2di_int;
22841 break;
22842 case V2DF_FTYPE_V2DF_INT:
22843 type = v2df_ftype_v2df_int;
22844 break;
22845 case V2DF_FTYPE_V4DF_INT:
22846 type = v2df_ftype_v4df_int;
22847 break;
22848 case V16QI_FTYPE_V16QI_V16QI_V16QI:
22849 type = v16qi_ftype_v16qi_v16qi_v16qi;
22850 break;
22851 case V8SF_FTYPE_V8SF_V8SF_V8SF:
22852 type = v8sf_ftype_v8sf_v8sf_v8sf;
22853 break;
22854 case V4DF_FTYPE_V4DF_V4DF_V4DF:
22855 type = v4df_ftype_v4df_v4df_v4df;
22856 break;
22857 case V4SF_FTYPE_V4SF_V4SF_V4SF:
22858 type = v4sf_ftype_v4sf_v4sf_v4sf;
22859 break;
22860 case V2DF_FTYPE_V2DF_V2DF_V2DF:
22861 type = v2df_ftype_v2df_v2df_v2df;
22862 break;
22863 case V16QI_FTYPE_V16QI_V16QI_INT:
22864 type = v16qi_ftype_v16qi_v16qi_int;
22865 break;
22866 case V8SI_FTYPE_V8SI_V8SI_INT:
22867 type = v8si_ftype_v8si_v8si_int;
22868 break;
22869 case V8SI_FTYPE_V8SI_V4SI_INT:
22870 type = v8si_ftype_v8si_v4si_int;
22871 break;
22872 case V8HI_FTYPE_V8HI_V8HI_INT:
22873 type = v8hi_ftype_v8hi_v8hi_int;
22874 break;
22875 case V8SF_FTYPE_V8SF_V8SF_INT:
22876 type = v8sf_ftype_v8sf_v8sf_int;
22877 break;
22878 case V8SF_FTYPE_V8SF_V4SF_INT:
22879 type = v8sf_ftype_v8sf_v4sf_int;
22880 break;
22881 case V4SI_FTYPE_V4SI_V4SI_INT:
22882 type = v4si_ftype_v4si_v4si_int;
22883 break;
22884 case V4DF_FTYPE_V4DF_V4DF_INT:
22885 type = v4df_ftype_v4df_v4df_int;
22886 break;
22887 case V4DF_FTYPE_V4DF_V2DF_INT:
22888 type = v4df_ftype_v4df_v2df_int;
22889 break;
22890 case V4SF_FTYPE_V4SF_V4SF_INT:
22891 type = v4sf_ftype_v4sf_v4sf_int;
22892 break;
22893 case V2DI_FTYPE_V2DI_V2DI_INT:
22894 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
22895 type = v2di_ftype_v2di_v2di_int;
22896 break;
22897 case V2DF_FTYPE_V2DF_V2DF_INT:
22898 type = v2df_ftype_v2df_v2df_int;
22899 break;
22900 case V2DI_FTYPE_V2DI_UINT_UINT:
22901 type = v2di_ftype_v2di_unsigned_unsigned;
22902 break;
22903 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
22904 type = v2di_ftype_v2di_v2di_unsigned_unsigned;
22905 break;
22906 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
22907 type = v1di_ftype_v1di_v1di_int;
22908 break;
22909 default:
22910 gcc_unreachable ();
22913 def_builtin_const (d->mask, d->name, type, d->code);
22916 /* pcmpestr[im] insns. */
22917 for (i = 0, d = bdesc_pcmpestr;
22918 i < ARRAY_SIZE (bdesc_pcmpestr);
22919 i++, d++)
22921 if (d->code == IX86_BUILTIN_PCMPESTRM128)
22922 ftype = v16qi_ftype_v16qi_int_v16qi_int_int;
22923 else
22924 ftype = int_ftype_v16qi_int_v16qi_int_int;
22925 def_builtin_const (d->mask, d->name, ftype, d->code);
22928 /* pcmpistr[im] insns. */
22929 for (i = 0, d = bdesc_pcmpistr;
22930 i < ARRAY_SIZE (bdesc_pcmpistr);
22931 i++, d++)
22933 if (d->code == IX86_BUILTIN_PCMPISTRM128)
22934 ftype = v16qi_ftype_v16qi_v16qi_int;
22935 else
22936 ftype = int_ftype_v16qi_v16qi_int;
22937 def_builtin_const (d->mask, d->name, ftype, d->code);
22940 /* comi/ucomi insns. */
22941 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
22942 if (d->mask == OPTION_MASK_ISA_SSE2)
22943 def_builtin_const (d->mask, d->name, int_ftype_v2df_v2df, d->code);
22944 else
22945 def_builtin_const (d->mask, d->name, int_ftype_v4sf_v4sf, d->code);
22947 /* SSE */
22948 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_ldmxcsr", void_ftype_unsigned, IX86_BUILTIN_LDMXCSR);
22949 def_builtin (OPTION_MASK_ISA_SSE, "__builtin_ia32_stmxcsr", unsigned_ftype_void, IX86_BUILTIN_STMXCSR);
22951 /* SSE or 3DNow!A */
22952 def_builtin (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_maskmovq", void_ftype_v8qi_v8qi_pchar, IX86_BUILTIN_MASKMOVQ);
22954 /* SSE2 */
22955 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_maskmovdqu", void_ftype_v16qi_v16qi_pchar, IX86_BUILTIN_MASKMOVDQU);
22957 def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_clflush", void_ftype_pcvoid, IX86_BUILTIN_CLFLUSH);
22958 x86_mfence = def_builtin (OPTION_MASK_ISA_SSE2, "__builtin_ia32_mfence", void_ftype_void, IX86_BUILTIN_MFENCE);
22960 /* SSE3. */
22961 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_monitor", void_ftype_pcvoid_unsigned_unsigned, IX86_BUILTIN_MONITOR);
22962 def_builtin (OPTION_MASK_ISA_SSE3, "__builtin_ia32_mwait", void_ftype_unsigned_unsigned, IX86_BUILTIN_MWAIT);
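/* Illustrative note (assumed user-level view, not compiler code): these two
   builtins map to the monitor/mwait instructions and are normally reached
   through the pmmintrin.h wrappers, roughly

     __builtin_ia32_monitor (addr, 0, 0);   /- set up the monitored address -/
     __builtin_ia32_mwait (0, 0);           /- wait for a write to it -/

   Both the extension and hint arguments are plain unsigned ints here.  */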
22964 /* AES */
22965 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenc128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENC128);
22966 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesenclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESENCLAST128);
22967 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdec128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDEC128);
22968 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesdeclast128", v2di_ftype_v2di_v2di, IX86_BUILTIN_AESDECLAST128);
22969 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aesimc128", v2di_ftype_v2di, IX86_BUILTIN_AESIMC128);
22970 def_builtin_const (OPTION_MASK_ISA_AES, "__builtin_ia32_aeskeygenassist128", v2di_ftype_v2di_int, IX86_BUILTIN_AESKEYGENASSIST128);
22972 /* PCLMUL */
22973 def_builtin_const (OPTION_MASK_ISA_PCLMUL, "__builtin_ia32_pclmulqdq128", v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PCLMULQDQ128);
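/* Illustrative note (a sketch, not compiler code): user programs normally
   reach __builtin_ia32_pclmulqdq128 through the wrapper in wmmintrin.h,
   roughly

     __m128i r = _mm_clmulepi64_si128 (a, b, 0x00);

   where the last operand must be an immediate; the wrapper name is owned by
   the intrinsic header, not by this file.  */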
22975 /* AVX */
22976 def_builtin (OPTION_MASK_ISA_AVX, "__builtin_ia32_vzeroupper", void_ftype_void,
22977 TARGET_64BIT ? IX86_BUILTIN_VZEROUPPER_REX64 : IX86_BUILTIN_VZEROUPPER);
22979 /* Access to the vec_init patterns. */
22980 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
22981 integer_type_node, NULL_TREE);
22982 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v2si", ftype, IX86_BUILTIN_VEC_INIT_V2SI);
22984 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
22985 short_integer_type_node,
22986 short_integer_type_node,
22987 short_integer_type_node, NULL_TREE);
22988 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v4hi", ftype, IX86_BUILTIN_VEC_INIT_V4HI);
22990 ftype = build_function_type_list (V8QI_type_node, char_type_node,
22991 char_type_node, char_type_node,
22992 char_type_node, char_type_node,
22993 char_type_node, char_type_node,
22994 char_type_node, NULL_TREE);
22995 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_init_v8qi", ftype, IX86_BUILTIN_VEC_INIT_V8QI);
22997 /* Access to the vec_extract patterns. */
22998 ftype = build_function_type_list (double_type_node, V2DF_type_node,
22999 integer_type_node, NULL_TREE);
23000 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2df", ftype, IX86_BUILTIN_VEC_EXT_V2DF);
23002 ftype = build_function_type_list (long_long_integer_type_node,
23003 V2DI_type_node, integer_type_node,
23004 NULL_TREE);
23005 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v2di", ftype, IX86_BUILTIN_VEC_EXT_V2DI);
23007 ftype = build_function_type_list (float_type_node, V4SF_type_node,
23008 integer_type_node, NULL_TREE);
23009 def_builtin_const (OPTION_MASK_ISA_SSE, "__builtin_ia32_vec_ext_v4sf", ftype, IX86_BUILTIN_VEC_EXT_V4SF);
23011 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
23012 integer_type_node, NULL_TREE);
23013 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v4si", ftype, IX86_BUILTIN_VEC_EXT_V4SI);
23015 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
23016 integer_type_node, NULL_TREE);
23017 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v8hi", ftype, IX86_BUILTIN_VEC_EXT_V8HI);
23019 ftype = build_function_type_list (intHI_type_node, V4HI_type_node,
23020 integer_type_node, NULL_TREE);
23021 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_ext_v4hi", ftype, IX86_BUILTIN_VEC_EXT_V4HI);
23023 ftype = build_function_type_list (intSI_type_node, V2SI_type_node,
23024 integer_type_node, NULL_TREE);
23025 def_builtin_const (OPTION_MASK_ISA_MMX, "__builtin_ia32_vec_ext_v2si", ftype, IX86_BUILTIN_VEC_EXT_V2SI);
23027 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
23028 integer_type_node, NULL_TREE);
23029 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_ext_v16qi", ftype, IX86_BUILTIN_VEC_EXT_V16QI);
23031 /* Access to the vec_set patterns. */
23032 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
23033 intDI_type_node,
23034 integer_type_node, NULL_TREE);
23035 def_builtin_const (OPTION_MASK_ISA_SSE4_1 | OPTION_MASK_ISA_64BIT, "__builtin_ia32_vec_set_v2di", ftype, IX86_BUILTIN_VEC_SET_V2DI);
23037 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
23038 float_type_node,
23039 integer_type_node, NULL_TREE);
23040 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4sf", ftype, IX86_BUILTIN_VEC_SET_V4SF);
23042 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
23043 intSI_type_node,
23044 integer_type_node, NULL_TREE);
23045 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v4si", ftype, IX86_BUILTIN_VEC_SET_V4SI);
23047 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
23048 intHI_type_node,
23049 integer_type_node, NULL_TREE);
23050 def_builtin_const (OPTION_MASK_ISA_SSE2, "__builtin_ia32_vec_set_v8hi", ftype, IX86_BUILTIN_VEC_SET_V8HI);
23052 ftype = build_function_type_list (V4HI_type_node, V4HI_type_node,
23053 intHI_type_node,
23054 integer_type_node, NULL_TREE);
23055 def_builtin_const (OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_3DNOW_A, "__builtin_ia32_vec_set_v4hi", ftype, IX86_BUILTIN_VEC_SET_V4HI);
23057 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
23058 intQI_type_node,
23059 integer_type_node, NULL_TREE);
23060 def_builtin_const (OPTION_MASK_ISA_SSE4_1, "__builtin_ia32_vec_set_v16qi", ftype, IX86_BUILTIN_VEC_SET_V16QI);
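/* Illustrative note (assumed user-level view, not compiler code): the
   vec_ext/vec_set builtins registered above back the element accessors in
   the intrinsic headers, e.g. roughly

     float f = __builtin_ia32_vec_ext_v4sf (v, 0);
     w = __builtin_ia32_vec_set_v8hi (w, x, 3);

   with the selector required to be an in-range integer constant (see
   get_element_number below).  */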
23063 /* Internal method for ix86_init_builtins. */
23065 static void
23066 ix86_init_builtins_va_builtins_abi (void)
23068 tree ms_va_ref, sysv_va_ref;
23069 tree fnvoid_va_end_ms, fnvoid_va_end_sysv;
23070 tree fnvoid_va_start_ms, fnvoid_va_start_sysv;
23071 tree fnvoid_va_copy_ms, fnvoid_va_copy_sysv;
23072 tree fnattr_ms = NULL_TREE, fnattr_sysv = NULL_TREE;
23074 if (!TARGET_64BIT)
23075 return;
23076 fnattr_ms = build_tree_list (get_identifier ("ms_abi"), NULL_TREE);
23077 fnattr_sysv = build_tree_list (get_identifier ("sysv_abi"), NULL_TREE);
23078 ms_va_ref = build_reference_type (ms_va_list_type_node);
23079 sysv_va_ref =
23080 build_pointer_type (TREE_TYPE (sysv_va_list_type_node));
23082 fnvoid_va_end_ms =
23083 build_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
23084 fnvoid_va_start_ms =
23085 build_varargs_function_type_list (void_type_node, ms_va_ref, NULL_TREE);
23086 fnvoid_va_end_sysv =
23087 build_function_type_list (void_type_node, sysv_va_ref, NULL_TREE);
23088 fnvoid_va_start_sysv =
23089 build_varargs_function_type_list (void_type_node, sysv_va_ref,
23090 NULL_TREE);
23091 fnvoid_va_copy_ms =
23092 build_function_type_list (void_type_node, ms_va_ref, ms_va_list_type_node,
23093 NULL_TREE);
23094 fnvoid_va_copy_sysv =
23095 build_function_type_list (void_type_node, sysv_va_ref,
23096 sysv_va_ref, NULL_TREE);
23098 add_builtin_function ("__builtin_ms_va_start", fnvoid_va_start_ms,
23099 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_ms);
23100 add_builtin_function ("__builtin_ms_va_end", fnvoid_va_end_ms,
23101 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_ms);
23102 add_builtin_function ("__builtin_ms_va_copy", fnvoid_va_copy_ms,
23103 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_ms);
23104 add_builtin_function ("__builtin_sysv_va_start", fnvoid_va_start_sysv,
23105 BUILT_IN_VA_START, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23106 add_builtin_function ("__builtin_sysv_va_end", fnvoid_va_end_sysv,
23107 BUILT_IN_VA_END, BUILT_IN_NORMAL, NULL, fnattr_sysv);
23108 add_builtin_function ("__builtin_sysv_va_copy", fnvoid_va_copy_sysv,
23109 BUILT_IN_VA_COPY, BUILT_IN_NORMAL, NULL, fnattr_sysv);
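/* A minimal usage sketch (assumed user code, not part of GCC): on 64-bit
   targets the builtins registered above let varargs of an ms_abi function
   be walked regardless of the default ABI, e.g.

     int __attribute__((ms_abi))
     sum (int n, ...)
     {
       __builtin_ms_va_list ap;
       int i, s = 0;
       __builtin_ms_va_start (ap, n);
       for (i = 0; i < n; i++)
         s += __builtin_va_arg (ap, int);
       __builtin_ms_va_end (ap);
       return s;
     }
*/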
23112 static void
23113 ix86_init_builtins (void)
23115 tree float128_type_node = make_node (REAL_TYPE);
23116 tree ftype, decl;
23118 /* The __float80 type. */
23119 if (TYPE_MODE (long_double_type_node) == XFmode)
23120 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
23121 "__float80");
23122 else
23124 /* The __float80 type. */
23125 tree float80_type_node = make_node (REAL_TYPE);
23127 TYPE_PRECISION (float80_type_node) = 80;
23128 layout_type (float80_type_node);
23129 (*lang_hooks.types.register_builtin_type) (float80_type_node,
23130 "__float80");
23133 /* The __float128 type. */
23134 TYPE_PRECISION (float128_type_node) = 128;
23135 layout_type (float128_type_node);
23136 (*lang_hooks.types.register_builtin_type) (float128_type_node,
23137 "__float128");
23139 /* TFmode support builtins. */
23140 ftype = build_function_type (float128_type_node, void_list_node);
23141 decl = add_builtin_function ("__builtin_infq", ftype,
23142 IX86_BUILTIN_INFQ, BUILT_IN_MD,
23143 NULL, NULL_TREE);
23144 ix86_builtins[(int) IX86_BUILTIN_INFQ] = decl;
23146 decl = add_builtin_function ("__builtin_huge_valq", ftype,
23147 IX86_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
23148 NULL, NULL_TREE);
23149 ix86_builtins[(int) IX86_BUILTIN_HUGE_VALQ] = decl;
23151   /* We will expand them to a normal call if SSE2 isn't available, since
23152      they are used by libgcc.  */
23153 ftype = build_function_type_list (float128_type_node,
23154 float128_type_node,
23155 NULL_TREE);
23156 decl = add_builtin_function ("__builtin_fabsq", ftype,
23157 IX86_BUILTIN_FABSQ, BUILT_IN_MD,
23158 "__fabstf2", NULL_TREE);
23159 ix86_builtins[(int) IX86_BUILTIN_FABSQ] = decl;
23160 TREE_READONLY (decl) = 1;
23162 ftype = build_function_type_list (float128_type_node,
23163 float128_type_node,
23164 float128_type_node,
23165 NULL_TREE);
23166 decl = add_builtin_function ("__builtin_copysignq", ftype,
23167 IX86_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
23168 "__copysigntf3", NULL_TREE);
23169 ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = decl;
23170 TREE_READONLY (decl) = 1;
23172 ix86_init_mmx_sse_builtins ();
23173 if (TARGET_64BIT)
23174 ix86_init_builtins_va_builtins_abi ();
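/* Illustrative note (a sketch, not compiler code): the TFmode builtins
   registered above are usable directly from user code, e.g.

     __float128 x = __builtin_infq ();
     __float128 y = __builtin_fabsq (x);
     __float128 z = __builtin_copysignq (y, (__float128) -1.0);

   fabsq/copysignq fall back to the libgcc routines (__fabstf2,
   __copysigntf3) when SSE2 is not available, as noted above.  */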
23177 /* Errors in the source file can cause expand_expr to return const0_rtx
23178 where we expect a vector. To avoid crashing, use one of the vector
23179 clear instructions. */
23180 static rtx
23181 safe_vector_operand (rtx x, enum machine_mode mode)
23183 if (x == const0_rtx)
23184 x = CONST0_RTX (mode);
23185 return x;
23188 /* Subroutine of ix86_expand_builtin to take care of binop insns. */
23190 static rtx
23191 ix86_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
23193 rtx pat;
23194 tree arg0 = CALL_EXPR_ARG (exp, 0);
23195 tree arg1 = CALL_EXPR_ARG (exp, 1);
23196 rtx op0 = expand_normal (arg0);
23197 rtx op1 = expand_normal (arg1);
23198 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23199 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23200 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
23202 if (VECTOR_MODE_P (mode0))
23203 op0 = safe_vector_operand (op0, mode0);
23204 if (VECTOR_MODE_P (mode1))
23205 op1 = safe_vector_operand (op1, mode1);
23207 if (optimize || !target
23208 || GET_MODE (target) != tmode
23209 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23210 target = gen_reg_rtx (tmode);
23212 if (GET_MODE (op1) == SImode && mode1 == TImode)
23214 rtx x = gen_reg_rtx (V4SImode);
23215 emit_insn (gen_sse2_loadd (x, op1));
23216 op1 = gen_lowpart (TImode, x);
23219 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
23220 op0 = copy_to_mode_reg (mode0, op0);
23221 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
23222 op1 = copy_to_mode_reg (mode1, op1);
23224 pat = GEN_FCN (icode) (target, op0, op1);
23225 if (! pat)
23226 return 0;
23228 emit_insn (pat);
23230 return target;
23233 /* Subroutine of ix86_expand_args_builtin to take care of scalar unop
23234 insns with vec_merge. */
23236 static rtx
23237 ix86_expand_unop_vec_merge_builtin (enum insn_code icode, tree exp,
23238 rtx target)
23240 rtx pat;
23241 tree arg0 = CALL_EXPR_ARG (exp, 0);
23242 rtx op1, op0 = expand_normal (arg0);
23243 enum machine_mode tmode = insn_data[icode].operand[0].mode;
23244 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
23246 if (optimize || !target
23247 || GET_MODE (target) != tmode
23248 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
23249 target = gen_reg_rtx (tmode);
23251 if (VECTOR_MODE_P (mode0))
23252 op0 = safe_vector_operand (op0, mode0);
23254 if ((optimize && !register_operand (op0, mode0))
23255 || ! (*insn_data[icode].operand[1].predicate) (op0, mode0))
23256 op0 = copy_to_mode_reg (mode0, op0);
23258 op1 = op0;
23259 if (! (*insn_data[icode].operand[2].predicate) (op1, mode0))
23260 op1 = copy_to_mode_reg (mode0, op1);
23262 pat = GEN_FCN (icode) (target, op0, op1);
23263 if (! pat)
23264 return 0;
23265 emit_insn (pat);
23266 return target;
23269 /* Subroutine of ix86_expand_builtin to take care of comparison insns. */
23271 static rtx
23272 ix86_expand_sse_compare (const struct builtin_description *d,
23273 tree exp, rtx target, bool swap)
23275 rtx pat;
23276 tree arg0 = CALL_EXPR_ARG (exp, 0);
23277 tree arg1 = CALL_EXPR_ARG (exp, 1);
23278 rtx op0 = expand_normal (arg0);
23279 rtx op1 = expand_normal (arg1);
23280 rtx op2;
23281 enum machine_mode tmode = insn_data[d->icode].operand[0].mode;
23282 enum machine_mode mode0 = insn_data[d->icode].operand[1].mode;
23283 enum machine_mode mode1 = insn_data[d->icode].operand[2].mode;
23284 enum rtx_code comparison = d->comparison;
23286 if (VECTOR_MODE_P (mode0))
23287 op0 = safe_vector_operand (op0, mode0);
23288 if (VECTOR_MODE_P (mode1))
23289 op1 = safe_vector_operand (op1, mode1);
23291 /* Swap operands if we have a comparison that isn't available in
23292 hardware. */
23293 if (swap)
23295 rtx tmp = gen_reg_rtx (mode1);
23296 emit_move_insn (tmp, op1);
23297 op1 = op0;
23298 op0 = tmp;
23301 if (optimize || !target
23302 || GET_MODE (target) != tmode
23303 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode))
23304 target = gen_reg_rtx (tmode);
23306 if ((optimize && !register_operand (op0, mode0))
23307 || ! (*insn_data[d->icode].operand[1].predicate) (op0, mode0))
23308 op0 = copy_to_mode_reg (mode0, op0);
23309 if ((optimize && !register_operand (op1, mode1))
23310 || ! (*insn_data[d->icode].operand[2].predicate) (op1, mode1))
23311 op1 = copy_to_mode_reg (mode1, op1);
23313 op2 = gen_rtx_fmt_ee (comparison, mode0, op0, op1);
23314 pat = GEN_FCN (d->icode) (target, op0, op1, op2);
23315 if (! pat)
23316 return 0;
23317 emit_insn (pat);
23318 return target;
23321 /* Subroutine of ix86_expand_builtin to take care of comi insns. */
23323 static rtx
23324 ix86_expand_sse_comi (const struct builtin_description *d, tree exp,
23325 rtx target)
23327 rtx pat;
23328 tree arg0 = CALL_EXPR_ARG (exp, 0);
23329 tree arg1 = CALL_EXPR_ARG (exp, 1);
23330 rtx op0 = expand_normal (arg0);
23331 rtx op1 = expand_normal (arg1);
23332 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23333 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23334 enum rtx_code comparison = d->comparison;
23336 if (VECTOR_MODE_P (mode0))
23337 op0 = safe_vector_operand (op0, mode0);
23338 if (VECTOR_MODE_P (mode1))
23339 op1 = safe_vector_operand (op1, mode1);
23341 /* Swap operands if we have a comparison that isn't available in
23342 hardware. */
23343 if (d->flag & BUILTIN_DESC_SWAP_OPERANDS)
23345 rtx tmp = op1;
23346 op1 = op0;
23347 op0 = tmp;
23350 target = gen_reg_rtx (SImode);
23351 emit_move_insn (target, const0_rtx);
23352 target = gen_rtx_SUBREG (QImode, target, 0);
23354 if ((optimize && !register_operand (op0, mode0))
23355 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23356 op0 = copy_to_mode_reg (mode0, op0);
23357 if ((optimize && !register_operand (op1, mode1))
23358 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23359 op1 = copy_to_mode_reg (mode1, op1);
23361 pat = GEN_FCN (d->icode) (op0, op1);
23362 if (! pat)
23363 return 0;
23364 emit_insn (pat);
23365 emit_insn (gen_rtx_SET (VOIDmode,
23366 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23367 gen_rtx_fmt_ee (comparison, QImode,
23368 SET_DEST (pat),
23369 const0_rtx)));
23371 return SUBREG_REG (target);
23374 /* Subroutine of ix86_expand_builtin to take care of ptest insns. */
23376 static rtx
23377 ix86_expand_sse_ptest (const struct builtin_description *d, tree exp,
23378 rtx target)
23380 rtx pat;
23381 tree arg0 = CALL_EXPR_ARG (exp, 0);
23382 tree arg1 = CALL_EXPR_ARG (exp, 1);
23383 rtx op0 = expand_normal (arg0);
23384 rtx op1 = expand_normal (arg1);
23385 enum machine_mode mode0 = insn_data[d->icode].operand[0].mode;
23386 enum machine_mode mode1 = insn_data[d->icode].operand[1].mode;
23387 enum rtx_code comparison = d->comparison;
23389 if (VECTOR_MODE_P (mode0))
23390 op0 = safe_vector_operand (op0, mode0);
23391 if (VECTOR_MODE_P (mode1))
23392 op1 = safe_vector_operand (op1, mode1);
23394 target = gen_reg_rtx (SImode);
23395 emit_move_insn (target, const0_rtx);
23396 target = gen_rtx_SUBREG (QImode, target, 0);
23398 if ((optimize && !register_operand (op0, mode0))
23399 || !(*insn_data[d->icode].operand[0].predicate) (op0, mode0))
23400 op0 = copy_to_mode_reg (mode0, op0);
23401 if ((optimize && !register_operand (op1, mode1))
23402 || !(*insn_data[d->icode].operand[1].predicate) (op1, mode1))
23403 op1 = copy_to_mode_reg (mode1, op1);
23405 pat = GEN_FCN (d->icode) (op0, op1);
23406 if (! pat)
23407 return 0;
23408 emit_insn (pat);
23409 emit_insn (gen_rtx_SET (VOIDmode,
23410 gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23411 gen_rtx_fmt_ee (comparison, QImode,
23412 SET_DEST (pat),
23413 const0_rtx)));
23415 return SUBREG_REG (target);
23418 /* Subroutine of ix86_expand_builtin to take care of pcmpestr[im] insns. */
23420 static rtx
23421 ix86_expand_sse_pcmpestr (const struct builtin_description *d,
23422 tree exp, rtx target)
23424 rtx pat;
23425 tree arg0 = CALL_EXPR_ARG (exp, 0);
23426 tree arg1 = CALL_EXPR_ARG (exp, 1);
23427 tree arg2 = CALL_EXPR_ARG (exp, 2);
23428 tree arg3 = CALL_EXPR_ARG (exp, 3);
23429 tree arg4 = CALL_EXPR_ARG (exp, 4);
23430 rtx scratch0, scratch1;
23431 rtx op0 = expand_normal (arg0);
23432 rtx op1 = expand_normal (arg1);
23433 rtx op2 = expand_normal (arg2);
23434 rtx op3 = expand_normal (arg3);
23435 rtx op4 = expand_normal (arg4);
23436 enum machine_mode tmode0, tmode1, modev2, modei3, modev4, modei5, modeimm;
23438 tmode0 = insn_data[d->icode].operand[0].mode;
23439 tmode1 = insn_data[d->icode].operand[1].mode;
23440 modev2 = insn_data[d->icode].operand[2].mode;
23441 modei3 = insn_data[d->icode].operand[3].mode;
23442 modev4 = insn_data[d->icode].operand[4].mode;
23443 modei5 = insn_data[d->icode].operand[5].mode;
23444 modeimm = insn_data[d->icode].operand[6].mode;
23446 if (VECTOR_MODE_P (modev2))
23447 op0 = safe_vector_operand (op0, modev2);
23448 if (VECTOR_MODE_P (modev4))
23449 op2 = safe_vector_operand (op2, modev4);
23451 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23452 op0 = copy_to_mode_reg (modev2, op0);
23453 if (! (*insn_data[d->icode].operand[3].predicate) (op1, modei3))
23454 op1 = copy_to_mode_reg (modei3, op1);
23455 if ((optimize && !register_operand (op2, modev4))
23456 || !(*insn_data[d->icode].operand[4].predicate) (op2, modev4))
23457 op2 = copy_to_mode_reg (modev4, op2);
23458 if (! (*insn_data[d->icode].operand[5].predicate) (op3, modei5))
23459 op3 = copy_to_mode_reg (modei5, op3);
23461 if (! (*insn_data[d->icode].operand[6].predicate) (op4, modeimm))
23463 error ("the fifth argument must be a 8-bit immediate");
23464 return const0_rtx;
23467 if (d->code == IX86_BUILTIN_PCMPESTRI128)
23469 if (optimize || !target
23470 || GET_MODE (target) != tmode0
23471 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23472 target = gen_reg_rtx (tmode0);
23474 scratch1 = gen_reg_rtx (tmode1);
23476 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2, op3, op4);
23478 else if (d->code == IX86_BUILTIN_PCMPESTRM128)
23480 if (optimize || !target
23481 || GET_MODE (target) != tmode1
23482 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23483 target = gen_reg_rtx (tmode1);
23485 scratch0 = gen_reg_rtx (tmode0);
23487 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2, op3, op4);
23489 else
23491 gcc_assert (d->flag);
23493 scratch0 = gen_reg_rtx (tmode0);
23494 scratch1 = gen_reg_rtx (tmode1);
23496 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2, op3, op4);
23499 if (! pat)
23500 return 0;
23502 emit_insn (pat);
23504 if (d->flag)
23506 target = gen_reg_rtx (SImode);
23507 emit_move_insn (target, const0_rtx);
23508 target = gen_rtx_SUBREG (QImode, target, 0);
23510 emit_insn
23511 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23512 gen_rtx_fmt_ee (EQ, QImode,
23513 gen_rtx_REG ((enum machine_mode) d->flag,
23514 FLAGS_REG),
23515 const0_rtx)));
23516 return SUBREG_REG (target);
23518 else
23519 return target;
23523 /* Subroutine of ix86_expand_builtin to take care of pcmpistr[im] insns. */
23525 static rtx
23526 ix86_expand_sse_pcmpistr (const struct builtin_description *d,
23527 tree exp, rtx target)
23529 rtx pat;
23530 tree arg0 = CALL_EXPR_ARG (exp, 0);
23531 tree arg1 = CALL_EXPR_ARG (exp, 1);
23532 tree arg2 = CALL_EXPR_ARG (exp, 2);
23533 rtx scratch0, scratch1;
23534 rtx op0 = expand_normal (arg0);
23535 rtx op1 = expand_normal (arg1);
23536 rtx op2 = expand_normal (arg2);
23537 enum machine_mode tmode0, tmode1, modev2, modev3, modeimm;
23539 tmode0 = insn_data[d->icode].operand[0].mode;
23540 tmode1 = insn_data[d->icode].operand[1].mode;
23541 modev2 = insn_data[d->icode].operand[2].mode;
23542 modev3 = insn_data[d->icode].operand[3].mode;
23543 modeimm = insn_data[d->icode].operand[4].mode;
23545 if (VECTOR_MODE_P (modev2))
23546 op0 = safe_vector_operand (op0, modev2);
23547 if (VECTOR_MODE_P (modev3))
23548 op1 = safe_vector_operand (op1, modev3);
23550 if (! (*insn_data[d->icode].operand[2].predicate) (op0, modev2))
23551 op0 = copy_to_mode_reg (modev2, op0);
23552 if ((optimize && !register_operand (op1, modev3))
23553 || !(*insn_data[d->icode].operand[3].predicate) (op1, modev3))
23554 op1 = copy_to_mode_reg (modev3, op1);
23556 if (! (*insn_data[d->icode].operand[4].predicate) (op2, modeimm))
23558 error ("the third argument must be a 8-bit immediate");
23559 return const0_rtx;
23562 if (d->code == IX86_BUILTIN_PCMPISTRI128)
23564 if (optimize || !target
23565 || GET_MODE (target) != tmode0
23566 || ! (*insn_data[d->icode].operand[0].predicate) (target, tmode0))
23567 target = gen_reg_rtx (tmode0);
23569 scratch1 = gen_reg_rtx (tmode1);
23571 pat = GEN_FCN (d->icode) (target, scratch1, op0, op1, op2);
23573 else if (d->code == IX86_BUILTIN_PCMPISTRM128)
23575 if (optimize || !target
23576 || GET_MODE (target) != tmode1
23577 || ! (*insn_data[d->icode].operand[1].predicate) (target, tmode1))
23578 target = gen_reg_rtx (tmode1);
23580 scratch0 = gen_reg_rtx (tmode0);
23582 pat = GEN_FCN (d->icode) (scratch0, target, op0, op1, op2);
23584 else
23586 gcc_assert (d->flag);
23588 scratch0 = gen_reg_rtx (tmode0);
23589 scratch1 = gen_reg_rtx (tmode1);
23591 pat = GEN_FCN (d->icode) (scratch0, scratch1, op0, op1, op2);
23594 if (! pat)
23595 return 0;
23597 emit_insn (pat);
23599 if (d->flag)
23601 target = gen_reg_rtx (SImode);
23602 emit_move_insn (target, const0_rtx);
23603 target = gen_rtx_SUBREG (QImode, target, 0);
23605 emit_insn
23606 (gen_rtx_SET (VOIDmode, gen_rtx_STRICT_LOW_PART (VOIDmode, target),
23607 gen_rtx_fmt_ee (EQ, QImode,
23608 gen_rtx_REG ((enum machine_mode) d->flag,
23609 FLAGS_REG),
23610 const0_rtx)));
23611 return SUBREG_REG (target);
23613 else
23614 return target;
23617 /* Subroutine of ix86_expand_builtin to take care of insns with
23618 variable number of operands. */
23620 static rtx
23621 ix86_expand_args_builtin (const struct builtin_description *d,
23622 tree exp, rtx target)
23624 rtx pat, real_target;
23625 unsigned int i, nargs;
23626 unsigned int nargs_constant = 0;
23627 int num_memory = 0;
23628 struct
23630 rtx op;
23631 enum machine_mode mode;
23632 } args[4];
23633 bool last_arg_count = false;
23634 enum insn_code icode = d->icode;
23635 const struct insn_data *insn_p = &insn_data[icode];
23636 enum machine_mode tmode = insn_p->operand[0].mode;
23637 enum machine_mode rmode = VOIDmode;
23638 bool swap = false;
23639 enum rtx_code comparison = d->comparison;
23641 switch ((enum ix86_builtin_type) d->flag)
23643 case INT_FTYPE_V8SF_V8SF_PTEST:
23644 case INT_FTYPE_V4DI_V4DI_PTEST:
23645 case INT_FTYPE_V4DF_V4DF_PTEST:
23646 case INT_FTYPE_V4SF_V4SF_PTEST:
23647 case INT_FTYPE_V2DI_V2DI_PTEST:
23648 case INT_FTYPE_V2DF_V2DF_PTEST:
23649 return ix86_expand_sse_ptest (d, exp, target);
23650 case FLOAT128_FTYPE_FLOAT128:
23651 case FLOAT_FTYPE_FLOAT:
23652 case INT_FTYPE_INT:
23653 case UINT64_FTYPE_INT:
23654 case INT64_FTYPE_INT64:
23655 case INT64_FTYPE_V4SF:
23656 case INT64_FTYPE_V2DF:
23657 case INT_FTYPE_V16QI:
23658 case INT_FTYPE_V8QI:
23659 case INT_FTYPE_V8SF:
23660 case INT_FTYPE_V4DF:
23661 case INT_FTYPE_V4SF:
23662 case INT_FTYPE_V2DF:
23663 case V16QI_FTYPE_V16QI:
23664 case V8SI_FTYPE_V8SF:
23665 case V8SI_FTYPE_V4SI:
23666 case V8HI_FTYPE_V8HI:
23667 case V8HI_FTYPE_V16QI:
23668 case V8QI_FTYPE_V8QI:
23669 case V8SF_FTYPE_V8SF:
23670 case V8SF_FTYPE_V8SI:
23671 case V8SF_FTYPE_V4SF:
23672 case V4SI_FTYPE_V4SI:
23673 case V4SI_FTYPE_V16QI:
23674 case V4SI_FTYPE_V4SF:
23675 case V4SI_FTYPE_V8SI:
23676 case V4SI_FTYPE_V8HI:
23677 case V4SI_FTYPE_V4DF:
23678 case V4SI_FTYPE_V2DF:
23679 case V4HI_FTYPE_V4HI:
23680 case V4DF_FTYPE_V4DF:
23681 case V4DF_FTYPE_V4SI:
23682 case V4DF_FTYPE_V4SF:
23683 case V4DF_FTYPE_V2DF:
23684 case V4SF_FTYPE_V4SF:
23685 case V4SF_FTYPE_V4SI:
23686 case V4SF_FTYPE_V8SF:
23687 case V4SF_FTYPE_V4DF:
23688 case V4SF_FTYPE_V2DF:
23689 case V2DI_FTYPE_V2DI:
23690 case V2DI_FTYPE_V16QI:
23691 case V2DI_FTYPE_V8HI:
23692 case V2DI_FTYPE_V4SI:
23693 case V2DF_FTYPE_V2DF:
23694 case V2DF_FTYPE_V4SI:
23695 case V2DF_FTYPE_V4DF:
23696 case V2DF_FTYPE_V4SF:
23697 case V2DF_FTYPE_V2SI:
23698 case V2SI_FTYPE_V2SI:
23699 case V2SI_FTYPE_V4SF:
23700 case V2SI_FTYPE_V2SF:
23701 case V2SI_FTYPE_V2DF:
23702 case V2SF_FTYPE_V2SF:
23703 case V2SF_FTYPE_V2SI:
23704 nargs = 1;
23705 break;
23706 case V4SF_FTYPE_V4SF_VEC_MERGE:
23707 case V2DF_FTYPE_V2DF_VEC_MERGE:
23708 return ix86_expand_unop_vec_merge_builtin (icode, exp, target);
23709 case FLOAT128_FTYPE_FLOAT128_FLOAT128:
23710 case V16QI_FTYPE_V16QI_V16QI:
23711 case V16QI_FTYPE_V8HI_V8HI:
23712 case V8QI_FTYPE_V8QI_V8QI:
23713 case V8QI_FTYPE_V4HI_V4HI:
23714 case V8HI_FTYPE_V8HI_V8HI:
23715 case V8HI_FTYPE_V16QI_V16QI:
23716 case V8HI_FTYPE_V4SI_V4SI:
23717 case V8SF_FTYPE_V8SF_V8SF:
23718 case V8SF_FTYPE_V8SF_V8SI:
23719 case V4SI_FTYPE_V4SI_V4SI:
23720 case V4SI_FTYPE_V8HI_V8HI:
23721 case V4SI_FTYPE_V4SF_V4SF:
23722 case V4SI_FTYPE_V2DF_V2DF:
23723 case V4HI_FTYPE_V4HI_V4HI:
23724 case V4HI_FTYPE_V8QI_V8QI:
23725 case V4HI_FTYPE_V2SI_V2SI:
23726 case V4DF_FTYPE_V4DF_V4DF:
23727 case V4DF_FTYPE_V4DF_V4DI:
23728 case V4SF_FTYPE_V4SF_V4SF:
23729 case V4SF_FTYPE_V4SF_V4SI:
23730 case V4SF_FTYPE_V4SF_V2SI:
23731 case V4SF_FTYPE_V4SF_V2DF:
23732 case V4SF_FTYPE_V4SF_DI:
23733 case V4SF_FTYPE_V4SF_SI:
23734 case V2DI_FTYPE_V2DI_V2DI:
23735 case V2DI_FTYPE_V16QI_V16QI:
23736 case V2DI_FTYPE_V4SI_V4SI:
23737 case V2DI_FTYPE_V2DI_V16QI:
23738 case V2DI_FTYPE_V2DF_V2DF:
23739 case V2SI_FTYPE_V2SI_V2SI:
23740 case V2SI_FTYPE_V4HI_V4HI:
23741 case V2SI_FTYPE_V2SF_V2SF:
23742 case V2DF_FTYPE_V2DF_V2DF:
23743 case V2DF_FTYPE_V2DF_V4SF:
23744 case V2DF_FTYPE_V2DF_V2DI:
23745 case V2DF_FTYPE_V2DF_DI:
23746 case V2DF_FTYPE_V2DF_SI:
23747 case V2SF_FTYPE_V2SF_V2SF:
23748 case V1DI_FTYPE_V1DI_V1DI:
23749 case V1DI_FTYPE_V8QI_V8QI:
23750 case V1DI_FTYPE_V2SI_V2SI:
23751 if (comparison == UNKNOWN)
23752 return ix86_expand_binop_builtin (icode, exp, target);
23753 nargs = 2;
23754 break;
23755 case V4SF_FTYPE_V4SF_V4SF_SWAP:
23756 case V2DF_FTYPE_V2DF_V2DF_SWAP:
23757 gcc_assert (comparison != UNKNOWN);
23758 nargs = 2;
23759 swap = true;
23760 break;
23761 case V8HI_FTYPE_V8HI_V8HI_COUNT:
23762 case V8HI_FTYPE_V8HI_SI_COUNT:
23763 case V4SI_FTYPE_V4SI_V4SI_COUNT:
23764 case V4SI_FTYPE_V4SI_SI_COUNT:
23765 case V4HI_FTYPE_V4HI_V4HI_COUNT:
23766 case V4HI_FTYPE_V4HI_SI_COUNT:
23767 case V2DI_FTYPE_V2DI_V2DI_COUNT:
23768 case V2DI_FTYPE_V2DI_SI_COUNT:
23769 case V2SI_FTYPE_V2SI_V2SI_COUNT:
23770 case V2SI_FTYPE_V2SI_SI_COUNT:
23771 case V1DI_FTYPE_V1DI_V1DI_COUNT:
23772 case V1DI_FTYPE_V1DI_SI_COUNT:
23773 nargs = 2;
23774 last_arg_count = true;
23775 break;
23776 case UINT64_FTYPE_UINT64_UINT64:
23777 case UINT_FTYPE_UINT_UINT:
23778 case UINT_FTYPE_UINT_USHORT:
23779 case UINT_FTYPE_UINT_UCHAR:
23780 case UINT16_FTYPE_UINT16_INT:
23781 case UINT8_FTYPE_UINT8_INT:
23782 nargs = 2;
23783 break;
23784 case V2DI2TI_FTYPE_V2DI_INT:
23785 nargs = 2;
23786 rmode = V2DImode;
23787 nargs_constant = 1;
23788 break;
23789 case V8HI_FTYPE_V8HI_INT:
23790 case V8SF_FTYPE_V8SF_INT:
23791 case V4SI_FTYPE_V4SI_INT:
23792 case V4SI_FTYPE_V8SI_INT:
23793 case V4HI_FTYPE_V4HI_INT:
23794 case V4DF_FTYPE_V4DF_INT:
23795 case V4SF_FTYPE_V4SF_INT:
23796 case V4SF_FTYPE_V8SF_INT:
23797 case V2DI_FTYPE_V2DI_INT:
23798 case V2DF_FTYPE_V2DF_INT:
23799 case V2DF_FTYPE_V4DF_INT:
23800 nargs = 2;
23801 nargs_constant = 1;
23802 break;
23803 case V16QI_FTYPE_V16QI_V16QI_V16QI:
23804 case V8SF_FTYPE_V8SF_V8SF_V8SF:
23805 case V4DF_FTYPE_V4DF_V4DF_V4DF:
23806 case V4SF_FTYPE_V4SF_V4SF_V4SF:
23807 case V2DF_FTYPE_V2DF_V2DF_V2DF:
23808 nargs = 3;
23809 break;
23810 case V16QI_FTYPE_V16QI_V16QI_INT:
23811 case V8HI_FTYPE_V8HI_V8HI_INT:
23812 case V8SI_FTYPE_V8SI_V8SI_INT:
23813 case V8SI_FTYPE_V8SI_V4SI_INT:
23814 case V8SF_FTYPE_V8SF_V8SF_INT:
23815 case V8SF_FTYPE_V8SF_V4SF_INT:
23816 case V4SI_FTYPE_V4SI_V4SI_INT:
23817 case V4DF_FTYPE_V4DF_V4DF_INT:
23818 case V4DF_FTYPE_V4DF_V2DF_INT:
23819 case V4SF_FTYPE_V4SF_V4SF_INT:
23820 case V2DI_FTYPE_V2DI_V2DI_INT:
23821 case V2DF_FTYPE_V2DF_V2DF_INT:
23822 nargs = 3;
23823 nargs_constant = 1;
23824 break;
23825 case V2DI2TI_FTYPE_V2DI_V2DI_INT:
23826 nargs = 3;
23827 rmode = V2DImode;
23828 nargs_constant = 1;
23829 break;
23830 case V1DI2DI_FTYPE_V1DI_V1DI_INT:
23831 nargs = 3;
23832 rmode = DImode;
23833 nargs_constant = 1;
23834 break;
23835 case V2DI_FTYPE_V2DI_UINT_UINT:
23836 nargs = 3;
23837 nargs_constant = 2;
23838 break;
23839 case V2DI_FTYPE_V2DI_V2DI_UINT_UINT:
23840 nargs = 4;
23841 nargs_constant = 2;
23842 break;
23843 default:
23844 gcc_unreachable ();
23847 gcc_assert (nargs <= ARRAY_SIZE (args));
23849 if (comparison != UNKNOWN)
23851 gcc_assert (nargs == 2);
23852 return ix86_expand_sse_compare (d, exp, target, swap);
23855 if (rmode == VOIDmode || rmode == tmode)
23857 if (optimize
23858 || target == 0
23859 || GET_MODE (target) != tmode
23860 || ! (*insn_p->operand[0].predicate) (target, tmode))
23861 target = gen_reg_rtx (tmode);
23862 real_target = target;
23864 else
23866 target = gen_reg_rtx (rmode);
23867 real_target = simplify_gen_subreg (tmode, target, rmode, 0);
23870 for (i = 0; i < nargs; i++)
23872 tree arg = CALL_EXPR_ARG (exp, i);
23873 rtx op = expand_normal (arg);
23874 enum machine_mode mode = insn_p->operand[i + 1].mode;
23875 bool match = (*insn_p->operand[i + 1].predicate) (op, mode);
23877 if (last_arg_count && (i + 1) == nargs)
23879 	  /* SIMD shift insns take either an 8-bit immediate or a
23880 	     register as the count, but the builtin functions take an int.
23881 	     If the count doesn't match, we put it in a register.  */
23882 if (!match)
23884 op = simplify_gen_subreg (SImode, op, GET_MODE (op), 0);
23885 if (!(*insn_p->operand[i + 1].predicate) (op, mode))
23886 op = copy_to_reg (op);
23889 else if ((nargs - i) <= nargs_constant)
23891 if (!match)
23892 switch (icode)
23894 case CODE_FOR_sse4_1_roundpd:
23895 case CODE_FOR_sse4_1_roundps:
23896 case CODE_FOR_sse4_1_roundsd:
23897 case CODE_FOR_sse4_1_roundss:
23898 case CODE_FOR_sse4_1_blendps:
23899 case CODE_FOR_avx_blendpd256:
23900 case CODE_FOR_avx_vpermilv4df:
23901 case CODE_FOR_avx_roundpd256:
23902 case CODE_FOR_avx_roundps256:
23903 error ("the last argument must be a 4-bit immediate");
23904 return const0_rtx;
23906 case CODE_FOR_sse4_1_blendpd:
23907 case CODE_FOR_avx_vpermilv2df:
23908 error ("the last argument must be a 2-bit immediate");
23909 return const0_rtx;
23911 case CODE_FOR_avx_vextractf128v4df:
23912 case CODE_FOR_avx_vextractf128v8sf:
23913 case CODE_FOR_avx_vextractf128v8si:
23914 case CODE_FOR_avx_vinsertf128v4df:
23915 case CODE_FOR_avx_vinsertf128v8sf:
23916 case CODE_FOR_avx_vinsertf128v8si:
23917 error ("the last argument must be a 1-bit immediate");
23918 return const0_rtx;
23920 case CODE_FOR_avx_cmpsdv2df3:
23921 case CODE_FOR_avx_cmpssv4sf3:
23922 case CODE_FOR_avx_cmppdv2df3:
23923 case CODE_FOR_avx_cmppsv4sf3:
23924 case CODE_FOR_avx_cmppdv4df3:
23925 case CODE_FOR_avx_cmppsv8sf3:
23926 error ("the last argument must be a 5-bit immediate");
23927 return const0_rtx;
23929 default:
23930 switch (nargs_constant)
23932 case 2:
23933 if ((nargs - i) == nargs_constant)
23935 error ("the next to last argument must be an 8-bit immediate");
23936 break;
23938 case 1:
23939 error ("the last argument must be an 8-bit immediate");
23940 break;
23941 default:
23942 gcc_unreachable ();
23944 return const0_rtx;
23947 else
23949 if (VECTOR_MODE_P (mode))
23950 op = safe_vector_operand (op, mode);
23952 /* If we aren't optimizing, only allow one memory operand to
23953 be generated. */
23954 if (memory_operand (op, mode))
23955 num_memory++;
23957 if (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
23959 if (optimize || !match || num_memory > 1)
23960 op = copy_to_mode_reg (mode, op);
23962 else
23964 op = copy_to_reg (op);
23965 op = simplify_gen_subreg (mode, op, GET_MODE (op), 0);
23969 args[i].op = op;
23970 args[i].mode = mode;
23973 switch (nargs)
23975 case 1:
23976 pat = GEN_FCN (icode) (real_target, args[0].op);
23977 break;
23978 case 2:
23979 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op);
23980 break;
23981 case 3:
23982 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23983 args[2].op);
23984 break;
23985 case 4:
23986 pat = GEN_FCN (icode) (real_target, args[0].op, args[1].op,
23987 args[2].op, args[3].op);
23988 break;
23989 default:
23990 gcc_unreachable ();
23993 if (! pat)
23994 return 0;
23996 emit_insn (pat);
23997 return target;
24000 /* Subroutine of ix86_expand_builtin to take care of special insns
24001 with variable number of operands. */
24003 static rtx
24004 ix86_expand_special_args_builtin (const struct builtin_description *d,
24005 tree exp, rtx target)
24007 tree arg;
24008 rtx pat, op;
24009 unsigned int i, nargs, arg_adjust, memory;
24010 struct
24012 rtx op;
24013 enum machine_mode mode;
24014 } args[2];
24015 enum insn_code icode = d->icode;
24016 bool last_arg_constant = false;
24017 const struct insn_data *insn_p = &insn_data[icode];
24018 enum machine_mode tmode = insn_p->operand[0].mode;
24019 enum { load, store } klass;
24021 switch ((enum ix86_special_builtin_type) d->flag)
24023 case VOID_FTYPE_VOID:
24024 emit_insn (GEN_FCN (icode) (target));
24025 return 0;
24026 case UINT64_FTYPE_VOID:
24027 nargs = 0;
24028 klass = load;
24029 memory = 0;
24030 break;
24031 case UINT64_FTYPE_PUNSIGNED:
24032 case V2DI_FTYPE_PV2DI:
24033 case V32QI_FTYPE_PCCHAR:
24034 case V16QI_FTYPE_PCCHAR:
24035 case V8SF_FTYPE_PCV4SF:
24036 case V8SF_FTYPE_PCFLOAT:
24037 case V4SF_FTYPE_PCFLOAT:
24038 case V4DF_FTYPE_PCV2DF:
24039 case V4DF_FTYPE_PCDOUBLE:
24040 case V2DF_FTYPE_PCDOUBLE:
24041 nargs = 1;
24042 klass = load;
24043 memory = 0;
24044 break;
24045 case VOID_FTYPE_PV2SF_V4SF:
24046 case VOID_FTYPE_PV4DI_V4DI:
24047 case VOID_FTYPE_PV2DI_V2DI:
24048 case VOID_FTYPE_PCHAR_V32QI:
24049 case VOID_FTYPE_PCHAR_V16QI:
24050 case VOID_FTYPE_PFLOAT_V8SF:
24051 case VOID_FTYPE_PFLOAT_V4SF:
24052 case VOID_FTYPE_PDOUBLE_V4DF:
24053 case VOID_FTYPE_PDOUBLE_V2DF:
24054 case VOID_FTYPE_PDI_DI:
24055 case VOID_FTYPE_PINT_INT:
24056 nargs = 1;
24057 klass = store;
24058 /* Reserve memory operand for target. */
24059 memory = ARRAY_SIZE (args);
24060 break;
24061 case V4SF_FTYPE_V4SF_PCV2SF:
24062 case V2DF_FTYPE_V2DF_PCDOUBLE:
24063 nargs = 2;
24064 klass = load;
24065 memory = 1;
24066 break;
24067 case V8SF_FTYPE_PCV8SF_V8SF:
24068 case V4DF_FTYPE_PCV4DF_V4DF:
24069 case V4SF_FTYPE_PCV4SF_V4SF:
24070 case V2DF_FTYPE_PCV2DF_V2DF:
24071 nargs = 2;
24072 klass = load;
24073 memory = 0;
24074 break;
24075 case VOID_FTYPE_PV8SF_V8SF_V8SF:
24076 case VOID_FTYPE_PV4DF_V4DF_V4DF:
24077 case VOID_FTYPE_PV4SF_V4SF_V4SF:
24078 case VOID_FTYPE_PV2DF_V2DF_V2DF:
24079 nargs = 2;
24080 klass = store;
24081 /* Reserve memory operand for target. */
24082 memory = ARRAY_SIZE (args);
24083 break;
24084 default:
24085 gcc_unreachable ();
24088 gcc_assert (nargs <= ARRAY_SIZE (args));
24090 if (klass == store)
24092 arg = CALL_EXPR_ARG (exp, 0);
24093 op = expand_normal (arg);
24094 gcc_assert (target == 0);
24095 target = gen_rtx_MEM (tmode, copy_to_mode_reg (Pmode, op));
24096 arg_adjust = 1;
24098 else
24100 arg_adjust = 0;
24101 if (optimize
24102 || target == 0
24103 || GET_MODE (target) != tmode
24104 || ! (*insn_p->operand[0].predicate) (target, tmode))
24105 target = gen_reg_rtx (tmode);
24108 for (i = 0; i < nargs; i++)
24110 enum machine_mode mode = insn_p->operand[i + 1].mode;
24111 bool match;
24113 arg = CALL_EXPR_ARG (exp, i + arg_adjust);
24114 op = expand_normal (arg);
24115 match = (*insn_p->operand[i + 1].predicate) (op, mode);
24117 if (last_arg_constant && (i + 1) == nargs)
24119 if (!match)
24120 switch (icode)
24122 default:
24123 error ("the last argument must be an 8-bit immediate");
24124 return const0_rtx;
24127 else
24129 if (i == memory)
24131 /* This must be the memory operand. */
24132 op = gen_rtx_MEM (mode, copy_to_mode_reg (Pmode, op));
24133 gcc_assert (GET_MODE (op) == mode
24134 || GET_MODE (op) == VOIDmode);
24136 else
24138 	      /* This must be a register.  */
24139 if (VECTOR_MODE_P (mode))
24140 op = safe_vector_operand (op, mode);
24142 gcc_assert (GET_MODE (op) == mode
24143 || GET_MODE (op) == VOIDmode);
24144 op = copy_to_mode_reg (mode, op);
24148 args[i].op = op;
24149 args[i].mode = mode;
24152 switch (nargs)
24154 case 0:
24155 pat = GEN_FCN (icode) (target);
24156 break;
24157 case 1:
24158 pat = GEN_FCN (icode) (target, args[0].op);
24159 break;
24160 case 2:
24161 pat = GEN_FCN (icode) (target, args[0].op, args[1].op);
24162 break;
24163 default:
24164 gcc_unreachable ();
24167 if (! pat)
24168 return 0;
24169 emit_insn (pat);
24170 return klass == store ? 0 : target;
24173 /* Return the integer constant in ARG. Constrain it to be in the range
24174 of the subparts of VEC_TYPE; issue an error if not. */
24176 static int
24177 get_element_number (tree vec_type, tree arg)
24179 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
24181 if (!host_integerp (arg, 1)
24182 || (elt = tree_low_cst (arg, 1), elt > max))
24184 error ("selector must be an integer constant in the range 0..%wi", max);
24185 return 0;
24188 return elt;
24191 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24192 ix86_expand_vector_init. We DO have language-level syntax for this, in
24193 the form of (type){ init-list }. Except that since we can't place emms
24194 instructions from inside the compiler, we can't allow the use of MMX
24195 registers unless the user explicitly asks for it. So we do *not* define
24196 vec_set/vec_extract/vec_init patterns for MMX modes in mmx.md. Instead
24197    we have builtins invoked by mmintrin.h that give us license to emit
24198 these sorts of instructions. */
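/* For illustration (assumed user-level view, not compiler code): generic
   vector types can use the initializer syntax directly,

     __v4si a = (__v4si) { 1, 2, 3, 4 };

   whereas MMX-mode values are built via the builtins above, e.g. through the
   mmintrin.h wrappers around __builtin_ia32_vec_init_v2si, so that the
   compiler only emits MMX register code when explicitly asked to.  */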
24200 static rtx
24201 ix86_expand_vec_init_builtin (tree type, tree exp, rtx target)
24203 enum machine_mode tmode = TYPE_MODE (type);
24204 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
24205 int i, n_elt = GET_MODE_NUNITS (tmode);
24206 rtvec v = rtvec_alloc (n_elt);
24208 gcc_assert (VECTOR_MODE_P (tmode));
24209 gcc_assert (call_expr_nargs (exp) == n_elt);
24211 for (i = 0; i < n_elt; ++i)
24213 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
24214 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
24217 if (!target || !register_operand (target, tmode))
24218 target = gen_reg_rtx (tmode);
24220 ix86_expand_vector_init (true, target, gen_rtx_PARALLEL (tmode, v));
24221 return target;
24224 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24225 ix86_expand_vector_extract. They would be redundant (for non-MMX) if we
24226 had a language-level syntax for referencing vector elements. */
24228 static rtx
24229 ix86_expand_vec_ext_builtin (tree exp, rtx target)
24231 enum machine_mode tmode, mode0;
24232 tree arg0, arg1;
24233 int elt;
24234 rtx op0;
24236 arg0 = CALL_EXPR_ARG (exp, 0);
24237 arg1 = CALL_EXPR_ARG (exp, 1);
24239 op0 = expand_normal (arg0);
24240 elt = get_element_number (TREE_TYPE (arg0), arg1);
24242 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24243 mode0 = TYPE_MODE (TREE_TYPE (arg0));
24244 gcc_assert (VECTOR_MODE_P (mode0));
24246 op0 = force_reg (mode0, op0);
24248 if (optimize || !target || !register_operand (target, tmode))
24249 target = gen_reg_rtx (tmode);
24251 ix86_expand_vector_extract (true, target, op0, elt);
24253 return target;
24256 /* A subroutine of ix86_expand_builtin. These builtins are a wrapper around
24257 ix86_expand_vector_set. They would be redundant (for non-MMX) if we had
24258 a language-level syntax for referencing vector elements. */
24260 static rtx
24261 ix86_expand_vec_set_builtin (tree exp)
24263 enum machine_mode tmode, mode1;
24264 tree arg0, arg1, arg2;
24265 int elt;
24266 rtx op0, op1, target;
24268 arg0 = CALL_EXPR_ARG (exp, 0);
24269 arg1 = CALL_EXPR_ARG (exp, 1);
24270 arg2 = CALL_EXPR_ARG (exp, 2);
24272 tmode = TYPE_MODE (TREE_TYPE (arg0));
24273 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
24274 gcc_assert (VECTOR_MODE_P (tmode));
24276 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
24277 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
24278 elt = get_element_number (TREE_TYPE (arg0), arg2);
24280 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
24281 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
24283 op0 = force_reg (tmode, op0);
24284 op1 = force_reg (mode1, op1);
24286   /* OP0 is the source of these builtin functions and shouldn't be
24287      modified.  Create a copy, use it, and return it as the target.  */
24288 target = gen_reg_rtx (tmode);
24289 emit_move_insn (target, op0);
24290 ix86_expand_vector_set (true, target, op1, elt);
24292 return target;
24295 /* Expand an expression EXP that calls a built-in function,
24296 with result going to TARGET if that's convenient
24297 (and in mode MODE if that's convenient).
24298 SUBTARGET may be used as the target for computing one of EXP's operands.
24299 IGNORE is nonzero if the value is to be ignored. */
24301 static rtx
24302 ix86_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
24303 enum machine_mode mode ATTRIBUTE_UNUSED,
24304 int ignore ATTRIBUTE_UNUSED)
24306 const struct builtin_description *d;
24307 size_t i;
24308 enum insn_code icode;
24309 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
24310 tree arg0, arg1, arg2;
24311 rtx op0, op1, op2, pat;
24312 enum machine_mode mode0, mode1, mode2;
24313 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
24315   /* Determine whether the builtin function is available under the current ISA.
24316      Originally the builtin was not created if it wasn't applicable to the
24317      current ISA based on the command-line switches.  With function-specific
24318      options, we need to check in the context of the function making the call
24319      whether it is supported.  */
24320 if (ix86_builtins_isa[fcode].isa
24321 && !(ix86_builtins_isa[fcode].isa & ix86_isa_flags))
24323 char *opts = ix86_target_string (ix86_builtins_isa[fcode].isa, 0, NULL,
24324 NULL, NULL, false);
24326 if (!opts)
24327 error ("%qE needs unknown isa option", fndecl);
24328 else
24330 gcc_assert (opts != NULL);
24331 error ("%qE needs isa option %s", fndecl, opts);
24332 free (opts);
24334 return const0_rtx;
24337 switch (fcode)
24339 case IX86_BUILTIN_MASKMOVQ:
24340 case IX86_BUILTIN_MASKMOVDQU:
24341 icode = (fcode == IX86_BUILTIN_MASKMOVQ
24342 ? CODE_FOR_mmx_maskmovq
24343 : CODE_FOR_sse2_maskmovdqu);
24344 /* Note the arg order is different from the operand order. */
24345 arg1 = CALL_EXPR_ARG (exp, 0);
24346 arg2 = CALL_EXPR_ARG (exp, 1);
24347 arg0 = CALL_EXPR_ARG (exp, 2);
24348 op0 = expand_normal (arg0);
24349 op1 = expand_normal (arg1);
24350 op2 = expand_normal (arg2);
24351 mode0 = insn_data[icode].operand[0].mode;
24352 mode1 = insn_data[icode].operand[1].mode;
24353 mode2 = insn_data[icode].operand[2].mode;
24355 op0 = force_reg (Pmode, op0);
24356 op0 = gen_rtx_MEM (mode1, op0);
24358 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
24359 op0 = copy_to_mode_reg (mode0, op0);
24360 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
24361 op1 = copy_to_mode_reg (mode1, op1);
24362 if (! (*insn_data[icode].operand[2].predicate) (op2, mode2))
24363 op2 = copy_to_mode_reg (mode2, op2);
24364 pat = GEN_FCN (icode) (op0, op1, op2);
24365 if (! pat)
24366 return 0;
24367 emit_insn (pat);
24368 return 0;
24370 case IX86_BUILTIN_LDMXCSR:
24371 op0 = expand_normal (CALL_EXPR_ARG (exp, 0));
24372 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24373 emit_move_insn (target, op0);
24374 emit_insn (gen_sse_ldmxcsr (target));
24375 return 0;
24377 case IX86_BUILTIN_STMXCSR:
24378 target = assign_386_stack_local (SImode, SLOT_VIRTUAL);
24379 emit_insn (gen_sse_stmxcsr (target));
24380 return copy_to_mode_reg (SImode, target);
24382 case IX86_BUILTIN_CLFLUSH:
24383 arg0 = CALL_EXPR_ARG (exp, 0);
24384 op0 = expand_normal (arg0);
24385 icode = CODE_FOR_sse2_clflush;
24386 if (! (*insn_data[icode].operand[0].predicate) (op0, Pmode))
24387 op0 = copy_to_mode_reg (Pmode, op0);
24389 emit_insn (gen_sse2_clflush (op0));
24390 return 0;
24392 case IX86_BUILTIN_MONITOR:
24393 arg0 = CALL_EXPR_ARG (exp, 0);
24394 arg1 = CALL_EXPR_ARG (exp, 1);
24395 arg2 = CALL_EXPR_ARG (exp, 2);
24396 op0 = expand_normal (arg0);
24397 op1 = expand_normal (arg1);
24398 op2 = expand_normal (arg2);
24399 if (!REG_P (op0))
24400 op0 = copy_to_mode_reg (Pmode, op0);
24401 if (!REG_P (op1))
24402 op1 = copy_to_mode_reg (SImode, op1);
24403 if (!REG_P (op2))
24404 op2 = copy_to_mode_reg (SImode, op2);
24405 emit_insn ((*ix86_gen_monitor) (op0, op1, op2));
24406 return 0;
24408 case IX86_BUILTIN_MWAIT:
24409 arg0 = CALL_EXPR_ARG (exp, 0);
24410 arg1 = CALL_EXPR_ARG (exp, 1);
24411 op0 = expand_normal (arg0);
24412 op1 = expand_normal (arg1);
24413 if (!REG_P (op0))
24414 op0 = copy_to_mode_reg (SImode, op0);
24415 if (!REG_P (op1))
24416 op1 = copy_to_mode_reg (SImode, op1);
24417 emit_insn (gen_sse3_mwait (op0, op1));
24418 return 0;
24420 case IX86_BUILTIN_VEC_INIT_V2SI:
24421 case IX86_BUILTIN_VEC_INIT_V4HI:
24422 case IX86_BUILTIN_VEC_INIT_V8QI:
24423 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
24425 case IX86_BUILTIN_VEC_EXT_V2DF:
24426 case IX86_BUILTIN_VEC_EXT_V2DI:
24427 case IX86_BUILTIN_VEC_EXT_V4SF:
24428 case IX86_BUILTIN_VEC_EXT_V4SI:
24429 case IX86_BUILTIN_VEC_EXT_V8HI:
24430 case IX86_BUILTIN_VEC_EXT_V2SI:
24431 case IX86_BUILTIN_VEC_EXT_V4HI:
24432 case IX86_BUILTIN_VEC_EXT_V16QI:
24433 return ix86_expand_vec_ext_builtin (exp, target);
24435 case IX86_BUILTIN_VEC_SET_V2DI:
24436 case IX86_BUILTIN_VEC_SET_V4SF:
24437 case IX86_BUILTIN_VEC_SET_V4SI:
24438 case IX86_BUILTIN_VEC_SET_V8HI:
24439 case IX86_BUILTIN_VEC_SET_V4HI:
24440 case IX86_BUILTIN_VEC_SET_V16QI:
24441 return ix86_expand_vec_set_builtin (exp);
24443 case IX86_BUILTIN_INFQ:
24444 case IX86_BUILTIN_HUGE_VALQ:
24446 REAL_VALUE_TYPE inf;
24447 rtx tmp;
24449 real_inf (&inf);
24450 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, mode);
24452 tmp = validize_mem (force_const_mem (mode, tmp));
24454 if (target == 0)
24455 target = gen_reg_rtx (mode);
24457 emit_move_insn (target, tmp);
24458 return target;
24461 default:
24462 break;
24465 for (i = 0, d = bdesc_special_args;
24466 i < ARRAY_SIZE (bdesc_special_args);
24467 i++, d++)
24468 if (d->code == fcode)
24469 return ix86_expand_special_args_builtin (d, exp, target);
24471 for (i = 0, d = bdesc_args;
24472 i < ARRAY_SIZE (bdesc_args);
24473 i++, d++)
24474 if (d->code == fcode)
24475 switch (fcode)
24477 case IX86_BUILTIN_FABSQ:
24478 case IX86_BUILTIN_COPYSIGNQ:
24479 if (!TARGET_SSE2)
24480 /* Emit a normal call if SSE2 isn't available. */
24481 return expand_call (exp, target, ignore);
24482 default:
24483 return ix86_expand_args_builtin (d, exp, target);
24486 for (i = 0, d = bdesc_comi; i < ARRAY_SIZE (bdesc_comi); i++, d++)
24487 if (d->code == fcode)
24488 return ix86_expand_sse_comi (d, exp, target);
24490 for (i = 0, d = bdesc_pcmpestr;
24491 i < ARRAY_SIZE (bdesc_pcmpestr);
24492 i++, d++)
24493 if (d->code == fcode)
24494 return ix86_expand_sse_pcmpestr (d, exp, target);
24496 for (i = 0, d = bdesc_pcmpistr;
24497 i < ARRAY_SIZE (bdesc_pcmpistr);
24498 i++, d++)
24499 if (d->code == fcode)
24500 return ix86_expand_sse_pcmpistr (d, exp, target);
24502 gcc_unreachable ();
24505 /* Returns a function decl for a vectorized version of the builtin function
24506    with builtin function code FN and result/argument vector types TYPE_OUT
24507    and TYPE_IN, or NULL_TREE if it is not available.  */
24509 static tree
24510 ix86_builtin_vectorized_function (unsigned int fn, tree type_out,
24511 tree type_in)
24513 enum machine_mode in_mode, out_mode;
24514 int in_n, out_n;
24516 if (TREE_CODE (type_out) != VECTOR_TYPE
24517 || TREE_CODE (type_in) != VECTOR_TYPE)
24518 return NULL_TREE;
24520 out_mode = TYPE_MODE (TREE_TYPE (type_out));
24521 out_n = TYPE_VECTOR_SUBPARTS (type_out);
24522 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24523 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24525 switch (fn)
24527 case BUILT_IN_SQRT:
24528 if (out_mode == DFmode && out_n == 2
24529 && in_mode == DFmode && in_n == 2)
24530 return ix86_builtins[IX86_BUILTIN_SQRTPD];
24531 break;
24533 case BUILT_IN_SQRTF:
24534 if (out_mode == SFmode && out_n == 4
24535 && in_mode == SFmode && in_n == 4)
24536 return ix86_builtins[IX86_BUILTIN_SQRTPS_NR];
24537 break;
24539 case BUILT_IN_LRINT:
24540 if (out_mode == SImode && out_n == 4
24541 && in_mode == DFmode && in_n == 2)
24542 return ix86_builtins[IX86_BUILTIN_VEC_PACK_SFIX];
24543 break;
24545 case BUILT_IN_LRINTF:
24546 if (out_mode == SImode && out_n == 4
24547 && in_mode == SFmode && in_n == 4)
24548 return ix86_builtins[IX86_BUILTIN_CVTPS2DQ];
24549 break;
24551 case BUILT_IN_COPYSIGN:
24552 if (out_mode == DFmode && out_n == 2
24553 && in_mode == DFmode && in_n == 2)
24554 return ix86_builtins[IX86_BUILTIN_CPYSGNPD];
24555 break;
24557 case BUILT_IN_COPYSIGNF:
24558 if (out_mode == SFmode && out_n == 4
24559 && in_mode == SFmode && in_n == 4)
24560 return ix86_builtins[IX86_BUILTIN_CPYSGNPS];
24561 break;
24563 default:
24567 /* Dispatch to a handler for a vectorization library. */
24568 if (ix86_veclib_handler)
24569 return (*ix86_veclib_handler) ((enum built_in_function) fn, type_out,
24570 type_in);
24572 return NULL_TREE;
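/* For example (a sketch of the effect, not compiler code): with SSE2
   enabled, a loop computing sqrt (double) over an array can be vectorized
   by mapping BUILT_IN_SQRT to IX86_BUILTIN_SQRTPD above, i.e. to sqrtpd
   operating on V2DFmode vectors.  */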
24575 /* Handler for an SVML-style interface to
24576 a library with vectorized intrinsics. */
24578 static tree
24579 ix86_veclibabi_svml (enum built_in_function fn, tree type_out, tree type_in)
24581 char name[20];
24582 tree fntype, new_fndecl, args;
24583 unsigned arity;
24584 const char *bname;
24585 enum machine_mode el_mode, in_mode;
24586 int n, in_n;
24588 /* The SVML is suitable for unsafe math only. */
24589 if (!flag_unsafe_math_optimizations)
24590 return NULL_TREE;
24592 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24593 n = TYPE_VECTOR_SUBPARTS (type_out);
24594 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24595 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24596 if (el_mode != in_mode
24597 || n != in_n)
24598 return NULL_TREE;
24600 switch (fn)
24602 case BUILT_IN_EXP:
24603 case BUILT_IN_LOG:
24604 case BUILT_IN_LOG10:
24605 case BUILT_IN_POW:
24606 case BUILT_IN_TANH:
24607 case BUILT_IN_TAN:
24608 case BUILT_IN_ATAN:
24609 case BUILT_IN_ATAN2:
24610 case BUILT_IN_ATANH:
24611 case BUILT_IN_CBRT:
24612 case BUILT_IN_SINH:
24613 case BUILT_IN_SIN:
24614 case BUILT_IN_ASINH:
24615 case BUILT_IN_ASIN:
24616 case BUILT_IN_COSH:
24617 case BUILT_IN_COS:
24618 case BUILT_IN_ACOSH:
24619 case BUILT_IN_ACOS:
24620 if (el_mode != DFmode || n != 2)
24621 return NULL_TREE;
24622 break;
24624 case BUILT_IN_EXPF:
24625 case BUILT_IN_LOGF:
24626 case BUILT_IN_LOG10F:
24627 case BUILT_IN_POWF:
24628 case BUILT_IN_TANHF:
24629 case BUILT_IN_TANF:
24630 case BUILT_IN_ATANF:
24631 case BUILT_IN_ATAN2F:
24632 case BUILT_IN_ATANHF:
24633 case BUILT_IN_CBRTF:
24634 case BUILT_IN_SINHF:
24635 case BUILT_IN_SINF:
24636 case BUILT_IN_ASINHF:
24637 case BUILT_IN_ASINF:
24638 case BUILT_IN_COSHF:
24639 case BUILT_IN_COSF:
24640 case BUILT_IN_ACOSHF:
24641 case BUILT_IN_ACOSF:
24642 if (el_mode != SFmode || n != 4)
24643 return NULL_TREE;
24644 break;
24646 default:
24647 return NULL_TREE;
24650 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24652 if (fn == BUILT_IN_LOGF)
24653 strcpy (name, "vmlsLn4");
24654 else if (fn == BUILT_IN_LOG)
24655 strcpy (name, "vmldLn2");
24656 else if (n == 4)
24658 sprintf (name, "vmls%s", bname+10);
24659 name[strlen (name)-1] = '4';
24661 else
24662 sprintf (name, "vmld%s2", bname+10);
24664 /* Convert to uppercase. */
24665 name[4] &= ~0x20;
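/* E.g. BUILT_IN_SINF ("__builtin_sinf", n == 4) yields "vmlsSin4" and
   BUILT_IN_SIN ("__builtin_sin", n == 2) yields "vmldSin2" here.  */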
24667 arity = 0;
24668 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24669 args = TREE_CHAIN (args))
24670 arity++;
24672 if (arity == 1)
24673 fntype = build_function_type_list (type_out, type_in, NULL);
24674 else
24675 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24677 /* Build a function declaration for the vectorized function. */
24678 new_fndecl = build_decl (BUILTINS_LOCATION,
24679 FUNCTION_DECL, get_identifier (name), fntype);
24680 TREE_PUBLIC (new_fndecl) = 1;
24681 DECL_EXTERNAL (new_fndecl) = 1;
24682 DECL_IS_NOVOPS (new_fndecl) = 1;
24683 TREE_READONLY (new_fndecl) = 1;
24685 return new_fndecl;
24688 /* Handler for an ACML-style interface to
24689 a library with vectorized intrinsics. */
24691 static tree
24692 ix86_veclibabi_acml (enum built_in_function fn, tree type_out, tree type_in)
24694 char name[20] = "__vr.._";
24695 tree fntype, new_fndecl, args;
24696 unsigned arity;
24697 const char *bname;
24698 enum machine_mode el_mode, in_mode;
24699 int n, in_n;
24701   /* The ACML is 64-bit only and suitable for unsafe math only, as
24702      it does not correctly support parts of IEEE with the required
24703      precision, such as denormals.  */
24704 if (!TARGET_64BIT
24705 || !flag_unsafe_math_optimizations)
24706 return NULL_TREE;
24708 el_mode = TYPE_MODE (TREE_TYPE (type_out));
24709 n = TYPE_VECTOR_SUBPARTS (type_out);
24710 in_mode = TYPE_MODE (TREE_TYPE (type_in));
24711 in_n = TYPE_VECTOR_SUBPARTS (type_in);
24712 if (el_mode != in_mode
24713 || n != in_n)
24714 return NULL_TREE;
24716 switch (fn)
24718 case BUILT_IN_SIN:
24719 case BUILT_IN_COS:
24720 case BUILT_IN_EXP:
24721 case BUILT_IN_LOG:
24722 case BUILT_IN_LOG2:
24723 case BUILT_IN_LOG10:
24724 name[4] = 'd';
24725 name[5] = '2';
24726 if (el_mode != DFmode
24727 || n != 2)
24728 return NULL_TREE;
24729 break;
24731 case BUILT_IN_SINF:
24732 case BUILT_IN_COSF:
24733 case BUILT_IN_EXPF:
24734 case BUILT_IN_POWF:
24735 case BUILT_IN_LOGF:
24736 case BUILT_IN_LOG2F:
24737 case BUILT_IN_LOG10F:
24738 name[4] = 's';
24739 name[5] = '4';
24740 if (el_mode != SFmode
24741 || n != 4)
24742 return NULL_TREE;
24743 break;
24745 default:
24746 return NULL_TREE;
24749 bname = IDENTIFIER_POINTER (DECL_NAME (implicit_built_in_decls[fn]));
24750 sprintf (name + 7, "%s", bname+10);
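/* Worked example of the ACML mangling (illustrative): starting from the
   template "__vr.._", BUILT_IN_SINF sets name[4] = 's' and name[5] = '4',
   giving "__vrs4_", and appending bname+10 ("sinf") yields "__vrs4_sinf".
   BUILT_IN_SIN correspondingly becomes "__vrd2_sin".  */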
24752 arity = 0;
24753 for (args = DECL_ARGUMENTS (implicit_built_in_decls[fn]); args;
24754 args = TREE_CHAIN (args))
24755 arity++;
24757 if (arity == 1)
24758 fntype = build_function_type_list (type_out, type_in, NULL);
24759 else
24760 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
24762 /* Build a function declaration for the vectorized function. */
24763 new_fndecl = build_decl (BUILTINS_LOCATION,
24764 FUNCTION_DECL, get_identifier (name), fntype);
24765 TREE_PUBLIC (new_fndecl) = 1;
24766 DECL_EXTERNAL (new_fndecl) = 1;
24767 DECL_IS_NOVOPS (new_fndecl) = 1;
24768 TREE_READONLY (new_fndecl) = 1;
24770 return new_fndecl;
24774 /* Returns a decl of a function that implements conversion of an integer vector
24775 into a floating-point vector, or vice-versa. TYPE is the type of the integer
24776 side of the conversion.
24777 Return NULL_TREE if it is not available. */
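/* For illustration, the mapping implemented below: a FLOAT_EXPR on a
   V4SImode operand yields IX86_BUILTIN_CVTDQ2PS (IX86_BUILTIN_CVTUDQ2PS
   when the integer type is unsigned), while FIX_TRUNC_EXPR yields
   IX86_BUILTIN_CVTTPS2DQ; the unsigned truncating direction has no
   builtin here and returns NULL_TREE.  */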
24779 static tree
24780 ix86_vectorize_builtin_conversion (unsigned int code, tree type)
24782 if (! (TARGET_SSE2 && TREE_CODE (type) == VECTOR_TYPE))
24783 return NULL_TREE;
24785 switch (code)
24787 case FLOAT_EXPR:
24788 switch (TYPE_MODE (type))
24790 case V4SImode:
24791 return TYPE_UNSIGNED (type)
24792 ? ix86_builtins[IX86_BUILTIN_CVTUDQ2PS]
24793 : ix86_builtins[IX86_BUILTIN_CVTDQ2PS];
24794 default:
24795 return NULL_TREE;
24798 case FIX_TRUNC_EXPR:
24799 switch (TYPE_MODE (type))
24801 case V4SImode:
24802 return TYPE_UNSIGNED (type)
24803 ? NULL_TREE
24804 : ix86_builtins[IX86_BUILTIN_CVTTPS2DQ];
24805 default:
24806 return NULL_TREE;
24808 default:
24809 return NULL_TREE;
24814 /* Returns the decl of a target-specific builtin that implements the
24815 reciprocal of the function FN, or NULL_TREE if not available. */
24817 static tree
24818 ix86_builtin_reciprocal (unsigned int fn, bool md_fn,
24819 bool sqrt ATTRIBUTE_UNUSED)
24821 if (! (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
24822 && flag_finite_math_only && !flag_trapping_math
24823 && flag_unsafe_math_optimizations))
24824 return NULL_TREE;
24826 if (md_fn)
24827 /* Machine dependent builtins. */
24828 switch (fn)
24830 /* Vectorized version of sqrt to rsqrt conversion. */
24831 case IX86_BUILTIN_SQRTPS_NR:
24832 return ix86_builtins[IX86_BUILTIN_RSQRTPS_NR];
24834 default:
24835 return NULL_TREE;
24837 else
24838 /* Normal builtins. */
24839 switch (fn)
24841 /* Sqrt to rsqrt conversion. */
24842 case BUILT_IN_SQRTF:
24843 return ix86_builtins[IX86_BUILTIN_RSQRTF];
24845 default:
24846 return NULL_TREE;
24850 /* Store OPERAND to memory after reload is completed. This means
24851 that we can't easily use assign_stack_local. */
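/* Sketch of the strategy used below (descriptive only): with a usable red
   zone we store into the RED_ZONE_SIZE bytes below the stack pointer
   without adjusting it; otherwise, 64-bit targets push a DImode copy via a
   PRE_DEC store, and 32-bit targets push the value in SImode pieces.  In
   every case the returned MEM refers to the resulting stack location.  */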
24853 ix86_force_to_memory (enum machine_mode mode, rtx operand)
24855 rtx result;
24857 gcc_assert (reload_completed);
24858 if (!TARGET_64BIT_MS_ABI && TARGET_RED_ZONE)
24860 result = gen_rtx_MEM (mode,
24861 gen_rtx_PLUS (Pmode,
24862 stack_pointer_rtx,
24863 GEN_INT (-RED_ZONE_SIZE)));
24864 emit_move_insn (result, operand);
24866 else if ((TARGET_64BIT_MS_ABI || !TARGET_RED_ZONE) && TARGET_64BIT)
24868 switch (mode)
24870 case HImode:
24871 case SImode:
24872 operand = gen_lowpart (DImode, operand);
24873 /* FALLTHRU */
24874 case DImode:
24875 emit_insn (
24876 gen_rtx_SET (VOIDmode,
24877 gen_rtx_MEM (DImode,
24878 gen_rtx_PRE_DEC (DImode,
24879 stack_pointer_rtx)),
24880 operand));
24881 break;
24882 default:
24883 gcc_unreachable ();
24885 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24887 else
24889 switch (mode)
24891 case DImode:
24893 rtx operands[2];
24894 split_di (&operand, 1, operands, operands + 1);
24895 emit_insn (
24896 gen_rtx_SET (VOIDmode,
24897 gen_rtx_MEM (SImode,
24898 gen_rtx_PRE_DEC (Pmode,
24899 stack_pointer_rtx)),
24900 operands[1]));
24901 emit_insn (
24902 gen_rtx_SET (VOIDmode,
24903 gen_rtx_MEM (SImode,
24904 gen_rtx_PRE_DEC (Pmode,
24905 stack_pointer_rtx)),
24906 operands[0]));
24908 break;
24909 case HImode:
24910 /* Store HImodes as SImodes. */
24911 operand = gen_lowpart (SImode, operand);
24912 /* FALLTHRU */
24913 case SImode:
24914 emit_insn (
24915 gen_rtx_SET (VOIDmode,
24916 gen_rtx_MEM (GET_MODE (operand),
24917 gen_rtx_PRE_DEC (SImode,
24918 stack_pointer_rtx)),
24919 operand));
24920 break;
24921 default:
24922 gcc_unreachable ();
24924 result = gen_rtx_MEM (mode, stack_pointer_rtx);
24926 return result;
24929 /* Free operand from the memory. */
24930 void
24931 ix86_free_from_memory (enum machine_mode mode)
24933 if (!TARGET_RED_ZONE || TARGET_64BIT_MS_ABI)
24935 int size;
24937 if (mode == DImode || TARGET_64BIT)
24938 size = 8;
24939 else
24940 size = 4;
24941 /* Use LEA to deallocate stack space. In peephole2 it will be converted
24942 to a pop or add instruction if registers are available. */
24943 emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
24944 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
24945 GEN_INT (size))));
24949 /* Implement TARGET_IRA_COVER_CLASSES. If -mfpmath=sse, we prefer
24950 SSE_REGS to FLOAT_REGS if their costs for a pseudo are the
24951 same. */
24952 static const enum reg_class *
24953 i386_ira_cover_classes (void)
24955 static const enum reg_class sse_fpmath_classes[] = {
24956 GENERAL_REGS, SSE_REGS, MMX_REGS, FLOAT_REGS, LIM_REG_CLASSES
24958 static const enum reg_class no_sse_fpmath_classes[] = {
24959 GENERAL_REGS, FLOAT_REGS, MMX_REGS, SSE_REGS, LIM_REG_CLASSES
24962 return TARGET_SSE_MATH ? sse_fpmath_classes : no_sse_fpmath_classes;
24965 /* Put float CONST_DOUBLE in the constant pool instead of fp regs.
24966 QImode must go into class Q_REGS.
24967 Narrow ALL_REGS to GENERAL_REGS. This supports letting movsf and
24968 movdf do mem-to-mem moves through integer regs. */
24969 enum reg_class
24970 ix86_preferred_reload_class (rtx x, enum reg_class regclass)
24972 enum machine_mode mode = GET_MODE (x);
24974 /* We're only allowed to return a subclass of REGCLASS. Many of the
24975 following checks fail for NO_REGS, so eliminate that early. */
24976 if (regclass == NO_REGS)
24977 return NO_REGS;
24979 /* All classes can load zeros. */
24980 if (x == CONST0_RTX (mode))
24981 return regclass;
24983 /* Force constants into memory if we are loading a (nonzero) constant into
24984 an MMX or SSE register. This is because there are no MMX/SSE instructions
24985 to load from a constant. */
24986 if (CONSTANT_P (x)
24987 && (MAYBE_MMX_CLASS_P (regclass) || MAYBE_SSE_CLASS_P (regclass)))
24988 return NO_REGS;
24990 /* Prefer SSE regs only if we can use them for math. */
24991 if (TARGET_SSE_MATH && !TARGET_MIX_SSE_I387 && SSE_FLOAT_MODE_P (mode))
24992 return SSE_CLASS_P (regclass) ? regclass : NO_REGS;
24994 /* Floating-point constants need more complex checks. */
24995 if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
24997 /* General regs can load everything. */
24998 if (reg_class_subset_p (regclass, GENERAL_REGS))
24999 return regclass;
25001 /* Floats can load 0 and 1 plus some others. Note that we eliminated
25002 zero above. We only want to wind up preferring 80387 registers if
25003 we plan on doing computation with them. */
25004 if (TARGET_80387
25005 && standard_80387_constant_p (x))
25007 /* Limit class to non-sse. */
25008 if (regclass == FLOAT_SSE_REGS)
25009 return FLOAT_REGS;
25010 if (regclass == FP_TOP_SSE_REGS)
25011 return FP_TOP_REG;
25012 if (regclass == FP_SECOND_SSE_REGS)
25013 return FP_SECOND_REG;
25014 if (regclass == FLOAT_INT_REGS || regclass == FLOAT_REGS)
25015 return regclass;
25018 return NO_REGS;
25021 /* Generally when we see PLUS here, it's the function invariant
25022 (plus soft-fp const_int), which can only be computed into general
25023 regs. */
25024 if (GET_CODE (x) == PLUS)
25025 return reg_class_subset_p (regclass, GENERAL_REGS) ? regclass : NO_REGS;
25027 /* QImode constants are easy to load, but non-constant QImode data
25028 must go into Q_REGS. */
25029 if (GET_MODE (x) == QImode && !CONSTANT_P (x))
25031 if (reg_class_subset_p (regclass, Q_REGS))
25032 return regclass;
25033 if (reg_class_subset_p (Q_REGS, regclass))
25034 return Q_REGS;
25035 return NO_REGS;
25038 return regclass;
25041 /* Discourage putting floating-point values in SSE registers unless
25042 SSE math is being used, and likewise for the 387 registers. */
25043 enum reg_class
25044 ix86_preferred_output_reload_class (rtx x, enum reg_class regclass)
25046 enum machine_mode mode = GET_MODE (x);
25048 /* Restrict the output reload class to the register bank that we are doing
25049 math on. To reject this alternative (i.e. not return a subset of REGCLASS),
25050 return NO_REGS; if reload cannot do that, it will still use its own choice. */
25051 mode = GET_MODE (x);
25052 if (TARGET_SSE_MATH && SSE_FLOAT_MODE_P (mode))
25053 return MAYBE_SSE_CLASS_P (regclass) ? SSE_REGS : NO_REGS;
25055 if (X87_FLOAT_MODE_P (mode))
25057 if (regclass == FP_TOP_SSE_REGS)
25058 return FP_TOP_REG;
25059 else if (regclass == FP_SECOND_SSE_REGS)
25060 return FP_SECOND_REG;
25061 else
25062 return FLOAT_CLASS_P (regclass) ? regclass : NO_REGS;
25065 return regclass;
25068 static enum reg_class
25069 ix86_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
25070 enum machine_mode mode,
25071 secondary_reload_info *sri ATTRIBUTE_UNUSED)
25073 /* QImode spills from non-QI registers require an
25074 intermediate register on 32-bit targets. */
25075 if (!in_p && mode == QImode && !TARGET_64BIT
25076 && (rclass == GENERAL_REGS
25077 || rclass == LEGACY_REGS
25078 || rclass == INDEX_REGS))
25080 int regno;
25082 if (REG_P (x))
25083 regno = REGNO (x);
25084 else
25085 regno = -1;
25087 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
25088 regno = true_regnum (x);
25090 /* Return Q_REGS if the operand is in memory. */
25091 if (regno == -1)
25092 return Q_REGS;
25095 return NO_REGS;
25098 /* If we are copying between general and FP registers, we need a memory
25099 location. The same is true for SSE and MMX registers.
25101 To optimize register_move_cost performance, allow the inline variant.
25103 The macro can't work reliably when one of the CLASSES is a class containing
25104 registers from multiple units (SSE, MMX, integer). We avoid this by never
25105 combining those units in a single alternative in the machine description.
25106 Ensure that this constraint holds to avoid unexpected surprises.
25108 When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
25109 enforce these sanity checks. */
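/* Example (illustrative): a DFmode copy between FLOAT_REGS and SSE_REGS
   always reports a secondary memory need, whereas an SImode copy between
   GENERAL_REGS and SSE_REGS needs memory only without SSE2, without
   inter-unit moves, or when the mode is wider than a word.  */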
25111 static inline int
25112 inline_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25113 enum machine_mode mode, int strict)
25115 if (MAYBE_FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class1)
25116 || MAYBE_FLOAT_CLASS_P (class2) != FLOAT_CLASS_P (class2)
25117 || MAYBE_SSE_CLASS_P (class1) != SSE_CLASS_P (class1)
25118 || MAYBE_SSE_CLASS_P (class2) != SSE_CLASS_P (class2)
25119 || MAYBE_MMX_CLASS_P (class1) != MMX_CLASS_P (class1)
25120 || MAYBE_MMX_CLASS_P (class2) != MMX_CLASS_P (class2))
25122 gcc_assert (!strict);
25123 return true;
25126 if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
25127 return true;
25129 /* ??? This is a lie. We do have moves between mmx/general and between
25130 mmx/sse2. But by saying we need secondary memory we discourage the
25131 register allocator from using the mmx registers unless needed. */
25132 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
25133 return true;
25135 if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25137 /* SSE1 doesn't have any direct moves from other classes. */
25138 if (!TARGET_SSE2)
25139 return true;
25141 /* If the target says that inter-unit moves are more expensive
25142 than moving through memory, then don't generate them. */
25143 if (!TARGET_INTER_UNIT_MOVES)
25144 return true;
25146 /* Between SSE and general, we have moves no larger than word size. */
25147 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
25148 return true;
25151 return false;
25155 ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
25156 enum machine_mode mode, int strict)
25158 return inline_secondary_memory_needed (class1, class2, mode, strict);
25161 /* Return true if the registers in CLASS cannot represent the change from
25162 modes FROM to TO. */
25164 bool
25165 ix86_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
25166 enum reg_class regclass)
25168 if (from == to)
25169 return false;
25171 /* x87 registers can't do subreg at all, as all values are reformatted
25172 to extended precision. */
25173 if (MAYBE_FLOAT_CLASS_P (regclass))
25174 return true;
25176 if (MAYBE_SSE_CLASS_P (regclass) || MAYBE_MMX_CLASS_P (regclass))
25178 /* Vector registers do not support QI or HImode loads. If we don't
25179 disallow a change to these modes, reload will assume it's ok to
25180 drop the subreg from (subreg:SI (reg:HI 100) 0). This affects
25181 the vec_dupv4hi pattern. */
25182 if (GET_MODE_SIZE (from) < 4)
25183 return true;
25185 /* Vector registers do not support subreg with nonzero offsets, which
25186 are otherwise valid for integer registers. Since we can't see
25187 whether we have a nonzero offset from here, prohibit all
25188 nonparadoxical subregs changing size. */
25189 if (GET_MODE_SIZE (to) < GET_MODE_SIZE (from))
25190 return true;
25193 return false;
25196 /* Return the cost of moving data of mode M between a
25197 register and memory. A value of 2 is the default; this cost is
25198 relative to those in `REGISTER_MOVE_COST'.
25200 This function is used extensively by register_move_cost, which is used to
25201 build tables at startup. Make it inline in that case.
25202 When IN is 2, return the maximum of the in and out move costs.
25204 If moving between registers and memory is more expensive than
25205 between two registers, you should define this macro to express the
25206 relative cost.
25208 Also model the increased cost of moving QImode registers in non
25209 Q_REGS classes. */
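/* For example, under the rules below a QImode store from a register outside
   Q_REGS on a 32-bit target is charged ix86_cost->int_store[0] + 4 (the +4
   being an extra penalty on top of the plain store cost).  */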
25211 static inline int
25212 inline_memory_move_cost (enum machine_mode mode, enum reg_class regclass,
25213 int in)
25215 int cost;
25216 if (FLOAT_CLASS_P (regclass))
25218 int index;
25219 switch (mode)
25221 case SFmode:
25222 index = 0;
25223 break;
25224 case DFmode:
25225 index = 1;
25226 break;
25227 case XFmode:
25228 index = 2;
25229 break;
25230 default:
25231 return 100;
25233 if (in == 2)
25234 return MAX (ix86_cost->fp_load [index], ix86_cost->fp_store [index]);
25235 return in ? ix86_cost->fp_load [index] : ix86_cost->fp_store [index];
25237 if (SSE_CLASS_P (regclass))
25239 int index;
25240 switch (GET_MODE_SIZE (mode))
25242 case 4:
25243 index = 0;
25244 break;
25245 case 8:
25246 index = 1;
25247 break;
25248 case 16:
25249 index = 2;
25250 break;
25251 default:
25252 return 100;
25254 if (in == 2)
25255 return MAX (ix86_cost->sse_load [index], ix86_cost->sse_store [index]);
25256 return in ? ix86_cost->sse_load [index] : ix86_cost->sse_store [index];
25258 if (MMX_CLASS_P (regclass))
25260 int index;
25261 switch (GET_MODE_SIZE (mode))
25263 case 4:
25264 index = 0;
25265 break;
25266 case 8:
25267 index = 1;
25268 break;
25269 default:
25270 return 100;
25272 if (in == 2)
25273 return MAX (ix86_cost->mmx_load [index], ix86_cost->mmx_store [index]);
25274 return in ? ix86_cost->mmx_load [index] : ix86_cost->mmx_store [index];
25276 switch (GET_MODE_SIZE (mode))
25278 case 1:
25279 if (Q_CLASS_P (regclass) || TARGET_64BIT)
25281 if (!in)
25282 return ix86_cost->int_store[0];
25283 if (TARGET_PARTIAL_REG_DEPENDENCY
25284 && optimize_function_for_speed_p (cfun))
25285 cost = ix86_cost->movzbl_load;
25286 else
25287 cost = ix86_cost->int_load[0];
25288 if (in == 2)
25289 return MAX (cost, ix86_cost->int_store[0]);
25290 return cost;
25292 else
25294 if (in == 2)
25295 return MAX (ix86_cost->movzbl_load, ix86_cost->int_store[0] + 4);
25296 if (in)
25297 return ix86_cost->movzbl_load;
25298 else
25299 return ix86_cost->int_store[0] + 4;
25301 break;
25302 case 2:
25303 if (in == 2)
25304 return MAX (ix86_cost->int_load[1], ix86_cost->int_store[1]);
25305 return in ? ix86_cost->int_load[1] : ix86_cost->int_store[1];
25306 default:
25307 /* Compute the number of 32-bit moves needed. TFmode is moved as XFmode. */
25308 if (mode == TFmode)
25309 mode = XFmode;
25310 if (in == 2)
25311 cost = MAX (ix86_cost->int_load[2] , ix86_cost->int_store[2]);
25312 else if (in)
25313 cost = ix86_cost->int_load[2];
25314 else
25315 cost = ix86_cost->int_store[2];
25316 return (cost * (((int) GET_MODE_SIZE (mode)
25317 + UNITS_PER_WORD - 1) / UNITS_PER_WORD));
25322 ix86_memory_move_cost (enum machine_mode mode, enum reg_class regclass, int in)
25324 return inline_memory_move_cost (mode, regclass, in);
25328 /* Return the cost of moving data from a register in class CLASS1 to
25329 one in class CLASS2.
25331 It is not required that the cost always equal 2 when FROM is the same as TO;
25332 on some machines it is expensive to move between registers if they are not
25333 general registers. */
25336 ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
25337 enum reg_class class2)
25339 /* In case we require secondary memory, compute cost of the store followed
25340 by load. In order to avoid bad register allocation choices, we need
25341 for this to be *at least* as high as the symmetric MEMORY_MOVE_COST. */
25343 if (inline_secondary_memory_needed (class1, class2, mode, 0))
25345 int cost = 1;
25347 cost += inline_memory_move_cost (mode, class1, 2);
25348 cost += inline_memory_move_cost (mode, class2, 2);
25350 /* In the case of copying from a general purpose register we may emit
25351 multiple stores followed by a single load, causing a memory size
25352 mismatch stall. Count this as an arbitrarily high cost of 20. */
25353 if (CLASS_MAX_NREGS (class1, mode) > CLASS_MAX_NREGS (class2, mode))
25354 cost += 20;
25356 /* In the case of FP/MMX moves, the registers actually overlap, and we
25357 have to switch modes in order to treat them differently. */
25358 if ((MMX_CLASS_P (class1) && MAYBE_FLOAT_CLASS_P (class2))
25359 || (MMX_CLASS_P (class2) && MAYBE_FLOAT_CLASS_P (class1)))
25360 cost += 20;
25362 return cost;
25365 /* Moves between SSE/MMX and integer unit are expensive. */
25366 if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2)
25367 || SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
25369 /* ??? By keeping the returned value relatively high, we limit the number
25370 of moves between integer and MMX/SSE registers for all targets.
25371 Additionally, a high value prevents a problem with x86_modes_tieable_p(),
25372 where integer modes in MMX/SSE registers are not tieable
25373 because of missing QImode and HImode moves to, from or between
25374 MMX/SSE registers. */
25375 return MAX (8, ix86_cost->mmxsse_to_integer);
25377 if (MAYBE_FLOAT_CLASS_P (class1))
25378 return ix86_cost->fp_move;
25379 if (MAYBE_SSE_CLASS_P (class1))
25380 return ix86_cost->sse_move;
25381 if (MAYBE_MMX_CLASS_P (class1))
25382 return ix86_cost->mmx_move;
25383 return 2;
25386 /* Return 1 if hard register REGNO can hold a value of machine-mode MODE. */
25388 bool
25389 ix86_hard_regno_mode_ok (int regno, enum machine_mode mode)
25391 /* Only the flags registers can hold CCmode values, and they can hold nothing else. */
25392 if (CC_REGNO_P (regno))
25393 return GET_MODE_CLASS (mode) == MODE_CC;
25394 if (GET_MODE_CLASS (mode) == MODE_CC
25395 || GET_MODE_CLASS (mode) == MODE_RANDOM
25396 || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
25397 return 0;
25398 if (FP_REGNO_P (regno))
25399 return VALID_FP_MODE_P (mode);
25400 if (SSE_REGNO_P (regno))
25402 /* We implement the move patterns for all vector modes into and
25403 out of SSE registers, even when no operation instructions
25404 are available. OImode move is available only when AVX is
25405 enabled. */
25406 return ((TARGET_AVX && mode == OImode)
25407 || VALID_AVX256_REG_MODE (mode)
25408 || VALID_SSE_REG_MODE (mode)
25409 || VALID_SSE2_REG_MODE (mode)
25410 || VALID_MMX_REG_MODE (mode)
25411 || VALID_MMX_REG_MODE_3DNOW (mode));
25413 if (MMX_REGNO_P (regno))
25415 /* We implement the move patterns for 3DNOW modes even in MMX mode,
25416 so if the register is available at all, then we can move data of
25417 the given mode into or out of it. */
25418 return (VALID_MMX_REG_MODE (mode)
25419 || VALID_MMX_REG_MODE_3DNOW (mode));
25422 if (mode == QImode)
25424 /* Take care with QImode values - they can be in non-QI regs,
25425 but then they do cause partial register stalls. */
25426 if (regno <= BX_REG || TARGET_64BIT)
25427 return 1;
25428 if (!TARGET_PARTIAL_REG_STALL)
25429 return 1;
25430 return reload_in_progress || reload_completed;
25432 /* We handle both integers and floats in the general purpose registers. */
25433 else if (VALID_INT_MODE_P (mode))
25434 return 1;
25435 else if (VALID_FP_MODE_P (mode))
25436 return 1;
25437 else if (VALID_DFP_MODE_P (mode))
25438 return 1;
25439 /* Lots of MMX code casts 8 byte vector modes to DImode. If we then go
25440 on to use that value in smaller contexts, this can easily force a
25441 pseudo to be allocated to GENERAL_REGS. Since this is no worse than
25442 supporting DImode, allow it. */
25443 else if (VALID_MMX_REG_MODE_3DNOW (mode) || VALID_MMX_REG_MODE (mode))
25444 return 1;
25446 return 0;
25449 /* A subroutine of ix86_modes_tieable_p. Return true if MODE is a
25450 tieable integer mode. */
25452 static bool
25453 ix86_tieable_integer_mode_p (enum machine_mode mode)
25455 switch (mode)
25457 case HImode:
25458 case SImode:
25459 return true;
25461 case QImode:
25462 return TARGET_64BIT || !TARGET_PARTIAL_REG_STALL;
25464 case DImode:
25465 return TARGET_64BIT;
25467 default:
25468 return false;
25472 /* Return true if MODE1 is accessible in a register that can hold MODE2
25473 without copying. That is, all register classes that can hold MODE2
25474 can also hold MODE1. */
25476 bool
25477 ix86_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
25479 if (mode1 == mode2)
25480 return true;
25482 if (ix86_tieable_integer_mode_p (mode1)
25483 && ix86_tieable_integer_mode_p (mode2))
25484 return true;
25486 /* MODE2 being XFmode implies fp stack or general regs, which means we
25487 can tie any smaller floating point modes to it. Note that we do not
25488 tie this with TFmode. */
25489 if (mode2 == XFmode)
25490 return mode1 == SFmode || mode1 == DFmode;
25492 /* MODE2 being DFmode implies fp stack, general or sse regs, which means
25493 that we can tie it with SFmode. */
25494 if (mode2 == DFmode)
25495 return mode1 == SFmode;
25497 /* If MODE2 is only appropriate for an SSE register, then tie with
25498 any other mode acceptable to SSE registers. */
25499 if (GET_MODE_SIZE (mode2) == 16
25500 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode2))
25501 return (GET_MODE_SIZE (mode1) == 16
25502 && ix86_hard_regno_mode_ok (FIRST_SSE_REG, mode1));
25504 /* If MODE2 is appropriate for an MMX register, then tie
25505 with any other mode acceptable to MMX registers. */
25506 if (GET_MODE_SIZE (mode2) == 8
25507 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode2))
25508 return (GET_MODE_SIZE (mode1) == 8
25509 && ix86_hard_regno_mode_ok (FIRST_MMX_REG, mode1));
25511 return false;
25514 /* Compute a (partial) cost for rtx X. Return true if the complete
25515 cost has been computed, and false if subexpressions should be
25516 scanned. In either case, *TOTAL contains the cost result. */
25518 static bool
25519 ix86_rtx_costs (rtx x, int code, int outer_code_i, int *total, bool speed)
25521 enum rtx_code outer_code = (enum rtx_code) outer_code_i;
25522 enum machine_mode mode = GET_MODE (x);
25523 const struct processor_costs *cost = speed ? ix86_cost : &ix86_size_cost;
25525 switch (code)
25527 case CONST_INT:
25528 case CONST:
25529 case LABEL_REF:
25530 case SYMBOL_REF:
25531 if (TARGET_64BIT && !x86_64_immediate_operand (x, VOIDmode))
25532 *total = 3;
25533 else if (TARGET_64BIT && !x86_64_zext_immediate_operand (x, VOIDmode))
25534 *total = 2;
25535 else if (flag_pic && SYMBOLIC_CONST (x)
25536 && (!TARGET_64BIT
25537 || (GET_CODE (x) != LABEL_REF
25538 && (GET_CODE (x) != SYMBOL_REF
25539 || !SYMBOL_REF_LOCAL_P (x)))))
25540 *total = 1;
25541 else
25542 *total = 0;
25543 return true;
25545 case CONST_DOUBLE:
25546 if (mode == VOIDmode)
25547 *total = 0;
25548 else
25549 switch (standard_80387_constant_p (x))
25551 case 1: /* 0.0 */
25552 *total = 1;
25553 break;
25554 default: /* Other constants */
25555 *total = 2;
25556 break;
25557 case 0:
25558 case -1:
25559 /* Start with (MEM (SYMBOL_REF)), since that's where
25560 it'll probably end up. Add a penalty for size. */
25561 *total = (COSTS_N_INSNS (1)
25562 + (flag_pic != 0 && !TARGET_64BIT)
25563 + (mode == SFmode ? 0 : mode == DFmode ? 1 : 2));
25564 break;
25566 return true;
25568 case ZERO_EXTEND:
25569 /* Zero extension is often completely free on x86_64, so make
25570 it as cheap as possible. */
25571 if (TARGET_64BIT && mode == DImode
25572 && GET_MODE (XEXP (x, 0)) == SImode)
25573 *total = 1;
25574 else if (TARGET_ZERO_EXTEND_WITH_AND)
25575 *total = cost->add;
25576 else
25577 *total = cost->movzx;
25578 return false;
25580 case SIGN_EXTEND:
25581 *total = cost->movsx;
25582 return false;
25584 case ASHIFT:
25585 if (CONST_INT_P (XEXP (x, 1))
25586 && (GET_MODE (XEXP (x, 0)) != DImode || TARGET_64BIT))
25588 HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
25589 if (value == 1)
25591 *total = cost->add;
25592 return false;
25594 if ((value == 2 || value == 3)
25595 && cost->lea <= cost->shift_const)
25597 *total = cost->lea;
25598 return false;
25601 /* FALLTHRU */
25603 case ROTATE:
25604 case ASHIFTRT:
25605 case LSHIFTRT:
25606 case ROTATERT:
25607 if (!TARGET_64BIT && GET_MODE (XEXP (x, 0)) == DImode)
25609 if (CONST_INT_P (XEXP (x, 1)))
25611 if (INTVAL (XEXP (x, 1)) > 32)
25612 *total = cost->shift_const + COSTS_N_INSNS (2);
25613 else
25614 *total = cost->shift_const * 2;
25616 else
25618 if (GET_CODE (XEXP (x, 1)) == AND)
25619 *total = cost->shift_var * 2;
25620 else
25621 *total = cost->shift_var * 6 + COSTS_N_INSNS (2);
25624 else
25626 if (CONST_INT_P (XEXP (x, 1)))
25627 *total = cost->shift_const;
25628 else
25629 *total = cost->shift_var;
25631 return false;
25633 case MULT:
25634 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25636 /* ??? SSE scalar cost should be used here. */
25637 *total = cost->fmul;
25638 return false;
25640 else if (X87_FLOAT_MODE_P (mode))
25642 *total = cost->fmul;
25643 return false;
25645 else if (FLOAT_MODE_P (mode))
25647 /* ??? SSE vector cost should be used here. */
25648 *total = cost->fmul;
25649 return false;
25651 else
25653 rtx op0 = XEXP (x, 0);
25654 rtx op1 = XEXP (x, 1);
25655 int nbits;
25656 if (CONST_INT_P (XEXP (x, 1)))
25658 unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
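/* The loop below clears the lowest set bit on each iteration, so nbits
   ends up as the population count of the constant multiplier, e.g. 5 for
   a multiply by 0x1f.  */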
25659 for (nbits = 0; value != 0; value &= value - 1)
25660 nbits++;
25662 else
25663 /* This is arbitrary. */
25664 nbits = 7;
25666 /* Compute costs correctly for widening multiplication. */
25667 if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND)
25668 && GET_MODE_SIZE (GET_MODE (XEXP (op0, 0))) * 2
25669 == GET_MODE_SIZE (mode))
25671 int is_mulwiden = 0;
25672 enum machine_mode inner_mode = GET_MODE (op0);
25674 if (GET_CODE (op0) == GET_CODE (op1))
25675 is_mulwiden = 1, op1 = XEXP (op1, 0);
25676 else if (CONST_INT_P (op1))
25678 if (GET_CODE (op0) == SIGN_EXTEND)
25679 is_mulwiden = trunc_int_for_mode (INTVAL (op1), inner_mode)
25680 == INTVAL (op1);
25681 else
25682 is_mulwiden = !(INTVAL (op1) & ~GET_MODE_MASK (inner_mode));
25685 if (is_mulwiden)
25686 op0 = XEXP (op0, 0), mode = GET_MODE (op0);
25689 *total = (cost->mult_init[MODE_INDEX (mode)]
25690 + nbits * cost->mult_bit
25691 + rtx_cost (op0, outer_code, speed) + rtx_cost (op1, outer_code, speed));
25693 return true;
25696 case DIV:
25697 case UDIV:
25698 case MOD:
25699 case UMOD:
25700 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25701 /* ??? SSE cost should be used here. */
25702 *total = cost->fdiv;
25703 else if (X87_FLOAT_MODE_P (mode))
25704 *total = cost->fdiv;
25705 else if (FLOAT_MODE_P (mode))
25706 /* ??? SSE vector cost should be used here. */
25707 *total = cost->fdiv;
25708 else
25709 *total = cost->divide[MODE_INDEX (mode)];
25710 return false;
25712 case PLUS:
25713 if (GET_MODE_CLASS (mode) == MODE_INT
25714 && GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (Pmode))
25716 if (GET_CODE (XEXP (x, 0)) == PLUS
25717 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
25718 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
25719 && CONSTANT_P (XEXP (x, 1)))
25721 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1));
25722 if (val == 2 || val == 4 || val == 8)
25724 *total = cost->lea;
25725 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25726 *total += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0),
25727 outer_code, speed);
25728 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25729 return true;
25732 else if (GET_CODE (XEXP (x, 0)) == MULT
25733 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
25735 HOST_WIDE_INT val = INTVAL (XEXP (XEXP (x, 0), 1));
25736 if (val == 2 || val == 4 || val == 8)
25738 *total = cost->lea;
25739 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25740 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25741 return true;
25744 else if (GET_CODE (XEXP (x, 0)) == PLUS)
25746 *total = cost->lea;
25747 *total += rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed);
25748 *total += rtx_cost (XEXP (XEXP (x, 0), 1), outer_code, speed);
25749 *total += rtx_cost (XEXP (x, 1), outer_code, speed);
25750 return true;
25753 /* FALLTHRU */
25755 case MINUS:
25756 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25758 /* ??? SSE cost should be used here. */
25759 *total = cost->fadd;
25760 return false;
25762 else if (X87_FLOAT_MODE_P (mode))
25764 *total = cost->fadd;
25765 return false;
25767 else if (FLOAT_MODE_P (mode))
25769 /* ??? SSE vector cost should be used here. */
25770 *total = cost->fadd;
25771 return false;
25773 /* FALLTHRU */
25775 case AND:
25776 case IOR:
25777 case XOR:
25778 if (!TARGET_64BIT && mode == DImode)
25780 *total = (cost->add * 2
25781 + (rtx_cost (XEXP (x, 0), outer_code, speed)
25782 << (GET_MODE (XEXP (x, 0)) != DImode))
25783 + (rtx_cost (XEXP (x, 1), outer_code, speed)
25784 << (GET_MODE (XEXP (x, 1)) != DImode)));
25785 return true;
25787 /* FALLTHRU */
25789 case NEG:
25790 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25792 /* ??? SSE cost should be used here. */
25793 *total = cost->fchs;
25794 return false;
25796 else if (X87_FLOAT_MODE_P (mode))
25798 *total = cost->fchs;
25799 return false;
25801 else if (FLOAT_MODE_P (mode))
25803 /* ??? SSE vector cost should be used here. */
25804 *total = cost->fchs;
25805 return false;
25807 /* FALLTHRU */
25809 case NOT:
25810 if (!TARGET_64BIT && mode == DImode)
25811 *total = cost->add * 2;
25812 else
25813 *total = cost->add;
25814 return false;
25816 case COMPARE:
25817 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTRACT
25818 && XEXP (XEXP (x, 0), 1) == const1_rtx
25819 && CONST_INT_P (XEXP (XEXP (x, 0), 2))
25820 && XEXP (x, 1) == const0_rtx)
25822 /* This kind of construct is implemented using test[bwl].
25823 Treat it as if we had an AND. */
25824 *total = (cost->add
25825 + rtx_cost (XEXP (XEXP (x, 0), 0), outer_code, speed)
25826 + rtx_cost (const1_rtx, outer_code, speed));
25827 return true;
25829 return false;
25831 case FLOAT_EXTEND:
25832 if (!(SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH))
25833 *total = 0;
25834 return false;
25836 case ABS:
25837 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25838 /* ??? SSE cost should be used here. */
25839 *total = cost->fabs;
25840 else if (X87_FLOAT_MODE_P (mode))
25841 *total = cost->fabs;
25842 else if (FLOAT_MODE_P (mode))
25843 /* ??? SSE vector cost should be used here. */
25844 *total = cost->fabs;
25845 return false;
25847 case SQRT:
25848 if (SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH)
25849 /* ??? SSE cost should be used here. */
25850 *total = cost->fsqrt;
25851 else if (X87_FLOAT_MODE_P (mode))
25852 *total = cost->fsqrt;
25853 else if (FLOAT_MODE_P (mode))
25854 /* ??? SSE vector cost should be used here. */
25855 *total = cost->fsqrt;
25856 return false;
25858 case UNSPEC:
25859 if (XINT (x, 1) == UNSPEC_TP)
25860 *total = 0;
25861 return false;
25863 default:
25864 return false;
25868 #if TARGET_MACHO
25870 static int current_machopic_label_num;
25872 /* Given a symbol name and its associated stub, write out the
25873 definition of the stub. */
25875 void
25876 machopic_output_stub (FILE *file, const char *symb, const char *stub)
25878 unsigned int length;
25879 char *binder_name, *symbol_name, lazy_ptr_name[32];
25880 int label = ++current_machopic_label_num;
25882 /* For 64-bit we shouldn't get here. */
25883 gcc_assert (!TARGET_64BIT);
25885 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
25886 symb = (*targetm.strip_name_encoding) (symb);
25888 length = strlen (stub);
25889 binder_name = XALLOCAVEC (char, length + 32);
25890 GEN_BINDER_NAME_FOR_STUB (binder_name, stub, length);
25892 length = strlen (symb);
25893 symbol_name = XALLOCAVEC (char, length + 32);
25894 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
25896 sprintf (lazy_ptr_name, "L%d$lz", label);
25898 if (MACHOPIC_PURE)
25899 switch_to_section (darwin_sections[machopic_picsymbol_stub_section]);
25900 else
25901 switch_to_section (darwin_sections[machopic_symbol_stub_section]);
25903 fprintf (file, "%s:\n", stub);
25904 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25906 if (MACHOPIC_PURE)
25908 fprintf (file, "\tcall\tLPC$%d\nLPC$%d:\tpopl\t%%eax\n", label, label);
25909 fprintf (file, "\tmovl\t%s-LPC$%d(%%eax),%%edx\n", lazy_ptr_name, label);
25910 fprintf (file, "\tjmp\t*%%edx\n");
25912 else
25913 fprintf (file, "\tjmp\t*%s\n", lazy_ptr_name);
25915 fprintf (file, "%s:\n", binder_name);
25917 if (MACHOPIC_PURE)
25919 fprintf (file, "\tlea\t%s-LPC$%d(%%eax),%%eax\n", lazy_ptr_name, label);
25920 fputs ("\tpushl\t%eax\n", file);
25922 else
25923 fprintf (file, "\tpushl\t$%s\n", lazy_ptr_name);
25925 fputs ("\tjmp\tdyld_stub_binding_helper\n", file);
25927 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
25928 fprintf (file, "%s:\n", lazy_ptr_name);
25929 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
25930 fprintf (file, ASM_LONG "%s\n", binder_name);
25933 void
25934 darwin_x86_file_end (void)
25936 darwin_file_end ();
25937 ix86_file_end ();
25939 #endif /* TARGET_MACHO */
25941 /* Order the registers for register allocator. */
25943 void
25944 x86_order_regs_for_local_alloc (void)
25946 int pos = 0;
25947 int i;
25949 /* First allocate the local general purpose registers. */
25950 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25951 if (GENERAL_REGNO_P (i) && call_used_regs[i])
25952 reg_alloc_order [pos++] = i;
25954 /* Global general purpose registers. */
25955 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
25956 if (GENERAL_REGNO_P (i) && !call_used_regs[i])
25957 reg_alloc_order [pos++] = i;
25959 /* x87 registers come first in case we are doing FP math
25960 using them. */
25961 if (!TARGET_SSE_MATH)
25962 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25963 reg_alloc_order [pos++] = i;
25965 /* SSE registers. */
25966 for (i = FIRST_SSE_REG; i <= LAST_SSE_REG; i++)
25967 reg_alloc_order [pos++] = i;
25968 for (i = FIRST_REX_SSE_REG; i <= LAST_REX_SSE_REG; i++)
25969 reg_alloc_order [pos++] = i;
25971 /* x87 registers. */
25972 if (TARGET_SSE_MATH)
25973 for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
25974 reg_alloc_order [pos++] = i;
25976 for (i = FIRST_MMX_REG; i <= LAST_MMX_REG; i++)
25977 reg_alloc_order [pos++] = i;
25979 /* Initialize the rest of the array, as some registers are never
25980 allocated at all. */
25981 while (pos < FIRST_PSEUDO_REGISTER)
25982 reg_alloc_order [pos++] = 0;
25985 /* Handle a "ms_abi" or "sysv_abi" attribute; arguments as in
25986 struct attribute_spec.handler. */
25987 static tree
25988 ix86_handle_abi_attribute (tree *node, tree name,
25989 tree args ATTRIBUTE_UNUSED,
25990 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
25992 if (TREE_CODE (*node) != FUNCTION_TYPE
25993 && TREE_CODE (*node) != METHOD_TYPE
25994 && TREE_CODE (*node) != FIELD_DECL
25995 && TREE_CODE (*node) != TYPE_DECL)
25997 warning (OPT_Wattributes, "%qE attribute only applies to functions",
25998 name);
25999 *no_add_attrs = true;
26000 return NULL_TREE;
26002 if (!TARGET_64BIT)
26004 warning (OPT_Wattributes, "%qE attribute only available for 64-bit",
26005 name);
26006 *no_add_attrs = true;
26007 return NULL_TREE;
26010 /* ms_abi and sysv_abi are mutually exclusive; diagnose attempts to combine them. */
26011 if (is_attribute_p ("ms_abi", name))
26013 if (lookup_attribute ("sysv_abi", TYPE_ATTRIBUTES (*node)))
26015 error ("ms_abi and sysv_abi attributes are not compatible");
26018 return NULL_TREE;
26020 else if (is_attribute_p ("sysv_abi", name))
26022 if (lookup_attribute ("ms_abi", TYPE_ATTRIBUTES (*node)))
26024 error ("ms_abi and sysv_abi attributes are not compatible");
26027 return NULL_TREE;
26030 return NULL_TREE;
26033 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
26034 struct attribute_spec.handler. */
26035 static tree
26036 ix86_handle_struct_attribute (tree *node, tree name,
26037 tree args ATTRIBUTE_UNUSED,
26038 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
26040 tree *type = NULL;
26041 if (DECL_P (*node))
26043 if (TREE_CODE (*node) == TYPE_DECL)
26044 type = &TREE_TYPE (*node);
26046 else
26047 type = node;
26049 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
26050 || TREE_CODE (*type) == UNION_TYPE)))
26052 warning (OPT_Wattributes, "%qE attribute ignored",
26053 name);
26054 *no_add_attrs = true;
26057 else if ((is_attribute_p ("ms_struct", name)
26058 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
26059 || ((is_attribute_p ("gcc_struct", name)
26060 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
26062 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
26063 name);
26064 *no_add_attrs = true;
26067 return NULL_TREE;
26070 static bool
26071 ix86_ms_bitfield_layout_p (const_tree record_type)
26073 return (TARGET_MS_BITFIELD_LAYOUT &&
26074 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
26075 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
26078 /* Returns an expression indicating where the this parameter is
26079 located on entry to the FUNCTION. */
26081 static rtx
26082 x86_this_parameter (tree function)
26084 tree type = TREE_TYPE (function);
26085 bool aggr = aggregate_value_p (TREE_TYPE (type), type) != 0;
26086 int nregs;
26088 if (TARGET_64BIT)
26090 const int *parm_regs;
26092 if (ix86_function_type_abi (type) == MS_ABI)
26093 parm_regs = x86_64_ms_abi_int_parameter_registers;
26094 else
26095 parm_regs = x86_64_int_parameter_registers;
26096 return gen_rtx_REG (DImode, parm_regs[aggr]);
26099 nregs = ix86_function_regparm (type, function);
26101 if (nregs > 0 && !stdarg_p (type))
26103 int regno;
26105 if (lookup_attribute ("fastcall", TYPE_ATTRIBUTES (type)))
26106 regno = aggr ? DX_REG : CX_REG;
26107 else
26109 regno = AX_REG;
26110 if (aggr)
26112 regno = DX_REG;
26113 if (nregs == 1)
26114 return gen_rtx_MEM (SImode,
26115 plus_constant (stack_pointer_rtx, 4));
26118 return gen_rtx_REG (SImode, regno);
26121 return gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx, aggr ? 8 : 4));
26124 /* Determine whether x86_output_mi_thunk can succeed. */
26126 static bool
26127 x86_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
26128 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
26129 HOST_WIDE_INT vcall_offset, const_tree function)
26131 /* 64-bit can handle anything. */
26132 if (TARGET_64BIT)
26133 return true;
26135 /* For 32-bit, everything's fine if we have one free register. */
26136 if (ix86_function_regparm (TREE_TYPE (function), function) < 3)
26137 return true;
26139 /* Need a free register for vcall_offset. */
26140 if (vcall_offset)
26141 return false;
26143 /* Need a free register for GOT references. */
26144 if (flag_pic && !(*targetm.binds_local_p) (function))
26145 return false;
26147 /* Otherwise ok. */
26148 return true;
26151 /* Output the assembler code for a thunk function. THUNK_DECL is the
26152 declaration for the thunk function itself, FUNCTION is the decl for
26153 the target function. DELTA is an immediate constant offset to be
26154 added to THIS. If VCALL_OFFSET is nonzero, the word at
26155 *(*this + vcall_offset) should be added to THIS. */
26157 static void
26158 x86_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
26159 tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
26160 HOST_WIDE_INT vcall_offset, tree function)
26162 rtx xops[3];
26163 rtx this_param = x86_this_parameter (function);
26164 rtx this_reg, tmp;
26166 /* If VCALL_OFFSET, we'll need THIS in a register. Might as well
26167 pull it in now and let DELTA benefit. */
26168 if (REG_P (this_param))
26169 this_reg = this_param;
26170 else if (vcall_offset)
26172 /* Put the this parameter into %eax. */
26173 xops[0] = this_param;
26174 xops[1] = this_reg = gen_rtx_REG (Pmode, AX_REG);
26175 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26177 else
26178 this_reg = NULL_RTX;
26180 /* Adjust the this parameter by a fixed constant. */
26181 if (delta)
26183 xops[0] = GEN_INT (delta);
26184 xops[1] = this_reg ? this_reg : this_param;
26185 if (TARGET_64BIT)
26187 if (!x86_64_general_operand (xops[0], DImode))
26189 tmp = gen_rtx_REG (DImode, R10_REG);
26190 xops[1] = tmp;
26191 output_asm_insn ("mov{q}\t{%1, %0|%0, %1}", xops);
26192 xops[0] = tmp;
26193 xops[1] = this_param;
26195 output_asm_insn ("add{q}\t{%0, %1|%1, %0}", xops);
26197 else
26198 output_asm_insn ("add{l}\t{%0, %1|%1, %0}", xops);
26201 /* Adjust the this parameter by a value stored in the vtable. */
26202 if (vcall_offset)
26204 if (TARGET_64BIT)
26205 tmp = gen_rtx_REG (DImode, R10_REG);
26206 else
26208 int tmp_regno = CX_REG;
26209 if (lookup_attribute ("fastcall",
26210 TYPE_ATTRIBUTES (TREE_TYPE (function))))
26211 tmp_regno = AX_REG;
26212 tmp = gen_rtx_REG (SImode, tmp_regno);
26215 xops[0] = gen_rtx_MEM (Pmode, this_reg);
26216 xops[1] = tmp;
26217 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26219 /* Adjust the this parameter. */
26220 xops[0] = gen_rtx_MEM (Pmode, plus_constant (tmp, vcall_offset));
26221 if (TARGET_64BIT && !memory_operand (xops[0], Pmode))
26223 rtx tmp2 = gen_rtx_REG (DImode, R11_REG);
26224 xops[0] = GEN_INT (vcall_offset);
26225 xops[1] = tmp2;
26226 output_asm_insn ("mov{q}\t{%0, %1|%1, %0}", xops);
26227 xops[0] = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, tmp, tmp2));
26229 xops[1] = this_reg;
26230 output_asm_insn ("add%z1\t{%0, %1|%1, %0}", xops);
26233 /* If necessary, drop THIS back to its stack slot. */
26234 if (this_reg && this_reg != this_param)
26236 xops[0] = this_reg;
26237 xops[1] = this_param;
26238 output_asm_insn ("mov%z1\t{%0, %1|%1, %0}", xops);
26241 xops[0] = XEXP (DECL_RTL (function), 0);
26242 if (TARGET_64BIT)
26244 if (!flag_pic || (*targetm.binds_local_p) (function))
26245 output_asm_insn ("jmp\t%P0", xops);
26246 /* All thunks should be in the same object as their target,
26247 and thus binds_local_p should be true. */
26248 else if (TARGET_64BIT && cfun->machine->call_abi == MS_ABI)
26249 gcc_unreachable ();
26250 else
26252 tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, xops[0]), UNSPEC_GOTPCREL);
26253 tmp = gen_rtx_CONST (Pmode, tmp);
26254 tmp = gen_rtx_MEM (QImode, tmp);
26255 xops[0] = tmp;
26256 output_asm_insn ("jmp\t%A0", xops);
26259 else
26261 if (!flag_pic || (*targetm.binds_local_p) (function))
26262 output_asm_insn ("jmp\t%P0", xops);
26263 else
26264 #if TARGET_MACHO
26265 if (TARGET_MACHO)
26267 rtx sym_ref = XEXP (DECL_RTL (function), 0);
26268 tmp = (gen_rtx_SYMBOL_REF
26269 (Pmode,
26270 machopic_indirection_name (sym_ref, /*stub_p=*/true)));
26271 tmp = gen_rtx_MEM (QImode, tmp);
26272 xops[0] = tmp;
26273 output_asm_insn ("jmp\t%0", xops);
26275 else
26276 #endif /* TARGET_MACHO */
26278 tmp = gen_rtx_REG (SImode, CX_REG);
26279 output_set_got (tmp, NULL_RTX);
26281 xops[1] = tmp;
26282 output_asm_insn ("mov{l}\t{%0@GOT(%1), %1|%1, %0@GOT[%1]}", xops);
26283 output_asm_insn ("jmp\t{*}%1", xops);
26288 static void
26289 x86_file_start (void)
26291 default_file_start ();
26292 #if TARGET_MACHO
26293 darwin_file_start ();
26294 #endif
26295 if (X86_FILE_START_VERSION_DIRECTIVE)
26296 fputs ("\t.version\t\"01.01\"\n", asm_out_file);
26297 if (X86_FILE_START_FLTUSED)
26298 fputs ("\t.global\t__fltused\n", asm_out_file);
26299 if (ix86_asm_dialect == ASM_INTEL)
26300 fputs ("\t.intel_syntax noprefix\n", asm_out_file);
26304 x86_field_alignment (tree field, int computed)
26306 enum machine_mode mode;
26307 tree type = TREE_TYPE (field);
26309 if (TARGET_64BIT || TARGET_ALIGN_DOUBLE)
26310 return computed;
26311 mode = TYPE_MODE (strip_array_types (type));
26312 if (mode == DFmode || mode == DCmode
26313 || GET_MODE_CLASS (mode) == MODE_INT
26314 || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
26315 return MIN (32, computed);
26316 return computed;
26319 /* Output assembler code to FILE to increment profiler label # LABELNO
26320 for profiling a function entry. */
26321 void
26322 x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
26324 if (TARGET_64BIT)
26326 #ifndef NO_PROFILE_COUNTERS
26327 fprintf (file, "\tleaq\t" LPREFIX "P%d@(%%rip),%%r11\n", labelno);
26328 #endif
26330 if (DEFAULT_ABI == SYSV_ABI && flag_pic)
26331 fputs ("\tcall\t*" MCOUNT_NAME "@GOTPCREL(%rip)\n", file);
26332 else
26333 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26335 else if (flag_pic)
26337 #ifndef NO_PROFILE_COUNTERS
26338 fprintf (file, "\tleal\t" LPREFIX "P%d@GOTOFF(%%ebx),%%" PROFILE_COUNT_REGISTER "\n",
26339 labelno);
26340 #endif
26341 fputs ("\tcall\t*" MCOUNT_NAME "@GOT(%ebx)\n", file);
26343 else
26345 #ifndef NO_PROFILE_COUNTERS
26346 fprintf (file, "\tmovl\t$" LPREFIX "P%d,%%" PROFILE_COUNT_REGISTER "\n",
26347 labelno);
26348 #endif
26349 fputs ("\tcall\t" MCOUNT_NAME "\n", file);
26353 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26354 /* We don't have exact information about the insn sizes, but we may assume
26355 quite safely that we are informed about all 1 byte insns and memory
26356 address sizes. This is enough to eliminate unnecessary padding in
26357 99% of cases. */
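/* Illustrative consequences of the rules below: a non-sibling call through
   a symbol always counts as 5 bytes, an instruction whose computed length
   is 0 or 1 counts as 1 byte, and inline asm counts as 0 because its real
   size is unknown.  */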
26359 static int
26360 min_insn_size (rtx insn)
26362 int l = 0, len;
26364 if (!INSN_P (insn) || !active_insn_p (insn))
26365 return 0;
26367 /* Discard alignments we've emitted and jump table data. */
26368 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
26369 && XINT (PATTERN (insn), 1) == UNSPECV_ALIGN)
26370 return 0;
26371 if (JUMP_TABLE_DATA_P (insn))
26372 return 0;
26374 /* Important case - calls are always 5 bytes.
26375 It is common to have many calls in a row. */
26376 if (CALL_P (insn)
26377 && symbolic_reference_mentioned_p (PATTERN (insn))
26378 && !SIBLING_CALL_P (insn))
26379 return 5;
26380 len = get_attr_length (insn);
26381 if (len <= 1)
26382 return 1;
26384 /* For normal instructions we rely on get_attr_length being exact,
26385 with a few exceptions. */
26386 if (!JUMP_P (insn))
26388 enum attr_type type = get_attr_type (insn);
26390 switch (type)
26392 case TYPE_MULTI:
26393 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
26394 || asm_noperands (PATTERN (insn)) >= 0)
26395 return 0;
26396 break;
26397 case TYPE_OTHER:
26398 case TYPE_FCMP:
26399 break;
26400 default:
26401 /* Otherwise trust get_attr_length. */
26402 return len;
26405 l = get_attr_length_address (insn);
26406 if (l < 4 && symbolic_reference_mentioned_p (PATTERN (insn)))
26407 l = 4;
26409 if (l)
26410 return 1+l;
26411 else
26412 return 2;
26415 /* The AMD K8 core mispredicts jumps when there are more than 3 jumps in a
26416 16 byte window. */
26418 static void
26419 ix86_avoid_jump_mispredicts (void)
26421 rtx insn, start = get_insns ();
26422 int nbytes = 0, njumps = 0;
26423 int isjump = 0;
26425 /* Look for all minimal intervals of instructions containing 4 jumps.
26426 The intervals are bounded by START and INSN. NBYTES is the total
26427 size of the instructions in the interval, including INSN but not
26428 START. When NBYTES is smaller than 16 bytes, it is possible
26429 that the ends of START and INSN land in the same 16-byte page.
26431 The smallest page offset at which INSN can start is the case where START
26432 ends at offset 0; the offset of INSN is then NBYTES - sizeof (INSN).
26433 We add a p2align to the 16-byte window with maxskip 15 - NBYTES + sizeof (INSN). */
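/* Illustrative numbers (not from the original comment): when a fourth jump
   would land in the same window, the pad emitted below is
   15 - NBYTES + sizeof (INSN); e.g. with NBYTES == 12 and a 2-byte INSN
   that is 15 - 12 + 2 = 5 bytes of padding.  */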
26435 for (insn = start; insn; insn = NEXT_INSN (insn))
26437 int min_size;
26439 if (LABEL_P (insn))
26441 int align = label_to_alignment (insn);
26442 int max_skip = label_to_max_skip (insn);
26444 if (max_skip > 15)
26445 max_skip = 15;
26446 /* If align > 3, only up to 16 - max_skip - 1 bytes can be
26447 already in the current 16 byte page, because otherwise
26448 ASM_OUTPUT_MAX_SKIP_ALIGN could skip max_skip or fewer
26449 bytes to reach a 16 byte boundary. */
26450 if (align <= 0
26451 || (align <= 3 && max_skip != (1 << align) - 1))
26452 max_skip = 0;
26453 if (dump_file)
26454 fprintf (dump_file, "Label %i with max_skip %i\n",
26455 INSN_UID (insn), max_skip);
26456 if (max_skip)
26458 while (nbytes + max_skip >= 16)
26460 start = NEXT_INSN (start);
26461 if ((JUMP_P (start)
26462 && GET_CODE (PATTERN (start)) != ADDR_VEC
26463 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26464 || CALL_P (start))
26465 njumps--, isjump = 1;
26466 else
26467 isjump = 0;
26468 nbytes -= min_insn_size (start);
26471 continue;
26474 min_size = min_insn_size (insn);
26475 nbytes += min_size;
26476 if (dump_file)
26477 fprintf (dump_file, "Insn %i estimated to %i bytes\n",
26478 INSN_UID (insn), min_size);
26479 if ((JUMP_P (insn)
26480 && GET_CODE (PATTERN (insn)) != ADDR_VEC
26481 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
26482 || CALL_P (insn))
26483 njumps++;
26484 else
26485 continue;
26487 while (njumps > 3)
26489 start = NEXT_INSN (start);
26490 if ((JUMP_P (start)
26491 && GET_CODE (PATTERN (start)) != ADDR_VEC
26492 && GET_CODE (PATTERN (start)) != ADDR_DIFF_VEC)
26493 || CALL_P (start))
26494 njumps--, isjump = 1;
26495 else
26496 isjump = 0;
26497 nbytes -= min_insn_size (start);
26499 gcc_assert (njumps >= 0);
26500 if (dump_file)
26501 fprintf (dump_file, "Interval %i to %i has %i bytes\n",
26502 INSN_UID (start), INSN_UID (insn), nbytes);
26504 if (njumps == 3 && isjump && nbytes < 16)
26506 int padsize = 15 - nbytes + min_insn_size (insn);
26508 if (dump_file)
26509 fprintf (dump_file, "Padding insn %i by %i bytes!\n",
26510 INSN_UID (insn), padsize);
26511 emit_insn_before (gen_pad (GEN_INT (padsize)), insn);
26515 #endif
26517 /* AMD Athlon works faster
26518 when RET is not the destination of a conditional jump or directly preceded
26519 by another jump instruction. We avoid the penalty by inserting a NOP just
26520 before such RET instructions. */
26521 static void
26522 ix86_pad_returns (void)
26524 edge e;
26525 edge_iterator ei;
26527 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
26529 basic_block bb = e->src;
26530 rtx ret = BB_END (bb);
26531 rtx prev;
26532 bool replace = false;
26534 if (!JUMP_P (ret) || GET_CODE (PATTERN (ret)) != RETURN
26535 || optimize_bb_for_size_p (bb))
26536 continue;
26537 for (prev = PREV_INSN (ret); prev; prev = PREV_INSN (prev))
26538 if (active_insn_p (prev) || LABEL_P (prev))
26539 break;
26540 if (prev && LABEL_P (prev))
26542 edge e;
26543 edge_iterator ei;
26545 FOR_EACH_EDGE (e, ei, bb->preds)
26546 if (EDGE_FREQUENCY (e) && e->src->index >= 0
26547 && !(e->flags & EDGE_FALLTHRU))
26548 replace = true;
26550 if (!replace)
26552 prev = prev_active_insn (ret);
26553 if (prev
26554 && ((JUMP_P (prev) && any_condjump_p (prev))
26555 || CALL_P (prev)))
26556 replace = true;
26557 /* Empty functions get a branch mispredict even when the jump destination
26558 is not visible to us. */
26559 if (!prev && cfun->function_frequency > FUNCTION_FREQUENCY_UNLIKELY_EXECUTED)
26560 replace = true;
26562 if (replace)
26564 emit_jump_insn_before (gen_return_internal_long (), ret);
26565 delete_insn (ret);
26570 /* Implement machine specific optimizations. We implement padding of returns
26571 for K8 CPUs and a pass to avoid 4 jumps in a single 16 byte window. */
26572 static void
26573 ix86_reorg (void)
26575 if (optimize && optimize_function_for_speed_p (cfun))
26577 if (TARGET_PAD_RETURNS)
26578 ix86_pad_returns ();
26579 #ifdef ASM_OUTPUT_MAX_SKIP_PAD
26580 if (TARGET_FOUR_JUMP_LIMIT)
26581 ix86_avoid_jump_mispredicts ();
26582 #endif
26586 /* Return nonzero when a QImode register that must be represented via a REX
26587 prefix is used. */
26588 bool
26589 x86_extended_QIreg_mentioned_p (rtx insn)
26591 int i;
26592 extract_insn_cached (insn);
26593 for (i = 0; i < recog_data.n_operands; i++)
26594 if (REG_P (recog_data.operand[i])
26595 && REGNO (recog_data.operand[i]) > BX_REG)
26596 return true;
26597 return false;
26600 /* Return nonzero when P points to a register encoded via a REX prefix.
26601 Called via for_each_rtx. */
26602 static int
26603 extended_reg_mentioned_1 (rtx *p, void *data ATTRIBUTE_UNUSED)
26605 unsigned int regno;
26606 if (!REG_P (*p))
26607 return 0;
26608 regno = REGNO (*p);
26609 return REX_INT_REGNO_P (regno) || REX_SSE_REGNO_P (regno);
26612 /* Return true when INSN mentions register that must be encoded using REX
26613 prefix. */
26614 bool
26615 x86_extended_reg_mentioned_p (rtx insn)
26617 return for_each_rtx (INSN_P (insn) ? &PATTERN (insn) : &insn,
26618 extended_reg_mentioned_1, NULL);
26621 /* Generate an unsigned DImode/SImode to FP conversion. This is the same code
26622 optabs would emit if we didn't have TFmode patterns. */
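/* The negative path below uses the usual halving trick (described here for
   clarity; the name is not from the sources): for an input whose sign bit
   is set we compute i0 = (in >> 1) | (in & 1), convert that to FP, and
   double the result, so the value stays in signed range while OR-ing in
   the low bit keeps the final rounding correct.  */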
26624 void
26625 x86_emit_floatuns (rtx operands[2])
26627 rtx neglab, donelab, i0, i1, f0, in, out;
26628 enum machine_mode mode, inmode;
26630 inmode = GET_MODE (operands[1]);
26631 gcc_assert (inmode == SImode || inmode == DImode);
26633 out = operands[0];
26634 in = force_reg (inmode, operands[1]);
26635 mode = GET_MODE (out);
26636 neglab = gen_label_rtx ();
26637 donelab = gen_label_rtx ();
26638 f0 = gen_reg_rtx (mode);
26640 emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, inmode, 0, neglab);
26642 expand_float (out, in, 0);
26644 emit_jump_insn (gen_jump (donelab));
26645 emit_barrier ();
26647 emit_label (neglab);
26649 i0 = expand_simple_binop (inmode, LSHIFTRT, in, const1_rtx, NULL,
26650 1, OPTAB_DIRECT);
26651 i1 = expand_simple_binop (inmode, AND, in, const1_rtx, NULL,
26652 1, OPTAB_DIRECT);
26653 i0 = expand_simple_binop (inmode, IOR, i0, i1, i0, 1, OPTAB_DIRECT);
26655 expand_float (f0, i0, 0);
26657 emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
26659 emit_label (donelab);
26662 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26663 with all elements equal to VAR. Return true if successful. */
26665 static bool
26666 ix86_expand_vector_init_duplicate (bool mmx_ok, enum machine_mode mode,
26667 rtx target, rtx val)
26669 enum machine_mode hmode, smode, wsmode, wvmode;
26670 rtx x;
26672 switch (mode)
26674 case V2SImode:
26675 case V2SFmode:
26676 if (!mmx_ok)
26677 return false;
26678 /* FALLTHRU */
26680 case V2DFmode:
26681 case V2DImode:
26682 case V4SFmode:
26683 case V4SImode:
26684 val = force_reg (GET_MODE_INNER (mode), val);
26685 x = gen_rtx_VEC_DUPLICATE (mode, val);
26686 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26687 return true;
26689 case V4HImode:
26690 if (!mmx_ok)
26691 return false;
26692 if (TARGET_SSE || TARGET_3DNOW_A)
26694 val = gen_lowpart (SImode, val);
26695 x = gen_rtx_TRUNCATE (HImode, val);
26696 x = gen_rtx_VEC_DUPLICATE (mode, x);
26697 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26698 return true;
26700 else
26702 smode = HImode;
26703 wsmode = SImode;
26704 wvmode = V2SImode;
26705 goto widen;
26708 case V8QImode:
26709 if (!mmx_ok)
26710 return false;
26711 smode = QImode;
26712 wsmode = HImode;
26713 wvmode = V4HImode;
26714 goto widen;
26715 case V8HImode:
26716 if (TARGET_SSE2)
26718 rtx tmp1, tmp2;
26719 /* Extend HImode to SImode using a paradoxical SUBREG. */
26720 tmp1 = gen_reg_rtx (SImode);
26721 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26722 /* Insert the SImode value as low element of V4SImode vector. */
26723 tmp2 = gen_reg_rtx (V4SImode);
26724 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
26725 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
26726 CONST0_RTX (V4SImode),
26727 const1_rtx);
26728 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
26729 /* Cast the V4SImode vector back to a V8HImode vector. */
26730 tmp1 = gen_reg_rtx (V8HImode);
26731 emit_move_insn (tmp1, gen_lowpart (V8HImode, tmp2));
26732 /* Duplicate the low short through the whole low SImode word. */
26733 emit_insn (gen_sse2_punpcklwd (tmp1, tmp1, tmp1));
26734 /* Cast the V8HImode vector back to a V4SImode vector. */
26735 tmp2 = gen_reg_rtx (V4SImode);
26736 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
26737 /* Replicate the low element of the V4SImode vector. */
26738 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
26739 /* Cast the V4SImode vector back to V8HImode, and store in target. */
26740 emit_move_insn (target, gen_lowpart (V8HImode, tmp2));
26741 return true;
26743 smode = HImode;
26744 wsmode = SImode;
26745 wvmode = V4SImode;
26746 goto widen;
26747 case V16QImode:
26748 if (TARGET_SSE2)
26750 rtx tmp1, tmp2;
26751 /* Extend QImode to SImode using a paradoxical SUBREG. */
26752 tmp1 = gen_reg_rtx (SImode);
26753 emit_move_insn (tmp1, gen_lowpart (SImode, val));
26754 /* Insert the SImode value as low element of V4SImode vector. */
26755 tmp2 = gen_reg_rtx (V4SImode);
26756 tmp1 = gen_rtx_VEC_MERGE (V4SImode,
26757 gen_rtx_VEC_DUPLICATE (V4SImode, tmp1),
26758 CONST0_RTX (V4SImode),
26759 const1_rtx);
26760 emit_insn (gen_rtx_SET (VOIDmode, tmp2, tmp1));
26761 /* Cast the V4SImode vector back to a V16QImode vector. */
26762 tmp1 = gen_reg_rtx (V16QImode);
26763 emit_move_insn (tmp1, gen_lowpart (V16QImode, tmp2));
26764 /* Duplicate the low byte through the whole low SImode word. */
26765 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
26766 emit_insn (gen_sse2_punpcklbw (tmp1, tmp1, tmp1));
26767 /* Cast the V16QImode vector back to a V4SImode vector. */
26768 tmp2 = gen_reg_rtx (V4SImode);
26769 emit_move_insn (tmp2, gen_lowpart (V4SImode, tmp1));
26770 /* Replicate the low element of the V4SImode vector. */
26771 emit_insn (gen_sse2_pshufd (tmp2, tmp2, const0_rtx));
26772 /* Cast the V4SImode vector back to V16QImode, and store in target. */
26773 emit_move_insn (target, gen_lowpart (V16QImode, tmp2));
26774 return true;
26776 smode = QImode;
26777 wsmode = HImode;
26778 wvmode = V8HImode;
26779 goto widen;
26780 widen:
26781 /* Replicate the value once into the next wider mode and recurse. */
26782 val = convert_modes (wsmode, smode, val, true);
26783 x = expand_simple_binop (wsmode, ASHIFT, val,
26784 GEN_INT (GET_MODE_BITSIZE (smode)),
26785 NULL_RTX, 1, OPTAB_LIB_WIDEN);
26786 val = expand_simple_binop (wsmode, IOR, val, x, x, 1, OPTAB_LIB_WIDEN);
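/* E.g. when widening QImode to HImode this turns the byte V into
   (V << 8) | V, so the recursive call only has to duplicate half as many,
   twice as wide, elements.  */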
26788 x = gen_reg_rtx (wvmode);
26789 if (!ix86_expand_vector_init_duplicate (mmx_ok, wvmode, x, val))
26790 gcc_unreachable ();
26791 emit_move_insn (target, gen_lowpart (mode, x));
26792 return true;
26794 case V4DFmode:
26795 hmode = V2DFmode;
26796 goto half;
26797 case V4DImode:
26798 hmode = V2DImode;
26799 goto half;
26800 case V8SFmode:
26801 hmode = V4SFmode;
26802 goto half;
26803 case V8SImode:
26804 hmode = V4SImode;
26805 goto half;
26806 case V16HImode:
26807 hmode = V8HImode;
26808 goto half;
26809 case V32QImode:
26810 hmode = V16QImode;
26811 goto half;
26812 half:
26814 rtx tmp = gen_reg_rtx (hmode);
26815 ix86_expand_vector_init_duplicate (mmx_ok, hmode, tmp, val);
26816 emit_insn (gen_rtx_SET (VOIDmode, target,
26817 gen_rtx_VEC_CONCAT (mode, tmp, tmp)));
26819 return true;
26821 default:
26822 return false;
26826 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26827 whose ONE_VAR element is VAR, and other elements are zero. Return true
26828 if successful. */
26830 static bool
26831 ix86_expand_vector_init_one_nonzero (bool mmx_ok, enum machine_mode mode,
26832 rtx target, rtx var, int one_var)
26834 enum machine_mode vsimode;
26835 rtx new_target;
26836 rtx x, tmp;
26837 bool use_vector_set = false;
26839 switch (mode)
26841 case V2DImode:
26842 /* For SSE4.1, we normally use vector set. But if the second
26843 element is zero and inter-unit moves are OK, we use movq
26844 instead. */
26845 use_vector_set = (TARGET_64BIT
26846 && TARGET_SSE4_1
26847 && !(TARGET_INTER_UNIT_MOVES
26848 && one_var == 0));
26849 break;
26850 case V16QImode:
26851 case V4SImode:
26852 case V4SFmode:
26853 use_vector_set = TARGET_SSE4_1;
26854 break;
26855 case V8HImode:
26856 use_vector_set = TARGET_SSE2;
26857 break;
26858 case V4HImode:
26859 use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
26860 break;
26861 case V32QImode:
26862 case V16HImode:
26863 case V8SImode:
26864 case V8SFmode:
26865 case V4DFmode:
26866 use_vector_set = TARGET_AVX;
26867 break;
26868 case V4DImode:
26869 /* Use ix86_expand_vector_set in 64bit mode only. */
26870 use_vector_set = TARGET_AVX && TARGET_64BIT;
26871 break;
26872 default:
26873 break;
26876 if (use_vector_set)
26878 emit_insn (gen_rtx_SET (VOIDmode, target, CONST0_RTX (mode)));
26879 var = force_reg (GET_MODE_INNER (mode), var);
26880 ix86_expand_vector_set (mmx_ok, target, var, one_var);
26881 return true;
26884 switch (mode)
26886 case V2SFmode:
26887 case V2SImode:
26888 if (!mmx_ok)
26889 return false;
26890 /* FALLTHRU */
26892 case V2DFmode:
26893 case V2DImode:
26894 if (one_var != 0)
26895 return false;
26896 var = force_reg (GET_MODE_INNER (mode), var);
26897 x = gen_rtx_VEC_CONCAT (mode, var, CONST0_RTX (GET_MODE_INNER (mode)));
26898 emit_insn (gen_rtx_SET (VOIDmode, target, x));
26899 return true;
26901 case V4SFmode:
26902 case V4SImode:
26903 if (!REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
26904 new_target = gen_reg_rtx (mode);
26905 else
26906 new_target = target;
26907 var = force_reg (GET_MODE_INNER (mode), var);
26908 x = gen_rtx_VEC_DUPLICATE (mode, var);
26909 x = gen_rtx_VEC_MERGE (mode, x, CONST0_RTX (mode), const1_rtx);
26910 emit_insn (gen_rtx_SET (VOIDmode, new_target, x));
26911 if (one_var != 0)
26913 /* We need to shuffle the value to the correct position, so
26914 create a new pseudo to store the intermediate result. */
26916 /* With SSE2, we can use the integer shuffle insns. */
26917 if (mode != V4SFmode && TARGET_SSE2)
26919 emit_insn (gen_sse2_pshufd_1 (new_target, new_target,
26920 const1_rtx,
26921 GEN_INT (one_var == 1 ? 0 : 1),
26922 GEN_INT (one_var == 2 ? 0 : 1),
26923 GEN_INT (one_var == 3 ? 0 : 1)));
26924 if (target != new_target)
26925 emit_move_insn (target, new_target);
26926 return true;
26929 /* Otherwise convert the intermediate result to V4SFmode and
26930 use the SSE1 shuffle instructions. */
26931 if (mode != V4SFmode)
26933 tmp = gen_reg_rtx (V4SFmode);
26934 emit_move_insn (tmp, gen_lowpart (V4SFmode, new_target));
26936 else
26937 tmp = new_target;
26939 emit_insn (gen_sse_shufps_v4sf (tmp, tmp, tmp,
26940 const1_rtx,
26941 GEN_INT (one_var == 1 ? 0 : 1),
26942 GEN_INT (one_var == 2 ? 0+4 : 1+4),
26943 GEN_INT (one_var == 3 ? 0+4 : 1+4)));
26945 if (mode != V4SFmode)
26946 emit_move_insn (target, gen_lowpart (V4SImode, tmp));
26947 else if (tmp != target)
26948 emit_move_insn (target, tmp);
26950 else if (target != new_target)
26951 emit_move_insn (target, new_target);
26952 return true;
26954 case V8HImode:
26955 case V16QImode:
26956 vsimode = V4SImode;
26957 goto widen;
26958 case V4HImode:
26959 case V8QImode:
26960 if (!mmx_ok)
26961 return false;
26962 vsimode = V2SImode;
26963 goto widen;
26964 widen:
26965 if (one_var != 0)
26966 return false;
26968 /* Zero extend the variable element to SImode and recurse. */
26969 var = convert_modes (SImode, GET_MODE_INNER (mode), var, true);
26971 x = gen_reg_rtx (vsimode);
26972 if (!ix86_expand_vector_init_one_nonzero (mmx_ok, vsimode, x,
26973 var, one_var))
26974 gcc_unreachable ();
26976 emit_move_insn (target, gen_lowpart (mode, x));
26977 return true;
26979 default:
26980 return false;
26984 /* A subroutine of ix86_expand_vector_init. Store into TARGET a vector
26985 consisting of the values in VALS. It is known that all elements
26986 except ONE_VAR are constants. Return true if successful. */
26988 static bool
26989 ix86_expand_vector_init_one_var (bool mmx_ok, enum machine_mode mode,
26990 rtx target, rtx vals, int one_var)
26992 rtx var = XVECEXP (vals, 0, one_var);
26993 enum machine_mode wmode;
26994 rtx const_vec, x;
26996 const_vec = copy_rtx (vals);
26997 XVECEXP (const_vec, 0, one_var) = CONST0_RTX (GET_MODE_INNER (mode));
26998 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (const_vec, 0));
27000 switch (mode)
27002 case V2DFmode:
27003 case V2DImode:
27004 case V2SFmode:
27005 case V2SImode:
27006 /* For the two element vectors, it's just as easy to use
27007 the general case. */
27008 return false;
27010 case V4DImode:
27011 /* Use ix86_expand_vector_set in 64bit mode only. */
27012 if (!TARGET_64BIT)
27013 return false;
27014 case V4DFmode:
27015 case V8SFmode:
27016 case V8SImode:
27017 case V16HImode:
27018 case V32QImode:
27019 case V4SFmode:
27020 case V4SImode:
27021 case V8HImode:
27022 case V4HImode:
27023 break;
27025 case V16QImode:
27026 if (TARGET_SSE4_1)
27027 break;
27028 wmode = V8HImode;
27029 goto widen;
27030 case V8QImode:
27031 wmode = V4HImode;
27032 goto widen;
27033 widen:
27034 /* There's no way to set one QImode entry easily. Combine
27035 the variable value with its adjacent constant value, and
27036 promote to an HImode set. */
27037 x = XVECEXP (vals, 0, one_var ^ 1);
27038 if (one_var & 1)
27040 var = convert_modes (HImode, QImode, var, true);
27041 var = expand_simple_binop (HImode, ASHIFT, var, GEN_INT (8),
27042 NULL_RTX, 1, OPTAB_LIB_WIDEN);
27043 x = GEN_INT (INTVAL (x) & 0xff);
27045 else
27047 var = convert_modes (HImode, QImode, var, true);
27048 x = gen_int_mode (INTVAL (x) << 8, HImode);
27050 if (x != const0_rtx)
27051 var = expand_simple_binop (HImode, IOR, var, x, var,
27052 1, OPTAB_LIB_WIDEN);
27054 x = gen_reg_rtx (wmode);
27055 emit_move_insn (x, gen_lowpart (wmode, const_vec));
27056 ix86_expand_vector_set (mmx_ok, x, var, one_var >> 1);
27058 emit_move_insn (target, gen_lowpart (mode, x));
27059 return true;
27061 default:
27062 return false;
27065 emit_move_insn (target, const_vec);
27066 ix86_expand_vector_set (mmx_ok, target, var, one_var);
27067 return true;
27070 /* A subroutine of ix86_expand_vector_init_general. Use vector
27071 concatenate to handle the most general case: all values variable,
27072 and none identical. */
27074 static void
27075 ix86_expand_vector_init_concat (enum machine_mode mode,
27076 rtx target, rtx *ops, int n)
27078 enum machine_mode cmode, hmode = VOIDmode;
27079 rtx first[8], second[4];
27080 rtvec v;
27081 int i, j;
27083 switch (n)
27085 case 2:
27086 switch (mode)
27088 case V8SImode:
27089 cmode = V4SImode;
27090 break;
27091 case V8SFmode:
27092 cmode = V4SFmode;
27093 break;
27094 case V4DImode:
27095 cmode = V2DImode;
27096 break;
27097 case V4DFmode:
27098 cmode = V2DFmode;
27099 break;
27100 case V4SImode:
27101 cmode = V2SImode;
27102 break;
27103 case V4SFmode:
27104 cmode = V2SFmode;
27105 break;
27106 case V2DImode:
27107 cmode = DImode;
27108 break;
27109 case V2SImode:
27110 cmode = SImode;
27111 break;
27112 case V2DFmode:
27113 cmode = DFmode;
27114 break;
27115 case V2SFmode:
27116 cmode = SFmode;
27117 break;
27118 default:
27119 gcc_unreachable ();
27122 if (!register_operand (ops[1], cmode))
27123 ops[1] = force_reg (cmode, ops[1]);
27124 if (!register_operand (ops[0], cmode))
27125 ops[0] = force_reg (cmode, ops[0]);
27126 emit_insn (gen_rtx_SET (VOIDmode, target,
27127 gen_rtx_VEC_CONCAT (mode, ops[0],
27128 ops[1])));
27129 break;
27131 case 4:
27132 switch (mode)
27134 case V4DImode:
27135 cmode = V2DImode;
27136 break;
27137 case V4DFmode:
27138 cmode = V2DFmode;
27139 break;
27140 case V4SImode:
27141 cmode = V2SImode;
27142 break;
27143 case V4SFmode:
27144 cmode = V2SFmode;
27145 break;
27146 default:
27147 gcc_unreachable ();
27149 goto half;
27151 case 8:
27152 switch (mode)
27154 case V8SImode:
27155 cmode = V2SImode;
27156 hmode = V4SImode;
27157 break;
27158 case V8SFmode:
27159 cmode = V2SFmode;
27160 hmode = V4SFmode;
27161 break;
27162 default:
27163 gcc_unreachable ();
27165 goto half;
27167 half:
27168 /* FIXME: We process inputs backward to help RA. PR 36222. */
27169 i = n - 1;
27170 j = (n >> 1) - 1;
27171 for (; i > 0; i -= 2, j--)
27173 first[j] = gen_reg_rtx (cmode);
27174 v = gen_rtvec (2, ops[i - 1], ops[i]);
27175 ix86_expand_vector_init (false, first[j],
27176 gen_rtx_PARALLEL (cmode, v));
27179 n >>= 1;
27180 if (n > 2)
27182 gcc_assert (hmode != VOIDmode);
27183 for (i = j = 0; i < n; i += 2, j++)
27185 second[j] = gen_reg_rtx (hmode);
27186 ix86_expand_vector_init_concat (hmode, second [j],
27187 &first [i], 2);
27189 n >>= 1;
27190 ix86_expand_vector_init_concat (mode, target, second, n);
27192 else
27193 ix86_expand_vector_init_concat (mode, target, first, n);
27194 break;
27196 default:
27197 gcc_unreachable ();
27201 /* A subroutine of ix86_expand_vector_init_general. Use vector
27202 interleave to handle the most general case: all values variable,
27203 and none identical. */
27205 static void
27206 ix86_expand_vector_init_interleave (enum machine_mode mode,
27207 rtx target, rtx *ops, int n)
27209 enum machine_mode first_imode, second_imode, third_imode, inner_mode;
27210 int i, j;
27211 rtx op0, op1;
27212 rtx (*gen_load_even) (rtx, rtx, rtx);
27213 rtx (*gen_interleave_first_low) (rtx, rtx, rtx);
27214 rtx (*gen_interleave_second_low) (rtx, rtx, rtx);
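/* Strategy: pack each adjacent pair of scalar elements into the two low
   lanes of its own vector, then repeatedly interleave the low halves
   (punpckl*) of those vectors until a single vector of MODE remains.  */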
27216 switch (mode)
27218 case V8HImode:
27219 gen_load_even = gen_vec_setv8hi;
27220 gen_interleave_first_low = gen_vec_interleave_lowv4si;
27221 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27222 inner_mode = HImode;
27223 first_imode = V4SImode;
27224 second_imode = V2DImode;
27225 third_imode = VOIDmode;
27226 break;
27227 case V16QImode:
27228 gen_load_even = gen_vec_setv16qi;
27229 gen_interleave_first_low = gen_vec_interleave_lowv8hi;
27230 gen_interleave_second_low = gen_vec_interleave_lowv4si;
27231 inner_mode = QImode;
27232 first_imode = V8HImode;
27233 second_imode = V4SImode;
27234 third_imode = V2DImode;
27235 break;
27236 default:
27237 gcc_unreachable ();
27240 for (i = 0; i < n; i++)
27242 /* Extend the odd element to SImode using a paradoxical SUBREG. */
27243 op0 = gen_reg_rtx (SImode);
27244 emit_move_insn (op0, gen_lowpart (SImode, ops [i + i]));
27246 /* Insert the SImode value as low element of V4SImode vector. */
27247 op1 = gen_reg_rtx (V4SImode);
27248 op0 = gen_rtx_VEC_MERGE (V4SImode,
27249 gen_rtx_VEC_DUPLICATE (V4SImode,
27250 op0),
27251 CONST0_RTX (V4SImode),
27252 const1_rtx);
27253 emit_insn (gen_rtx_SET (VOIDmode, op1, op0));
27255 /* Cast the V4SImode vector back to a vector in the original mode. */
27256 op0 = gen_reg_rtx (mode);
27257 emit_move_insn (op0, gen_lowpart (mode, op1));
27259 /* Load even elements into the second position. */
27260 emit_insn ((*gen_load_even) (op0,
27261 force_reg (inner_mode,
27262 ops [i + i + 1]),
27263 const1_rtx));
27265 /* Cast vector to FIRST_IMODE vector. */
27266 ops[i] = gen_reg_rtx (first_imode);
27267 emit_move_insn (ops[i], gen_lowpart (first_imode, op0));
27270 /* Interleave low FIRST_IMODE vectors. */
27271 for (i = j = 0; i < n; i += 2, j++)
27273 op0 = gen_reg_rtx (first_imode);
27274 emit_insn ((*gen_interleave_first_low) (op0, ops[i], ops[i + 1]));
27276 /* Cast FIRST_IMODE vector to SECOND_IMODE vector. */
27277 ops[j] = gen_reg_rtx (second_imode);
27278 emit_move_insn (ops[j], gen_lowpart (second_imode, op0));
27281 /* Interleave low SECOND_IMODE vectors. */
27282 switch (second_imode)
27284 case V4SImode:
27285 for (i = j = 0; i < n / 2; i += 2, j++)
27287 op0 = gen_reg_rtx (second_imode);
27288 emit_insn ((*gen_interleave_second_low) (op0, ops[i],
27289 ops[i + 1]));
27291 /* Cast the SECOND_IMODE vector to the THIRD_IMODE
27292 vector. */
27293 ops[j] = gen_reg_rtx (third_imode);
27294 emit_move_insn (ops[j], gen_lowpart (third_imode, op0));
27296 second_imode = V2DImode;
27297 gen_interleave_second_low = gen_vec_interleave_lowv2di;
27298 /* FALLTHRU */
27300 case V2DImode:
27301 op0 = gen_reg_rtx (second_imode);
27302 emit_insn ((*gen_interleave_second_low) (op0, ops[0],
27303 ops[1]));
27305 /* Cast the SECOND_IMODE vector back to a vector in the original
27306 mode. */
27307 emit_insn (gen_rtx_SET (VOIDmode, target,
27308 gen_lowpart (mode, op0)));
27309 break;
27311 default:
27312 gcc_unreachable ();
27316 /* A subroutine of ix86_expand_vector_init. Handle the most general case:
27317 all values variable, and none identical. */
27319 static void
27320 ix86_expand_vector_init_general (bool mmx_ok, enum machine_mode mode,
27321 rtx target, rtx vals)
27323 rtx ops[32], op0, op1;
27324 enum machine_mode half_mode = VOIDmode;
27325 int n, i;
27327 switch (mode)
27329 case V2SFmode:
27330 case V2SImode:
27331 if (!mmx_ok && !TARGET_SSE)
27332 break;
27333 /* FALLTHRU */
27335 case V8SFmode:
27336 case V8SImode:
27337 case V4DFmode:
27338 case V4DImode:
27339 case V4SFmode:
27340 case V4SImode:
27341 case V2DFmode:
27342 case V2DImode:
27343 n = GET_MODE_NUNITS (mode);
27344 for (i = 0; i < n; i++)
27345 ops[i] = XVECEXP (vals, 0, i);
27346 ix86_expand_vector_init_concat (mode, target, ops, n);
27347 return;
27349 case V32QImode:
27350 half_mode = V16QImode;
27351 goto half;
27353 case V16HImode:
27354 half_mode = V8HImode;
27355 goto half;
27357 half:
27358 n = GET_MODE_NUNITS (mode);
27359 for (i = 0; i < n; i++)
27360 ops[i] = XVECEXP (vals, 0, i);
27361 op0 = gen_reg_rtx (half_mode);
27362 op1 = gen_reg_rtx (half_mode);
27363 ix86_expand_vector_init_interleave (half_mode, op0, ops,
27364 n >> 2);
27365 ix86_expand_vector_init_interleave (half_mode, op1,
27366 &ops [n >> 1], n >> 2);
27367 emit_insn (gen_rtx_SET (VOIDmode, target,
27368 gen_rtx_VEC_CONCAT (mode, op0, op1)));
27369 return;
27371 case V16QImode:
27372 if (!TARGET_SSE4_1)
27373 break;
27374 /* FALLTHRU */
27376 case V8HImode:
27377 if (!TARGET_SSE2)
27378 break;
27380 /* Don't use ix86_expand_vector_init_interleave if we can't
27381 move from GPR to SSE register directly. */
27382 if (!TARGET_INTER_UNIT_MOVES)
27383 break;
27385 n = GET_MODE_NUNITS (mode);
27386 for (i = 0; i < n; i++)
27387 ops[i] = XVECEXP (vals, 0, i);
27388 ix86_expand_vector_init_interleave (mode, target, ops, n >> 1);
27389 return;
27391 case V4HImode:
27392 case V8QImode:
27393 break;
27395 default:
27396 gcc_unreachable ();
27400 int i, j, n_elts, n_words, n_elt_per_word;
27401 enum machine_mode inner_mode;
27402 rtx words[4], shift;
27404 inner_mode = GET_MODE_INNER (mode);
27405 n_elts = GET_MODE_NUNITS (mode);
27406 n_words = GET_MODE_SIZE (mode) / UNITS_PER_WORD;
27407 n_elt_per_word = n_elts / n_words;
27408 shift = GEN_INT (GET_MODE_BITSIZE (inner_mode));
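/* Pack the elements of each word with shifts and IORs.  E.g. for V4HImode
   with SImode words, word I becomes (elt[2*I+1] << 16) | elt[2*I], putting
   the lower-numbered element in the low bits to match the little-endian
   element layout that gen_lowpart expects.  */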
27410 for (i = 0; i < n_words; ++i)
27412 rtx word = NULL_RTX;
27414 for (j = 0; j < n_elt_per_word; ++j)
27416 rtx elt = XVECEXP (vals, 0, (i+1)*n_elt_per_word - j - 1);
27417 elt = convert_modes (word_mode, inner_mode, elt, true);
27419 if (j == 0)
27420 word = elt;
27421 else
27423 word = expand_simple_binop (word_mode, ASHIFT, word, shift,
27424 word, 1, OPTAB_LIB_WIDEN);
27425 word = expand_simple_binop (word_mode, IOR, word, elt,
27426 word, 1, OPTAB_LIB_WIDEN);
27430 words[i] = word;
27433 if (n_words == 1)
27434 emit_move_insn (target, gen_lowpart (mode, words[0]));
27435 else if (n_words == 2)
27437 rtx tmp = gen_reg_rtx (mode);
27438 emit_clobber (tmp);
27439 emit_move_insn (gen_lowpart (word_mode, tmp), words[0]);
27440 emit_move_insn (gen_highpart (word_mode, tmp), words[1]);
27441 emit_move_insn (target, tmp);
27443 else if (n_words == 4)
27445 rtx tmp = gen_reg_rtx (V4SImode);
27446 gcc_assert (word_mode == SImode);
27447 vals = gen_rtx_PARALLEL (V4SImode, gen_rtvec_v (4, words));
27448 ix86_expand_vector_init_general (false, V4SImode, tmp, vals);
27449 emit_move_insn (target, gen_lowpart (mode, tmp));
27451 else
27452 gcc_unreachable ();
27456 /* Initialize vector TARGET via VALS. Suppress the use of MMX
27457 instructions unless MMX_OK is true. */
27459 void
27460 ix86_expand_vector_init (bool mmx_ok, rtx target, rtx vals)
27462 enum machine_mode mode = GET_MODE (target);
27463 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27464 int n_elts = GET_MODE_NUNITS (mode);
27465 int n_var = 0, one_var = -1;
27466 bool all_same = true, all_const_zero = true;
27467 int i;
27468 rtx x;
27470 for (i = 0; i < n_elts; ++i)
27472 x = XVECEXP (vals, 0, i);
27473 if (!(CONST_INT_P (x)
27474 || GET_CODE (x) == CONST_DOUBLE
27475 || GET_CODE (x) == CONST_FIXED))
27476 n_var++, one_var = i;
27477 else if (x != CONST0_RTX (inner_mode))
27478 all_const_zero = false;
27479 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
27480 all_same = false;
27483 /* Constants are best loaded from the constant pool. */
27484 if (n_var == 0)
27486 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
27487 return;
27490 /* If all values are identical, broadcast the value. */
27491 if (all_same
27492 && ix86_expand_vector_init_duplicate (mmx_ok, mode, target,
27493 XVECEXP (vals, 0, 0)))
27494 return;
27496 /* Values where only one field is non-constant are best loaded from
27497 the pool and overwritten via move later. */
27498 if (n_var == 1)
27500 if (all_const_zero
27501 && ix86_expand_vector_init_one_nonzero (mmx_ok, mode, target,
27502 XVECEXP (vals, 0, one_var),
27503 one_var))
27504 return;
27506 if (ix86_expand_vector_init_one_var (mmx_ok, mode, target, vals, one_var))
27507 return;
27510 ix86_expand_vector_init_general (mmx_ok, mode, target, vals);
27513 void
27514 ix86_expand_vector_set (bool mmx_ok, rtx target, rtx val, int elt)
27516 enum machine_mode mode = GET_MODE (target);
27517 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27518 enum machine_mode half_mode;
27519 bool use_vec_merge = false;
27520 rtx tmp;
27521 static rtx (*gen_extract[6][2]) (rtx, rtx)
27523 { gen_vec_extract_lo_v32qi, gen_vec_extract_hi_v32qi },
27524 { gen_vec_extract_lo_v16hi, gen_vec_extract_hi_v16hi },
27525 { gen_vec_extract_lo_v8si, gen_vec_extract_hi_v8si },
27526 { gen_vec_extract_lo_v4di, gen_vec_extract_hi_v4di },
27527 { gen_vec_extract_lo_v8sf, gen_vec_extract_hi_v8sf },
27528 { gen_vec_extract_lo_v4df, gen_vec_extract_hi_v4df }
27530 static rtx (*gen_insert[6][2]) (rtx, rtx, rtx)
27532 { gen_vec_set_lo_v32qi, gen_vec_set_hi_v32qi },
27533 { gen_vec_set_lo_v16hi, gen_vec_set_hi_v16hi },
27534 { gen_vec_set_lo_v8si, gen_vec_set_hi_v8si },
27535 { gen_vec_set_lo_v4di, gen_vec_set_hi_v4di },
27536 { gen_vec_set_lo_v8sf, gen_vec_set_hi_v8sf },
27537 { gen_vec_set_lo_v4df, gen_vec_set_hi_v4df }
27539 int i, j, n;
27541 switch (mode)
27543 case V2SFmode:
27544 case V2SImode:
27545 if (mmx_ok)
27547 tmp = gen_reg_rtx (GET_MODE_INNER (mode));
27548 ix86_expand_vector_extract (true, tmp, target, 1 - elt);
27549 if (elt == 0)
27550 tmp = gen_rtx_VEC_CONCAT (mode, tmp, val);
27551 else
27552 tmp = gen_rtx_VEC_CONCAT (mode, val, tmp);
27553 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27554 return;
27556 break;
27558 case V2DImode:
27559 use_vec_merge = TARGET_SSE4_1;
27560 if (use_vec_merge)
27561 break;
27563 case V2DFmode:
27565 rtx op0, op1;
27567 /* For the two element vectors, we implement a VEC_CONCAT with
27568 the extraction of the other element. */
27570 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (1 - elt)));
27571 tmp = gen_rtx_VEC_SELECT (inner_mode, target, tmp);
27573 if (elt == 0)
27574 op0 = val, op1 = tmp;
27575 else
27576 op0 = tmp, op1 = val;
27578 tmp = gen_rtx_VEC_CONCAT (mode, op0, op1);
27579 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27581 return;
27583 case V4SFmode:
27584 use_vec_merge = TARGET_SSE4_1;
27585 if (use_vec_merge)
27586 break;
27588 switch (elt)
27590 case 0:
27591 use_vec_merge = true;
27592 break;
27594 case 1:
27595 /* tmp = target = A B C D */
27596 tmp = copy_to_reg (target);
27597 /* target = A A B B */
27598 emit_insn (gen_sse_unpcklps (target, target, target));
27599 /* target = X A B B */
27600 ix86_expand_vector_set (false, target, val, 0);
27601 /* target = A X C D */
27602 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27603 const1_rtx, const0_rtx,
27604 GEN_INT (2+4), GEN_INT (3+4)));
27605 return;
27607 case 2:
27608 /* tmp = target = A B C D */
27609 tmp = copy_to_reg (target);
27610 /* tmp = X B C D */
27611 ix86_expand_vector_set (false, tmp, val, 0);
27612 /* target = A B X D */
27613 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27614 const0_rtx, const1_rtx,
27615 GEN_INT (0+4), GEN_INT (3+4)));
27616 return;
27618 case 3:
27619 /* tmp = target = A B C D */
27620 tmp = copy_to_reg (target);
27621 /* tmp = X B C D */
27622 ix86_expand_vector_set (false, tmp, val, 0);
27623 /* target = A B C X */
27624 emit_insn (gen_sse_shufps_v4sf (target, target, tmp,
27625 const0_rtx, const1_rtx,
27626 GEN_INT (2+4), GEN_INT (0+4)));
27627 return;
27629 default:
27630 gcc_unreachable ();
27632 break;
27634 case V4SImode:
27635 use_vec_merge = TARGET_SSE4_1;
27636 if (use_vec_merge)
27637 break;
27639 /* Element 0 handled by vec_merge below. */
27640 if (elt == 0)
27642 use_vec_merge = true;
27643 break;
27646 if (TARGET_SSE2)
27648 /* With SSE2, use integer shuffles to swap element 0 and ELT,
27649 store into element 0, then shuffle them back. */
27651 rtx order[4];
27653 order[0] = GEN_INT (elt);
27654 order[1] = const1_rtx;
27655 order[2] = const2_rtx;
27656 order[3] = GEN_INT (3);
27657 order[elt] = const0_rtx;
27659 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27660 order[1], order[2], order[3]));
27662 ix86_expand_vector_set (false, target, val, 0);
27664 emit_insn (gen_sse2_pshufd_1 (target, target, order[0],
27665 order[1], order[2], order[3]));
27667 else
27669 /* For SSE1, we have to reuse the V4SF code. */
27670 ix86_expand_vector_set (false, gen_lowpart (V4SFmode, target),
27671 gen_lowpart (SFmode, val), elt);
27673 return;
27675 case V8HImode:
27676 use_vec_merge = TARGET_SSE2;
27677 break;
27678 case V4HImode:
27679 use_vec_merge = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27680 break;
27682 case V16QImode:
27683 use_vec_merge = TARGET_SSE4_1;
27684 break;
27686 case V8QImode:
27687 break;
27689 case V32QImode:
27690 half_mode = V16QImode;
27691 j = 0;
27692 n = 16;
27693 goto half;
27695 case V16HImode:
27696 half_mode = V8HImode;
27697 j = 1;
27698 n = 8;
27699 goto half;
27701 case V8SImode:
27702 half_mode = V4SImode;
27703 j = 2;
27704 n = 4;
27705 goto half;
27707 case V4DImode:
27708 half_mode = V2DImode;
27709 j = 3;
27710 n = 2;
27711 goto half;
27713 case V8SFmode:
27714 half_mode = V4SFmode;
27715 j = 4;
27716 n = 4;
27717 goto half;
27719 case V4DFmode:
27720 half_mode = V2DFmode;
27721 j = 5;
27722 n = 2;
27723 goto half;
27725 half:
27726 /* Compute offset. */
27727 i = elt / n;
27728 elt %= n;
27730 gcc_assert (i <= 1);
27732 /* Extract the half. */
27733 tmp = gen_reg_rtx (half_mode);
27734 emit_insn ((*gen_extract[j][i]) (tmp, target));
27736 /* Put val in tmp at elt. */
27737 ix86_expand_vector_set (false, tmp, val, elt);
27739 /* Put it back. */
27740 emit_insn ((*gen_insert[j][i]) (target, target, tmp));
27741 return;
27743 default:
27744 break;
27747 if (use_vec_merge)
27749 tmp = gen_rtx_VEC_DUPLICATE (mode, val);
27750 tmp = gen_rtx_VEC_MERGE (mode, tmp, target, GEN_INT (1 << elt));
27751 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27753 else
27755 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27757 emit_move_insn (mem, target);
27759 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27760 emit_move_insn (tmp, val);
27762 emit_move_insn (target, mem);
27766 void
27767 ix86_expand_vector_extract (bool mmx_ok, rtx target, rtx vec, int elt)
27769 enum machine_mode mode = GET_MODE (vec);
27770 enum machine_mode inner_mode = GET_MODE_INNER (mode);
27771 bool use_vec_extr = false;
27772 rtx tmp;
27774 switch (mode)
27776 case V2SImode:
27777 case V2SFmode:
27778 if (!mmx_ok)
27779 break;
27780 /* FALLTHRU */
27782 case V2DFmode:
27783 case V2DImode:
27784 use_vec_extr = true;
27785 break;
27787 case V4SFmode:
27788 use_vec_extr = TARGET_SSE4_1;
27789 if (use_vec_extr)
27790 break;
27792 switch (elt)
27794 case 0:
27795 tmp = vec;
27796 break;
27798 case 1:
27799 case 3:
27800 tmp = gen_reg_rtx (mode);
27801 emit_insn (gen_sse_shufps_v4sf (tmp, vec, vec,
27802 GEN_INT (elt), GEN_INT (elt),
27803 GEN_INT (elt+4), GEN_INT (elt+4)));
27804 break;
27806 case 2:
27807 tmp = gen_reg_rtx (mode);
27808 emit_insn (gen_sse_unpckhps (tmp, vec, vec));
27809 break;
27811 default:
27812 gcc_unreachable ();
27814 vec = tmp;
27815 use_vec_extr = true;
27816 elt = 0;
27817 break;
27819 case V4SImode:
27820 use_vec_extr = TARGET_SSE4_1;
27821 if (use_vec_extr)
27822 break;
27824 if (TARGET_SSE2)
27826 switch (elt)
27828 case 0:
27829 tmp = vec;
27830 break;
27832 case 1:
27833 case 3:
27834 tmp = gen_reg_rtx (mode);
27835 emit_insn (gen_sse2_pshufd_1 (tmp, vec,
27836 GEN_INT (elt), GEN_INT (elt),
27837 GEN_INT (elt), GEN_INT (elt)));
27838 break;
27840 case 2:
27841 tmp = gen_reg_rtx (mode);
27842 emit_insn (gen_sse2_punpckhdq (tmp, vec, vec));
27843 break;
27845 default:
27846 gcc_unreachable ();
27848 vec = tmp;
27849 use_vec_extr = true;
27850 elt = 0;
27852 else
27854 /* For SSE1, we have to reuse the V4SF code. */
27855 ix86_expand_vector_extract (false, gen_lowpart (SFmode, target),
27856 gen_lowpart (V4SFmode, vec), elt);
27857 return;
27859 break;
27861 case V8HImode:
27862 use_vec_extr = TARGET_SSE2;
27863 break;
27864 case V4HImode:
27865 use_vec_extr = mmx_ok && (TARGET_SSE || TARGET_3DNOW_A);
27866 break;
27868 case V16QImode:
27869 use_vec_extr = TARGET_SSE4_1;
27870 break;
27872 case V8QImode:
27873 /* ??? Could extract the appropriate HImode element and shift. */
27874 default:
27875 break;
27878 if (use_vec_extr)
27880 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
27881 tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
27883 /* Let the rtl optimizers know about the zero extension performed. */
27884 if (inner_mode == QImode || inner_mode == HImode)
27886 tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
27887 target = gen_lowpart (SImode, target);
27890 emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
27892 else
27894 rtx mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), false);
27896 emit_move_insn (mem, vec);
27898 tmp = adjust_address (mem, inner_mode, elt*GET_MODE_SIZE (inner_mode));
27899 emit_move_insn (target, tmp);
27903 /* Expand a vector reduction on V4SFmode for SSE1. FN is the binary
27904 pattern to reduce; DEST is the destination; IN is the input vector. */
27906 void
27907 ix86_expand_reduc_v4sf (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
27909 rtx tmp1, tmp2, tmp3;
27911 tmp1 = gen_reg_rtx (V4SFmode);
27912 tmp2 = gen_reg_rtx (V4SFmode);
27913 tmp3 = gen_reg_rtx (V4SFmode);
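/* movhlps folds elements 2 and 3 onto 0 and 1; applying FN then leaves
   partial results in elements 0 and 1 of TMP2, and the shufps broadcast of
   element 1 lets the final FN put the complete reduction into element 0 of
   DEST.  */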
27915 emit_insn (gen_sse_movhlps (tmp1, in, in));
27916 emit_insn (fn (tmp2, tmp1, in));
27918 emit_insn (gen_sse_shufps_v4sf (tmp3, tmp2, tmp2,
27919 const1_rtx, const1_rtx,
27920 GEN_INT (1+4), GEN_INT (1+4)));
27921 emit_insn (fn (dest, tmp2, tmp3));
27924 /* Target hook for scalar_mode_supported_p. */
27925 static bool
27926 ix86_scalar_mode_supported_p (enum machine_mode mode)
27928 if (DECIMAL_FLOAT_MODE_P (mode))
27929 return true;
27930 else if (mode == TFmode)
27931 return true;
27932 else
27933 return default_scalar_mode_supported_p (mode);
27936 /* Implements target hook vector_mode_supported_p. */
27937 static bool
27938 ix86_vector_mode_supported_p (enum machine_mode mode)
27940 if (TARGET_SSE && VALID_SSE_REG_MODE (mode))
27941 return true;
27942 if (TARGET_SSE2 && VALID_SSE2_REG_MODE (mode))
27943 return true;
27944 if (TARGET_AVX && VALID_AVX256_REG_MODE (mode))
27945 return true;
27946 if (TARGET_MMX && VALID_MMX_REG_MODE (mode))
27947 return true;
27948 if (TARGET_3DNOW && VALID_MMX_REG_MODE_3DNOW (mode))
27949 return true;
27950 return false;
27953 /* Target hook for c_mode_for_suffix. */
27954 static enum machine_mode
27955 ix86_c_mode_for_suffix (char suffix)
27957 if (suffix == 'q')
27958 return TFmode;
27959 if (suffix == 'w')
27960 return XFmode;
27962 return VOIDmode;
27965 /* Worker function for TARGET_MD_ASM_CLOBBERS.
27967 We do this in the new i386 backend to maintain source compatibility
27968 with the old cc0-based compiler. */
27970 static tree
27971 ix86_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
27972 tree inputs ATTRIBUTE_UNUSED,
27973 tree clobbers)
27975 clobbers = tree_cons (NULL_TREE, build_string (5, "flags"),
27976 clobbers);
27977 clobbers = tree_cons (NULL_TREE, build_string (4, "fpsr"),
27978 clobbers);
27979 return clobbers;
27982 /* Implements target vector targetm.asm.encode_section_info. This
27983 is not used by netware. */
27985 static void ATTRIBUTE_UNUSED
27986 ix86_encode_section_info (tree decl, rtx rtl, int first)
27988 default_encode_section_info (decl, rtl, first);
27990 if (TREE_CODE (decl) == VAR_DECL
27991 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
27992 && ix86_in_large_data_p (decl))
27993 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FAR_ADDR;
27996 /* Worker function for REVERSE_CONDITION. */
27998 enum rtx_code
27999 ix86_reverse_condition (enum rtx_code code, enum machine_mode mode)
28001 return (mode != CCFPmode && mode != CCFPUmode
28002 ? reverse_condition (code)
28003 : reverse_condition_maybe_unordered (code));
28006 /* Output code to perform an x87 FP register move, from OPERANDS[1]
28007 to OPERANDS[0]. */
28009 const char *
28010 output_387_reg_move (rtx insn, rtx *operands)
28012 if (REG_P (operands[0]))
28014 if (REG_P (operands[1])
28015 && find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28017 if (REGNO (operands[0]) == FIRST_STACK_REG)
28018 return output_387_ffreep (operands, 0);
28019 return "fstp\t%y0";
28021 if (STACK_TOP_P (operands[0]))
28022 return "fld%Z1\t%y1";
28023 return "fst\t%y0";
28025 else if (MEM_P (operands[0]))
28027 gcc_assert (REG_P (operands[1]));
28028 if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
28029 return "fstp%Z0\t%y0";
28030 else
28032 /* There is no non-popping store to memory for XFmode.
28033 So if we need one, follow the store with a load. */
28034 if (GET_MODE (operands[0]) == XFmode)
28035 return "fstp%Z0\t%y0\n\tfld%Z0\t%y0";
28036 else
28037 return "fst%Z0\t%y0";
28040 else
28041 gcc_unreachable();
28044 /* Output code to perform a conditional jump to LABEL, if C2 flag in
28045 FP status register is set. */
28047 void
28048 ix86_emit_fp_unordered_jump (rtx label)
28050 rtx reg = gen_reg_rtx (HImode);
28051 rtx temp;
28053 emit_insn (gen_x86_fnstsw_1 (reg));
28055 if (TARGET_SAHF && (TARGET_USE_SAHF || optimize_insn_for_size_p ()))
28057 emit_insn (gen_x86_sahf_1 (reg));
28059 temp = gen_rtx_REG (CCmode, FLAGS_REG);
28060 temp = gen_rtx_UNORDERED (VOIDmode, temp, const0_rtx);
28062 else
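/* No usable SAHF: test the FNSTSW result directly.  The 0x04 mask selects
   bit 2 of the upper status-word byte, i.e. bit 10 of the status word,
   which is the C2 condition flag.  */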
28064 emit_insn (gen_testqi_ext_ccno_0 (reg, GEN_INT (0x04)));
28066 temp = gen_rtx_REG (CCNOmode, FLAGS_REG);
28067 temp = gen_rtx_NE (VOIDmode, temp, const0_rtx);
28070 temp = gen_rtx_IF_THEN_ELSE (VOIDmode, temp,
28071 gen_rtx_LABEL_REF (VOIDmode, label),
28072 pc_rtx);
28073 temp = gen_rtx_SET (VOIDmode, pc_rtx, temp);
28075 emit_jump_insn (temp);
28076 predict_jump (REG_BR_PROB_BASE * 10 / 100);
28079 /* Output code to perform a log1p XFmode calculation. */
28081 void ix86_emit_i387_log1p (rtx op0, rtx op1)
28083 rtx label1 = gen_label_rtx ();
28084 rtx label2 = gen_label_rtx ();
28086 rtx tmp = gen_reg_rtx (XFmode);
28087 rtx tmp2 = gen_reg_rtx (XFmode);
28088 rtx test;
28090 emit_insn (gen_absxf2 (tmp, op1));
28091 test = gen_rtx_GE (VOIDmode, tmp,
28092 CONST_DOUBLE_FROM_REAL_VALUE (
28093 REAL_VALUE_ATOF ("0.29289321881345247561810596348408353", XFmode),
28094 XFmode));
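/* 0.29289321881... is 1 - sqrt(2)/2, the limit of the argument range for
   which fyl2xp1 is specified; for larger |op1| we compute log2 (1 + op1)
   with fyl2x instead.  */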
28095 emit_jump_insn (gen_cbranchxf4 (test, XEXP (test, 0), XEXP (test, 1), label1));
28097 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28098 emit_insn (gen_fyl2xp1xf3_i387 (op0, op1, tmp2));
28099 emit_jump (label2);
28101 emit_label (label1);
28102 emit_move_insn (tmp, CONST1_RTX (XFmode));
28103 emit_insn (gen_addxf3 (tmp, op1, tmp));
28104 emit_move_insn (tmp2, standard_80387_constant_rtx (4)); /* fldln2 */
28105 emit_insn (gen_fyl2xxf3_i387 (op0, tmp, tmp2));
28107 emit_label (label2);
28110 /* Output code to perform a Newton-Raphson approximation of a single precision
28111 floating point divide [http://en.wikipedia.org/wiki/N-th_root_algorithm]. */
28113 void ix86_emit_swdivsf (rtx res, rtx a, rtx b, enum machine_mode mode)
28115 rtx x0, x1, e0, e1, two;
28117 x0 = gen_reg_rtx (mode);
28118 e0 = gen_reg_rtx (mode);
28119 e1 = gen_reg_rtx (mode);
28120 x1 = gen_reg_rtx (mode);
28122 two = CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode);
28124 if (VECTOR_MODE_P (mode))
28125 two = ix86_build_const_vector (SFmode, true, two);
28127 two = force_reg (mode, two);
28129 /* a / b = a * rcp(b) * (2.0 - b * rcp(b)) */
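/* This is one Newton-Raphson step for f(x) = 1/x - b: starting from the
   rcpss/rcpps estimate x0, x1 = x0 * (2 - b * x0) roughly doubles the
   number of correct bits.  */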
28131 /* x0 = rcp(b) estimate */
28132 emit_insn (gen_rtx_SET (VOIDmode, x0,
28133 gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
28134 UNSPEC_RCP)));
28135 /* e0 = x0 * b */
28136 emit_insn (gen_rtx_SET (VOIDmode, e0,
28137 gen_rtx_MULT (mode, x0, b)));
28138 /* e1 = 2. - e0 */
28139 emit_insn (gen_rtx_SET (VOIDmode, e1,
28140 gen_rtx_MINUS (mode, two, e0)));
28141 /* x1 = x0 * e1 */
28142 emit_insn (gen_rtx_SET (VOIDmode, x1,
28143 gen_rtx_MULT (mode, x0, e1)));
28144 /* res = a * x1 */
28145 emit_insn (gen_rtx_SET (VOIDmode, res,
28146 gen_rtx_MULT (mode, a, x1)));
28149 /* Output code to perform a Newton-Raphson approximation of a
28150 single precision floating point [reciprocal] square root. */
28152 void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
28153 bool recip)
28155 rtx x0, e0, e1, e2, e3, mthree, mhalf;
28156 REAL_VALUE_TYPE r;
28158 x0 = gen_reg_rtx (mode);
28159 e0 = gen_reg_rtx (mode);
28160 e1 = gen_reg_rtx (mode);
28161 e2 = gen_reg_rtx (mode);
28162 e3 = gen_reg_rtx (mode);
28164 real_from_integer (&r, VOIDmode, -3, -1, 0);
28165 mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28167 real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
28168 mhalf = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
28170 if (VECTOR_MODE_P (mode))
28172 mthree = ix86_build_const_vector (SFmode, true, mthree);
28173 mhalf = ix86_build_const_vector (SFmode, true, mhalf);
28176 /* sqrt(a) = -0.5 * a * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0)
28177 rsqrt(a) = -0.5 * rsqrtss(a) * (a * rsqrtss(a) * rsqrtss(a) - 3.0) */
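/* Both forms are one Newton-Raphson step for f(x) = 1/x**2 - a applied to
   the rsqrtss/rsqrtps estimate x0: x1 = 0.5 * x0 * (3 - a * x0 * x0),
   written with negated constants so it maps onto multiplies and one add.
   Multiplying the correction by a * x0 instead of x0 turns the refined
   rsqrt into sqrt.  */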
28179 /* x0 = rsqrt(a) estimate */
28180 emit_insn (gen_rtx_SET (VOIDmode, x0,
28181 gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
28182 UNSPEC_RSQRT)));
28184 /* If a == 0.0, mask the infinite rsqrt estimate down to zero to prevent a NaN (from 0 * inf) for sqrt (0.0). */
28185 if (!recip)
28187 rtx zero, mask;
28189 zero = gen_reg_rtx (mode);
28190 mask = gen_reg_rtx (mode);
28192 zero = force_reg (mode, CONST0_RTX(mode));
28193 emit_insn (gen_rtx_SET (VOIDmode, mask,
28194 gen_rtx_NE (mode, zero, a)));
28196 emit_insn (gen_rtx_SET (VOIDmode, x0,
28197 gen_rtx_AND (mode, x0, mask)));
28200 /* e0 = x0 * a */
28201 emit_insn (gen_rtx_SET (VOIDmode, e0,
28202 gen_rtx_MULT (mode, x0, a)));
28203 /* e1 = e0 * x0 */
28204 emit_insn (gen_rtx_SET (VOIDmode, e1,
28205 gen_rtx_MULT (mode, e0, x0)));
28207 /* e2 = e1 - 3. */
28208 mthree = force_reg (mode, mthree);
28209 emit_insn (gen_rtx_SET (VOIDmode, e2,
28210 gen_rtx_PLUS (mode, e1, mthree)));
28212 mhalf = force_reg (mode, mhalf);
28213 if (recip)
28214 /* e3 = -.5 * x0 */
28215 emit_insn (gen_rtx_SET (VOIDmode, e3,
28216 gen_rtx_MULT (mode, x0, mhalf)));
28217 else
28218 /* e3 = -.5 * e0 */
28219 emit_insn (gen_rtx_SET (VOIDmode, e3,
28220 gen_rtx_MULT (mode, e0, mhalf)));
28221 /* ret = e2 * e3 */
28222 emit_insn (gen_rtx_SET (VOIDmode, res,
28223 gen_rtx_MULT (mode, e2, e3)));
28226 /* Solaris implementation of TARGET_ASM_NAMED_SECTION. */
28228 static void ATTRIBUTE_UNUSED
28229 i386_solaris_elf_named_section (const char *name, unsigned int flags,
28230 tree decl)
28232 /* With Binutils 2.15, the "@unwind" marker must be specified on
28233 every occurrence of the ".eh_frame" section, not just the first
28234 one. */
28235 if (TARGET_64BIT
28236 && strcmp (name, ".eh_frame") == 0)
28238 fprintf (asm_out_file, "\t.section\t%s,\"%s\",@unwind\n", name,
28239 flags & SECTION_WRITE ? "aw" : "a");
28240 return;
28242 default_elf_asm_named_section (name, flags, decl);
28245 /* Return the mangling of TYPE if it is an extended fundamental type. */
28247 static const char *
28248 ix86_mangle_type (const_tree type)
28250 type = TYPE_MAIN_VARIANT (type);
28252 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28253 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28254 return NULL;
28256 switch (TYPE_MODE (type))
28258 case TFmode:
28259 /* __float128 is "g". */
28260 return "g";
28261 case XFmode:
28262 /* "long double" or __float80 is "e". */
28263 return "e";
28264 default:
28265 return NULL;
28269 /* For 32-bit code we can save PIC register setup by using
28270 __stack_chk_fail_local hidden function instead of calling
28271 __stack_chk_fail directly. 64-bit code doesn't need to setup any PIC
28272 register, so it is better to call __stack_chk_fail directly. */
28274 static tree
28275 ix86_stack_protect_fail (void)
28277 return TARGET_64BIT
28278 ? default_external_stack_protect_fail ()
28279 : default_hidden_stack_protect_fail ();
28282 /* Select a format to encode pointers in exception handling data. CODE
28283 is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
28284 true if the symbol may be affected by dynamic relocations.
28286 ??? All x86 object file formats are capable of representing this.
28287 After all, the relocation needed is the same as for the call insn.
28288 Whether or not a particular assembler allows us to enter such, I
28289 guess we'll have to see. */
28290 int
28291 asm_preferred_eh_data_format (int code, int global)
28293 if (flag_pic)
28295 int type = DW_EH_PE_sdata8;
28296 if (!TARGET_64BIT
28297 || ix86_cmodel == CM_SMALL_PIC
28298 || (ix86_cmodel == CM_MEDIUM_PIC && (global || code)))
28299 type = DW_EH_PE_sdata4;
28300 return (global ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | type;
28302 if (ix86_cmodel == CM_SMALL
28303 || (ix86_cmodel == CM_MEDIUM && code))
28304 return DW_EH_PE_udata4;
28305 return DW_EH_PE_absptr;
28308 /* Expand copysign from SIGN to the positive value ABS_VALUE
28309 storing in RESULT. If MASK is non-null, it shall be a mask to mask out
28310 the sign-bit. */
28311 static void
28312 ix86_sse_copysign_to_positive (rtx result, rtx abs_value, rtx sign, rtx mask)
28314 enum machine_mode mode = GET_MODE (sign);
28315 rtx sgn = gen_reg_rtx (mode);
28316 if (mask == NULL_RTX)
28318 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), false);
28319 if (!VECTOR_MODE_P (mode))
28321 /* We need to generate a scalar mode mask in this case. */
28322 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28323 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28324 mask = gen_reg_rtx (mode);
28325 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28328 else
28329 mask = gen_rtx_NOT (mode, mask);
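/* Either way MASK now selects only the sign bit, so SGN below is the sign
   bit of SIGN and IORing it into ABS_VALUE implements copysign.  */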
28330 emit_insn (gen_rtx_SET (VOIDmode, sgn,
28331 gen_rtx_AND (mode, mask, sign)));
28332 emit_insn (gen_rtx_SET (VOIDmode, result,
28333 gen_rtx_IOR (mode, abs_value, sgn)));
28336 /* Expand fabs (OP0) and return a new rtx that holds the result. The
28337 mask for masking out the sign-bit is stored in *SMASK, if that is
28338 non-null. */
28339 static rtx
28340 ix86_expand_sse_fabs (rtx op0, rtx *smask)
28342 enum machine_mode mode = GET_MODE (op0);
28343 rtx xa, mask;
28345 xa = gen_reg_rtx (mode);
28346 mask = ix86_build_signbit_mask (mode, VECTOR_MODE_P (mode), true);
28347 if (!VECTOR_MODE_P (mode))
28349 /* We need to generate a scalar mode mask in this case. */
28350 rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
28351 tmp = gen_rtx_VEC_SELECT (mode, mask, tmp);
28352 mask = gen_reg_rtx (mode);
28353 emit_insn (gen_rtx_SET (VOIDmode, mask, tmp));
28355 emit_insn (gen_rtx_SET (VOIDmode, xa,
28356 gen_rtx_AND (mode, op0, mask)));
28358 if (smask)
28359 *smask = mask;
28361 return xa;
28364 /* Expands a comparison of OP0 with OP1 using comparison code CODE,
28365 swapping the operands if SWAP_OPERANDS is true. The expanded
28366 code is a forward jump to a newly created label in case the
28367 comparison is true. The generated label rtx is returned. */
28368 static rtx
28369 ix86_expand_sse_compare_and_jump (enum rtx_code code, rtx op0, rtx op1,
28370 bool swap_operands)
28372 rtx label, tmp;
28374 if (swap_operands)
28376 tmp = op0;
28377 op0 = op1;
28378 op1 = tmp;
28381 label = gen_label_rtx ();
28382 tmp = gen_rtx_REG (CCFPUmode, FLAGS_REG);
28383 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28384 gen_rtx_COMPARE (CCFPUmode, op0, op1)));
28385 tmp = gen_rtx_fmt_ee (code, VOIDmode, tmp, const0_rtx);
28386 tmp = gen_rtx_IF_THEN_ELSE (VOIDmode, tmp,
28387 gen_rtx_LABEL_REF (VOIDmode, label), pc_rtx);
28388 tmp = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, tmp));
28389 JUMP_LABEL (tmp) = label;
28391 return label;
28394 /* Expand a mask generating SSE comparison instruction comparing OP0 with OP1
28395 using comparison code CODE. Operands are swapped for the comparison if
28396 SWAP_OPERANDS is true. Returns a rtx for the generated mask. */
28397 static rtx
28398 ix86_expand_sse_compare_mask (enum rtx_code code, rtx op0, rtx op1,
28399 bool swap_operands)
28401 enum machine_mode mode = GET_MODE (op0);
28402 rtx mask = gen_reg_rtx (mode);
28404 if (swap_operands)
28406 rtx tmp = op0;
28407 op0 = op1;
28408 op1 = tmp;
28411 if (mode == DFmode)
28412 emit_insn (gen_sse2_maskcmpdf3 (mask, op0, op1,
28413 gen_rtx_fmt_ee (code, mode, op0, op1)));
28414 else
28415 emit_insn (gen_sse_maskcmpsf3 (mask, op0, op1,
28416 gen_rtx_fmt_ee (code, mode, op0, op1)));
28418 return mask;
28421 /* Generate and return a rtx of mode MODE for 2**n where n is the number
28422 of bits of the mantissa of MODE, which must be one of DFmode or SFmode. */
28423 static rtx
28424 ix86_gen_TWO52 (enum machine_mode mode)
28426 REAL_VALUE_TYPE TWO52r;
28427 rtx TWO52;
28429 real_ldexp (&TWO52r, &dconst1, mode == DFmode ? 52 : 23);
28430 TWO52 = const_double_from_real_value (TWO52r, mode);
28431 TWO52 = force_reg (mode, TWO52);
28433 return TWO52;
28436 /* Expand SSE sequence for computing lround from OP1 storing
28437 into OP0. */
28438 void
28439 ix86_expand_lround (rtx op0, rtx op1)
28441 /* C code for the stuff we're doing below:
28442 tmp = op1 + copysign (nextafter (0.5, 0.0), op1)
28443 return (long)tmp;
28445 enum machine_mode mode = GET_MODE (op1);
28446 const struct real_format *fmt;
28447 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28448 rtx adj;
28450 /* load nextafter (0.5, 0.0) */
28451 fmt = REAL_MODE_FORMAT (mode);
28452 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28453 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
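/* pred_half is the largest representable value below 0.5.  Adding it instead
   of 0.5 itself keeps arguments just under a halfway point from being pushed
   to the next integer by rounding in the addition, while exact halfway cases
   still round away from zero as lround requires.  */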
28455 /* adj = copysign (0.5, op1) */
28456 adj = force_reg (mode, const_double_from_real_value (pred_half, mode));
28457 ix86_sse_copysign_to_positive (adj, adj, force_reg (mode, op1), NULL_RTX);
28459 /* adj = op1 + adj */
28460 adj = expand_simple_binop (mode, PLUS, adj, op1, NULL_RTX, 0, OPTAB_DIRECT);
28462 /* op0 = (imode)adj */
28463 expand_fix (op0, adj, 0);
28466 /* Expand SSE2 sequence for computing lfloor or lceil from OPERAND1 storing
28467 into OPERAND0. */
28468 void
28469 ix86_expand_lfloorceil (rtx op0, rtx op1, bool do_floor)
28471 /* C code for the stuff we're doing below (for do_floor):
28472 xi = (long)op1;
28473 xi -= (double)xi > op1 ? 1 : 0;
28474 return xi;
28476 enum machine_mode fmode = GET_MODE (op1);
28477 enum machine_mode imode = GET_MODE (op0);
28478 rtx ireg, freg, label, tmp;
28480 /* reg = (long)op1 */
28481 ireg = gen_reg_rtx (imode);
28482 expand_fix (ireg, op1, 0);
28484 /* freg = (double)reg */
28485 freg = gen_reg_rtx (fmode);
28486 expand_float (freg, ireg, 0);
28488 /* ireg = (freg > op1) ? ireg - 1 : ireg */
28489 label = ix86_expand_sse_compare_and_jump (UNLE,
28490 freg, op1, !do_floor);
28491 tmp = expand_simple_binop (imode, do_floor ? MINUS : PLUS,
28492 ireg, const1_rtx, NULL_RTX, 0, OPTAB_DIRECT);
28493 emit_move_insn (ireg, tmp);
28495 emit_label (label);
28496 LABEL_NUSES (label) = 1;
28498 emit_move_insn (op0, ireg);
28501 /* Expand rint (IEEE round to nearest) rounding OPERAND1 and storing the
28502 result in OPERAND0. */
28503 void
28504 ix86_expand_rint (rtx operand0, rtx operand1)
28506 /* C code for the stuff we're doing below:
28507 xa = fabs (operand1);
28508 if (!isless (xa, 2**52))
28509 return operand1;
28510 xa = xa + 2**52 - 2**52;
28511 return copysign (xa, operand1);
28513 enum machine_mode mode = GET_MODE (operand0);
28514 rtx res, xa, label, TWO52, mask;
28516 res = gen_reg_rtx (mode);
28517 emit_move_insn (res, operand1);
28519 /* xa = abs (operand1) */
28520 xa = ix86_expand_sse_fabs (res, &mask);
28522 /* if (!isless (xa, TWO52)) goto label; */
28523 TWO52 = ix86_gen_TWO52 (mode);
28524 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
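/* For 0 <= xa < 2**52 adding TWO52 pushes all fraction bits out of the
   DFmode mantissa, so the sum is xa rounded to an integer in the current
   rounding mode; subtracting TWO52 again recovers that integer exactly.
   (The same holds for SFmode with 2**23.)  */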
28526 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28527 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28529 ix86_sse_copysign_to_positive (res, xa, res, mask);
28531 emit_label (label);
28532 LABEL_NUSES (label) = 1;
28534 emit_move_insn (operand0, res);
28537 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28538 into OPERAND0. */
28539 void
28540 ix86_expand_floorceildf_32 (rtx operand0, rtx operand1, bool do_floor)
28542 /* C code for the stuff we expand below.
28543 double xa = fabs (x), x2;
28544 if (!isless (xa, TWO52))
28545 return x;
28546 xa = xa + TWO52 - TWO52;
28547 x2 = copysign (xa, x);
28548 Compensate. Floor:
28549 if (x2 > x)
28550 x2 -= 1;
28551 Compensate. Ceil:
28552 if (x2 < x)
28553 x2 -= -1;
28554 return x2;
28556 enum machine_mode mode = GET_MODE (operand0);
28557 rtx xa, TWO52, tmp, label, one, res, mask;
28559 TWO52 = ix86_gen_TWO52 (mode);
28561 /* Temporary for holding the result, initialized to the input
28562 operand to ease control flow. */
28563 res = gen_reg_rtx (mode);
28564 emit_move_insn (res, operand1);
28566 /* xa = abs (operand1) */
28567 xa = ix86_expand_sse_fabs (res, &mask);
28569 /* if (!isless (xa, TWO52)) goto label; */
28570 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28572 /* xa = xa + TWO52 - TWO52; */
28573 xa = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28574 xa = expand_simple_binop (mode, MINUS, xa, TWO52, xa, 0, OPTAB_DIRECT);
28576 /* xa = copysign (xa, operand1) */
28577 ix86_sse_copysign_to_positive (xa, xa, res, mask);
28579 /* generate 1.0 or -1.0 */
28580 one = force_reg (mode,
28581 const_double_from_real_value (do_floor
28582 ? dconst1 : dconstm1, mode));
28584 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28585 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28586 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28587 gen_rtx_AND (mode, one, tmp)));
28588 /* We always need to subtract here to preserve signed zero. */
28589 tmp = expand_simple_binop (mode, MINUS,
28590 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28591 emit_move_insn (res, tmp);
28593 emit_label (label);
28594 LABEL_NUSES (label) = 1;
28596 emit_move_insn (operand0, res);
28599 /* Expand SSE2 sequence for computing floor or ceil from OPERAND1 storing
28600 into OPERAND0. */
28601 void
28602 ix86_expand_floorceil (rtx operand0, rtx operand1, bool do_floor)
28604 /* C code for the stuff we expand below.
28605 double xa = fabs (x), x2;
28606 if (!isless (xa, TWO52))
28607 return x;
28608 x2 = (double)(long)x;
28609 Compensate. Floor:
28610 if (x2 > x)
28611 x2 -= 1;
28612 Compensate. Ceil:
28613 if (x2 < x)
28614 x2 += 1;
28615 if (HONOR_SIGNED_ZEROS (mode))
28616 return copysign (x2, x);
28617 return x2;
28619 enum machine_mode mode = GET_MODE (operand0);
28620 rtx xa, xi, TWO52, tmp, label, one, res, mask;
28622 TWO52 = ix86_gen_TWO52 (mode);
28624 /* Temporary for holding the result, initialized to the input
28625 operand to ease control flow. */
28626 res = gen_reg_rtx (mode);
28627 emit_move_insn (res, operand1);
28629 /* xa = abs (operand1) */
28630 xa = ix86_expand_sse_fabs (res, &mask);
28632 /* if (!isless (xa, TWO52)) goto label; */
28633 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28635 /* xa = (double)(long)x */
28636 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28637 expand_fix (xi, res, 0);
28638 expand_float (xa, xi, 0);
28640 /* generate 1.0 */
28641 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28643 /* Compensate: xa = xa - (xa > operand1 ? 1 : 0) */
28644 tmp = ix86_expand_sse_compare_mask (UNGT, xa, res, !do_floor);
28645 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28646 gen_rtx_AND (mode, one, tmp)));
28647 tmp = expand_simple_binop (mode, do_floor ? MINUS : PLUS,
28648 xa, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28649 emit_move_insn (res, tmp);
28651 if (HONOR_SIGNED_ZEROS (mode))
28652 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28654 emit_label (label);
28655 LABEL_NUSES (label) = 1;
28657 emit_move_insn (operand0, res);
28660 /* Expand SSE sequence for computing round from OPERAND1 storing
28661 into OPERAND0. Sequence that works without relying on DImode truncation
28662 via cvttsd2siq that is only available on 64bit targets. */
28663 void
28664 ix86_expand_rounddf_32 (rtx operand0, rtx operand1)
28666 /* C code for the stuff we expand below.
28667 double xa = fabs (x), xa2, x2;
28668 if (!isless (xa, TWO52))
28669 return x;
28670 Using the absolute value and copying back sign makes
28671 -0.0 -> -0.0 correct.
28672 xa2 = xa + TWO52 - TWO52;
28673 Compensate.
28674 dxa = xa2 - xa;
28675 if (dxa <= -0.5)
28676 xa2 += 1;
28677 else if (dxa > 0.5)
28678 xa2 -= 1;
28679 x2 = copysign (xa2, x);
28680 return x2;
28682 enum machine_mode mode = GET_MODE (operand0);
28683 rtx xa, xa2, dxa, TWO52, tmp, label, half, mhalf, one, res, mask;
28685 TWO52 = ix86_gen_TWO52 (mode);
28687 /* Temporary for holding the result, initialized to the input
28688 operand to ease control flow. */
28689 res = gen_reg_rtx (mode);
28690 emit_move_insn (res, operand1);
28692 /* xa = abs (operand1) */
28693 xa = ix86_expand_sse_fabs (res, &mask);
28695 /* if (!isless (xa, TWO52)) goto label; */
28696 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28698 /* xa2 = xa + TWO52 - TWO52; */
28699 xa2 = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28700 xa2 = expand_simple_binop (mode, MINUS, xa2, TWO52, xa2, 0, OPTAB_DIRECT);
28702 /* dxa = xa2 - xa; */
28703 dxa = expand_simple_binop (mode, MINUS, xa2, xa, NULL_RTX, 0, OPTAB_DIRECT);
28705 /* generate 0.5, 1.0 and -0.5 */
28706 half = force_reg (mode, const_double_from_real_value (dconsthalf, mode));
28707 one = expand_simple_binop (mode, PLUS, half, half, NULL_RTX, 0, OPTAB_DIRECT);
28708 mhalf = expand_simple_binop (mode, MINUS, half, one, NULL_RTX,
28709 0, OPTAB_DIRECT);
28711 /* Compensate. */
28712 tmp = gen_reg_rtx (mode);
28713 /* xa2 = xa2 - (dxa > 0.5 ? 1 : 0) */
28714 tmp = ix86_expand_sse_compare_mask (UNGT, dxa, half, false);
28715 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28716 gen_rtx_AND (mode, one, tmp)));
28717 xa2 = expand_simple_binop (mode, MINUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28718 /* xa2 = xa2 + (dxa <= -0.5 ? 1 : 0) */
28719 tmp = ix86_expand_sse_compare_mask (UNGE, mhalf, dxa, false);
28720 emit_insn (gen_rtx_SET (VOIDmode, tmp,
28721 gen_rtx_AND (mode, one, tmp)));
28722 xa2 = expand_simple_binop (mode, PLUS, xa2, tmp, NULL_RTX, 0, OPTAB_DIRECT);
28724 /* res = copysign (xa2, operand1) */
28725 ix86_sse_copysign_to_positive (res, xa2, force_reg (mode, operand1), mask);
28727 emit_label (label);
28728 LABEL_NUSES (label) = 1;
28730 emit_move_insn (operand0, res);
28733 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28734 into OPERAND0. */
28735 void
28736 ix86_expand_trunc (rtx operand0, rtx operand1)
28738 /* C code for SSE variant we expand below.
28739 double xa = fabs (x), x2;
28740 if (!isless (xa, TWO52))
28741 return x;
28742 x2 = (double)(long)x;
28743 if (HONOR_SIGNED_ZEROS (mode))
28744 return copysign (x2, x);
28745      return x2;
28746    */
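  /* Explanatory note (added): the 2^52 guard above does double duty for
     DFmode.  Any |x| >= 2^52 is already integral, so returning x unchanged
     is exact, and the values that do take the conversion path stay well
     inside the range of the signed 64-bit fix/float pair used below.  */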
28747 enum machine_mode mode = GET_MODE (operand0);
28748 rtx xa, xi, TWO52, label, res, mask;
28750 TWO52 = ix86_gen_TWO52 (mode);
28752 /* Temporary for holding the result, initialized to the input
28753 operand to ease control flow. */
28754 res = gen_reg_rtx (mode);
28755 emit_move_insn (res, operand1);
28757 /* xa = abs (operand1) */
28758 xa = ix86_expand_sse_fabs (res, &mask);
28760 /* if (!isless (xa, TWO52)) goto label; */
28761 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28763 /* x = (double)(long)x */
28764 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28765 expand_fix (xi, res, 0);
28766 expand_float (res, xi, 0);
28768 if (HONOR_SIGNED_ZEROS (mode))
28769 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), mask);
28771 emit_label (label);
28772 LABEL_NUSES (label) = 1;
28774 emit_move_insn (operand0, res);
28775 }
28777 /* Expand SSE sequence for computing trunc from OPERAND1 storing
28778 into OPERAND0. */
28779 void
28780 ix86_expand_truncdf_32 (rtx operand0, rtx operand1)
28781 {
28782 enum machine_mode mode = GET_MODE (operand0);
28783 rtx xa, mask, TWO52, label, one, res, smask, tmp;
28785 /* C code for SSE variant we expand below.
28786 double xa = fabs (x), x2;
28787 if (!isless (xa, TWO52))
28788 return x;
28789 xa2 = xa + TWO52 - TWO52;
28790 Compensate:
28791 if (xa2 > xa)
28792 xa2 -= 1.0;
28793 x2 = copysign (xa2, x);
28794      return x2;
28795    */
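  /* Worked example (added for illustration): with x = 1.7,
         xa2 = (1.7 + 2^52) - 2^52 = 2.0   (rounded to nearest)
     which is greater than xa = 1.7, so the compensation below subtracts 1.0,
     leaving 1.0 = trunc (1.7) once the sign is copied back.  */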
28797 TWO52 = ix86_gen_TWO52 (mode);
28799 /* Temporary for holding the result, initialized to the input
28800 operand to ease control flow. */
28801 res = gen_reg_rtx (mode);
28802 emit_move_insn (res, operand1);
28804 /* xa = abs (operand1) */
28805 xa = ix86_expand_sse_fabs (res, &smask);
28807 /* if (!isless (xa, TWO52)) goto label; */
28808 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28810 /* res = xa + TWO52 - TWO52; */
28811 tmp = expand_simple_binop (mode, PLUS, xa, TWO52, NULL_RTX, 0, OPTAB_DIRECT);
28812 tmp = expand_simple_binop (mode, MINUS, tmp, TWO52, tmp, 0, OPTAB_DIRECT);
28813 emit_move_insn (res, tmp);
28815 /* generate 1.0 */
28816 one = force_reg (mode, const_double_from_real_value (dconst1, mode));
28818   /* Compensate: res = res - (res > xa ? 1 : 0), where res holds xa2.  */
28819 mask = ix86_expand_sse_compare_mask (UNGT, res, xa, false);
28820 emit_insn (gen_rtx_SET (VOIDmode, mask,
28821 gen_rtx_AND (mode, mask, one)));
28822 tmp = expand_simple_binop (mode, MINUS,
28823 res, mask, NULL_RTX, 0, OPTAB_DIRECT);
28824 emit_move_insn (res, tmp);
28826 /* res = copysign (res, operand1) */
28827 ix86_sse_copysign_to_positive (res, res, force_reg (mode, operand1), smask);
28829 emit_label (label);
28830 LABEL_NUSES (label) = 1;
28832 emit_move_insn (operand0, res);
28833 }
28835 /* Expand SSE sequence for computing round from OPERAND1 storing
28836 into OPERAND0. */
28837 void
28838 ix86_expand_round (rtx operand0, rtx operand1)
28839 {
28840 /* C code for the stuff we're doing below:
28841 double xa = fabs (x);
28842 if (!isless (xa, TWO52))
28843 return x;
28844 xa = (double)(long)(xa + nextafter (0.5, 0.0));
28845      return copysign (xa, x);
28846    */
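  /* Why nextafter (0.5, 0.0) rather than 0.5 (added explanation): with a
     plain 0.5 addend, the largest double below 0.5 would round the wrong
     way, since pred(0.5) + 0.5 = 1 - 2^-54 rounds up to 1.0 and would then
     truncate to 1 instead of 0.  With pred(0.5) as the addend, every
     xa < 0.5 stays strictly below 1.0 after the addition, while xa = 0.5
     still reaches 1.0 and is rounded away from zero as round() requires.  */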
28847 enum machine_mode mode = GET_MODE (operand0);
28848 rtx res, TWO52, xa, label, xi, half, mask;
28849 const struct real_format *fmt;
28850 REAL_VALUE_TYPE pred_half, half_minus_pred_half;
28852 /* Temporary for holding the result, initialized to the input
28853 operand to ease control flow. */
28854 res = gen_reg_rtx (mode);
28855 emit_move_insn (res, operand1);
28857 TWO52 = ix86_gen_TWO52 (mode);
28858 xa = ix86_expand_sse_fabs (res, &mask);
28859 label = ix86_expand_sse_compare_and_jump (UNLE, TWO52, xa, false);
28861 /* load nextafter (0.5, 0.0) */
28862 fmt = REAL_MODE_FORMAT (mode);
28863 real_2expN (&half_minus_pred_half, -(fmt->p) - 1, mode);
28864 REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
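  /* (Added note) For DFmode, fmt->p is 53, so half_minus_pred_half is 2^-54
     and pred_half ends up as 0.5 - 2^-54, the largest double strictly below
     0.5, i.e. nextafter (0.5, 0.0).  */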
28866 /* xa = xa + 0.5 */
28867 half = force_reg (mode, const_double_from_real_value (pred_half, mode));
28868 xa = expand_simple_binop (mode, PLUS, xa, half, NULL_RTX, 0, OPTAB_DIRECT);
28870 /* xa = (double)(int64_t)xa */
28871 xi = gen_reg_rtx (mode == DFmode ? DImode : SImode);
28872 expand_fix (xi, xa, 0);
28873 expand_float (xa, xi, 0);
28875 /* res = copysign (xa, operand1) */
28876 ix86_sse_copysign_to_positive (res, xa, force_reg (mode, operand1), mask);
28878 emit_label (label);
28879 LABEL_NUSES (label) = 1;
28881 emit_move_insn (operand0, res);
28882 }
28884 /* Table of valid machine attributes. */
28885 static const struct attribute_spec ix86_attribute_table[] =
28886 {
28887 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
28888 /* Stdcall attribute says callee is responsible for popping arguments
28889 if they are not variable. */
28890 { "stdcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28891 /* Fastcall attribute says callee is responsible for popping arguments
28892 if they are not variable. */
28893 { "fastcall", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28894 /* Cdecl attribute says the callee is a normal C declaration */
28895 { "cdecl", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28896 /* Regparm attribute specifies how many integer arguments are to be
28897 passed in registers. */
28898 { "regparm", 1, 1, false, true, true, ix86_handle_cconv_attribute },
28899 /* Sseregparm attribute says we are using x86_64 calling conventions
28900 for FP arguments. */
28901 { "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute },
28902 /* force_align_arg_pointer says this function realigns the stack at entry. */
28903 { (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
28904 false, true, true, ix86_handle_cconv_attribute },
28905 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
28906 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
28907 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
28908 { "shared", 0, 0, true, false, false, ix86_handle_shared_attribute },
28909 #endif
28910 { "ms_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28911 { "gcc_struct", 0, 0, false, false, false, ix86_handle_struct_attribute },
28912 #ifdef SUBTARGET_ATTRIBUTE_TABLE
28913 SUBTARGET_ATTRIBUTE_TABLE,
28914 #endif
28915 /* ms_abi and sysv_abi calling convention function attributes. */
28916 { "ms_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28917 { "sysv_abi", 0, 0, false, true, true, ix86_handle_abi_attribute },
28918 /* End element. */
28919   { NULL, 0, 0, false, false, false, NULL }
28920 };
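/* Usage illustration (added, not part of the original file):

     int __attribute__ ((fastcall)) f (int a, int b);
     int __attribute__ ((regparm (3))) g (int a, int b, int c);
     struct __attribute__ ((ms_struct)) s { char c; int i; };

   fastcall passes the first two integer arguments in ECX/EDX, regparm (3)
   allows up to three integer arguments in registers, and ms_struct selects
   the Microsoft structure-layout rules handled by
   ix86_handle_struct_attribute above.  */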
28922 /* Implement targetm.vectorize.builtin_vectorization_cost. */
28923 static int
28924 x86_builtin_vectorization_cost (bool runtime_test)
28925 {
28926   /* If the branch of the runtime test is taken, i.e. the vectorized
28927      version is skipped, this incurs a misprediction cost (because the
28928      vectorized version is expected to be the fall-through).  So we subtract
28929      the latency of a mispredicted branch from the costs that are incurred
28930      when the vectorized version is executed.
28932      TODO: The values in individual target tables have to be tuned or new
28933      fields may be needed.  E.g. on K8, the default branch path is the
28934      not-taken path.  If the taken path is predicted correctly, the minimum
28935      penalty of going down the taken path is 1 cycle.  If the taken path is
28936      not predicted correctly, then the minimum penalty is 10 cycles.  */
28938   if (runtime_test)
28939     {
28940       return (-(ix86_cost->cond_taken_branch_cost));
28941     }
28942   else
28943     return 0;
28944 }
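/* Illustration with hypothetical numbers (added): if
   ix86_cost->cond_taken_branch_cost were COSTS_N_INSNS (3), the hook would
   report minus that amount for a runtime-tested loop, crediting roughly one
   mispredicted-branch latency back against the vectorized version's cost
   estimate, as described in the comment above.  */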
28946 /* Return the calling-ABI-specific va_list type node for FNDECL,
28947    i.e. the va_list type that matches FNDECL's calling convention.  */
28949 tree
28950 ix86_fn_abi_va_list (tree fndecl)
28951 {
28952 if (!TARGET_64BIT)
28953 return va_list_type_node;
28954 gcc_assert (fndecl != NULL_TREE);
28956 if (ix86_function_abi ((const_tree) fndecl) == MS_ABI)
28957 return ms_va_list_type_node;
28958 else
28959 return sysv_va_list_type_node;
28960 }
28962 /* Return the canonical va_list type specified by TYPE.  If no
28963    valid TYPE is provided, return NULL_TREE.  */
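/* Illustrative example of the "decayed to a pointer" case handled below
   (added for clarity): on x86_64 the SysV __builtin_va_list is a
   one-element array of records, so in

     void consume (__builtin_sysv_va_list ap);
     void forward (__builtin_sysv_va_list ap) { consume (ap); }

   the argument type seen here may already be a pointer to the record rather
   than the array type itself, which is why both sides are unwrapped before
   their main variants are compared.  */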
28965 tree
28966 ix86_canonical_va_list_type (tree type)
28967 {
28968 tree wtype, htype;
28970 /* Resolve references and pointers to va_list type. */
28971 if (INDIRECT_REF_P (type))
28972 type = TREE_TYPE (type);
28973 else if (POINTER_TYPE_P (type) && POINTER_TYPE_P (TREE_TYPE(type)))
28974 type = TREE_TYPE (type);
28976 if (TARGET_64BIT)
28977     {
28978 wtype = va_list_type_node;
28979 gcc_assert (wtype != NULL_TREE);
28980 htype = type;
28981       if (TREE_CODE (wtype) == ARRAY_TYPE)
28982         {
28983           /* If va_list is an array type, the argument may have decayed
28984              to a pointer type, e.g. by being passed to another function.
28985              In that case, unwrap both types so that we can compare the
28986              underlying records.  */
28987           if (TREE_CODE (htype) == ARRAY_TYPE
28988               || POINTER_TYPE_P (htype))
28989             {
28990               wtype = TREE_TYPE (wtype);
28991               htype = TREE_TYPE (htype);
28992             }
28993         }
28994 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
28995 return va_list_type_node;
28996 wtype = sysv_va_list_type_node;
28997 gcc_assert (wtype != NULL_TREE);
28998 htype = type;
28999       if (TREE_CODE (wtype) == ARRAY_TYPE)
29000         {
29001           /* If va_list is an array type, the argument may have decayed
29002              to a pointer type, e.g. by being passed to another function.
29003              In that case, unwrap both types so that we can compare the
29004              underlying records.  */
29005           if (TREE_CODE (htype) == ARRAY_TYPE
29006               || POINTER_TYPE_P (htype))
29007             {
29008               wtype = TREE_TYPE (wtype);
29009               htype = TREE_TYPE (htype);
29010             }
29011         }
29012 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29013 return sysv_va_list_type_node;
29014 wtype = ms_va_list_type_node;
29015 gcc_assert (wtype != NULL_TREE);
29016 htype = type;
29017       if (TREE_CODE (wtype) == ARRAY_TYPE)
29018         {
29019           /* If va_list is an array type, the argument may have decayed
29020              to a pointer type, e.g. by being passed to another function.
29021              In that case, unwrap both types so that we can compare the
29022              underlying records.  */
29023           if (TREE_CODE (htype) == ARRAY_TYPE
29024               || POINTER_TYPE_P (htype))
29025             {
29026               wtype = TREE_TYPE (wtype);
29027               htype = TREE_TYPE (htype);
29028             }
29029         }
29030 if (TYPE_MAIN_VARIANT (wtype) == TYPE_MAIN_VARIANT (htype))
29031 return ms_va_list_type_node;
29032 return NULL_TREE;
29033     }
29034 return std_canonical_va_list_type (type);
29035 }
29037 /* Iterate through the target-specific builtin types for va_list.
29038    IDX denotes the iterator, *PTREE is set to the result type of
29039    the va_list builtin, and *PNAME to its internal name.
29040    Return zero if there is no element for this index; otherwise
29041    IDX should be increased for the next call.
29042    Note that the base builtin name __builtin_va_list is not iterated here.
29043    Used from c_common_nodes_and_builtins.  */
29045 int
29046 ix86_enum_va_list (int idx, const char **pname, tree *ptree)
29047 {
29048 if (!TARGET_64BIT)
29049 return 0;
29050 switch (idx) {
29051 case 0:
29052 *ptree = ms_va_list_type_node;
29053 *pname = "__builtin_ms_va_list";
29054 break;
29055 case 1:
29056 *ptree = sysv_va_list_type_node;
29057 *pname = "__builtin_sysv_va_list";
29058 break;
29059   default:
29060     return 0;
29061   }
29062   return 1;
29063 }
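/* A minimal sketch of how these entries can be walked (illustration only;
   the real consumer is c_common_nodes_and_builtins):

     const char *name;
     tree type;
     int i;
     for (i = 0; ix86_enum_va_list (i, &name, &type); i++)
       register_va_list_builtin (name, type);    (hypothetical helper)

   Each iteration yields __builtin_ms_va_list or __builtin_sysv_va_list on
   64-bit targets; on 32-bit targets the loop body never executes.  */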
29065 /* Initialize the GCC target structure. */
29066 #undef TARGET_RETURN_IN_MEMORY
29067 #define TARGET_RETURN_IN_MEMORY ix86_return_in_memory
29069 #undef TARGET_LEGITIMIZE_ADDRESS
29070 #define TARGET_LEGITIMIZE_ADDRESS ix86_legitimize_address
29072 #undef TARGET_ATTRIBUTE_TABLE
29073 #define TARGET_ATTRIBUTE_TABLE ix86_attribute_table
29074 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29075 # undef TARGET_MERGE_DECL_ATTRIBUTES
29076 # define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
29077 #endif
29079 #undef TARGET_COMP_TYPE_ATTRIBUTES
29080 #define TARGET_COMP_TYPE_ATTRIBUTES ix86_comp_type_attributes
29082 #undef TARGET_INIT_BUILTINS
29083 #define TARGET_INIT_BUILTINS ix86_init_builtins
29084 #undef TARGET_EXPAND_BUILTIN
29085 #define TARGET_EXPAND_BUILTIN ix86_expand_builtin
29087 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
29088 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
29089 ix86_builtin_vectorized_function
29091 #undef TARGET_VECTORIZE_BUILTIN_CONVERSION
29092 #define TARGET_VECTORIZE_BUILTIN_CONVERSION ix86_vectorize_builtin_conversion
29094 #undef TARGET_BUILTIN_RECIPROCAL
29095 #define TARGET_BUILTIN_RECIPROCAL ix86_builtin_reciprocal
29097 #undef TARGET_ASM_FUNCTION_EPILOGUE
29098 #define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
29100 #undef TARGET_ENCODE_SECTION_INFO
29101 #ifndef SUBTARGET_ENCODE_SECTION_INFO
29102 #define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info
29103 #else
29104 #define TARGET_ENCODE_SECTION_INFO SUBTARGET_ENCODE_SECTION_INFO
29105 #endif
29107 #undef TARGET_ASM_OPEN_PAREN
29108 #define TARGET_ASM_OPEN_PAREN ""
29109 #undef TARGET_ASM_CLOSE_PAREN
29110 #define TARGET_ASM_CLOSE_PAREN ""
29112 #undef TARGET_ASM_BYTE_OP
29113 #define TARGET_ASM_BYTE_OP ASM_BYTE
29115 #undef TARGET_ASM_ALIGNED_HI_OP
29116 #define TARGET_ASM_ALIGNED_HI_OP ASM_SHORT
29117 #undef TARGET_ASM_ALIGNED_SI_OP
29118 #define TARGET_ASM_ALIGNED_SI_OP ASM_LONG
29119 #ifdef ASM_QUAD
29120 #undef TARGET_ASM_ALIGNED_DI_OP
29121 #define TARGET_ASM_ALIGNED_DI_OP ASM_QUAD
29122 #endif
29124 #undef TARGET_ASM_UNALIGNED_HI_OP
29125 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
29126 #undef TARGET_ASM_UNALIGNED_SI_OP
29127 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
29128 #undef TARGET_ASM_UNALIGNED_DI_OP
29129 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
29131 #undef TARGET_SCHED_ADJUST_COST
29132 #define TARGET_SCHED_ADJUST_COST ix86_adjust_cost
29133 #undef TARGET_SCHED_ISSUE_RATE
29134 #define TARGET_SCHED_ISSUE_RATE ix86_issue_rate
29135 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
29136 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
29137 ia32_multipass_dfa_lookahead
29139 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
29140 #define TARGET_FUNCTION_OK_FOR_SIBCALL ix86_function_ok_for_sibcall
29142 #ifdef HAVE_AS_TLS
29143 #undef TARGET_HAVE_TLS
29144 #define TARGET_HAVE_TLS true
29145 #endif
29146 #undef TARGET_CANNOT_FORCE_CONST_MEM
29147 #define TARGET_CANNOT_FORCE_CONST_MEM ix86_cannot_force_const_mem
29148 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
29149 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
29151 #undef TARGET_DELEGITIMIZE_ADDRESS
29152 #define TARGET_DELEGITIMIZE_ADDRESS ix86_delegitimize_address
29154 #undef TARGET_MS_BITFIELD_LAYOUT_P
29155 #define TARGET_MS_BITFIELD_LAYOUT_P ix86_ms_bitfield_layout_p
29157 #if TARGET_MACHO
29158 #undef TARGET_BINDS_LOCAL_P
29159 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
29160 #endif
29161 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
29162 #undef TARGET_BINDS_LOCAL_P
29163 #define TARGET_BINDS_LOCAL_P i386_pe_binds_local_p
29164 #endif
29166 #undef TARGET_ASM_OUTPUT_MI_THUNK
29167 #define TARGET_ASM_OUTPUT_MI_THUNK x86_output_mi_thunk
29168 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
29169 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK x86_can_output_mi_thunk
29171 #undef TARGET_ASM_FILE_START
29172 #define TARGET_ASM_FILE_START x86_file_start
29174 #undef TARGET_DEFAULT_TARGET_FLAGS
29175 #define TARGET_DEFAULT_TARGET_FLAGS \
29176 (TARGET_DEFAULT \
29177 | TARGET_SUBTARGET_DEFAULT \
29178 | TARGET_TLS_DIRECT_SEG_REFS_DEFAULT)
29180 #undef TARGET_HANDLE_OPTION
29181 #define TARGET_HANDLE_OPTION ix86_handle_option
29183 #undef TARGET_RTX_COSTS
29184 #define TARGET_RTX_COSTS ix86_rtx_costs
29185 #undef TARGET_ADDRESS_COST
29186 #define TARGET_ADDRESS_COST ix86_address_cost
29188 #undef TARGET_FIXED_CONDITION_CODE_REGS
29189 #define TARGET_FIXED_CONDITION_CODE_REGS ix86_fixed_condition_code_regs
29190 #undef TARGET_CC_MODES_COMPATIBLE
29191 #define TARGET_CC_MODES_COMPATIBLE ix86_cc_modes_compatible
29193 #undef TARGET_MACHINE_DEPENDENT_REORG
29194 #define TARGET_MACHINE_DEPENDENT_REORG ix86_reorg
29196 #undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
29197 #define TARGET_BUILTIN_SETJMP_FRAME_VALUE ix86_builtin_setjmp_frame_value
29199 #undef TARGET_BUILD_BUILTIN_VA_LIST
29200 #define TARGET_BUILD_BUILTIN_VA_LIST ix86_build_builtin_va_list
29202 #undef TARGET_FN_ABI_VA_LIST
29203 #define TARGET_FN_ABI_VA_LIST ix86_fn_abi_va_list
29205 #undef TARGET_CANONICAL_VA_LIST_TYPE
29206 #define TARGET_CANONICAL_VA_LIST_TYPE ix86_canonical_va_list_type
29208 #undef TARGET_EXPAND_BUILTIN_VA_START
29209 #define TARGET_EXPAND_BUILTIN_VA_START ix86_va_start
29211 #undef TARGET_MD_ASM_CLOBBERS
29212 #define TARGET_MD_ASM_CLOBBERS ix86_md_asm_clobbers
29214 #undef TARGET_PROMOTE_PROTOTYPES
29215 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
29216 #undef TARGET_STRUCT_VALUE_RTX
29217 #define TARGET_STRUCT_VALUE_RTX ix86_struct_value_rtx
29218 #undef TARGET_SETUP_INCOMING_VARARGS
29219 #define TARGET_SETUP_INCOMING_VARARGS ix86_setup_incoming_varargs
29220 #undef TARGET_MUST_PASS_IN_STACK
29221 #define TARGET_MUST_PASS_IN_STACK ix86_must_pass_in_stack
29222 #undef TARGET_PASS_BY_REFERENCE
29223 #define TARGET_PASS_BY_REFERENCE ix86_pass_by_reference
29224 #undef TARGET_INTERNAL_ARG_POINTER
29225 #define TARGET_INTERNAL_ARG_POINTER ix86_internal_arg_pointer
29226 #undef TARGET_UPDATE_STACK_BOUNDARY
29227 #define TARGET_UPDATE_STACK_BOUNDARY ix86_update_stack_boundary
29228 #undef TARGET_GET_DRAP_RTX
29229 #define TARGET_GET_DRAP_RTX ix86_get_drap_rtx
29230 #undef TARGET_STRICT_ARGUMENT_NAMING
29231 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
29233 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
29234 #define TARGET_GIMPLIFY_VA_ARG_EXPR ix86_gimplify_va_arg
29236 #undef TARGET_SCALAR_MODE_SUPPORTED_P
29237 #define TARGET_SCALAR_MODE_SUPPORTED_P ix86_scalar_mode_supported_p
29239 #undef TARGET_VECTOR_MODE_SUPPORTED_P
29240 #define TARGET_VECTOR_MODE_SUPPORTED_P ix86_vector_mode_supported_p
29242 #undef TARGET_C_MODE_FOR_SUFFIX
29243 #define TARGET_C_MODE_FOR_SUFFIX ix86_c_mode_for_suffix
29245 #ifdef HAVE_AS_TLS
29246 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
29247 #define TARGET_ASM_OUTPUT_DWARF_DTPREL i386_output_dwarf_dtprel
29248 #endif
29250 #ifdef SUBTARGET_INSERT_ATTRIBUTES
29251 #undef TARGET_INSERT_ATTRIBUTES
29252 #define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
29253 #endif
29255 #undef TARGET_MANGLE_TYPE
29256 #define TARGET_MANGLE_TYPE ix86_mangle_type
29258 #undef TARGET_STACK_PROTECT_FAIL
29259 #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
29261 #undef TARGET_FUNCTION_VALUE
29262 #define TARGET_FUNCTION_VALUE ix86_function_value
29264 #undef TARGET_SECONDARY_RELOAD
29265 #define TARGET_SECONDARY_RELOAD ix86_secondary_reload
29267 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
29268 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST x86_builtin_vectorization_cost
29270 #undef TARGET_SET_CURRENT_FUNCTION
29271 #define TARGET_SET_CURRENT_FUNCTION ix86_set_current_function
29273 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
29274 #define TARGET_OPTION_VALID_ATTRIBUTE_P ix86_valid_target_attribute_p
29276 #undef TARGET_OPTION_SAVE
29277 #define TARGET_OPTION_SAVE ix86_function_specific_save
29279 #undef TARGET_OPTION_RESTORE
29280 #define TARGET_OPTION_RESTORE ix86_function_specific_restore
29282 #undef TARGET_OPTION_PRINT
29283 #define TARGET_OPTION_PRINT ix86_function_specific_print
29285 #undef TARGET_CAN_INLINE_P
29286 #define TARGET_CAN_INLINE_P ix86_can_inline_p
29288 #undef TARGET_EXPAND_TO_RTL_HOOK
29289 #define TARGET_EXPAND_TO_RTL_HOOK ix86_maybe_switch_abi
29291 #undef TARGET_LEGITIMATE_ADDRESS_P
29292 #define TARGET_LEGITIMATE_ADDRESS_P ix86_legitimate_address_p
29294 #undef TARGET_IRA_COVER_CLASSES
29295 #define TARGET_IRA_COVER_CLASSES i386_ira_cover_classes
29297 #undef TARGET_FRAME_POINTER_REQUIRED
29298 #define TARGET_FRAME_POINTER_REQUIRED ix86_frame_pointer_required
29300 #undef TARGET_CAN_ELIMINATE
29301 #define TARGET_CAN_ELIMINATE ix86_can_eliminate
29303 struct gcc_target targetm = TARGET_INITIALIZER;
29305 #include "gt-i386.h"